diff --git a/Dockerfile b/Dockerfile index 460d08c..832f558 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,29 +1,44 @@ -FROM python:3.11-slim +# Stage 1: Build environment +FROM python:3.11-slim AS build-stage -RUN pip install poetry +# Install build tools and Poetry +RUN apt-get update && apt-get install -y build-essential \ + && pip install poetry -# Set the working directory WORKDIR /app -# Install build tools for Snakemake (gcc, make, etc.) -RUN apt-get update && apt-get install -y build-essential +# Copy dependency files and install dependencies +COPY pyproject.toml poetry.lock /app/ +RUN poetry config virtualenvs.create false \ + && poetry install --no-interaction --no-ansi -# Copy the pyproject.toml file -COPY pyproject.toml /app/ +# Copy and install the application +COPY . /app +RUN poetry install -# Install the dependencies -RUN poetry install --no-root +# Stage 2: Snakemake runtime environment +FROM snakemake/snakemake:latest -# Copy the rest of the application files -COPY . /app +# Install Poetry +RUN pip install poetry -# Install the package -RUN poetry install +WORKDIR /app + +# Copy the application from the build stage +COPY --from=build-stage /app /app + +# Install dependencies +RUN pip install -r <(poetry export --format requirements.txt --without-hashes) \ + && pip install -e . 
+ +# Set up non-root user +RUN groupadd -r snakemake && useradd -r -g snakemake snakemake \ + && chown -R snakemake:snakemake /app -# Install Snakemake using Poetry -RUN poetry add snakemake +USER snakemake -# Set the entry point for the container -ENTRYPOINT ["poetry", "run"] +# Configure Python path +ENV PYTHONPATH="/app:${PYTHONPATH}" -CMD ["help"] +# Set the entry point +ENTRYPOINT ["snakemake"] \ No newline at end of file diff --git a/Snakefile b/Snakefile index bdf07b4..4089745 100644 --- a/Snakefile +++ b/Snakefile @@ -1,20 +1,13 @@ -# TODO - Refactor to input args to the Snakemake file -WORKFLOW_IDS = range(1,11) +from snakemake.io import directory + VERSIONS = ['1'] OUTPUT_DIRS = "data" MERGED_FILE = "merged.ttl" - - -def list_expected_files(): - files = [] - for wf_id in WORKFLOW_IDS: - for ver in VERSIONS: - files.append(f"{OUTPUT_DIRS}/{wf_id}_{ver}_ro-crate-metadata.json") - return files +ro_crate_metadata_dir = "ro-crate-metadata/" rule all: input: - MERGED_FILE + "ro-crate-metadata" rule source_ro_crates: output: """ # Create the output directory if it doesn't exist: mkdir -p {OUTPUT_DIRS} + + # Add the current directory to PYTHONPATH, creating it if it doesn't exist + export PYTHONPATH="${{PYTHONPATH:+$PYTHONPATH:}}$(pwd)" - # Run the source_crates script to download the RO Crate metadata: - python workflowhub_graph/source_crates.py --workflow-ids 1-10 --prod --all-versions - - # After sourcing, check which files were actually created: - python workflowhub_graph/check_outputs.py --workflow-ids 1-10 --versions {VERSIONS} --output-dir {OUTPUT_DIRS} + # Run the source_crates script to download the RO Crate metadata, + # then check the output files and generate created_files.json: + + # - all versions of all workflows: + # python workflowhub_graph/source_crates.py --prod --all-versions + # python workflowhub_graph/check_outputs.py --versions {VERSIONS} --output-dir {OUTPUT_DIRS} + + # - all versions of first 20 
workflows: + python workflowhub_graph/source_crates.py --workflow-ids 1-20 --prod --all-versions + python workflowhub_graph/check_outputs.py --workflow-ids 1-20 --versions {VERSIONS} --output-dir {OUTPUT_DIRS} """ rule report_created_files: @@ -65,3 +66,36 @@ rule merge_files: shell(f""" python workflowhub_graph/merge.py {output[0]} -p "data/*.json" """) + +rule create_ro_crate: + input: + MERGED_FILE + params: + workflow_file = "Snakefile" + output: + directory("ro-crate-metadata/") + shell: + """ + # Create a new virtual environment + python -m venv rocrate_env + + # Activate the virtual environment + source rocrate_env/bin/activate + + # Upgrade pip to avoid any potential issues + pip install --upgrade pip + + # pip uninstall urllib3 + + # Install required packages + pip install requests urllib3 rocrate rocrate-zenodo + + # Run the create_ro_crate script + python workflowhub_graph/create_ro_crate.py {input} {params.workflow_file} {output} + + # Deactivate the virtual environment + deactivate + + # Remove the virtual environment to clean up + rm -rf rocrate_env + """ diff --git a/merged.ttl b/merged.ttl index 7e5b935..c040502 100644 --- a/merged.ttl +++ b/merged.ttl @@ -1,868 +1,76848 @@ @prefix dct: . @prefix ns1: . +@prefix ns2: . +@prefix rel: . @prefix schema1: . @prefix xsd: . - a schema1:CreativeWork ; + a schema1:CreativeWork ; dct:conformsTo ; - schema1:about . + schema1:about . - a schema1:CreativeWork ; - schema1:about . + a schema1:CreativeWork ; + schema1:about . - a schema1:CreativeWork ; + a schema1:CreativeWork ; dct:conformsTo ; - schema1:about . + schema1:about . - a schema1:CreativeWork ; - schema1:about . + a schema1:CreativeWork ; + schema1:about . - a schema1:CreativeWork ; + a schema1:CreativeWork ; dct:conformsTo ; - schema1:about . + schema1:about . - a schema1:CreativeWork ; - schema1:about . + a schema1:CreativeWork ; + schema1:about . - a schema1:CreativeWork ; + a schema1:CreativeWork ; dct:conformsTo ; - schema1:about . 
+ schema1:about . - a schema1:CreativeWork ; - schema1:about . + a schema1:CreativeWork ; + schema1:about . - a schema1:CreativeWork ; + a schema1:CreativeWork ; dct:conformsTo ; - schema1:about . + schema1:about . - a schema1:CreativeWork ; - schema1:about . + a schema1:CreativeWork ; + schema1:about . - a schema1:CreativeWork ; + a schema1:CreativeWork ; dct:conformsTo ; - schema1:about . + schema1:about . - a schema1:CreativeWork ; - schema1:about . + a schema1:CreativeWork ; + schema1:about . - a schema1:CreativeWork ; + a schema1:CreativeWork ; dct:conformsTo ; - schema1:about . + schema1:about . - a schema1:CreativeWork ; - schema1:about . + a schema1:CreativeWork ; + schema1:about . - a schema1:CreativeWork ; - dct:conformsTo ; - schema1:about . + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/MaxMTpc" . - a schema1:CreativeWork ; - schema1:about . + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/MinCountPerCell" . - a schema1:CreativeWork ; - dct:conformsTo ; - schema1:about . + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/MinGenesPerCell" . - a schema1:CreativeWork ; - schema1:about . + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/genecount_qc_plot" . - a schema1:Person ; - schema1:name "Dannon Baker" . + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/mito_qc_plot" . - a schema1:Person ; - schema1:name "Björn Grüning" . + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/qc_anndata_object" . - a schema1:Person ; - schema1:name "Delphine Larivière" . + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/top_genes_plot" . 
- a schema1:Person ; - schema1:name "Gildas Le Corguillé" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Andrew Lonie" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Nicholas Keener" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Sergei Kosakovsky Pond" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Wolfgang Maier" . + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . - a schema1:Person ; - schema1:name "Anton Nekrutenko" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "James Taylor" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Steven Weaver" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Marius van den Beek" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Dave Bouvier" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "John Chilton" . + a schema1:Person ; + schema1:name "Anna Syme" . - a schema1:Person ; - schema1:name "Nate Coraor" . + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . - a schema1:Person ; - schema1:name "Frederik Coppens" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Bert Droesbeke" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Ignacio Eguinoa" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Simon Gladman" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:ComputerLanguage ; - schema1:alternateName "CWL" ; - schema1:identifier ; - schema1:name "Common Workflow Language" ; - schema1:url . + a schema1:CreativeWork ; + schema1:about . 
- a schema1:ComputerLanguage ; - schema1:identifier ; - schema1:name "Galaxy" ; - schema1:url . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Dannon Baker" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Björn Grüning" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Delphine Larivière" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Gildas Le Corguillé" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Andrew Lonie" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Nicholas Keener" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Sergei Kosakovsky Pond" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Wolfgang Maier" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Anton Nekrutenko" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "James Taylor" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Steven Weaver" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Marius van den Beek" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Dave Bouvier" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "John Chilton" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Nate Coraor" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Frederik Coppens" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Bert Droesbeke" . 
+ a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Ignacio Eguinoa" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Simon Gladman" . + a schema1:CreativeWork ; + schema1:about . - a schema1:ComputerLanguage ; - schema1:alternateName "CWL" ; - schema1:identifier ; - schema1:name "Common Workflow Language" ; - schema1:url . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:ComputerLanguage ; - schema1:identifier ; - schema1:name "Galaxy" ; - schema1:url . + a schema1:CreativeWork ; + schema1:about . - a ; - dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "NC_045512" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Dannon Baker" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Björn Grüning" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Delphine Larivière" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Gildas Le Corguillé" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Andrew Lonie" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Nicholas Keener" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Sergei Kosakovsky Pond" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Wolfgang Maier" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Anton Nekrutenko" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "James Taylor" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Steven Weaver" . 
+ a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Marius van den Beek" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Dave Bouvier" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "John Chilton" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Nate Coraor" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Frederik Coppens" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Bert Droesbeke" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Ignacio Eguinoa" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Simon Gladman" . + a schema1:CreativeWork ; + schema1:about . - a schema1:ComputerLanguage ; - schema1:alternateName "CWL" ; - schema1:identifier ; - schema1:name "Common Workflow Language" ; - schema1:url . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:ComputerLanguage ; - schema1:identifier ; - schema1:name "Galaxy" ; - schema1:url . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Dannon Baker" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Björn Grüning" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Delphine Larivière" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Gildas Le Corguillé" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Andrew Lonie" . + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . - a schema1:Person ; - schema1:name "Nicholas Keener" . + a schema1:CreativeWork ; + schema1:about . 
- a schema1:Person ; - schema1:name "Sergei Kosakovsky Pond" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Wolfgang Maier" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Anton Nekrutenko" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "James Taylor" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Steven Weaver" . + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . - a schema1:Person ; - schema1:name "Marius van den Beek" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Dave Bouvier" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "John Chilton" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Nate Coraor" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Frederik Coppens" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Bert Droesbeke" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Ignacio Eguinoa" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Simon Gladman" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:ComputerLanguage ; - schema1:alternateName "CWL" ; - schema1:identifier ; - schema1:name "Common Workflow Language" ; - schema1:url . + a schema1:CreativeWork ; + schema1:about . - a schema1:ComputerLanguage ; - schema1:identifier ; - schema1:name "Galaxy" ; - schema1:url . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Dannon Baker" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Björn Grüning" . 
+ a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Delphine Larivière" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Gildas Le Corguillé" . + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#Amel_4.5_scaffolds.fa.gz" . - a schema1:Person ; - schema1:name "Andrew Lonie" . + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#amel_OGSv3.2.gff3.gz" . - a schema1:Person ; - schema1:name "Nicholas Keener" . + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#forager.bw" . - a schema1:Person ; - schema1:name "Sergei Kosakovsky Pond" . + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#forager_Amel4.5_accepted_hits.bam" . - a schema1:Person ; - schema1:name "Wolfgang Maier" . + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . - a schema1:Person ; - schema1:name "Anton Nekrutenko" . + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_6" . - a schema1:Person ; - schema1:name "James Taylor" . + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_7" . - a schema1:Person ; - schema1:name "Steven Weaver" . + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_8" . - a schema1:Person ; - schema1:name "Marius van den Beek" . + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_9" . - a schema1:Person ; - schema1:name "Dave Bouvier" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
- a schema1:Person ; - schema1:name "John Chilton" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Nate Coraor" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Frederik Coppens" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Bert Droesbeke" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Ignacio Eguinoa" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Simon Gladman" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:ComputerLanguage ; - schema1:alternateName "CWL" ; - schema1:identifier ; - schema1:name "Common Workflow Language" ; - schema1:url . + a schema1:CreativeWork ; + schema1:about . - a schema1:ComputerLanguage ; - schema1:identifier ; - schema1:name "Galaxy" ; - schema1:url . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Dannon Baker" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Björn Grüning" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Delphine Larivière" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Gildas Le Corguillé" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Andrew Lonie" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Nicholas Keener" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Sergei Kosakovsky Pond" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Wolfgang Maier" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Anton Nekrutenko" . + a schema1:CreativeWork ; + schema1:about . 
- a schema1:Person ; - schema1:name "James Taylor" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Steven Weaver" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Marius van den Beek" . + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "bruno.depaulakinoshita@bsc.es" ; + schema1:identifier "bruno.depaulakinoshita@bsc.es" ; + schema1:url "https://orcid.org/0000-0001-8250-4074" . - a schema1:Person ; - schema1:name "Dave Bouvier" . + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about ; + schema1:author . - a schema1:Person ; - schema1:name "John Chilton" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Nate Coraor" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Frederik Coppens" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Bert Droesbeke" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Ignacio Eguinoa" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Simon Gladman" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:ComputerLanguage ; - schema1:alternateName "CWL" ; - schema1:identifier ; - schema1:name "Common Workflow Language" ; - schema1:url . + a schema1:CreativeWork ; + schema1:about . - a schema1:ComputerLanguage ; - schema1:identifier ; - schema1:name "Galaxy" ; - schema1:url . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Dannon Baker" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Björn Grüning" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Delphine Larivière" . + a schema1:CreativeWork ; + schema1:about . 
- a schema1:Person ; - schema1:name "Gildas Le Corguillé" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Andrew Lonie" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Nicholas Keener" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Sergei Kosakovsky Pond" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Wolfgang Maier" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Anton Nekrutenko" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "James Taylor" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Steven Weaver" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Marius van den Beek" . + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . - a schema1:Person ; - schema1:name "Dave Bouvier" . + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . - a schema1:Person ; - schema1:name "John Chilton" . + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . - a schema1:Person ; - schema1:name "Nate Coraor" . + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . - a schema1:Person ; - schema1:name "Frederik Coppens" . + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . - a schema1:Person ; - schema1:name "Bert Droesbeke" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
- a schema1:Person ; - schema1:name "Ignacio Eguinoa" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Simon Gladman" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:ComputerLanguage ; - schema1:alternateName "CWL" ; - schema1:identifier ; - schema1:name "Common Workflow Language" ; - schema1:url . + a schema1:CreativeWork ; + schema1:about . - a schema1:ComputerLanguage ; - schema1:identifier ; - schema1:name "Galaxy" ; - schema1:url . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:ComputerLanguage ; - schema1:alternateName "CWL" ; - schema1:identifier ; - schema1:name "Common Workflow Language" ; - schema1:url . + a schema1:CreativeWork ; + schema1:about . - a ; - dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "auto_kmer_choice" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a ; - dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "careful" . + a schema1:CreativeWork ; + schema1:about . - a ; - dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "cov_cutoff" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a ; - dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "cov_state" . + a schema1:CreativeWork ; + schema1:about . - a ; - dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "fastq1" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a ; - dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "fastq1_type" . + a schema1:CreativeWork ; + schema1:about . - a ; - dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "fastq2" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
- a ; - dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "fastq2_type" . + a schema1:CreativeWork ; + schema1:about . - a ; - dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "fastq_file_type" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a ; - dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "iontorrent" . + a schema1:CreativeWork ; + schema1:about . - a ; - dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "kmers" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a ; - dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "libraries_fwd_rev" . + a schema1:CreativeWork ; + schema1:about . - a ; + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "libraries_metadata" . + schema1:name "#main/_anonymous_output_1" . - a ; + a ; dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "libraries_mono" . + schema1:name "#main/_anonymous_output_2" . - a ; + a ; dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "mode" . + schema1:name "#main/myamber_to_pdb.pdb" . - a ; + a ; dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "nanopore_reads" . + schema1:name "#main/mycanonical_fasta.fasta" . - a ; + a ; dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "onlyassembler" . + schema1:name "#main/myextract_chain.pdb" . - a ; + a ; dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "pacbio_reads" . + schema1:name "#main/myextract_model.pdb" . - a ; + a ; dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "sanger_reads" . + schema1:name "#main/myfix_altlocs.pdb" . - a ; + a ; dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "sc" . + schema1:name "#main/myfix_amides.pdb" . - a ; + a ; dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "trusted_contigs" . + schema1:name "#main/myfix_backbone.pdb" . - a ; + a ; dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "untrusted_contigs" . + schema1:name "#main/myfix_chirality.pdb" . 
- a ; + a ; dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "all_log_spades" . + schema1:name "#main/myfix_side_chain.pdb" . - a ; + a ; dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "assembly_graph_spades" . + schema1:name "#main/myfix_ssbonds.pdb" . - a ; + a ; dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "assembly_graph_unicycler" . + schema1:name "#main/myleap_gen_top.crd" . - a ; + a ; dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "assembly_graph_with_scaffolds_spades" . + schema1:name "#main/myleap_gen_top.pdb" . - a ; + a ; dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "assembly_image_spades" . + schema1:name "#main/myleap_gen_top.top" . - a ; + a ; dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "assembly_image_unicycler" . + schema1:name "#main/mypdb.pdb" . - a ; + a ; dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "assembly_info_spades" . + schema1:name "#main/myreduce_remove_hydrogens.pdb" . - a ; + a ; dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "assembly_info_unicycler" . + schema1:name "#main/myremove_molecules.pdb" . - a ; + a ; dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "assembly_unicycler" . + schema1:name "#main/myremove_pdb_water.pdb" . - a ; + a ; dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "out_contig_stats_spades" . + schema1:name "#main/mysander_mdrun.cpout" . - a ; + a ; dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "out_contigs_spades" . + schema1:name "#main/mysander_mdrun.cprst" . 
- a ; + a ; dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "out_scaffold_stats_spades" . + schema1:name "#main/mysander_mdrun.crd" . - a ; + a ; dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; - schema1:name "out_scaffolds_spades" . + schema1:name "#main/mysander_mdrun.log" . - a schema1:Person ; - schema1:name "Dannon Baker" . + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/mysander_mdrun.mdinfo" . - a schema1:Person ; - schema1:name "Björn Grüning" . + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/mysander_mdrun.rst" . - a schema1:Person ; - schema1:name "Delphine Larivière" . + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/mystructure_check.json" . - a schema1:Person ; - schema1:name "Gildas Le Corguillé" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Andrew Lonie" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Nicholas Keener" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Sergei Kosakovsky Pond" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Wolfgang Maier" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Anton Nekrutenko" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "James Taylor" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Steven Weaver" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Marius van den Beek" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Dave Bouvier" . 
+ a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "John Chilton" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Nate Coraor" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Frederik Coppens" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Bert Droesbeke" . + a schema1:CreativeWork ; + schema1:about . - a schema1:Person ; - schema1:name "Ignacio Eguinoa" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Person ; - schema1:name "Simon Gladman" . + a schema1:CreativeWork ; + schema1:about . - a schema1:ComputerLanguage ; - schema1:alternateName "CWL" ; - schema1:identifier ; - schema1:name "Common Workflow Language" ; - schema1:url . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:ComputerLanguage ; - schema1:identifier ; - schema1:name "Galaxy" ; - schema1:url . + a schema1:CreativeWork ; + schema1:about . - a schema1:Organization, - schema1:Project ; - schema1:name "CWL workflow SARS-CoV-2" . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Dataset ; - dct:conformsTo , - ; - schema1:author , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - ; - schema1:description "This workflow uses Illumina and Oxford Nanopore reads that were pre-processed to remove human-derived sequences. Two assembly tools are used: spades and unicycler. In addition to assemblies (actual sequences) the two tools produce assembly graphs that can be used for visualization of assembly with bandage. 
More info can be found at https://covid19.galaxyproject.org/genomics/" ; - schema1:hasPart , - , - ; - schema1:identifier "https://workflowhub.eu/workflows/5?version=1" ; - schema1:license "MIT" ; - schema1:mainEntity ; - schema1:name "Research Object Crate for Genomics - Assembly of the genome sequence" ; - schema1:sdDatePublished "2024-06-17 10:59:52 +0100" ; - schema1:url "https://workflowhub.eu/workflows/5/ro_crate?version=1" . + a schema1:CreativeWork ; + schema1:about . - a schema1:HowTo, - schema1:MediaObject, - schema1:SoftwareSourceCode ; - schema1:contentSize 4086 ; - schema1:programmingLanguage . + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:MediaObject, - schema1:SoftwareSourceCode, - ; - dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; - schema1:contentSize 15846 ; - schema1:creator , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - ; - schema1:dateCreated "2020-04-10T12:32:21Z" ; - schema1:dateModified "2023-01-16T13:39:53Z" ; - schema1:description "This workflow uses Illumina and Oxford Nanopore reads that were pre-processed to remove human-derived sequences. Two assembly tools are used: spades and unicycler. In addition to assemblies (actual sequences) the two tools produce assembly graphs that can be used for visualization of assembly with bandage. More info can be found at https://covid19.galaxyproject.org/genomics/" ; - schema1:image ; - schema1:keywords "covid-19" ; - schema1:license "https://spdx.org/licenses/MIT" ; - schema1:name "Genomics - Assembly of the genome sequence" ; - schema1:producer ; - schema1:programmingLanguage ; - schema1:sdPublisher ; - schema1:subjectOf ; - schema1:url "https://workflowhub.eu/workflows/5?version=1" ; - schema1:version 1 ; - ns1:input , - , - . + a schema1:CreativeWork ; + schema1:about . - a , - schema1:ImageObject, - schema1:MediaObject ; - schema1:contentSize 7739 . 
+ a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . - a schema1:Dataset ; + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:endTime "2021-03-22" ; + schema1:name "COMPSs RO-Crate automatically generated for Python applications" ; + schema1:object . + + a schema1:CreativeWork ; dct:conformsTo , ; - schema1:author , - , - , - , - , - , - , - , - , - , - , + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_14" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_27" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_36" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_37" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_40" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_41" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_42" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_43" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_44" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_45" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_46" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_47" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_48" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_49" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_50" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_51" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_52" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_53" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_54" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_9" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/coding_seqs.fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/genome.fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/non_coding_seqs.fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/bed_file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/no_orf_seqs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/orf_seqs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/orf_seqs_prob" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/orf_seqs_prob_best" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/out_file1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/out_gtf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output_pos" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:PropertyValue ; + schema1:name "DISLIB_GPU_AVAILABLE" ; + schema1:value "True" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:PropertyValue ; + schema1:name "COMPSS_HOME" ; + schema1:value "/apps/GPP/COMPSs/3.3/" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_MASTER_NODE" ; + schema1:value "gs11r2b04" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_MPIRUN_TYPE" ; + schema1:value "impi" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_WORKER_INIT_TIMEOUT" ; + schema1:value "360000" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_WORKER_NODES" ; + schema1:value "" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_ACCOUNT" ; + schema1:value "bsc19" . 
+ + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_CPUS_PER_NODE" ; + schema1:value "112" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_END_TIME" ; + schema1:value "1720186659" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_GID" ; + schema1:value "50000" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_ID" ; + schema1:value "3644777" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NAME" ; + schema1:value "kmeans_california" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NODELIST" ; + schema1:value "gs11r2b04" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NUM_NODES" ; + schema1:value "1" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_PARTITION" ; + schema1:value "gpp" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_QOS" ; + schema1:value "gp_debug" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_START_TIME" ; + schema1:value "1720179459" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_UID" ; + schema1:value "4486" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_USER" ; + schema1:value "bsc019756" . + + a schema1:PropertyValue ; + schema1:name "SLURM_MEM_PER_CPU" ; + schema1:value "2000" . + + a schema1:PropertyValue ; + schema1:name "SLURM_SUBMIT_DIR" ; + schema1:value "/gpfs/scratch/bsc19/bsc019756/Housing_Clustering" . + + a schema1:PropertyValue ; + schema1:name "SLURM_SUBMIT_HOST" ; + schema1:value "glogin1" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_9" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/Ploidy" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_7" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/genome_size" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/max_depth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/transition_parameter" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/genome.fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/Repeats_output_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output_log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output_masked_genome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output_repeat_catalog" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/seeds" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/sequences" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/merged_transcriptomes.fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/busco_sum" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/busco_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/out_bed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/out_cds" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/out_gff3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/out_lo_cds" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/out_lo_pep" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/out_pep" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/lineage" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/community" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/pure_culture" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/which_figures" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/figures" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/results" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/html_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output_feature_lengths" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output_short" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/conformer_output" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:PropertyValue ; + schema1:name "COMPSS_HOME" ; + schema1:value "/Users/rsirvent/opt/COMPSs/" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/lineage" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_10" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_23" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_9" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_10" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_23" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_36" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_37" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_40" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_41" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_42" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_43" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_44" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_45" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_8" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_9" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/Ploidy" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/genome_size" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/max_depth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/transition_parameter" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_6" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:Person ; + schema1:name "Anna Syme" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:PropertyValue ; + schema1:name "COMPSS_BINDINGS_DEBUG" ; + schema1:value "1" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_HOME" ; + schema1:value "/apps/GPP/COMPSs/3.3.1/" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_MASTER_NODE" ; + schema1:value "gs05r2b06" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_MPIRUN_TYPE" ; + schema1:value "impi" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_PYTHON_VERSION" ; + schema1:value "3.12.1" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_WORKER_INIT_TIMEOUT" ; + schema1:value "360000" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_WORKER_NODES" ; + schema1:value " gs05r2b07 gs05r2b10" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_ACCOUNT" ; + schema1:value "bsc19" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_CPUS_PER_NODE" ; + schema1:value "112(x3)" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_END_TIME" ; + schema1:value "1718717219" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_GID" ; + schema1:value "50000" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_ID" ; + schema1:value "3166653" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NAME" ; + schema1:value "sparseLU_prov" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NODELIST" ; + schema1:value "gs05r2b[06-07,10]" . 
+ + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NUM_NODES" ; + schema1:value "3" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_PARTITION" ; + schema1:value "gpp" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_QOS" ; + schema1:value "gp_debug" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_START_TIME" ; + schema1:value "1718716919" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_UID" ; + schema1:value "2952" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_USER" ; + schema1:value "bsc019057" . + + a schema1:PropertyValue ; + schema1:name "SLURM_MEM_PER_CPU" ; + schema1:value "2000" . + + a schema1:PropertyValue ; + schema1:name "SLURM_SUBMIT_DIR" ; + schema1:value "/gpfs/home/bsc/bsc019057/COMPSs-DP/tutorial_apps/java/sparseLU" . + + a schema1:PropertyValue ; + schema1:name "SLURM_SUBMIT_HOST" ; + schema1:value "glogin2" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:Person ; + schema1:name "Sánchez, I." . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:PropertyValue ; + schema1:name "COMPSS_HOME" ; + schema1:value "/Users/rsirvent/opt/COMPSs/" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:endTime "2021-03-22" ; + schema1:name "COMPSs RO-Crate automatically generated for Python applications" ; + schema1:object . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "javier.conejero@bsc.es" ; + schema1:identifier "javier.conejero@bsc.es" ; + schema1:url "https://orcid.org/0000-0001-6401-6229" . 
+ + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "poiata@ipgp.fr" ; + schema1:identifier "poiata@ipgp.fr" ; + schema1:url "https://orcid.org/0000-0002-6412-3179" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "satriano@ipgp.fr" ; + schema1:identifier "satriano@ipgp.fr" ; + schema1:url "https://orcid.org/0000-0002-3039-2530" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:Person ; + schema1:name "Melero, R." . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/analysis_metadata" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/bamqc_html_normal" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/bamqc_html_tumor" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/bamqc_normal_genome_results" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/bamqc_tumor_genome_results" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/fastqc_html_normal_fw_after" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/fastqc_html_normal_fw_before" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/fastqc_html_normal_rv_after" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/fastqc_html_normal_rv_before" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/fastqc_html_tumor_fw_after" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/fastqc_html_tumor_fw_before" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/fastqc_html_tumor_rv_after" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/fastqc_html_tumor_rv_before" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/multiqc_html_post_trim" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/multiqc_html_pre_trim" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:PropertyValue ; + schema1:name "DISLIB_GPU_AVAILABLE" ; + schema1:value "True" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/bd_ensemble.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/bd_ensemble.mdcrd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/bd_ensemble.pcz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/bd_ensemble_rmsd.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/bd_ensemble_rmsd.dcd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/bd_ensemble_uncompressed.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/bfactor_all.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/bfactor_all.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/dmd_ensemble.log" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/dmd_ensemble.mdcrd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/hinges_bfactor_report.json" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/mypdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/nma_ensemble.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/nma_ensemble.mdcrd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/pcz_collectivity.json" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/pcz_evecs.json" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/pcz_proj1.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/pcz_proj1.dcd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/pcz_report.json" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/pcz_stiffness.json" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/structure_ca.pdb" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/JBrowse" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/hisat2_summary_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output_alignments" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output_gtf" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:PropertyValue ; + schema1:name "DISLIB_GPU_AVAILABLE" ; + schema1:value "True" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/Reel_life_survey_fish_modif.tabular" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_14" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_9" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:PropertyValue ; + schema1:name "DISLIB_GPU_AVAILABLE" ; + schema1:value "True" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/Sample" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/Contigs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/Groups" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_8" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:Person ; + schema1:name "Sánchez, I." . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/mygodmd_prep.aln" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/origin-target.godmd.dcd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/origin-target.godmd.ene.out" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/origin-target.godmd.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/origin-target.godmd.mdcrd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/origin-target.godmd.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/origin.chains.nolig.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/origin.chains.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/origin.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/target.chains.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/target.pdb" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/Sample" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:Place ; + schema1:geo _:b42 ; + schema1:name "Europe" . + + a schema1:PropertyValue ; + schema1:description "Pseudoid of the person included in the cohort" ; + schema1:name "person_id" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort an essential worker? (at the time of entering the cohort)" ; + schema1:name "essential_worker_bl" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort institutionalized? (at the time of entering the cohort)" ; + schema1:name "institutionalized_bl" . + + a schema1:PropertyValue ; + schema1:description "Brand of first dose of the vaccine" ; + schema1:name "dose_1_brand_cd" . + + a schema1:PropertyValue ; + schema1:description "Date of the first dose of the vaccine" ; + schema1:name "dose_1_dt" . + + a schema1:PropertyValue ; + schema1:description "Brand of second dose of the vaccine" ; + schema1:name "dose_2_brand_cd" . + + a schema1:PropertyValue ; + schema1:description "Date of the second dose of the vaccine" ; + schema1:name "dose_2_dt" . + + a schema1:PropertyValue ; + schema1:description "Brand of third dose of the vaccine" ; + schema1:name "dose_3_brand_cd" . + + a schema1:PropertyValue ; + schema1:description "Date of the third dose of the vaccine" ; + schema1:name "dose_3_dt" . + + a schema1:PropertyValue ; + schema1:description "Number of doses (independently of the vaccine brand) received by a person during the period of study" ; + schema1:name "number_doses" . + + a schema1:PropertyValue ; + schema1:description "Date on which the person included in the cohort is considered fully vaccinated" ; + schema1:name "fully_vaccinated_dt" . 
+ + a schema1:PropertyValue ; + schema1:description "Age of the person included in the cohort (at the time of entering the cohort)" ; + schema1:maxValue 115 ; + schema1:minValue 5 ; + schema1:name "age_nm" ; + schema1:unitText "years" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort fully vaccinated?" ; + schema1:name "fully_vaccinated_bl" . + + a schema1:PropertyValue ; + schema1:description "Vaccine types received for 1st and 2nd dose concatenated with hyphen" ; + schema1:name "vaccination_schedule_cd" . + + a schema1:PropertyValue ; + schema1:description "Date of diagnosis or test result positive of the last known/confirmed infection" ; + schema1:name "confirmed_case_dt" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort a confirmed COVID-19 case?" ; + schema1:name "confirmed_case_bl" . + + a schema1:PropertyValue ; + schema1:description "If the person experienced a confirmed infection proceeding the last confirmed infection with more than 60 days, date of diagnosis of this previous infection" ; + schema1:name "previous_infection_dt" . + + a schema1:PropertyValue ; + schema1:description "Did the person experience a confirmed infection proceeding the last confirmed infection?" ; + schema1:name "previous_infection_bl" . + + a schema1:PropertyValue ; + schema1:description "Type of COVID-19 test used to detect the last known SARS-CoV-2 infection (i.e. first positive test for SARS-CoV-2)" ; + schema1:name "test_type_cd" . + + a schema1:PropertyValue ; + schema1:description "Identified variant (WHO label) for last known SARS-CoV-2 infection" ; + schema1:name "variant_cd" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from type II diabetes (at the time of entering the cohort)?" ; + schema1:name "diabetes_bl" . 
+ + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from obesity (at the time of entering the cohort)? " ; + schema1:name "obesity_bl" . + + a schema1:PropertyValue ; + schema1:description "Sex of the person included in the cohort (at the time of entering the cohort)" ; + schema1:name "sex_cd" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from heart failure (at the time of entering the cohort)?" ; + schema1:name "heart_failure_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from chronic lung disease (at the time of entering the cohort)?" ; + schema1:name "copd_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from solid cancer without metastatis (at the time of entering the cohort)?" ; + schema1:name "solid_tumor_without_metastasis_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from chronic kidney disease (at the time of entering the cohort)?" ; + schema1:name "chronic_kidney_disease_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from sickle cell disease (at the time of entering the cohort)?" ; + schema1:name "sickle_cell_disease_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from hypertension (at the time of entering the cohort)?" ; + schema1:name "hypertension_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from blood cancer (leukemia) (at the time of entering the cohort)?" ; + schema1:name "blood_cancer_bl" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort a transplant patient (i.e. solid organ transplantation) (at the time of entering the cohort)?" 
; + schema1:name "transplanted_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from a HIV infection (at the time of entering the cohort)?" ; + schema1:name "hiv_infection_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from a primary immuno deficiency disease (lymphoma) (at the time of entering the cohort)?" ; + schema1:name "primary_immunodeficiency_bl" . + + a schema1:PropertyValue ; + schema1:description "Socioeconomic level of the person in the cohort (at the time of entering the cohort)" ; + schema1:name "socecon_lvl_cd" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from drugs induced immunosuppression (at the time of entering the cohort)?" ; + schema1:name "immunosuppression_bl" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort pregnant (at the time of entering the cohort)?" ; + schema1:name "pregnancy_bl" . + + a schema1:GeoShape ; + schema1:box "66.78398 11.50546 44.4272 -15.84884" . + + a schema1:PropertyValue ; + schema1:description "Area of residence of the person included in the cohort (NUTS 3) (at the time of entering the cohort)" ; + schema1:name "residence_area_cd" . + + a schema1:PropertyValue ; + schema1:description "Country of residence of the person included in the cohort (at the time of entering the cohort)" ; + schema1:name "country_cd" . + + a schema1:PropertyValue ; + schema1:description "Is the country of residence different from the country that is doing the analyses?" ; + schema1:name "foreign_bl" . + + a schema1:PropertyValue ; + schema1:description "Date of death of the person (if the person has died)" ; + schema1:name "exitus_dt" . + + a schema1:PropertyValue ; + schema1:description "Date of death of the person (if the person has died during the period of study)" ; + schema1:name "exitus_bl" . 
+ + a schema1:MediaObject ; + schema1:encodingFormat "" ; + schema1:name "Dockerfile" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/rproj" ; + schema1:name "Analytical pipeline R project file (RPROJ)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/pdf" ; + schema1:name "Documentation analytical pipeline (PDF)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/yml" ; + schema1:name ".yml file listing dependencies (YML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/csv" ; + schema1:name "Synthetic dataset with 10k entries (CSV)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/r" ; + schema1:name "Installation required R packages (R)" . + + a schema1:Dataset ; + schema1:description "Analytical pipeline output using synthetic data with 10k entries" ; + schema1:hasPart , + , + , + , + , + ; + schema1:name "Output analytical pipeline" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report with dataset statistics, missing data profiles, potential alerts, and detailed per-variable information (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report summarizing compliance with the logic validation rules (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report summarizing used imputation methods and number of imputed values (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report to assess matching balance in the obtained study population (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report including the results of survival analysis in the unmatched study population, a table with baseline characteristics of the matched study population and CONSORT diagram (HTML)" . 
+ + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report including the analytic results for the meta-analysis (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/r" ; + schema1:name "Upload the csv file and create a DuckDB database file (R)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Exploration and Data Quality Assessment (DQA) of the original data mapped at each Data Hub (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Validation (applying logic validation rules) of the original data mapped at each Data Hub (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Imputation of missing values in the original data mapped at each Data Hub (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Iterative matching of the exposed and unexposed, and assessing covariate balance in matched population (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/r" ; + schema1:name "Iterative matching of the exposed and unexposed (R)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Descriptive analysis of the matched and unmatched study population (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Survival analysis of the matched study population (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Quarto script sourcing consecutive scripts of the analytical pipeline (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Quarto document providing a log of the individual code block executions (HTML)" . + + a schema1:Dataset ; + schema1:description "Consecutive scripts of the analytical pipeline" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + ; + schema1:name "Scripts analytical pipeline" . 
+ + a schema1:MediaObject ; + schema1:description "Interactive report showcasing the structural causal model (DAG) to answer the research question" ; + schema1:encodingFormat "text/html" ; + schema1:name "COVID-19 vaccine effectiveness causal model v.1.1.0 (HTML)" . + + a schema1:MediaObject ; + schema1:description "Quarto RMarkdown script to produce the structural causal model" ; + schema1:encodingFormat , + "text/markdown" ; + schema1:name "COVID-19 vaccine effectiveness causal model v.1.1.0 (QMD)" . + + a schema1:Dataset ; + schema1:about ; + schema1:hasPart , + , + , + , + ; + schema1:name "Metadata for Common data model specification" . + + a schema1:MediaObject ; + dct:conformsTo ; + schema1:encodingFormat "text/csv" . + + a schema1:MediaObject ; + dct:conformsTo ; + schema1:encodingFormat "text/csv" . + + a schema1:MediaObject ; + dct:conformsTo ; + schema1:encodingFormat "text/csv" . + + a schema1:MediaObject ; + dct:conformsTo ; + schema1:encodingFormat "text/csv" . + + a schema1:MediaObject ; + schema1:description "Machine-readable version" ; + schema1:encodingFormat "application/ld+json" ; + schema1:mainEntity ; + schema1:name "COVID-19 vaccine effectiveness data model specification dataspice (JSON)" . + + a schema1:DataDownload, + schema1:MediaObject ; + schema1:description "synthetic data set to test the causal model proposed by the baseline use case in BY-COVID WP5 assessing the effectiveness of the COVID-19 vaccine(s)" ; + schema1:encodingFormat "text/csv" ; + schema1:name "vaccine_effectiveness_synthetic_pop_10k_v.1.1.0" ; + schema1:sameAs , + . + + a schema1:Dataset ; + schema1:about ; + schema1:hasPart ; + schema1:name "Documentation for Common data model specification" . + + a schema1:MediaObject ; + schema1:about ; + schema1:description "Human-readable version (interactive report)" ; + schema1:encodingFormat "text/html" ; + schema1:name "COVID-19 vaccine effectiveness data model specification dataspice (HTML)" . 
+ + a schema1:MediaObject ; + schema1:description "Human-readable version (Excel)" ; + schema1:encodingFormat "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" ; + schema1:name "vaccine effectiveness data model specification (XLXS)" . + + a schema1:MediaObject ; + schema1:description "SARS-CoV-2 vaccine effectiveness assessment data management plan (machine-readable)" ; + schema1:encodingFormat "text/json" ; + schema1:name "Data management plan v.0.0.1 (JSON)" . + + a schema1:MediaObject ; + schema1:description "SARS-CoV-2 vaccine effectiveness assessment data management plan (human-readable)" ; + schema1:encodingFormat "text/pdf" ; + schema1:name "Data management plan v.0.0.1 (PDF)" . + + a schema1:MediaObject ; + schema1:description "SARS-CoV-2 vaccine effectiveness assessment study protocol" ; + schema1:encodingFormat "text/pdf" ; + schema1:name "Study protocol v.1.0.3 (PDF)" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo ; + schema1:conditionsOfAccess "The scripts (software) accompanying the data model specification are offered \"as-is\", without warranty, and disclaiming liability for damages resulting from using it." ; + schema1:description "Jupyter notebook with Python scripting and commenting to generate the synthetic dataset" ; + schema1:name "COVID-19 vaccine effectiveness synthetic dataset generation script (IPYNB)" ; + schema1:programmingLanguage ; + schema1:url "" ; + ns1:output . + + a schema1:MediaObject ; + schema1:encodingFormat "application/json" ; + schema1:name "pandas-profiling config" . + + a schema1:MediaObject ; + schema1:description "Interactive report of the exploratory data analysis (EDA) of the synthetic dataset" ; + schema1:encodingFormat "text/html" ; + schema1:name "COVID-19 vaccine effectiveness synthetic dataset EDA (HTML)" ; + schema1:version "1.1.0" . 
+ + a schema1:MediaObject ; + schema1:description "Machine-readable version of the exploratory data analysis (EDA) of the synthetic dataset" ; + schema1:encodingFormat "application/json" ; + schema1:name "COVID-19 vaccine effectiveness synthetic dataset EDA (JSON)" ; + schema1:version "1.1.0" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/csv" ; + schema1:name "Synthetic dataset with 650k entries" ; + schema1:version "1.1.1" . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:PropertyValue ; + schema1:name "COMPSS_BINDINGS_DEBUG" ; + schema1:value "1" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_HOME" ; + schema1:value "/apps/GPP/COMPSs/3.3/" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_MASTER_NODE" ; + schema1:value "gs06r3b72" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_MPIRUN_TYPE" ; + schema1:value "impi" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_PYTHON_VERSION" ; + schema1:value "3.12.1" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_WORKER_INIT_TIMEOUT" ; + schema1:value "120000" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_WORKER_NODES" ; + schema1:value "" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_ACCOUNT" ; + schema1:value "bsc19" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_CPUS_PER_NODE" ; + schema1:value "112" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_END_TIME" ; + schema1:value "1714482541" . 
+ + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_GID" ; + schema1:value "50000" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_ID" ; + schema1:value "1236485" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NAME" ; + schema1:value "matmul-DP" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NODELIST" ; + schema1:value "gs06r3b72" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NUM_NODES" ; + schema1:value "1" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_PARTITION" ; + schema1:value "gpp" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_QOS" ; + schema1:value "gp_debug" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_START_TIME" ; + schema1:value "1714482241" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_UID" ; + schema1:value "2952" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_USER" ; + schema1:value "bsc019057" . + + a schema1:PropertyValue ; + schema1:name "SLURM_MEM_PER_CPU" ; + schema1:value "2000" . + + a schema1:PropertyValue ; + schema1:name "SLURM_SUBMIT_DIR" ; + schema1:value "/gpfs/home/bsc/bsc019057/WorkflowHub/reproducible_matmul" . + + a schema1:PropertyValue ; + schema1:name "SLURM_SUBMIT_HOST" ; + schema1:value "glogin2" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:PropertyValue ; + schema1:name "COMPSS_HOME" ; + schema1:value "/apps/GPP/COMPSs/3.3.1/" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_MASTER_NODE" ; + schema1:value "gs10r3b56" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_MPIRUN_TYPE" ; + schema1:value "impi" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_NUM_CPUS" ; + schema1:value "4" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_PYTHON_VERSION" ; + schema1:value "3.12.1" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_THREADED_DESERIALIZATION" ; + schema1:value "True" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_WORKER_INIT_TIMEOUT" ; + schema1:value "360000" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_WORKER_NODES" ; + schema1:value " gs10r3b61 gs10r3b66" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_ACCOUNT" ; + schema1:value "bsc19" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_CPUS_PER_NODE" ; + schema1:value "112(x3)" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_END_TIME" ; + schema1:value "1720107267" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_GID" ; + schema1:value "50000" . 
+ + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_ID" ; + schema1:value "3618551" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NAME" ; + schema1:value "COMPSs" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NODELIST" ; + schema1:value "gs10r3b[56,61,66]" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NUM_NODES" ; + schema1:value "3" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_PARTITION" ; + schema1:value "gpp" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_QOS" ; + schema1:value "gp_debug" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_START_TIME" ; + schema1:value "1720100067" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_UID" ; + schema1:value "4373" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_USER" ; + schema1:value "bsc019959" . + + a schema1:PropertyValue ; + schema1:name "SLURM_MEM_PER_CPU" ; + schema1:value "2000" . + + a schema1:PropertyValue ; + schema1:name "SLURM_SUBMIT_DIR" ; + schema1:value "/gpfs/scratch/bsc19/bsc19959/randomsvd" . + + a schema1:PropertyValue ; + schema1:name "SLURM_SUBMIT_HOST" ; + schema1:value "glogin2" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:Place ; + schema1:geo _:b42 ; + schema1:name "Europe" . 
+ + a schema1:PropertyValue ; + schema1:description "Pseudoid of the person included in the cohort" ; + schema1:name "person_id" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort an essential worker? (at the time of entering the cohort)" ; + schema1:name "essential_worker_bl" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort institutionalized? (at the time of entering the cohort)" ; + schema1:name "institutionalized_bl" . + + a schema1:PropertyValue ; + schema1:description "Brand of first dose of the vaccine" ; + schema1:name "dose_1_brand_cd" . + + a schema1:PropertyValue ; + schema1:description "Date of the first dose of the vaccine" ; + schema1:name "dose_1_dt" . + + a schema1:PropertyValue ; + schema1:description "Brand of second dose of the vaccine" ; + schema1:name "dose_2_brand_cd" . + + a schema1:PropertyValue ; + schema1:description "Date of the second dose of the vaccine" ; + schema1:name "dose_2_dt" . + + a schema1:PropertyValue ; + schema1:description "Brand of third dose of the vaccine" ; + schema1:name "dose_3_brand_cd" . + + a schema1:PropertyValue ; + schema1:description "Date of the third dose of the vaccine" ; + schema1:name "dose_3_dt" . + + a schema1:PropertyValue ; + schema1:description "Number of doses (independently of the vaccine brand) received by a person during the period of study" ; + schema1:name "number_doses" . + + a schema1:PropertyValue ; + schema1:description "Date on which the person included in the cohort is considered fully vaccinated" ; + schema1:name "fully_vaccinated_dt" . + + a schema1:PropertyValue ; + schema1:description "Age of the person included in the cohort (at the time of entering the cohort)" ; + schema1:maxValue 115 ; + schema1:minValue 5 ; + schema1:name "age_nm" ; + schema1:unitText "years" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort fully vaccinated?" 
; + schema1:name "fully_vaccinated_bl" . + + a schema1:PropertyValue ; + schema1:description "Vaccine types received for 1st and 2nd dose concatenated with hyphen" ; + schema1:name "vaccination_schedule_cd" . + + a schema1:PropertyValue ; + schema1:description "Date of diagnosis or test result positive of the last known/confirmed infection" ; + schema1:name "confirmed_case_dt" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort a confirmed COVID-19 case?" ; + schema1:name "confirmed_case_bl" . + + a schema1:PropertyValue ; + schema1:description "If the person experienced a confirmed infection proceeding the last confirmed infection with more than 60 days, date of diagnosis of this previous infection" ; + schema1:name "previous_infection_dt" . + + a schema1:PropertyValue ; + schema1:description "Did the person experience a confirmed infection proceeding the last confirmed infection?" ; + schema1:name "previous_infection_bl" . + + a schema1:PropertyValue ; + schema1:description "Type of COVID-19 test used to detect the last known SARS-CoV-2 infection (i.e. first positive test for SARS-CoV-2)" ; + schema1:name "test_type_cd" . + + a schema1:PropertyValue ; + schema1:description "Identified variant (WHO label) for last known SARS-CoV-2 infection" ; + schema1:name "variant_cd" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from type II diabetes (at the time of entering the cohort)?" ; + schema1:name "diabetes_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from obesity (at the time of entering the cohort)? " ; + schema1:name "obesity_bl" . + + a schema1:PropertyValue ; + schema1:description "Sex of the person included in the cohort (at the time of entering the cohort)" ; + schema1:name "sex_cd" . 
+ + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from heart failure (at the time of entering the cohort)?" ; + schema1:name "heart_failure_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from chronic lung disease (at the time of entering the cohort)?" ; + schema1:name "copd_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from solid cancer without metastatis (at the time of entering the cohort)?" ; + schema1:name "solid_tumor_without_metastasis_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from chronic kidney disease (at the time of entering the cohort)?" ; + schema1:name "chronic_kidney_disease_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from sickle cell disease (at the time of entering the cohort)?" ; + schema1:name "sickle_cell_disease_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from hypertension (at the time of entering the cohort)?" ; + schema1:name "hypertension_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from blood cancer (leukemia) (at the time of entering the cohort)?" ; + schema1:name "blood_cancer_bl" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort a transplant patient (i.e. solid organ transplantation) (at the time of entering the cohort)?" ; + schema1:name "transplanted_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from a HIV infection (at the time of entering the cohort)?" ; + schema1:name "hiv_infection_bl" . 
+ + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from a primary immuno deficiency disease (lymphoma) (at the time of entering the cohort)?" ; + schema1:name "primary_immunodeficiency_bl" . + + a schema1:PropertyValue ; + schema1:description "Socioeconomic level of the person in the cohort (at the time of entering the cohort)" ; + schema1:name "socecon_lvl_cd" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from drugs induced immunosuppression (at the time of entering the cohort)?" ; + schema1:name "immunosuppression_bl" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort pregnant (at the time of entering the cohort)?" ; + schema1:name "pregnancy_bl" . + + a schema1:GeoShape ; + schema1:box "66.78398 11.50546 44.4272 -15.84884" . + + a schema1:PropertyValue ; + schema1:description "Area of residence of the person included in the cohort (NUTS 3) (at the time of entering the cohort)" ; + schema1:name "residence_area_cd" . + + a schema1:PropertyValue ; + schema1:description "Country of residence of the person included in the cohort (at the time of entering the cohort)" ; + schema1:name "country_cd" . + + a schema1:PropertyValue ; + schema1:description "Is the country of residence different from the country that is doing the analyses?" ; + schema1:name "foreign_bl" . + + a schema1:PropertyValue ; + schema1:description "Date of death of the person (if the person has died)" ; + schema1:name "exitus_dt" . + + a schema1:PropertyValue ; + schema1:description "Date of death of the person (if the person has died during the period of study)" ; + schema1:name "exitus_bl" . + + a schema1:MediaObject ; + schema1:encodingFormat "" ; + schema1:name "Dockerfile" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/rproj" ; + schema1:name "Analytical pipeline R project file (RPROJ)" . 
+ + a schema1:MediaObject ; + schema1:encodingFormat "text/pdf" ; + schema1:name "Documentation analytical pipeline (PDF)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/yml" ; + schema1:name ".yml file listing dependencies (YML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/csv" ; + schema1:name "Synthetic dataset with 10k entries (CSV)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/r" ; + schema1:name "Installation required R packages (R)" . + + a schema1:Dataset ; + schema1:description "Analytical pipeline output using synthetic data with 10k entries" ; + schema1:hasPart , + , + , + , + , + ; + schema1:name "Output analytical pipeline" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report with dataset statistics, missing data profiles, potential alerts, and detailed per-variable information (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report summarizing compliance with the logic validation rules (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report summarizing used imputation methods and number of imputed values (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report to assess matching balance in the obtained study population (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report including the results of survival analysis in the unmatched study population, a table with baseline characteristics of the matched study population and CONSORT diagram (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report including the analytic results for the meta-analysis (HTML)" . 
+ + a schema1:MediaObject ; + schema1:encodingFormat "text/r" ; + schema1:name "Upload the csv file and create a DuckDB database file (R)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Exploration and Data Quality Assessment (DQA) of the original data mapped at each Data Hub (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Validation (applying logic validation rules) of the original data mapped at each Data Hub (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Imputation of missing values in the original data mapped at each Data Hub (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Iterative matching of the exposed and unexposed, and assessing covariate balance in matched population (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/r" ; + schema1:name "Iterative matching of the exposed and unexposed (R)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Descriptive analysis of the matched and unmatched study population (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Survival analysis of the matched study population (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Quarto script sourcing consecutive scripts of the analytical pipeline (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Quarto document providing a log of the individual code block executions (HTML)" . + + a schema1:Dataset ; + schema1:description "Consecutive scripts of the analytical pipeline" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + ; + schema1:name "Scripts analytical pipeline" . 
+ + a schema1:MediaObject ; + schema1:description "Interactive report showcasing the structural causal model (DAG) to answer the research question" ; + schema1:encodingFormat "text/html" ; + schema1:name "COVID-19 vaccine effectiveness causal model v.1.1.0 (HTML)" . + + a schema1:MediaObject ; + schema1:description "Quarto RMarkdown script to produce the structural causal model" ; + schema1:encodingFormat , + "text/markdown" ; + schema1:name "COVID-19 vaccine effectiveness causal model v.1.1.0 (QMD)" . + + a schema1:Dataset ; + schema1:about ; + schema1:hasPart , + , + , + , + ; + schema1:name "Metadata for Common data model specification" . + + a schema1:MediaObject ; + dct:conformsTo ; + schema1:encodingFormat "text/csv" . + + a schema1:MediaObject ; + dct:conformsTo ; + schema1:encodingFormat "text/csv" . + + a schema1:MediaObject ; + dct:conformsTo ; + schema1:encodingFormat "text/csv" . + + a schema1:MediaObject ; + dct:conformsTo ; + schema1:encodingFormat "text/csv" . + + a schema1:MediaObject ; + schema1:description "Machine-readable version" ; + schema1:encodingFormat "application/ld+json" ; + schema1:mainEntity ; + schema1:name "COVID-19 vaccine effectiveness data model specification dataspice (JSON)" . + + a schema1:DataDownload, + schema1:MediaObject ; + schema1:description "synthetic data set to test the causal model proposed by the baseline use case in BY-COVID WP5 assessing the effectiveness of the COVID-19 vaccine(s)" ; + schema1:encodingFormat "text/csv" ; + schema1:name "vaccine_effectiveness_synthetic_pop_10k_v.1.1.0" ; + schema1:sameAs , + . + + a schema1:Dataset ; + schema1:about ; + schema1:hasPart ; + schema1:name "Documentation for Common data model specification" . + + a schema1:MediaObject ; + schema1:about ; + schema1:description "Human-readable version (interactive report)" ; + schema1:encodingFormat "text/html" ; + schema1:name "COVID-19 vaccine effectiveness data model specification dataspice (HTML)" . 
+ + a schema1:MediaObject ; + schema1:description "Human-readable version (Excel)" ; + schema1:encodingFormat "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" ; + schema1:name "vaccine effectiveness data model specification (XLXS)" . + + a schema1:MediaObject ; + schema1:description "SARS-CoV-2 vaccine effectiveness assessment data management plan (machine-readable)" ; + schema1:encodingFormat "text/json" ; + schema1:name "Data management plan v.0.0.1 (JSON)" . + + a schema1:MediaObject ; + schema1:description "SARS-CoV-2 vaccine effectiveness assessment data management plan (human-readable)" ; + schema1:encodingFormat "text/pdf" ; + schema1:name "Data management plan v.0.0.1 (PDF)" . + + a schema1:MediaObject ; + schema1:description "SARS-CoV-2 vaccine effectiveness assessment study protocol" ; + schema1:encodingFormat "text/pdf" ; + schema1:name "Study protocol v.1.0.3 (PDF)" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo ; + schema1:conditionsOfAccess "The scripts (software) accompanying the data model specification are offered \"as-is\", without warranty, and disclaiming liability for damages resulting from using it." ; + schema1:description "Jupyter notebook with Python scripting and commenting to generate the synthetic dataset" ; + schema1:name "COVID-19 vaccine effectiveness synthetic dataset generation script (IPYNB)" ; + schema1:programmingLanguage ; + schema1:url "" ; + ns1:output . + + a schema1:MediaObject ; + schema1:encodingFormat "application/json" ; + schema1:name "pandas-profiling config" . + + a schema1:MediaObject ; + schema1:description "Interactive report of the exploratory data analysis (EDA) of the synthetic dataset" ; + schema1:encodingFormat "text/html" ; + schema1:name "COVID-19 vaccine effectiveness synthetic dataset EDA (HTML)" ; + schema1:version "1.1.0" . 
+ + a schema1:MediaObject ; + schema1:description "Machine-readable version of the exploratory data analysis (EDA) of the synthetic dataset" ; + schema1:encodingFormat "application/json" ; + schema1:name "COVID-19 vaccine effectiveness synthetic dataset EDA (JSON)" ; + schema1:version "1.1.0" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/csv" ; + schema1:name "Synthetic dataset with 650k entries" ; + schema1:version "1.1.1" . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/genome.fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/Repeats_output_table" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output_log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output_masked_genome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output_repeat_catalog" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/seeds" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/sequences" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/QCFilteredAnnDataObject" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/1k_cell_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/1k_gene_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/marker_dot_plot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/marker_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/processed_anndata_object" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/umap_cluster_plot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/umap_sample_plot" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/SENTINEL2A_20230210-111817-461_L2A_T30TWS_D.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/SENTINEL2A_20230214-105638-781_L2A_T31UET_D.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_15" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_9" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:Place ; + schema1:geo _:b42 ; + schema1:name "Europe" . + + a schema1:PropertyValue ; + schema1:description "Pseudoid of the person included in the cohort" ; + schema1:name "person_id" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort an essential worker? (at the time of entering the cohort)" ; + schema1:name "essential_worker_bl" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort institutionalized? 
(at the time of entering the cohort)" ; + schema1:name "institutionalized_bl" . + + a schema1:PropertyValue ; + schema1:description "Brand of first dose of the vaccine" ; + schema1:name "dose_1_brand_cd" . + + a schema1:PropertyValue ; + schema1:description "Date of the first dose of the vaccine" ; + schema1:name "dose_1_dt" . + + a schema1:PropertyValue ; + schema1:description "Brand of second dose of the vaccine" ; + schema1:name "dose_2_brand_cd" . + + a schema1:PropertyValue ; + schema1:description "Date of the second dose of the vaccine" ; + schema1:name "dose_2_dt" . + + a schema1:PropertyValue ; + schema1:description "Brand of third dose of the vaccine" ; + schema1:name "dose_3_brand_cd" . + + a schema1:PropertyValue ; + schema1:description "Date of the third dose of the vaccine" ; + schema1:name "dose_3_dt" . + + a schema1:PropertyValue ; + schema1:description "Number of doses (independently of the vaccine brand) received by a person during the period of study" ; + schema1:name "number_doses" . + + a schema1:PropertyValue ; + schema1:description "Date on which the person included in the cohort is considered fully vaccinated" ; + schema1:name "fully_vaccinated_dt" . + + a schema1:PropertyValue ; + schema1:description "Age of the person included in the cohort (at the time of entering the cohort)" ; + schema1:maxValue 115 ; + schema1:minValue 5 ; + schema1:name "age_nm" ; + schema1:unitText "years" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort fully vaccinated?" ; + schema1:name "fully_vaccinated_bl" . + + a schema1:PropertyValue ; + schema1:description "Vaccine types received for 1st and 2nd dose concatenated with hyphen" ; + schema1:name "vaccination_schedule_cd" . + + a schema1:PropertyValue ; + schema1:description "Date of diagnosis or test result positive of the last known/confirmed infection" ; + schema1:name "confirmed_case_dt" . 
+ + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort a confirmed COVID-19 case?" ; + schema1:name "confirmed_case_bl" . + + a schema1:PropertyValue ; + schema1:description "If the person experienced a confirmed infection proceeding the last confirmed infection with more than 60 days, date of diagnosis of this previous infection" ; + schema1:name "previous_infection_dt" . + + a schema1:PropertyValue ; + schema1:description "Did the person experience a confirmed infection proceeding the last confirmed infection?" ; + schema1:name "previous_infection_bl" . + + a schema1:PropertyValue ; + schema1:description "Type of COVID-19 test used to detect the last known SARS-CoV-2 infection (i.e. first positive test for SARS-CoV-2)" ; + schema1:name "test_type_cd" . + + a schema1:PropertyValue ; + schema1:description "Identified variant (WHO label) for last known SARS-CoV-2 infection" ; + schema1:name "variant_cd" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from type II diabetes (at the time of entering the cohort)?" ; + schema1:name "diabetes_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from obesity (at the time of entering the cohort)? " ; + schema1:name "obesity_bl" . + + a schema1:PropertyValue ; + schema1:description "Sex of the person included in the cohort (at the time of entering the cohort)" ; + schema1:name "sex_cd" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from heart failure (at the time of entering the cohort)?" ; + schema1:name "heart_failure_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from chronic lung disease (at the time of entering the cohort)?" ; + schema1:name "copd_bl" . 
+ + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from solid cancer without metastatis (at the time of entering the cohort)?" ; + schema1:name "solid_tumor_without_metastasis_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from chronic kidney disease (at the time of entering the cohort)?" ; + schema1:name "chronic_kidney_disease_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from sickle cell disease (at the time of entering the cohort)?" ; + schema1:name "sickle_cell_disease_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from hypertension (at the time of entering the cohort)?" ; + schema1:name "hypertension_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from blood cancer (leukemia) (at the time of entering the cohort)?" ; + schema1:name "blood_cancer_bl" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort a transplant patient (i.e. solid organ transplantation) (at the time of entering the cohort)?" ; + schema1:name "transplanted_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from a HIV infection (at the time of entering the cohort)?" ; + schema1:name "hiv_infection_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from a primary immuno deficiency disease (lymphoma) (at the time of entering the cohort)?" ; + schema1:name "primary_immunodeficiency_bl" . + + a schema1:PropertyValue ; + schema1:description "Socioeconomic level of the person in the cohort (at the time of entering the cohort)" ; + schema1:name "socecon_lvl_cd" . 
+ + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from drugs induced immunosuppression (at the time of entering the cohort)?" ; + schema1:name "immunosuppression_bl" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort pregnant (at the time of entering the cohort)?" ; + schema1:name "pregnancy_bl" . + + a schema1:GeoShape ; + schema1:box "66.78398 11.50546 44.4272 -15.84884" . + + a schema1:PropertyValue ; + schema1:description "Area of residence of the person included in the cohort (NUTS 3) (at the time of entering the cohort)" ; + schema1:name "residence_area_cd" . + + a schema1:PropertyValue ; + schema1:description "Country of residence of the person included in the cohort (at the time of entering the cohort)" ; + schema1:name "country_cd" . + + a schema1:PropertyValue ; + schema1:description "Is the country of residence different from the country that is doing the analyses?" ; + schema1:name "foreign_bl" . + + a schema1:PropertyValue ; + schema1:description "Date of death of the person (if the person has died)" ; + schema1:name "exitus_dt" . + + a schema1:PropertyValue ; + schema1:description "Date of death of the person (if the person has died during the period of study)" ; + schema1:name "exitus_bl" . + + a schema1:MediaObject ; + schema1:encodingFormat "" ; + schema1:name "Dockerfile" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/rproj" ; + schema1:name "Analytical pipeline R project file (RPROJ)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/pdf" ; + schema1:name "Documentation analytical pipeline (PDF)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/yml" ; + schema1:name ".yml file listing dependencies (YML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/csv" ; + schema1:name "Synthetic dataset with 10k entries (CSV)" . 
+ + a schema1:MediaObject ; + schema1:encodingFormat "text/r" ; + schema1:name "Installation required R packages (R)" . + + a schema1:Dataset ; + schema1:description "Analytical pipeline output using synthetic data with 10k entries" ; + schema1:hasPart , + , + , + , + , + ; + schema1:name "Output analytical pipeline" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report with dataset statistics, missing data profiles, potential alerts, and detailed per-variable information (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report summarizing compliance with the logic validation rules (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report summarizing used imputation methods and number of imputed values (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report to assess matching balance in the obtained study population (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report including the results of survival analysis in the unmatched study population, a table with baseline characteristics of the matched study population and CONSORT diagram (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report including the analytic results for the meta-analysis (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/r" ; + schema1:name "Upload the csv file and create a DuckDB database file (R)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Exploration and Data Quality Assessment (DQA) of the original data mapped at each Data Hub (QMD)" . 
+ + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Validation (applying logic validation rules) of the original data mapped at each Data Hub (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Imputation of missing values in the original data mapped at each Data Hub (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Iterative matching of the exposed and unexposed, and assessing covariate balance in matched population (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/r" ; + schema1:name "Iterative matching of the exposed and unexposed (R)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Descriptive analysis of the matched and unmatched study population (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Survival analysis of the matched study population (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Quarto script sourcing consecutive scripts of the analytical pipeline (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Quarto document providing a log of the individual code block executions (HTML)" . + + a schema1:Dataset ; + schema1:description "Consecutive scripts of the analytical pipeline" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + ; + schema1:name "Scripts analytical pipeline" . + + a schema1:MediaObject ; + schema1:description "Interactive report showcasing the structural causal model (DAG) to answer the research question" ; + schema1:encodingFormat "text/html" ; + schema1:name "COVID-19 vaccine effectiveness causal model v.1.1.0 (HTML)" . 
+ + a schema1:MediaObject ; + schema1:description "Quarto RMarkdown script to produce the structural causal model" ; + schema1:encodingFormat , + "text/markdown" ; + schema1:name "COVID-19 vaccine effectiveness causal model v.1.1.0 (QMD)" . + + a schema1:Dataset ; + schema1:about ; + schema1:hasPart , + , + , + , + ; + schema1:name "Metadata for Common data model specification" . + + a schema1:MediaObject ; + dct:conformsTo ; + schema1:encodingFormat "text/csv" . + + a schema1:MediaObject ; + dct:conformsTo ; + schema1:encodingFormat "text/csv" . + + a schema1:MediaObject ; + dct:conformsTo ; + schema1:encodingFormat "text/csv" . + + a schema1:MediaObject ; + dct:conformsTo ; + schema1:encodingFormat "text/csv" . + + a schema1:MediaObject ; + schema1:description "Machine-readable version" ; + schema1:encodingFormat "application/ld+json" ; + schema1:mainEntity ; + schema1:name "COVID-19 vaccine effectiveness data model specification dataspice (JSON)" . + + a schema1:DataDownload, + schema1:MediaObject ; + schema1:description "synthetic data set to test the causal model proposed by the baseline use case in BY-COVID WP5 assessing the effectiveness of the COVID-19 vaccine(s)" ; + schema1:encodingFormat "text/csv" ; + schema1:name "vaccine_effectiveness_synthetic_pop_10k_v.1.1.0" ; + schema1:sameAs , + . + + a schema1:Dataset ; + schema1:about ; + schema1:hasPart ; + schema1:name "Documentation for Common data model specification" . + + a schema1:MediaObject ; + schema1:about ; + schema1:description "Human-readable version (interactive report)" ; + schema1:encodingFormat "text/html" ; + schema1:name "COVID-19 vaccine effectiveness data model specification dataspice (HTML)" . + + a schema1:MediaObject ; + schema1:description "Human-readable version (Excel)" ; + schema1:encodingFormat "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" ; + schema1:name "vaccine effectiveness data model specification (XLXS)" . 
+ + a schema1:MediaObject ; + schema1:description "SARS-CoV-2 vaccine effectiveness assessment data management plan (machine-readable)" ; + schema1:encodingFormat "text/json" ; + schema1:name "Data management plan v.0.0.1 (JSON)" . + + a schema1:MediaObject ; + schema1:description "SARS-CoV-2 vaccine effectiveness assessment data management plan (human-readable)" ; + schema1:encodingFormat "text/pdf" ; + schema1:name "Data management plan v.0.0.1 (PDF)" . + + a schema1:MediaObject ; + schema1:description "SARS-CoV-2 vaccine effectiveness assessment study protocol" ; + schema1:encodingFormat "text/pdf" ; + schema1:name "Study protocol v.1.0.3 (PDF)" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo ; + schema1:conditionsOfAccess "The scripts (software) accompanying the data model specification are offered \"as-is\", without warranty, and disclaiming liability for damages resulting from using it." ; + schema1:description "Jupyter notebook with Python scripting and commenting to generate the synthetic dataset" ; + schema1:name "COVID-19 vaccine effectiveness synthetic dataset generation script (IPYNB)" ; + schema1:programmingLanguage ; + schema1:url "" ; + ns1:output . + + a schema1:MediaObject ; + schema1:encodingFormat "application/json" ; + schema1:name "pandas-profiling config" . + + a schema1:MediaObject ; + schema1:description "Interactive report of the exploratory data analysis (EDA) of the synthetic dataset" ; + schema1:encodingFormat "text/html" ; + schema1:name "COVID-19 vaccine effectiveness synthetic dataset EDA (HTML)" ; + schema1:version "1.1.0" . + + a schema1:MediaObject ; + schema1:description "Machine-readable version of the exploratory data analysis (EDA) of the synthetic dataset" ; + schema1:encodingFormat "application/json" ; + schema1:name "COVID-19 vaccine effectiveness synthetic dataset EDA (JSON)" ; + schema1:version "1.1.0" . 
+ + a schema1:MediaObject ; + schema1:encodingFormat "text/csv" ; + schema1:name "Synthetic dataset with 650k entries" ; + schema1:version "1.1.1" . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:PropertyValue ; + schema1:name "DISLIB_GPU_AVAILABLE" ; + schema1:value "True" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/genome.fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/Repeats_output_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output_log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output_masked_genome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output_repeat_catalog" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/seeds" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/sequences" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:MediaObject ; + schema1:dateModified "2021-09-28T10:02:17.833Z" ; + schema1:description "Launcher for the md_list workflow." ; + schema1:name "md_launch.py" . 
+ + a schema1:MediaObject ; + schema1:dateModified "2021-09-28T10:02:17.833Z" ; + schema1:description "Launcher for the md_muts_sets and md_add_muts_wt workflows" ; + schema1:name "mdmut_launch.py" . + + a schema1:Dataset ; + schema1:dateModified "2021-09-28T10:02:17.833Z" ; + schema1:description "Molecular dynamics workflows" ; + schema1:hasPart , + , + , + , + ; + schema1:name "MD" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:author , + ; + schema1:dateModified "2021-09-03T00:00:00.000Z" ; + schema1:description "Applies a list of mutations over the initial structure obtaining a set of structures (initial structure + one mutation, initial structure + two mutations, initial structure + three mutations, ...). Finally performs a system setup and runs a molecular dynamics simulation for each of the structures in the set." ; + schema1:hasPart , + , + , + ; + schema1:isBasedOn ; + schema1:name "md_add_muts_wt.py" ; + schema1:programmingLanguage ; + schema1:runtimePlatform . + + a schema1:MediaObject ; + schema1:dateModified "2021-09-28T10:02:17.833Z" ; + schema1:description "Input and path configuration for md_list.py workflow" ; + schema1:name "md_list.yaml" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:author , + ; + schema1:dateModified "2021-09-03T00:00:00.000Z" ; + schema1:description "Performs a system setup and runs a molecular dynamics simulation for each one of the listed mutations in a given structure" ; + schema1:hasPart , + , + , + ; + schema1:isBasedOn ; + schema1:name "md_muts_sets.py" ; + schema1:programmingLanguage ; + schema1:runtimePlatform . + + a schema1:MediaObject ; + schema1:dateModified "2021-09-28T10:02:17.833Z" ; + schema1:description "Input and path configuration for md_muts_sets.py workflow" ; + schema1:name "md_muts_sets.yaml" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about ; + schema1:author ; + schema1:license . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_13" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_26" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_36" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_37" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_39" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_40" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_41" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_42" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_43" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_44" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_45" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_9" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_15" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_28" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_36" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_37" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_40" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_41" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_42" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_43" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_44" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_45" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_46" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_47" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_48" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_49" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_50" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_51" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_52" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_53" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_54" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_9" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/Sequences" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/Taxonomy" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_10" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_9" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:Person ; + schema1:name "Anna Syme" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "KNIME" ; + schema1:url . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:DefinedTerm ; + schema1:description "Tabular data represented as comma-separated values in a text file." ; + schema1:name "CSV" . + + a schema1:CreativeWork ; + schema1:name "Workflow RO-Crate Profile" ; + schema1:version "0.2.0" . + + a schema1:Organization ; + schema1:name "eScience Lab" ; + schema1:parentOrganization ; + schema1:url "https://esciencelab.org.uk/" . 
+ + a schema1:SoftwareApplication ; + schema1:license ; + schema1:name "BioExcel Building Blocks" ; + schema1:publisher ; + schema1:softwareHelp "https://mmb.irbbarcelona.org/biobb/documentation/source" ; + schema1:url "https://github.com/bioexcel/biobb" ; + schema1:version "3.6.0" . + + a schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:creator , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2021-06-23T10:42:46Z" ; + schema1:dateModified "2022-10-27T16:39:25Z" ; + schema1:description """ Joint multi-omics dimensionality reduction approaches for CAKUT data using peptidome and proteome data\r + \r + **Brief description**\r + In (Cantini et al. 2020), Cantini et al. evaluated 9 representative joint dimensionality reduction (jDR) methods for multi-omics integration and analysis and . The methods are Regularized Generalized Canonical Correlation Analysis (RGCCA), Multiple co-inertia analysis (MCIA), Multi-Omics Factor Analysis (MOFA), Multi-Study Factor Analysis (MSFA), iCluster, Integrative NMF (intNMF), Joint and Individual Variation Explained (JIVE), tensorial Independent Component Analysis (tICA), and matrix-tri-factorization (scikit-fusion) (Tenenhaus, Tenenhaus, and Groenen 2017; Bady et al. 2004; Argelaguet et al. 2018; De Vito et al. 2019; Shen, Olshen, and Ladanyi 2009; Chalise and Fridley 2017; Lock et al. 2013; Teschendorff et al. 2018; Žitnik and Zupan 2015).\r +\r +The authors provided their benchmarking procedure, multi-omics mix (momix), as Jupyter Notebook on GitHub (https://github.com/ComputationalSystemsBiology/momix-notebook) and project environment through Conda. In momix, the factorization methods are called from an R script, and parameters of the methods are also set in that script. We did not modify the parameters of the methods in the provided script. 
We set factor number to 2.\r +""" ; + schema1:keywords "rare diseases, workflow, Proteomics, protein, mirna prediction" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "EJP-RD WP13 case-study CAKUT momix analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/126?version=1" ; + schema1:version 1 . + + a schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:creator , + ; + schema1:dateCreated "2022-11-22T09:57:32Z" ; + schema1:dateModified "2022-11-22T09:59:30Z" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-protein-ligand-complex-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). 
\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/295?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Protein Ligand Complex MD Setup" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/295?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:creator , + , + ; + schema1:dateCreated "2022-12-14T16:05:01Z" ; + schema1:dateModified "2022-12-14T16:06:43Z" ; + schema1:description "An example workflow to allow users to run the Specimen Data Refinery tools on data provided in an input CSV file." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/373?version=1" ; + schema1:keywords "Default-SDR" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "De novo digitisation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/373?version=2" ; + schema1:version 2 ; + ns1:input , + ; + ns1:output , + , + , + . + + a schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:dateCreated "2024-07-10T14:17:34Z" ; + schema1:dateModified "2024-07-12T10:53:48Z" ; + schema1:description """The second-level complexity workflow is one among a collection of workflows designed to address tasks up to CTF estimation. In addition to the functionalities provided by the layer 0 workflow, this workflow aims to enhance the quality of acquisition images using quality protocols.\r +\r +**Quality control protocols**\r +\r +* **Movie max shift**: automatic reject those movies whose frames move more than a given threshold. \r +\r +* **Tilt analysis**: quality score based in the Power Spectrum Density (astigmatism and tilt) \r +\r +* **CTF consensus**: acts as a filter discarding micrographs based on their CTF (limit resolution, defocus, astigmatism, etc.).\r +\r +**Advantages:** \r +\r +* More control of the acquisition quality\r +\r +* Reduce unnecessary processing time and storage""" ; + schema1:image "https://workflowhub.eu/workflows/2273/diagram?version=2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/599?version=1" ; + schema1:isPartOf ; + schema1:keywords "image processing, cryoem, spa, scipion" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "CEITEC layer 1 workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/599?version=2" ; + schema1:version 2 . 
+ +<#ont____assembly_flye_ahrenslab-inputs-ftp://biftp.informatik.uni-freiburg.de/pub/T0/Ahrens/SRR6982805.fastq> a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ftp://biftp.informatik.uni-freiburg.de/pub/T0/Ahrens/SRR6982805.fastq" . + +<#ont___workflow_wick_et_al_-inputs-https://ndownloader.figshare.com/files/8811145> a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "https://ndownloader.figshare.com/files/8811145" . + +<#ont___workflow_wick_et_al_-inputs-https://ndownloader.figshare.com/files/8811148> a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "https://ndownloader.figshare.com/files/8811148" . + +<#ont___workflow_wick_et_al_-inputs-https://ndownloader.figshare.com/files/8812159> a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "https://ndownloader.figshare.com/files/8812159" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/476" ; + schema1:name "Vasiliki Panagi" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "body" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "is_availability" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "result_modifiers" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "results" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_file" . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BridgeDB cache" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Differential gene expression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Differential miRNA expression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING identifier mapping" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mRNA expression correlation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Correlation limit" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "miRNA mRNA correlation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "miRTarBase data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "miRTarBase limit" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MOGAMUN cores" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MOGAMUN generations" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Maximum subnetwork size" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Subnetwork merge threshold" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Minimum subnetwork size" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MOGAMUN runs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING filter" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING limit" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Variant Burden" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Full network" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Subnetworks" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux p9r1n05 4.14.0-115.el7a.ppc64le #1 SMP Tue Sep 25 12:28:39 EDT 2018 ppc64le ppc64le ppc64le GNU/Linux SLURM_JOB_NAME=COMPSs SLURM_JOB_QOS=bsc_cs SLURM_JOB_GPUS=0,1,2,3 SLURM_MEM_PER_CPU=3000 COMPSS_BINDINGS_DEBUG=1 SLURM_JOB_ID=12794885 SLURM_JOB_USER=bsc44973 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=4586 SLURM_SUBMIT_DIR=/gpfs/projects/bsc44/PTF_WF_clean SLURM_JOB_NODELIST=p9r1n[05,07-11] SLURM_JOB_GID=17215 SLURM_JOB_CPUS_PER_NODE=160(x6) SLURM_SUBMIT_HOST=p9login1 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc44 SLURM_JOB_NUM_NODES=6 COMPSS_MASTER_NODE=p9r1n05 COMPSS_WORKER_NODES= p9r1n07 p9r1n08 p9r1n09 p9r1n10 p9r1n11" ; + schema1:endTime "2024-03-04T20:06:30+00:00" ; + schema1:instrument ; + schema1:name "COMPSs ptf_workflow.py execution at cte-power9 with JOB_ID 12794885" ; + schema1:object , + , + , + , + , + ; + schema1:result ; + schema1:subjectOf "https://userportal.bsc.es/" . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "null" . + + a schema1:MediaObject ; + schema1:contentSize 5459 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:description "Auxiliary File" ; + schema1:name "out_run.txt" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 5029 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_ellipsoids.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 7365 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_ensemble_kagan.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9604 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_ensemble_mare.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9561 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_ensemble_mare_bu.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 20681 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_ensemble_sampling.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 16765 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_ensemble_sampling_MC.py" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 42717 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_ensemble_sampling_RS.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 11254 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_figures.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 27146 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_hazard_curves.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 20921 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_hazard_curves_sim.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 28417 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_hazard_curves_sim_mc.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 13430 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_lambda_bsps_load.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 15145 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_lambda_bsps_sep.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 13058 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_load_event.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 5827 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_mix_utilities.py" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 11156 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_parser.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 14598 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_pre_selection.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 48135 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_preload.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 7853 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_preload_curves.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 48133 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_preload_save.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 18922 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_probability_scenarios.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 28266 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_save.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 3214 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_scaling_laws.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 23223 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_short_term.py" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 11144 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_sptha_utilities.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 734 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_time_tracker.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 6302 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Step2_create_ts_input_for_ptf_mod.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 6584 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Step2_extract_ts_mod.py" . + + a schema1:MediaObject ; + schema1:contentSize 8440 ; + schema1:description "Auxiliary File" ; + schema1:name "launch_pycompss.py.bk" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9120 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "run_kagan.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 10139 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "run_mare.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 20845 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "run_step1.py" . + + a schema1:MediaObject ; + schema1:contentSize 19355 ; + schema1:description "Auxiliary File" ; + schema1:name "run_step1.py.old" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 15324 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "run_step3.py" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2695 ; + schema1:description "Auxiliary File" ; + schema1:name "Create_config.sh" . + + a schema1:MediaObject ; + schema1:contentSize 2465 ; + schema1:description "Auxiliary File" ; + schema1:name "Create_config.sh.bk" . + + a schema1:MediaObject ; + schema1:contentSize 2514 ; + schema1:description "Auxiliary File" ; + schema1:name "Step2_config_simul.sh" . + + a schema1:MediaObject ; + schema1:contentSize 2257 ; + schema1:description "Auxiliary File" ; + schema1:name "Step2_simul_BS.sh" . + + a schema1:MediaObject ; + schema1:contentSize 628 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 4873 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Gareth Price" . + + a schema1:MediaObject ; + schema1:description "Workflow documentation" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Tim Dudgeon" . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/fragment-based-docking-scoring/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "canuConcurrency" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "corMaxEvidenceErate" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "diploidOrganism" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "falseValue" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fix" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "genomeSize" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "illuminaClip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "leading" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "maxFragmentLens" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "minReadLen" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "minThreads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "minlen" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "orientation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pacBioDataDir" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pacBioInBam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pacBioTmpDir" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "partialMatch" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "phredsPe" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "polishedAssembly" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "prefix" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "readsPe1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "readsPe2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "repBaseLibrary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "slidingWindow" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "taxons" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trailing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trueValue" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "arrowAssembly" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "assemblyMasked" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "assemblyMerged" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "canuAssembly" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "contaminatedReads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "correctedReads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deconClassification" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deconReport" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "decontaminatedReads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pilonAssembly" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sortedBamIndexFileOut" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmedReadFiles1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmedReadFiles2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmedReads" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/261" ; + schema1:name "Pasi Korhonen" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_31" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_36" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_37" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_40" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_41" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_42" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_43" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_44" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fasta" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "annotation_metadata" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cancer_hotspots" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cgi_biomarkers" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cgi_genes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "civic_genes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "civic_variants" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "dbsnp_vcf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "final_variants" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gene_cards_germline" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gene_cards_loh" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gene_cards_somatic" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gene_reports_tabular" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "germline_cancer_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "loh_cancer_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "loh_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "maf_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mutations_summary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "somatic_cancer_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "somatic_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "uniprot_cancer_genes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "variant_reports_tabular" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "wolf_tutorial.zip?download=1" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myeditconf.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfix_side_chain.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygenion.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygenion.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_energy.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_image.xtc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_rgyr.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_rms.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_trjconv_str.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygrompp.tpr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.cpt" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.edr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.xtc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb2gmx.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb2gmx.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysolvate.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysolvate.zip" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux p9r2n12 4.14.0-115.el7a.ppc64le #1 SMP Tue Sep 25 12:28:39 EDT 2018 ppc64le ppc64le ppc64le GNU/Linux SLURM_JOB_NAME=COMPSs SLURM_JOB_QOS=bsc_cs SLURM_JOB_GPUS=0,1,2,3 SLURM_MEM_PER_CPU=3000 COMPSS_BINDINGS_DEBUG=1 SLURM_JOB_ID=12783189 SLURM_JOB_USER=bsc44973 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=4586 SLURM_SUBMIT_DIR=/gpfs/projects/bsc44/PTF_WF_clean SLURM_JOB_NODELIST=p9r2n[12-13,15],p9r3n[13-15] SLURM_JOB_GID=17215 SLURM_JOB_CPUS_PER_NODE=160(x6) SLURM_SUBMIT_HOST=p9login1 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc44 SLURM_JOB_NUM_NODES=6 COMPSS_MASTER_NODE=p9r2n12 COMPSS_WORKER_NODES= p9r2n13 p9r2n15 p9r3n13 p9r3n14 p9r3n15" ; + schema1:endTime "2024-03-01T16:01:15+00:00" ; + schema1:instrument ; + schema1:name "COMPSs ptf_workflow.py execution at cte-power9 with JOB_ID 12783189" ; + schema1:object , + , + , + , + , + ; + schema1:result ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "null" . + + a schema1:MediaObject ; + schema1:contentSize 5459 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:description "Auxiliary File" ; + schema1:name "out_run.txt" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 5029 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_ellipsoids.py" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 7365 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_ensemble_kagan.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9604 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_ensemble_mare.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9561 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_ensemble_mare_bu.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 20681 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_ensemble_sampling.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 16765 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_ensemble_sampling_MC.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 42717 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_ensemble_sampling_RS.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 11254 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_figures.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 27146 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_hazard_curves.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 20921 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_hazard_curves_sim.py" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 28417 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_hazard_curves_sim_mc.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 13430 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_lambda_bsps_load.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 15145 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_lambda_bsps_sep.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 13058 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_load_event.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 5827 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_mix_utilities.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 11156 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_parser.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 14598 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_pre_selection.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 48135 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_preload.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 7853 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_preload_curves.py" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 48133 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_preload_save.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 18922 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_probability_scenarios.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 28266 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_save.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 3214 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_scaling_laws.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 23223 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_short_term.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 11144 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_sptha_utilities.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 734 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_time_tracker.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 6302 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Step2_create_ts_input_for_ptf_mod.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 6584 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Step2_extract_ts_mod.py" . 
+ + a schema1:MediaObject ; + schema1:contentSize 8440 ; + schema1:description "Auxiliary File" ; + schema1:name "launch_pycompss.py.bk" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9120 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "run_kagan.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 10139 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "run_mare.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 20845 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "run_step1.py" . + + a schema1:MediaObject ; + schema1:contentSize 19355 ; + schema1:description "Auxiliary File" ; + schema1:name "run_step1.py.old" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 15324 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "run_step3.py" . + + a schema1:MediaObject ; + schema1:contentSize 2695 ; + schema1:description "Auxiliary File" ; + schema1:name "Create_config.sh" . + + a schema1:MediaObject ; + schema1:contentSize 2465 ; + schema1:description "Auxiliary File" ; + schema1:name "Create_config.sh.bk" . + + a schema1:MediaObject ; + schema1:contentSize 2514 ; + schema1:description "Auxiliary File" ; + schema1:name "Step2_config_simul.sh" . + + a schema1:MediaObject ; + schema1:contentSize 2257 ; + schema1:description "Auxiliary File" ; + schema1:name "Step2_simul_BS.sh" . + + a schema1:MediaObject ; + schema1:contentSize 629 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . 
+ + a schema1:MediaObject ; + schema1:contentSize 4873 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Darwin MacBook-Pro-Raul-2018.local 23.1.0 Darwin Kernel Version 23.1.0: Mon Oct 9 21:27:27 PDT 2023; root:xnu-10002.41.9~6/RELEASE_X86_64 x86_64 COMPSS_HOME=/Users/rsirvent/opt/COMPSs/" ; + schema1:endTime "2023-12-04T14:19:45+00:00" ; + schema1:instrument ; + schema1:name "COMPSs Wordcount.java execution at MacBook-Pro-Raul-2018.local" ; + schema1:object ; + schema1:result . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . 
+ + a schema1:MediaObject ; + schema1:contentSize 566 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 1843 ; + schema1:description "Auxiliary File" ; + schema1:name "Readme" . + + a schema1:MediaObject ; + schema1:contentSize 9568 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "application/java-archive" ; + schema1:name "wordcount.jar" . + + a schema1:MediaObject ; + schema1:contentSize 4235 ; + schema1:description "Auxiliary File" ; + schema1:name "pom.xml" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 2653 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Wordcount.java" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 703 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "WordcountItf.java" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 743 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "WordcountItf.java" . + + a schema1:MediaObject ; + schema1:contentSize 289 ; + schema1:description "Auxiliary File" ; + schema1:name "project.xml" . + + a schema1:MediaObject ; + schema1:contentSize 983 ; + schema1:description "Auxiliary File" ; + schema1:name "resources.xml" . + + a schema1:MediaObject ; + schema1:contentSize 138 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2407 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Javier Garrayo-Ventas" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Scaffolding-HiC-VGP8/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Michael Franklin; Jiaan Yu; Juny Kesumadewi" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Janis" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "align_and_sort_sortsam_tmpDir" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cutadapt_adapters" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gatk_intervals" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "known_indels" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mills_indels" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sample_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "snps_1000gp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "snps_dbsnp" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "out_bam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "out_fastqc_reports" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "out_performance_summary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "out_variants_bamstats" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "out_variants_gatk" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "out_variants_gatk_split" . + + a schema1:Person ; + schema1:name "Lucille Delisle" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/rnaseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Dannon Baker" . + + a schema1:Person ; + schema1:name "Björn Grüning" . + + a schema1:Person ; + schema1:name "Delphine Larivière" . + + a schema1:Person ; + schema1:name "Gildas Le Corguillé" . + + a schema1:Person ; + schema1:name "Andrew Lonie" . + + a schema1:Person ; + schema1:name "Nicholas Keener" . + + a schema1:Person ; + schema1:name "Sergei Kosakovsky Pond" . + + a schema1:Person ; + schema1:name "Wolfgang Maier" . + + a schema1:Person ; + schema1:name "Anton Nekrutenko" . + + a schema1:Person ; + schema1:name "James Taylor" . + + a schema1:Person ; + schema1:name "Steven Weaver" . 
+ + a schema1:Person ; + schema1:name "Marius van den Beek" . + + a schema1:Person ; + schema1:name "Dave Bouvier" . + + a schema1:Person ; + schema1:name "John Chilton" . + + a schema1:Person ; + schema1:name "Nate Coraor" . + + a schema1:Person ; + schema1:name "Frederik Coppens" . + + a schema1:Person ; + schema1:name "Bert Droesbeke" . + + a schema1:Person ; + schema1:name "Ignacio Eguinoa" . + + a schema1:Person ; + schema1:name "Simon Gladman" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "CONFIG.AUTOSUBMIT_VERSION" ; + schema1:value "4.0.98" . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "CONFIG.MAXWAITINGJOBS" ; + schema1:value 20 . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "CONFIG.TOTALJOBS" ; + schema1:value 20 . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "DEFAULT.CUSTOM_CONFIG" ; + schema1:value "/home/kinow/autosubmit/a000/proj/git_project/conf/bootstrap" . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "DEFAULT.EXPID" ; + schema1:value "a000" . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "DEFAULT.HPCARCH" ; + schema1:value "local" . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "EXPERIMENT.CALENDAR" ; + schema1:value "standard" . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "EXPERIMENT.CHUNKSIZE" ; + schema1:value 0 . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "EXPERIMENT.CHUNKSIZEUNIT" ; + schema1:value "year" . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "EXPERIMENT.DATELIST" ; + schema1:value 19910101 . 
+ + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "EXPERIMENT.MEMBERS" ; + schema1:value "fc0" . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "EXPERIMENT.NUMCHUNKS" ; + schema1:value 0 . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "GIT.FETCH_SINGLE_BRANCH" ; + schema1:value true . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "GIT.PROJECT_BRANCH" ; + schema1:value "master" . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "GIT.PROJECT_COMMIT" ; + schema1:value "" . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "GIT.PROJECT_ORIGIN" ; + schema1:value "https://github.com/kinow/auto-mhm-test-domains.git" . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "GIT.PROJECT_SUBMODULES" ; + schema1:value "" . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "MHM.BRANCH_NAME" ; + schema1:value "develop" . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "MHM.DOMAIN" ; + schema1:value 1 . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "MHM.EVAL_PERIOD_DURATION_YEARS" ; + schema1:value 2 . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "MHM.SINGULARITY_CONTAINER" ; + schema1:value "/tmp/mhm.sif" . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "PROJECT.PROJECT_DESTINATION" ; + schema1:value "git_project" . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "PROJECT.PROJECT_TYPE" ; + schema1:value "git" . + + a schema1:ComputerLanguage ; + schema1:alternateName "AS" ; + schema1:citation "https://doi.org/10.1109/HPCSim.2016.7568429" ; + schema1:name "Autosubmit" ; + schema1:url "https://autosubmit.readthedocs.io/" ; + schema1:version "4.0.98" . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Autosubmit mHM test domains" ; + schema1:endTime "2023-11-03T23:41:48" ; + schema1:instrument ; + schema1:name "Run mHM" ; + schema1:object , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:result ; + schema1:startTime "2023-11-03T23:42:31" . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 780 ; + schema1:dateModified "2023-11-03T22:41:19" ; + schema1:name "minimal.yml" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 820 ; + schema1:dateModified "2023-11-03T22:43:08" ; + schema1:encodingFormat "application/binary" ; + schema1:name "job_list_a000.pkl" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-11-03T22:41:31" ; + schema1:encodingFormat "application/binary" ; + schema1:name "job_packages_a000.db" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 17902 ; + schema1:dateModified "2023-11-03T22:41:31" ; + schema1:encodingFormat "application/pdf" ; + schema1:name "a000_20231103_2341.pdf" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 139 ; + schema1:dateModified "2023-11-03T22:41:31" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_20231103_2341.txt" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 4241 ; + schema1:dateModified "2023-11-03T22:41:34" ; + schema1:encodingFormat "text/plain" ; + schema1:name "20231103_234125_create.log" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2023-11-03T22:41:27" ; + schema1:encodingFormat "text/plain" ; + schema1:name "20231103_234125_create_err.log" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 22671 ; + schema1:dateModified "2023-11-03T22:43:18" ; + schema1:encodingFormat "text/plain" ; + schema1:name "20231103_234138_run.log" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2023-11-03T22:41:41" ; + schema1:encodingFormat "text/plain" ; + schema1:name "20231103_234138_run_err.log" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 607 ; + schema1:dateModified "2023-11-03T22:43:08" ; + schema1:encodingFormat "text/plain" ; + schema1:name "jobs_active_status.log" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2023-11-03T22:43:08" ; + schema1:encodingFormat "text/plain" ; + schema1:name "jobs_failed_status.log" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3635 ; + schema1:dateModified "2023-11-03T22:43:08" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_COPY_GRAPH.20231103234308.err" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 20 ; + schema1:dateModified "2023-11-03T22:43:08" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_COPY_GRAPH.20231103234308.out" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3028 ; + schema1:dateModified "2023-11-03T22:42:56" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_COPY_GRAPH.cmd" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2023-11-03T22:42:56" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_COPY_GRAPH_COMPLETED" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 22 ; + schema1:dateModified "2023-11-03T22:42:56" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_COPY_GRAPH_STAT" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 5082 ; + schema1:dateModified "2023-11-03T22:42:56" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_GRAPH.20231103234243.err" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1356 ; + schema1:dateModified "2023-11-03T22:42:56" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_GRAPH.20231103234243.out" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 5843 ; + schema1:dateModified "2023-11-03T22:41:54" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_LOCAL_SETUP.20231103234154.err" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4324 ; + schema1:dateModified "2023-11-03T22:41:54" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_LOCAL_SETUP.20231103234154.out" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4802 ; + schema1:dateModified "2023-11-03T22:41:42" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_LOCAL_SETUP.cmd" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2023-11-03T22:41:48" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_LOCAL_SETUP_COMPLETED" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 22 ; + schema1:dateModified "2023-11-03T22:41:48" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_LOCAL_SETUP_STAT" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2550 ; + schema1:dateModified "2023-11-03T22:42:07" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_REMOTE_SETUP.20231103234206.err" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 20 ; + schema1:dateModified "2023-11-03T22:42:07" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_REMOTE_SETUP.20231103234206.out" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3127 ; + schema1:dateModified "2023-11-03T22:42:31" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_SIM.20231103234231.err" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 5264 ; + schema1:dateModified "2023-11-03T22:42:31" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_SIM.20231103234231.out" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 19235 ; + schema1:dateModified "2023-11-03T22:42:19" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_SYNC.20231103234219.err" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116 ; + schema1:dateModified "2023-11-03T22:42:19" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_SYNC.20231103234219.out" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3028 ; + schema1:dateModified "2023-11-03T22:42:56" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_COPY_GRAPH.cmd" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2023-11-03T22:43:08" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_COPY_GRAPH_COMPLETED" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 22 ; + schema1:dateModified "2023-11-03T22:43:08" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_COPY_GRAPH_STAT" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 54 ; + schema1:dateModified "2023-11-03T22:43:08" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_COPY_GRAPH_TOTAL_STATS" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3677 ; + schema1:dateModified "2023-11-03T22:42:31" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_GRAPH.cmd" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2023-11-03T22:42:55" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_GRAPH_COMPLETED" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 22 ; + schema1:dateModified "2023-11-03T22:42:55" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_GRAPH_STAT" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 54 ; + schema1:dateModified "2023-11-03T22:42:56" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_GRAPH_TOTAL_STATS" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4802 ; + schema1:dateModified "2023-11-03T22:41:42" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_LOCAL_SETUP.cmd" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2023-11-03T22:41:54" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_LOCAL_SETUP_COMPLETED" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 22 ; + schema1:dateModified "2023-11-03T22:41:54" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_LOCAL_SETUP_STAT" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 54 ; + schema1:dateModified "2023-11-03T22:41:54" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_LOCAL_SETUP_TOTAL_STATS" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2643 ; + schema1:dateModified "2023-11-03T22:41:54" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_REMOTE_SETUP.cmd" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2023-11-03T22:42:06" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_REMOTE_SETUP_COMPLETED" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 22 ; + schema1:dateModified "2023-11-03T22:42:06" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_REMOTE_SETUP_STAT" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 54 ; + schema1:dateModified "2023-11-03T22:42:06" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_REMOTE_SETUP_TOTAL_STATS" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2872 ; + schema1:dateModified "2023-11-03T22:42:19" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_SIM.cmd" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2023-11-03T22:42:31" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_SIM_COMPLETED" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 22 ; + schema1:dateModified "2023-11-03T22:42:31" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_SIM_STAT" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 54 ; + schema1:dateModified "2023-11-03T22:42:31" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_SIM_TOTAL_STATS" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3065 ; + schema1:dateModified "2023-11-03T22:42:06" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_SYNC.cmd" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2023-11-03T22:42:18" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_SYNC_COMPLETED" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 22 ; + schema1:dateModified "2023-11-03T22:42:19" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_SYNC_STAT" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 54 ; + schema1:dateModified "2023-11-03T22:42:19" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_SYNC_TOTAL_STATS" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "QCFilteredAnnDataObject" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "1k_cell_table" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "1k_gene_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "marker_dot_plot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "marker_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "processed_anndata_object" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "umap_cluster_plot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "umap_sample_plot" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Sample" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject ; + schema1:contentSize 183 . 
+ + a schema1:MediaObject ; + schema1:contentSize 486 . + + a schema1:MediaObject ; + schema1:contentSize 558 . + + a schema1:MediaObject ; + schema1:contentSize 1302 . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 43925 . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-only-VGP3/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/VGP-meryldb-creation-trio/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Matthias Bernt" . + + a schema1:Organization ; + schema1:name "UFZ Leipzig" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/dada2/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_energy_min_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_energy_min_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_gppnvt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_energy_nvt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_energy_nvt_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_gppnpt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_energy_npt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_energy_npt_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_gppmd_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_rmsfirst_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_rmsfirst_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_pdb_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_pdb_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_rmsexp_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_rmsexp_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_rgyr_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_image_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_dry_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_editconf_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_gppion_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_genion_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_gppmin_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Checkpoint file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Structures - Raw structure" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Resulting protein structure" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GROMACS topology file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Topologies GROMACS portable binary run" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Trajectories - Raw trajectory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Trajectories - Post-processed trajectory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "System Setup Observables - Potential Energy" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "System Setup Observables - Pressure and density" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "System Setup Observables - Temperature" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Simulation Analysis" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Simulation Analysis" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Simulation Analysis" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/357" ; + schema1:name "Tatiana Gurbich" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Scipion" ; + schema1:url . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 53166 . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Engy Nasr" . + + a schema1:Person ; + schema1:name "Bérénice Batut" . + + a schema1:Person ; + schema1:name "Paul Zierep" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/allele-based-pathogen-identification/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_baredSC-1d-logNorm_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_baredSC-1d-logNorm_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/baredsc/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_baredSC-2d-logNorm_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_baredSC-2d-logNorm_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/baredsc/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Smitha Sukumar" . + + a schema1:Person ; + schema1:name "Elena Martinez" . + + a schema1:Person ; + schema1:name "Christina Adler" . + + a schema1:Person ; + schema1:name "Henry Lydecker" . + + a schema1:Person ; + schema1:name "Fang Wang" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/141" ; + schema1:name "Tracy Chew" . + + a schema1:ComputerLanguage ; + schema1:name "Shell Script" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-decontamination-VGP9/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STAR parameter" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "turtle file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "maximum memory usage in megabytes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Run STAR" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "number of threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STAR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bowtie2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Genome fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GTF" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "kallisto" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Proteins" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Transcripts" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "WDL" ; + schema1:identifier ; + schema1:name "Workflow Description Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/galaxyproject/iwc/actions/workflows/workflow_test.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-atac-cutandrun_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-atac-cutandrun_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-pe_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-pe_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-sr_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-sr_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Scipion" ; + schema1:url . + + a schema1:Person ; + schema1:name "GAPARS Horizon 2020 European project" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "classifications-000312899389-000316591628.csv.part_00000" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Cali Willet" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/141" ; + schema1:name "Tracy Chew" . + + a schema1:ComputerLanguage ; + schema1:name "Shell Script" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject ; + schema1:contentSize 187 . + + a schema1:MediaObject ; + schema1:contentSize 74 . + + a schema1:MediaObject ; + schema1:contentSize 324 . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 5131 . + + a schema1:MediaObject ; + schema1:contentSize 8805 . + + a schema1:MediaObject ; + schema1:contentSize 6645 . + + a schema1:MediaObject ; + schema1:contentSize 44006 . + + a schema1:MediaObject ; + schema1:contentSize 21291 . + + a schema1:MediaObject ; + schema1:contentSize 75358 . + + a schema1:MediaObject ; + schema1:contentSize 847 . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.0" . + + a schema1:MediaObject ; + schema1:contentSize 2165 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 133 ; + schema1:description "Parameters passed as arguments to the COMPSs application through the command line" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_command_line_arguments.txt" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Perl" ; + schema1:url . + + a schema1:Person ; + schema1:name "Sarai Varona and Miguel Juliá and Sara Monzon and Alexander Peltzer and Alison Meynert and Edgar Garriga Nogales and Erik Garrison and Gisela Gabernet and Harshil Patel and Joao Curado and Jose Espinosa-Carrasco and Katrin Sameith and Marta Pozuelo and Maxime Garcia and Michael Heuer and Phil Ewels and Simon Heumos and Stephen Kelly and Thanh Le Viet and Isabel Cuesta" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "" . + + a schema1:Person ; + schema1:name "Susana Posada Céspedes" . + + a schema1:Person ; + schema1:name "Niko Beerenwinkel" . + + a schema1:Person ; + schema1:name "" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-consensus-from-variation/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:Person ; + schema1:name "Lucille Delisle" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/rnaseq-sr/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_energy_min_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_energy_min_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_gppnvt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_energy_nvt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_energy_nvt_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_gppnpt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_energy_npt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_energy_npt_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_gppmd_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_rmsfirst_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_rmsfirst_name" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_pdb_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_pdb_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_rmsexp_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_rmsexp_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_rgyr_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_image_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_dry_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_editconf_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_gppion_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_genion_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_gppmin_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Checkpoint file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Structures - Raw structure" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Resulting protein structure" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GROMACS topology file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Topologies GROMACS portable binary run" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Trajectories - Raw trajectory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Trajectories - Post-processed trajectory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "System Setup Observables - Potential Energy" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "System Setup Observables - Pressure and density" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "System Setup Observables - Temperature" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Simulation Analysis" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Simulation Analysis" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Simulation Analysis" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Helmholtz-Zentrum für Umweltforschung - UFZ" . + + a schema1:Organization ; + schema1:name "Helmholtz-Zentrum für Umweltforschung - UFZ" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_IIa-denoising-se_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_IIa-denoising-se_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/qiime2-II-denoising/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_IIb-denoising-pe_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_IIb-denoising-pe_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/qiime2-II-denoising/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reference filters files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output Destination" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filter rRNA" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Forward reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Identifier" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "kallisto index file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Maximum memory in MB" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reverse reads" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filtered statistics" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "kallisto output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-sr/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/445" ; + schema1:name "Diego De Panis" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "consensus_genome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "drug_resistance_report_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "html_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_txt" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "report_output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "variants_report_html" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Dannon Baker" . + + a schema1:Person ; + schema1:name "Björn Grüning" . + + a schema1:Person ; + schema1:name "Delphine Larivière" . + + a schema1:Person ; + schema1:name "Gildas Le Corguillé" . + + a schema1:Person ; + schema1:name "Andrew Lonie" . + + a schema1:Person ; + schema1:name "Nicholas Keener" . + + a schema1:Person ; + schema1:name "Sergei Kosakovsky Pond" . + + a schema1:Person ; + schema1:name "Wolfgang Maier" . + + a schema1:Person ; + schema1:name "Anton Nekrutenko" . + + a schema1:Person ; + schema1:name "James Taylor" . + + a schema1:Person ; + schema1:name "Steven Weaver" . + + a schema1:Person ; + schema1:name "Marius van den Beek" . + + a schema1:Person ; + schema1:name "Dave Bouvier" . + + a schema1:Person ; + schema1:name "John Chilton" . + + a schema1:Person ; + schema1:name "Nate Coraor" . + + a schema1:Person ; + schema1:name "Frederik Coppens" . + + a schema1:Person ; + schema1:name "Bert Droesbeke" . + + a schema1:Person ; + schema1:name "Ignacio Eguinoa" . + + a schema1:Person ; + schema1:name "Simon Gladman" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "NC_045512" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "basecalling model" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "identifier used" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "kraken_database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "nanopore FASTQ reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "number of threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Flye de novo assembler for single-molecule reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken2 reports" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Medaka polisher" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "QUAlity assessment" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "KNIME" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-cellplex_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-cellplex_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/fastq-to-matrix-10x/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-v3_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-v3_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/fastq-to-matrix-10x/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Wolfgang Maier" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "called_variants" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastp_html_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastp_pe" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "filtered_mapped_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mapped_reads_stats" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "markduplicates_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "markduplicates_stats" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "preprocessing_and_mapping_reports" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "realigned_deduplicated_filtered_mapped_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "realigned_deduplicated_filtered_mapped_reads_with_indel_quals" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "soft_filtered_variants" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Darwin MacBook-Pro-Raul-2018.local 22.5.0 Darwin Kernel Version 22.5.0: Mon Apr 24 20:51:50 PDT 2023; root:xnu-8796.121.2~5/RELEASE_X86_64 x86_64 COMPSS_HOME=/Users/rsirvent/opt/COMPSs/" ; + schema1:endTime "2023-05-30T08:48:00+00:00" ; + schema1:instrument ; + schema1:name "COMPSs matmul_files.py execution at MacBook-Pro-Raul-2018.local" ; + schema1:object , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:result , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , 
+ , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.2" . + + a schema1:MediaObject ; + schema1:contentSize 250 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 1429 ; + schema1:description "Auxiliary File" ; + schema1:name "README" . + + a schema1:MediaObject ; + schema1:contentSize 4414 ; + schema1:description "Auxiliary File" ; + schema1:name "pom.xml" . + + a schema1:MediaObject ; + schema1:contentSize 2061 ; + schema1:description "Auxiliary File" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 0 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "__init__.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1627 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "matmul_tasks.py" . + + a schema1:MediaObject ; + schema1:contentSize 25 ; + schema1:description "COMPSs command line execution command, including parameters passed" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_command_line_arguments.txt" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Gareth Price" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "metrics" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/assembly-with-flye/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux nvb4 2.6.32-642.6.2.el6.x86_64 #1 SMP Mon Oct 24 10:22:33 EDT 2016 x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs SLURM_JOB_QOS=bsc_cs SLURM_JOB_GPUS=0,1,2,3 SLURM_MEM_PER_CPU=8000 SLURM_JOB_ID=1930225 SLURM_JOB_USER=bsc19959 COMPSS_HOME=/apps/COMPSs/TrunkCTCache/ SLURM_JOB_UID=4373 SLURM_SUBMIT_DIR=/gpfs/scratch/bsc19/bsc19959/cache SLURM_JOB_NODELIST=nvb[4-9,12-21] SLURM_JOB_CPUS_PER_NODE=16(x16) SLURM_SUBMIT_HOST=nvblogin1 SLURM_JOB_PARTITION=projects SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=16 COMPSS_MASTER_NODE=nvb4 COMPSS_WORKER_NODES= nvb5 nvb6 nvb7 nvb8 nvb9 nvb12 nvb13 nvb14 nvb15 nvb16 nvb17 nvb18 nvb19 nvb20 nvb21" ; + schema1:endTime "2024-03-22T11:45:33+00:00" ; + schema1:instrument ; + schema1:name "COMPSs cch_matmul_test.py execution at bsc_nvidia with JOB_ID 1930225" ; + schema1:result , + , + , + ; + schema1:startTime "2024-03-22T11:39:34+00:00" ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:SoftwareApplication ; + schema1:name "Dislib" ; + schema1:version "0.9.x" . + + a schema1:MediaObject ; + schema1:contentSize 4113 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_bsc_nvidia_SLURM_JOB_ID_1930225" ; + schema1:contentSize 64 ; + schema1:description "COMPSs console standard error log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-1930225.err" . 
+ + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_bsc_nvidia_SLURM_JOB_ID_1930225" ; + schema1:contentSize 2864 ; + schema1:description "COMPSs console standard output log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-1930225.out" . + + a schema1:MediaObject ; + schema1:contentSize 716 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 1203 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Romane Libouban" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/repeatmasking/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-decontamination-VGP9/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . 
+ + a schema1:ComputerLanguage ; + schema1:name "Bpipe" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-only-VGP3/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:name "Matthias Bernt" . + + a schema1:Organization ; + schema1:name "UFZ Leipzig" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/dada2/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-cellplex_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-cellplex_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/fastq-to-matrix-10x/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-v3_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-v3_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/fastq-to-matrix-10x/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Plasmids" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux gs11r2b04 5.14.0-284.30.1.el9_2.x86_64 #1 SMP PREEMPT_DYNAMIC Fri Aug 25 09:13:12 EDT 2023 x86_64 x86_64 x86_64 GNU/Linux" ; + schema1:endTime "2024-07-05T11:38:51+00:00" ; + schema1:instrument ; + schema1:name "COMPSs main.py execution at marenostrum5 with JOB_ID 3644777" ; + schema1:object , + ; + schema1:result , + , + , + ; + schema1:startTime "2024-07-05T11:37:39+00:00" ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 1388 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_marenostrum5_SLURM_JOB_ID_3644777" ; + schema1:contentSize 100 ; + schema1:description "COMPSs console standard error log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-3644777.err" . 
+ + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_marenostrum5_SLURM_JOB_ID_3644777" ; + schema1:contentSize 1723 ; + schema1:description "COMPSs console standard output log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-3644777.out" . + + a schema1:MediaObject ; + schema1:contentSize 590 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 996 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/lcms-preprocessing/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path_gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path_itp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path_top" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bits_set" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blacklist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bwt_algorithm" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chromosome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_max_len" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_lratio" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_exomeDepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_manta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_bf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "generate_bwa_indexes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_exome" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_mapping_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "read_group" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_amb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_ann" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_bwt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fai" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_pac" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_sa" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_bwa_mem" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastqc" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_samtools" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "html_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "json_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_all" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_bam_filtering" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_exomedepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_manta" . + + a schema1:Person ; + schema1:name "Daniel López-López" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-sr/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s01r2b26 4.18.0-305.19.1.el8_4.x86_64 #1 SMP Tue Sep 7 07:07:31 EDT 2021 x86_64 x86_64 x86_64 GNU/Linux COMPSS_CONTAINER_ENGINE=SINGULARITY SLURM_JOB_ID=2497335 COMPSS_HOME=/apps/COMPSs/3.3.pr/ COMPSS_MASTER_WORKING_DIR=" ; + schema1:endTime "2023-10-20T11:05:06+00:00" ; + schema1:instrument ; + schema1:name "COMPSs Workflow.py execution at s01r2b26 with JOB_ID 2497335" ; + schema1:object , + , + ; + schema1:result , + ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.2.rc2310" . + + a schema1:MediaObject ; + schema1:contentSize 4057 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 612 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 1497 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . 
+ + a schema1:Person ; + schema1:name "Dannon Baker" . + + a schema1:Person ; + schema1:name "Björn Grüning" . + + a schema1:Person ; + schema1:name "Delphine Larivière" . + + a schema1:Person ; + schema1:name "Gildas Le Corguillé" . + + a schema1:Person ; + schema1:name "Andrew Lonie" . + + a schema1:Person ; + schema1:name "Nicholas Keener" . + + a schema1:Person ; + schema1:name "Sergei Kosakovsky Pond" . + + a schema1:Person ; + schema1:name "Wolfgang Maier" . + + a schema1:Person ; + schema1:name "Anton Nekrutenko" . + + a schema1:Person ; + schema1:name "James Taylor" . + + a schema1:Person ; + schema1:name "Steven Weaver" . + + a schema1:Person ; + schema1:name "Marius van den Beek" . + + a schema1:Person ; + schema1:name "Dave Bouvier" . + + a schema1:Person ; + schema1:name "John Chilton" . + + a schema1:Person ; + schema1:name "Nate Coraor" . + + a schema1:Person ; + schema1:name "Frederik Coppens" . + + a schema1:Person ; + schema1:name "Bert Droesbeke" . + + a schema1:Person ; + schema1:name "Ignacio Eguinoa" . + + a schema1:Person ; + schema1:name "Simon Gladman" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Scaffolding-HiC-VGP8/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/atacseq/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Wolfgang Maier" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "1_based_masking_regions" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "called_variant_sites" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chrom_pos_ref_called_variants" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chrom_pos_ref_called_variants_with_0_based_start" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chrom_pos_ref_called_variants_with_0_based_start_end" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chrom_pos_ref_failed_variants" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chrom_pos_ref_failed_variants_with_0_based_start" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chrom_pos_ref_failed_variants_with_0_based_start_end" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "consensus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "consensus_variants" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "coverage_depth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "failed_variant_sites" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "filter_failed_variants" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "low_cov_regions" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "low_cov_regions_plus_filter_failed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "low_cov_regions_plus_filter_failed_combined" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "masking_regions" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "masking_regions_with_1_based_start" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "multisample_consensus_fasta" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "VCF" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/445" ; + schema1:name "Diego De Panis" . + + a schema1:Person ; + schema1:name "Engy Nasr" . + + a schema1:Person ; + schema1:name "Bérénice Batut" . + + a schema1:Person ; + schema1:name "Paul Zierep" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/pathogen-detection-pathogfair-samples-aggregation-and-visualisation/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a schema1:Person ; + schema1:name "Javier Garrayo-Ventas" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_host" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_password" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_user" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "filters" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_file" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bowtie2 index" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filer rRNA" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "forward reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GTF file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "kallisto index" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Max memory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename prefix" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reverse reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "number of threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bowtie2 output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "FASTQC" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "FeatureCounts output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filtered reads folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "kallisto output" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/gcms-metams/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Alex L Mitchell" . + + a schema1:Person ; + schema1:name "Lorna J Richardson" . + + a schema1:Person ; + schema1:name "Ekaterina Sakharova" . + + a schema1:Person ; + schema1:name "Maxim Scheremetjew" . + + a schema1:Person ; + schema1:name "Anton Korobeynikov" . + + a schema1:Person ; + schema1:name "Alex Shlemov" . + + a schema1:Person ; + schema1:name "Olga Kunyavskaya" . + + a schema1:Person ; + schema1:name "Alla Lapidus" . + + a schema1:Person ; + schema1:name "Robert D Finn" . + + a schema1:Person ; + schema1:name "Alexandre Almeida" . + + a schema1:Person ; + schema1:name "Martin Beracochea" . + + a schema1:Person ; + schema1:name "Miguel Boland" . + + a schema1:Person ; + schema1:name "Josephine Burgin" . 
+ + a schema1:Person ; + schema1:name "Guy Cochrane" . + + a schema1:Person ; + schema1:name "Michael R Crusoe" . + + a schema1:Person ; + schema1:name "Varsha Kale" . + + a schema1:Person ; + schema1:name "Simon C Potter" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "5.8s_pattern" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "5s_pattern" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CGC_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CGC_postfixes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "EggNOG_data_dir" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "EggNOG_db" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "EggNOG_diamond_db" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "HMM_gathering_bit_score" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "HMM_name_database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "HMM_omit_alignment" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "InterProScan_applications" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "InterProScan_databases" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "InterProScan_outputFormat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cgc_chunk_size" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "forward_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "func_ann_names_hmmer" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "func_ann_names_ips" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "go_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hmmsearch_header" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ips_header" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ko_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_db" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_label" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_otus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_tax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "other_ncRNA_models" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "protein_chunk_size_IPS" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "protein_chunk_size_hmm" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc_min_length" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reverse_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rfam_model_clans" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rfam_models" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "single_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_db" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_label" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_otus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_tax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chunking_nucleotides" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chunking_proteins" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "completed_flag_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "compressed_files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastp_filtering_json_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "functional_annotation_folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hashsum_paired" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hashsum_single" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "motus_output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_cds_flag_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_tax_flag_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc-statistics" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc-status" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc_summary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rna-count" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sequence-categorisation_folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stats" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "taxonomy-summary_folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "data_matrix" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gmt_filepath" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "index_col" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "outdir" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samples_on_rows" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "separator" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "resdir" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "ABRomics" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/amr_gene_detection/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_index_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_mdp_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_ndx_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_solvent_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dhdl_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_heteroatom_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_itp_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_molecule_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_ndx_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path_gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path_itp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path_top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_str_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xtc_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a schema1:Person ; + schema1:name "Cali Willet" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/141" ; + schema1:name "Tracy Chew" . + + a schema1:ComputerLanguage ; + schema1:name "Shell Script" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_molecule_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Darwin MacBook-Pro-Raul-2018.local 22.5.0 Darwin Kernel Version 22.5.0: Mon Apr 24 20:51:50 PDT 2023; root:xnu-8796.121.2~5/RELEASE_X86_64 x86_64 COMPSS_HOME=/Users/rsirvent/opt/COMPSs/" ; + schema1:endTime "2023-05-30T07:15:34+00:00" ; + schema1:instrument ; + schema1:name "COMPSs Matmul.java execution at MacBook-Pro-Raul-2018.local" ; + schema1:object , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:result , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.2" . 
+ + a schema1:MediaObject ; + schema1:contentSize 342 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 2092 ; + schema1:description "Auxiliary File" ; + schema1:name "Readme" . + + a schema1:MediaObject ; + schema1:contentSize 4804 ; + schema1:description "Auxiliary File" ; + schema1:name "pom.xml" . + + a schema1:MediaObject ; + schema1:contentSize 2823 ; + schema1:description "Auxiliary File" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 3615 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Matmul.java" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1182 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "MatmulImpl.java" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1162 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "MatmulItf.java" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 4341 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Block.java" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 940 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "MatmulImpl.java" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1464 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "MatmulItf.java" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 2598 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Block.java" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 3844 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Matmul.java" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 932 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "MatmulItf.java" . + + a schema1:MediaObject ; + schema1:contentSize 289 ; + schema1:description "Auxiliary File" ; + schema1:name "project.xml" . + + a schema1:MediaObject ; + schema1:contentSize 983 ; + schema1:description "Auxiliary File" ; + schema1:name "resources.xml" . + + a schema1:MediaObject ; + schema1:contentSize 24 ; + schema1:description "COMPSs command line execution command, including parameters passed" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_command_line_arguments.txt" . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "concat_traj.zip" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybd_flexserv_bd_ensemble.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybd_flexserv_bd_ensemble.mdcrd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myconcoord_disco_bfactor.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myconcoord_disco_rmsd.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myconcoord_disco_traj.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myconcoord_dist.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myconcoord_dist.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myconcoord_dist.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_disco_traj.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_flexserv_bd_rmsd.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_flexserv_bd_traj_fitted.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_flexserv_dmd_rmsd.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_flexserv_dmd_traj_fitted.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_flexserv_nma_ensemble.trr" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_flexserv_nma_rmsd.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_imods_ensemble.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_mask_backbone" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_mask_ca.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_meta_traj_fitted.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_meta_traj_rmsd.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_nolb_ensemble.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_nolb_rmsd.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_pcz_proj1.dcd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_prody_anm_traj.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_prody_rms.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mydmd_flexserv_dmd_ensemble.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mydmd_flexserv_dmd_ensemble.mdcrd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_model.pdb" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_monomer.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_cluster.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_cluster.xpm" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_cluster.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_concat.cluster.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myimod_imc.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myimod_imode_evecs.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymake_gmx_ndx.ndx" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mynma_flexserv_nma_ensemble.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mynma_flexserv_nma_ensemble.mdcrd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mynolb_ensemble.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypcz_bfactor_all.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypcz_bfactor_all.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypcz_collectivity.json" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypcz_evecs.json" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypcz_hinges_bfactor_report.json" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypcz_hinges_fcte_report.json" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypcz_proj1.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypcz_report.json" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypcz_stiffness.json" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprody_anm_traj.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mytrjcat_concat_traj.trr" . + + a schema1:Person ; + schema1:name "Peter van Heusden" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-pe-illumina-artic-ivar-analysis/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.itp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myappend_ligand.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybabel_minimize.mol2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycat_pdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myeditconf.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_heteroatoms.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_molecule.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfix_side_chain.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygenion.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygenion.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygenrestr.itp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_energy.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_image.trr" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_rgyr.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_rms_exp.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_trjconv_str_lig.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_trjconv_str_prot.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygrompp_ion.tpr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymake_ndx.ndx" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.cpt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.edr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.xtc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb2gmx.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb2gmx.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb_prot.pdb" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myreduce_add_hydrogens.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysolvate.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysolvate.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Alignment score from Kpax to analyse structures" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CATH family ids" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Obsolete and inconsistent CATH domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename for residue-mapped CATH domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Core domain structure (.pdb)" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Database to select to compute core average structure" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "To store all the domain-like StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "To store all failed domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename to store family ids per iteration" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Iteration number" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Threshold for minimum domain length" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Parameter file for current iteration" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The directory for storing all PDB files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pfam family ids" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Obsolete and inconsistent Pfam domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename for residue-mapped Pfam domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CATH cross-mapped domain StIs from previous iteration" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pfam cross-mapped domain StIs from previous iteration" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Score threshold for given alignment score from Kpax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Directory for storing all SIFTS files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "To store all the true domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename to store unmapped and not properly aligned instances from CATH" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename to store unmapped but structurally well aligned instances from CATH" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename to store unmapped and not properly aligned instances from Pfam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename to store unmapped but structurally well aligned instances from Pfam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename with alignment scores for unmapped instances" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Alignment results for CATH unmapped instances" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Alignment results for Pfam unmapped instances" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Domain-like StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Failed domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "All CATH cross-mapped domin StIs family-wise together" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "All Pfam domain StIs cross-mapped to CATH family-wise" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Alignment results from Kpax for all cross-mapped families" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Average structures per cross-mapped Pfam family for CATH StIs at family level" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Core domain StIs" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Core domain structure (.pdb)" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Cross-mapped families with CATH domain StIs passing the threshold" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Cross-mapped families with Pfam domain StIs passing the threshold" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Merged cross-mapped and residue-mapped domain StIs from CATH" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Merged cross-mapped and residue-mapped domain StIs from Pfam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Family ids per iteration" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Parameter file for next iteration of the workflow" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Average structures per cross-mapped CATH family for Pfam StIs at family level" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Obsolete and inconsistent domain StIs from CATH" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Obsolete and inconsistent domain StIs from Pfam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "All CATH residue-mapped domain StIs with domain labels" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "All Pfam residue-mapped domain StIs with domain labels" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "True domain StIs per iteration" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "All un-mapped domin StIs from CATH" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Failed domain StIs from CATH" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Domain-like StIs from CATH" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "All Pfam un-mapped domin StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Failed domain StIs from Pfam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Domain-like StIs from Pfam" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BridgeDB cache" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Differential gene expression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Differential miRNA expression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING identifier mapping" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mRNA expression correlation" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of correlation edges to use, USE FOR TESTING ONLY" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "miRNA mRNA correlation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "miRTarBase data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of miRTarBase edges to use, USE FOR TESTING ONLY" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of cores to let MOGAMUN use" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of generation to let the genetic algorithm in MOGAMUN evolve" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The maximum size of subnetworks during postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "the minimum Jaccard Index overlap between two subnetworks to allow them to be merged" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The minimum size of subnetworks during postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of parallel runs to let MOGAMUN do, these parallel runs are combined in postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The minimum score for a STRING edge to be included in the analysis" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of STRING edges to use, USE FOR TESTING ONLY" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Variant Burden" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "full_graph" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "subnetworks" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq_files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "readgroup" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sample_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sorted_bam" . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "metrics" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken_taxonomy_table" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken_taxonomy_table_modified" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Taxonomic_prediction_report" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "filters" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_file" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:name "Oliver Woolland" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/159" ; + schema1:name "Oliver Woolland" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/445" ; + schema1:name "Diego De Panis" . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rnaseq_left_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rnaseq_right_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sample_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sars_cov_2_reference_genome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "indels" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "snps" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/26" ; + schema1:name "Ambarish Kumar" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "basecalling model" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "configuration_command" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "nanopore reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "identifier used" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "kraken_database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "number of threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Flye de novo assembler for single-molecule reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Guppy for CPU" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken2 reports" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Krona taxonomy visualization" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Medaka polisher" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "FASTQ files merged" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MinION-Quality-Check" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "QUAlity assessment" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Scaffolding-HiC-VGP8/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "KNIME" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myautodock_vina_run.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myautodock_vina_run.pdbqt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybabel_convert.ent" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybox.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycat_pdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_model_pdbqt.pdbqt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_molecule.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_filter.zip" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_run.json" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_run.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_select.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_select.pqr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myideal_sdf.sdf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mystr_check_add_hydrogens.pdb" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Javier Garrayo-Ventas" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject ; + schema1:contentSize 177 . + + a schema1:MediaObject ; + schema1:contentSize 91 . + + a schema1:MediaObject ; + schema1:contentSize 724 . + + a schema1:MediaObject ; + schema1:contentSize 1043 . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 6645 . + + a schema1:Organization ; + schema1:name "Helmholtz-Zentrum für Umweltforschung - UFZ" . + + a schema1:Organization ; + schema1:name "Helmholtz-Zentrum für Umweltforschung - UFZ" . + + a schema1:Organization ; + schema1:name "Helmholtz-Zentrum für Umweltforschung - UFZ" . + + a schema1:Organization ; + schema1:name "Helmholtz-Zentrum für Umweltforschung - UFZ" . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_Ia-import-multiplexed-se_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_Ia-import-multiplexed-se_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/qiime2-I-import/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_Ib-import-multiplexed-pe_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_Ib-import-multiplexed-pe_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/qiime2-I-import/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_Ic-import-demultiplexed-se_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_Ic-import-demultiplexed-se_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/qiime2-I-import/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_Id-import-demultiplexed-pe_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_Id-import-demultiplexed-pe_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/qiime2-I-import/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Egon Willighagen" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/262" ; + schema1:name "Marvin Martens" . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s01r1b45 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=31897997 SLURM_JOB_USER=bsc19057 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=2952 SLURM_SUBMIT_DIR=/gpfs/home/bsc19/bsc19057/Tutorial_2024/lysozyme_in_water SLURM_JOB_NODELIST=s01r1b[45,47] SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48(x2) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=2 COMPSS_MASTER_NODE=s01r1b45 COMPSS_WORKER_NODES= s01r1b47" ; + schema1:endTime "2024-03-05T11:04:52+00:00" ; + schema1:instrument ; + schema1:name "COMPSs lysozyme_in_water_full.py execution at marenostrum4 with JOB_ID 31897997" ; + schema1:object , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:result , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 2912 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 1581 ; + schema1:description "Auxiliary File" ; + schema1:name "launch_full.sh" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 7601 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 8868 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water_full_no_mpi.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 10194 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water_full_singularity.py" . + + a schema1:MediaObject ; + schema1:contentSize 764 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 727 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BDD_Kakila_v2_2021021_observateur.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BDD_Kakila_v2_20210221_observation.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BDD_Kakila_v2_20210221_organisme.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BDD_Kakila_v2_20210221_secteur_geog.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BDD_Kakila_v2_20210221_sortie.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BDD_Kakila_v2_20210221_taxon.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kakila_database_of_marine_mammal_observation_data_in_the_AGOA_sanctuary_-_French_Antilles.xml" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:Person ; + schema1:name "Engy Nasr" . 
+ + a schema1:Person ; + schema1:name "Bérénice Batut" . + + a schema1:Person ; + schema1:name "Paul Zierep" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/gene-based-pathogen-identification/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Run binning workflow" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BUSCO dataset" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Deduplicate reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output Destination (prov only)" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Contamination reference file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gtdbtk data directory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Identifier used" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Forward reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reverse reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Keep filtered reads" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken2 database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Memory usage (MB)" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "When working with metagenomes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "PacBio reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ONT Basecalling model" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "PacBio reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pilon fix list" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Run GEM workflow" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Use Flye" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Use Pilon" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Run SMETANA" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Use SPAdes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Number of threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Keep mapped reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Assembly output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Binning output" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Community GEM output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken2 reports" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Read filtering output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Read filtering output" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s11r2b43 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs COMPSS_PYTHON_VERSION=3.8.2 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=30498011 SLURM_JOB_USER=bsc19959 COMPSS_HOME=/apps/COMPSs/3.3.pr/ SLURM_JOB_UID=4373 SLURM_SUBMIT_DIR=/gpfs/scratch/bsc19/bsc19959/randomsvd SLURM_JOB_NODELIST=s11r2b[43-44] SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48(x2) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login1 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=2 COMPSS_MASTER_NODE=s11r2b43 COMPSS_WORKER_NODES= s11r2b44" ; + schema1:endTime "2023-11-02T10:54:22+00:00" ; + schema1:instrument ; + schema1:name "COMPSs random_svd_compss.py execution at marenostrum4 with JOB_ID 30498011" ; + schema1:result ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.2.rc2310" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3183 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 482 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 1456 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Fernando Cruz (CNAG)" . + + a schema1:Person ; + schema1:name "Francisco Camara (CNAG)" . + + a schema1:Person ; + schema1:name "Tyler Alioto (CNAG)" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/parallel-accession-download/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-variation-reporting/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Bpipe" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CPAT_header_tab" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GRCh38_p13_genome_fa_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pfam-A_hmm_dat_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pfam-A_hmm_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "active_site_dat_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_annotation_gtf_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_lncRNA_transcripts_fa_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_pc_transcripts_fa_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_transcripts_fa_gz" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Mitogenome-assembly-VGP0/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-atac-cutandrun_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-atac-cutandrun_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-pe_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-pe_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-sr_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-sr_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/612" ; + schema1:name "Zafran Hussain Shah" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/galaxyproject/iwc/actions/workflows/workflow_test.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s02r2b54 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=svd_lanczos COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=31198136 SLURM_JOB_USER=bsc19756 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=4486 SLURM_SUBMIT_DIR=/gpfs/scratch/bsc19/bsc19756/Variable_nsv_lanczos SLURM_JOB_NODELIST=s02r2b[54,56,58] SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48(x3) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login1 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=3 COMPSS_MASTER_NODE=s02r2b54 COMPSS_WORKER_NODES= s02r2b56 s02r2b58" ; + schema1:endTime "2023-12-19T11:21:53+00:00" ; + schema1:instrument ; + schema1:name "COMPSs lanczos_dislib_version.py execution at marenostrum4 with JOB_ID 31198136" ; + schema1:result ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 4624 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 487 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 898 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "David F. Nieuwenhuijse" . + + a schema1:Person ; + schema1:name "Alexey Sokolov" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Clinical Bioinformatics Unit" . + + a schema1:Person ; + schema1:name "Pathology Department" . + + a schema1:Person ; + schema1:name "Eramus Medical Center" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Roberto Melero" . + + a schema1:Person ; + schema1:name "Marta Martinez" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/83" ; + schema1:name "Carlos Oscar Sorzano Sanchez" . 
+ + a schema1:ComputerLanguage ; + schema1:name "Scipion" ; + schema1:url . + + a schema1:Person ; + schema1:name "Tim Dudgeon" . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:Person ; + schema1:name "Gianmauro Cuccuru" . + + a schema1:Person ; + schema1:name "Björn Grüning" . + + a schema1:Person ; + schema1:name "Rachael Skyner" . + + a schema1:Person ; + schema1:name "Jack Scantlebury" . + + a schema1:Person ; + schema1:name "Susan Leung" . + + a schema1:Person ; + schema1:name "Frank von Delft" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bits_set" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blacklist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bwt_algorithm" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chromosome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_lratio" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_exomeDepth" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_manta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_bf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_exome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_mapping_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "read_group" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_bwa_mem" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastqc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_samtools" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "html_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "json_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_all" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_bam_filtering" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_codex" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_exomedepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_manta" . + + a schema1:Person ; + schema1:name "Daniel López-López" . + + a schema1:ComputerLanguage ; + schema1:name "Bpipe" ; + schema1:url . + + a schema1:Person ; + schema1:name "Dannon Baker" . + + a schema1:Person ; + schema1:name "Björn Grüning" . + + a schema1:Person ; + schema1:name "Delphine Larivière" . + + a schema1:Person ; + schema1:name "Gildas Le Corguillé" . + + a schema1:Person ; + schema1:name "Andrew Lonie" . + + a schema1:Person ; + schema1:name "Nicholas Keener" . + + a schema1:Person ; + schema1:name "Sergei Kosakovsky Pond" . + + a schema1:Person ; + schema1:name "Wolfgang Maier" . + + a schema1:Person ; + schema1:name "Anton Nekrutenko" . + + a schema1:Person ; + schema1:name "James Taylor" . + + a schema1:Person ; + schema1:name "Steven Weaver" . + + a schema1:Person ; + schema1:name "Marius van den Beek" . + + a schema1:Person ; + schema1:name "Dave Bouvier" . + + a schema1:Person ; + schema1:name "John Chilton" . + + a schema1:Person ; + schema1:name "Nate Coraor" . + + a schema1:Person ; + schema1:name "Frederik Coppens" . + + a schema1:Person ; + schema1:name "Bert Droesbeke" . + + a schema1:Person ; + schema1:name "Ignacio Eguinoa" . + + a schema1:Person ; + schema1:name "Simon Gladman" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "IndexName" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "alignments_are_sorted" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bankfile" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "barcode_tag" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "base_correction" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bonferroni" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bq2_handling" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "call_indels" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cancer" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cancerSamples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "canon" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "classic" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "comment" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "count" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "csvFile" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "def_alt_bq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "def_alt_jq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "defqual" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "del_baq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "disable_trim_poly_g" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "duplicate_scoring_strategy" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "empty_text" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_source_qual" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exclude_unmapped" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "extractFields" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "filterInterval" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "force_polyg_tail_trimming" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "formatEff" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "geneId" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "genome_reference" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hgvs" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "html_report_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ignore_vcf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "illumina_1_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "importGenome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "interval" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "keepflags" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lof" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "max_depth_cov" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "max_mapping_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_alt_bq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_alt_jq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_bq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_cov" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_jq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_length_required" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_mq" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "motif" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "nextProt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "noGenome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "noHgvs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "noLof" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "noMotif" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "noNextProt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "noShiftHgvs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "noStats" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_EffectType" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_baq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_default_filter" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_downstream" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_ext_base_alignment_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_idaq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_intergenic" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_intron" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_mapping_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_upstream" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_utr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "oicr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "onlyProtein" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "onlyReg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "only_indels" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "optical_duplicate_pixel_distance" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "outputFormat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pvalue_cutoff" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qualified_phred_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reads_forward" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reads_reverse" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_in" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "region" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "remove_duplicates" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "replace_non_match" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "separator" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sequenceOntology" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sort_order" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "spliceRegionExonSize" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "spliceRegionIntronMax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "spliceRegionIntronMin" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "spliceSiteSize" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "strict" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_lf_call" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "transcripts" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "udLength" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "unqualified_phred_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "use_orphan" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "validation_stringency" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "validation_stringency_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "multiqc_fastp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "multiqc_markdups" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "multiqc_samtoolsstats" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "out_snpsift" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "statsFile_snpeff" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stats_bam" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "All the Sarek team" . + + a schema1:Person ; + schema1:name "nf-core comunity and people in the IMPaCT-Data project." . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/parallel-accession-download/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-atac-cutandrun_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-atac-cutandrun_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-pe_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-pe_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-sr_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-sr_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Muon Spectroscopy Computational Project" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Copper-out.cell" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Copper.castep" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Copper.den_fmt" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "endpoint" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "query_file" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mutations_list" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step00_cat_pdb_input_structure2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step00_cat_pdb_output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_reduce_remove_hydrogens_input_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_reduce_remove_hydrogens_output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step100_make_ndx_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step100_make_ndx_output_ndx_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_mdrun_min_output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_mdrun_min_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_mdrun_min_output_log_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_mdrun_min_output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_grompp_nvt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_grompp_nvt_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_mdrun_nvt_output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_mdrun_nvt_output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_mdrun_nvt_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_mdrun_nvt_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_mdrun_nvt_output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_grompp_npt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_grompp_npt_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_mdrun_npt_output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_mdrun_npt_output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_mdrun_npt_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_mdrun_npt_output_log_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_mdrun_npt_output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_grompp_md_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_grompp_md_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_mdrun_md_output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_mdrun_md_output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_mdrun_md_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_mdrun_md_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_mdrun_md_output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_gmx_image1_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_gmx_image1_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_gmx_image2_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_gmx_image2_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_gmx_trjconv_str_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_gmx_trjconv_str_output_str_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_extract_molecule_output_molecule_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_gmx_energy_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_gmx_energy_output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_gmx_rgyr_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_gmx_rgyr_output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_rmsd_first_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_rmsd_first_output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_rmsd_exp_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_rmsd_exp_output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_grompp_md_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_grompp_md_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_fix_side_chain_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_mutate_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_pdb2gmx_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_pdb2gmx_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_pdb2gmx_output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_editconf_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_editconf_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_solvate_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_solvate_output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_grompp_genion_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_grompp_genion_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_genion_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_genion_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_genion_output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_grompp_min_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_grompp_min_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Collected Simulation Data" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Sample" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sra-manifest-to-concatenated-fastqs/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Milad Miladi" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Milad Miladi" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fast5-Signals-Raw.tar.gz" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/fragment-based-docking-scoring/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "RECETOX SpecDat" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/753" ; + schema1:name "Zargham Ahmad" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CPAT_header_tab" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GRCh38_p13_genome_fa_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pfam-A_hmm_dat_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pfam-A_hmm_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "active_site_dat_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_annotation_gtf_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_lncRNA_transcripts_fa_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_pc_transcripts_fa_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_transcripts_fa_gz" . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s01r1b41 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=kmeans_prov COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 COMPSS_BINDINGS_DEBUG=1 SLURM_JOB_ID=30650595 SLURM_JOB_USER=bsc19057 COMPSS_HOME=/apps/COMPSs/3.3.pr/ SLURM_JOB_UID=2952 SLURM_SUBMIT_DIR=/gpfs/home/bsc19/bsc19057/COMPSs-DP/tutorial_apps/java/kmeans SLURM_JOB_NODELIST=s01r1b41 SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48 COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=1 COMPSS_MASTER_NODE=s01r1b41 COMPSS_WORKER_NODES=" ; + schema1:endTime "2023-11-10T13:57:34+00:00" ; + schema1:instrument ; + schema1:name "COMPSs KMeans.java execution at marenostrum4 with JOB_ID 30650595" ; + schema1:result ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.2.rc2311" . + + a schema1:MediaObject ; + schema1:contentSize 811 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 1802 ; + schema1:description "Auxiliary File" ; + schema1:name "Readme" . + + a schema1:MediaObject ; + schema1:contentSize 10022 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "application/java-archive" ; + schema1:name "kmeans.jar" . + + a schema1:MediaObject ; + schema1:contentSize 4349 ; + schema1:description "Auxiliary File" ; + schema1:name "pom.xml" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 6565 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "KMeansDataSet.java" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1638 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "KMeansItf.java" . + + a schema1:MediaObject ; + schema1:contentSize 289 ; + schema1:description "Auxiliary File" ; + schema1:name "project.xml" . + + a schema1:MediaObject ; + schema1:contentSize 983 ; + schema1:description "Auxiliary File" ; + schema1:name "resources.xml" . + + a schema1:MediaObject ; + schema1:contentSize 189 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 2801 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_Velocyto-on10X-filtered-barcodes_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_Velocyto-on10X-filtered-barcodes_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/velocyto/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_Velocyto-on10X-from-bundled_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_Velocyto-on10X-from-bundled_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/velocyto/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Darwin MacBook-Pro-Raul-2018.local 23.4.0 Darwin Kernel Version 23.4.0: Fri Mar 15 00:11:05 PDT 2024; root:xnu-10063.101.17~1/RELEASE_X86_64 x86_64" ; + schema1:endTime "2024-04-30T10:49:33+00:00" ; + schema1:instrument ; + schema1:name "COMPSs matmul_files.py execution at MacBook-Pro-Raul-2018.local" ; + schema1:object , + , + , + , + , + , + , + , + , + , + , + ; + schema1:result , + , + , + , + ; + schema1:startTime "2024-04-30T10:49:25+00:00" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3.rc2402" . + + a schema1:MediaObject ; + schema1:contentSize 244 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1627 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "matmul_tasks.py" . + + a schema1:MediaObject ; + schema1:contentSize 170 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2279 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "matmul_reproducibility.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:Person ; + schema1:name "Special thanks to Sujeevan Ratnasingham and the team at CBG for the creation of the BCDM data exchange format that this pipeline operates on" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s18r2b06 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=wordcount_files COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 COMPSS_BINDINGS_DEBUG=1 SLURM_JOB_ID=30498188 SLURM_JOB_USER=bsc19057 COMPSS_HOME=/apps/COMPSs/3.3.pr/ SLURM_JOB_UID=2952 SLURM_SUBMIT_DIR=/gpfs/home/bsc19/bsc19057/COMPSs-DP/tutorial_apps/python/wordcount SLURM_JOB_NODELIST=s18r2b06 SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48 COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=1 COMPSS_MASTER_NODE=s18r2b06 COMPSS_WORKER_NODES=" ; + schema1:endTime "2023-11-02T10:55:02+00:00" ; + schema1:instrument ; + schema1:name "COMPSs wordcount.py execution at marenostrum4 with JOB_ID 30498188" ; + schema1:object , + , + , + ; + schema1:result ; + schema1:subjectOf "https://userportal.bsc.es/" . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.2.rc2310" . + + a schema1:MediaObject ; + schema1:contentSize 569 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 778 ; + schema1:description "Auxiliary File" ; + schema1:name "README" . + + a schema1:MediaObject ; + schema1:contentSize 4486 ; + schema1:description "Auxiliary File" ; + schema1:name "pom.xml" . + + a schema1:MediaObject ; + schema1:contentSize 284 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 1553 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/445" ; + schema1:name "Diego De Panis" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_grompp_npt_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_grompp_md_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_mdrun_md_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_pdb_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_editconf_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_grompp_genion_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_genion_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_grompp_min_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_make_ndx_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_grompp_nvt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "whole workflow output" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject ; + schema1:contentSize 177 . + + a schema1:MediaObject ; + schema1:contentSize 88 . + + a schema1:MediaObject ; + schema1:contentSize 274 . 
+ + a schema1:MediaObject ; + schema1:contentSize 994 . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 21291 . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/cutandrun/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Nathaniel Butterworth" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/141" ; + schema1:name "Tracy Chew" . + + a schema1:ComputerLanguage ; + schema1:name "Shell Script" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step00_extract_molecule_input_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step00_extract_molecule_output_molecule_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_cat_pdb_input_structure2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_cat_pdb_output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_sander_mdrun_energy_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_sander_mdrun_energy_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_sander_mdrun_energy_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_sander_mdrun_energy_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_process_minout_energy_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_process_minout_energy_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_sander_mdrun_warm_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_sander_mdrun_warm_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_sander_mdrun_warm_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_sander_mdrun_warm_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_process_mdout_warm_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_process_mdout_warm_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_sander_mdrun_nvt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_sander_mdrun_nvt_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_sander_mdrun_nvt_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_sander_mdrun_nvt_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_process_mdout_nvt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_process_mdout_nvt_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_sander_mdrun_npt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_sander_mdrun_npt_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_sander_mdrun_npt_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_sander_mdrun_npt_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_process_mdout_npt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_process_mdout_npt_output_dat_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_sander_mdrun_md_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_sander_mdrun_md_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_sander_mdrun_md_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_sander_mdrun_md_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_rmsd_first_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_rmsd_first_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_pdb4amber_run_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_rmsd_exp_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_rmsd_exp_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_cpptraj_rgyr_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_cpptraj_rgyr_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_cpptraj_image_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_cpptraj_image_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_leap_gen_top_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_leap_gen_top_input_frcmod_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_leap_gen_top_input_lib_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_leap_gen_top_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_leap_gen_top_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_leap_gen_top_output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_sander_mdrun_minH_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_sander_mdrun_minH_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_sander_mdrun_minH_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_sander_mdrun_minH_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_process_minout_minH_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_process_minout_minH_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_sander_mdrun_min_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_sander_mdrun_min_output_log_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_sander_mdrun_min_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_sander_mdrun_min_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_process_minout_min_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_process_minout_min_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_amber_to_pdb_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_leap_solvate_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_leap_solvate_input_frcmod_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_leap_solvate_input_lib_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_leap_solvate_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_leap_solvate_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_leap_solvate_output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_leap_add_ions_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_leap_add_ions_input_frcmod_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_leap_add_ions_input_lib_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_leap_add_ions_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_leap_add_ions_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_leap_add_ions_output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_molecule_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_fpocket_select_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_fpocket_select_input_pockets_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_fpocket_select_output_pocket_pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_fpocket_select_output_pocket_pqr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_box_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_box_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_babel_convert_prep_lig_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_babel_convert_prep_lig_input_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_babel_convert_prep_lig_output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_str_check_add_hydrogens_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_str_check_add_hydrogens_input_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_str_check_add_hydrogens_output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_autodock_vina_run_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_autodock_vina_run_output_pdbqt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_babel_convert_pose_pdb_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_babel_convert_pose_pdb_output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pocket_pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pocket_pqr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdbqt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a schema1:Person ; + schema1:name "" . 
+ + a schema1:Person ; + schema1:name "Jesse van Dam" . + + a schema1:Person ; + schema1:name "Peter Schaap" . + + a schema1:Person ; + schema1:name "" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reverse read length" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Forward primer" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "forward reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reference database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reverse read length" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reverse primer" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reverse reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Sample name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "files_to_folder_fastqc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "files_to_folder_ngtax" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output Destination" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Contamination reference file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "identifier used" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Keep mapped reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken2 database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Maximum memory in MB" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Nanopore reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CWL base step number" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Number of threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filtered nanopore reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filtering reports folder" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "COMPSs" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_36" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_37" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_40" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myamber_to_pdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_image.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_rgyr.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_rms.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.crd" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb4amber_run.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_mdout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_minout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cpout" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cprst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.log" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.mdinfo" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.netcdf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.rst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.trj" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:Person ; + schema1:name "ABRomics" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/bacterial_genome_annotation/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:Person ; + schema1:name "ERGA" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lineage" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "assembly_gfa" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "consensus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output1" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/445" ; + schema1:name "Diego De Panis" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/galaxyproject/iwc/actions/workflows/workflow_test.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "champbloc_ivr.csv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "champbloc_qecb.csv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ficheterrain.csv" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Dannon Baker" . 
+ + a schema1:Person ; + schema1:name "Björn Grüning" . + + a schema1:Person ; + schema1:name "Delphine Larivière" . + + a schema1:Person ; + schema1:name "Gildas Le Corguillé" . + + a schema1:Person ; + schema1:name "Andrew Lonie" . + + a schema1:Person ; + schema1:name "Nicholas Keener" . + + a schema1:Person ; + schema1:name "Sergei Kosakovsky Pond" . + + a schema1:Person ; + schema1:name "Wolfgang Maier" . + + a schema1:Person ; + schema1:name "Anton Nekrutenko" . + + a schema1:Person ; + schema1:name "James Taylor" . + + a schema1:Person ; + schema1:name "Steven Weaver" . + + a schema1:Person ; + schema1:name "Marius van den Beek" . + + a schema1:Person ; + schema1:name "Dave Bouvier" . + + a schema1:Person ; + schema1:name "John Chilton" . + + a schema1:Person ; + schema1:name "Nate Coraor" . + + a schema1:Person ; + schema1:name "Frederik Coppens" . + + a schema1:Person ; + schema1:name "Bert Droesbeke" . + + a schema1:Person ; + schema1:name "Ignacio Eguinoa" . + + a schema1:Person ; + schema1:name "Simon Gladman" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "poterlowicz-lab" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "poterlowicz-lab" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Normal_r1.fastq.gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Normal_r2.fastq.gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Tumor_r1.fastq.gz" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Tumor_r2.fastq.gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "capture_targets_chr5_12_17.bed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "out_chr_sorted_circos" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "out_ratio_log2_circos" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_png" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Occurrence.csv" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:Dataset . 
+ + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "barcodes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "binning_method" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "block_size" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "clip_max" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "clusterDataR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "featureSelectionDataR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "features" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "filterDataR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "findNeighborsR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "find_markersR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "k" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "loadDataR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "loess_span" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "margin" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "matrix" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "minCells" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "minFeatures" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "nCountRNAmax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "nCountRNAmin" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "nFeatureRNAmax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "nFeatureRNAmin" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "neighbors_method" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "normalization_method" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "normalizeDataR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "num_bin" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "num_features" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pattern" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "percentMTmax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "percentMTmin" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "projectName" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "runPCAR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "runTSNER" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "runUmapR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "running_step" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "scaleDataR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "scale_factor" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "selection_method" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "verbose" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "clusterDataOutput" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "filterDataOutput" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "filterDataPlot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "findFeaturesOutput" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "findFeaturesPlot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "findMarkersOutput" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "findNeighborsOutput" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "loadDataOutput" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "loadDataPlot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "normalizeDataOutput" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "runPCAOutput" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "runPCAPlot1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "runPCAPlot2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "runPCAPlot3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "runTSNEOutput" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "runUMAPOutput" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "runUMAPOutputPlot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "scaleDataOutput" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/galaxyproject/iwc/actions/workflows/workflow_test.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "RMD" ; + schema1:name "R markdown" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Amplicons" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:Person ; + schema1:name "Laure Quintric" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/145" ; + schema1:name "Alexandre Cormier" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/146" ; + schema1:name "Patrick Durand" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/148" ; + schema1:name "Laura Leroi" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Data_R1.fastq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Data_R2.fastq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken_taxonomy_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken_taxonomy_table_modified" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Taxonomic_prediction_report" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/protein-ligand-complex-parameterization/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/159" ; + schema1:name "Oliver Woolland" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_extract_model_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_extract_model_input_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_extract_model_output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_cpptraj_convert_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_cpptraj_convert_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_bd_run_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_bd_run_output_crd_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_bd_run_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_cpptraj_rms_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_cpptraj_rms_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_dmd_run_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_dmd_run_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_dmd_run_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_cpptraj_rms_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_cpptraj_rms_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_nma_run_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_nma_run_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_nma_run_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_cpptraj_rms_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_cpptraj_convert_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_cpptraj_convert_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_nolb_nma_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_nolb_nma_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_cpptraj_rms_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_extract_chain_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_extract_chain_output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_cpptraj_convert_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_cpptraj_convert_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_imod_imode_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_imod_imode_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_imod_imc_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_imod_imc_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_cpptraj_rms_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_cpptraj_convert_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_cpptraj_convert_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step26_make_ndx_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step26_make_ndx_output_ndx_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step27_gmx_cluster_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step27_gmx_cluster_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step28_cpptraj_rms_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step28_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step28_cpptraj_rms_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step29_pcz_zip_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step29_pcz_zip_output_pcz_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_cpptraj_mask_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_cpptraj_mask_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step30_pcz_zip_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step30_pcz_zip_output_pcz_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step31_pcz_info_output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step32_pcz_evecs_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step32_pcz_evecs_output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step33_pcz_animate_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step33_pcz_animate_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step34_cpptraj_convert_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step34_cpptraj_convert_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step35_pcz_bfactor_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step35_pcz_bfactor_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step35_pcz_bfactor_output_pdb_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step36_pcz_hinges_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step36_pcz_hinges_output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step37_pcz_hinges_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step37_pcz_hinges_output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step38_pcz_hinges_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step38_pcz_hinges_output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step39_pcz_stiffness_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step39_pcz_stiffness_output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_cpptraj_mask_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_cpptraj_mask_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step40_pcz_collectivity_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step40_pcz_collectivity_output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_concoord_disco_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_concoord_dist_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_concoord_dist_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_concoord_dist_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_concoord_dist_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_concoord_disco_output_bfactor_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_concoord_disco_output_rmsd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_concoord_disco_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_cpptraj_rms_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_cpptraj_convert_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_cpptraj_convert_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_prody_anm_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_prody_anm_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_cpptraj_rms_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_ndx_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pcz_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pcz_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rmsd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_bfactor_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:name "Tim Dudgeon" . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:Person ; + schema1:name "Gianmauro Cuccuru" . + + a schema1:Person ; + schema1:name "Björn Grüning" . + + a schema1:Person ; + schema1:name "Rachael Skyner" . + + a schema1:Person ; + schema1:name "Jack Scantlebury" . + + a schema1:Person ; + schema1:name "Susan Leung" . + + a schema1:Person ; + schema1:name "Frank von Delft" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Martin Hölzer" . + + a schema1:Person ; + schema1:name "Alexandre Almeida" . + + a schema1:Person ; + schema1:name "Guillermo Rangel-Pineros and Ekaterina Sakharova" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Illumina beadchip array file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Clinical data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Microarray data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Additional network data table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GSEA edge table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GSEA node table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Module eigengene edge data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Module eigengene node data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Network data table" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "COMPSs" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Genome/bin" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output Destination (prov only)" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Identifier used" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "solver" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "number of threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CarveMe GEMs folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GEMstats" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MEMOTE outputs folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Protein files folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "SMETANA output" . + + a schema1:Person ; + schema1:name "ABRomics" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/bacterial-genome-assembly/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/ChIPseq_PE/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_COVID-19-PE-ARTIC-ILLUMINA_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_COVID-19-PE-ARTIC-ILLUMINA_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Sample" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "barcodes.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "genes.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "matrix.mtx" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bits_set" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blacklist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bwt_algorithm" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chromosome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_lratio" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_exomeDepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_manta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_bf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "generate_bwa_indexes" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_exome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_mapping_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "read_group" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_amb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_ann" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_bwt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fai" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_pac" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_sa" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_bwa_mem" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastqc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_samtools" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "html_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "json_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_all" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_bam_filtering" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_exomedepth" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_manta" . + + a schema1:Person ; + schema1:name "Daniel López-López" . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/generic-variant-calling-wgs-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject ; + schema1:contentSize 183 . + + a schema1:MediaObject ; + schema1:contentSize 2110 . 
+ + a schema1:MediaObject ; + schema1:contentSize 572 . + + a schema1:MediaObject ; + schema1:contentSize 1442 . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 44222 . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:name "Tim Dudgeon" . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:Person ; + schema1:name "Gianmauro Cuccuru" . + + a schema1:Person ; + schema1:name "Björn Grüning" . + + a schema1:Person ; + schema1:name "Rachael Skyner" . + + a schema1:Person ; + schema1:name "Jack Scantlebury" . + + a schema1:Person ; + schema1:name "Susan Leung" . + + a schema1:Person ; + schema1:name "Frank von Delft" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s10r1b54 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=sparseLU-java-DP SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=29155949 SLURM_JOB_USER=bsc19057 COMPSS_HOME=/apps/COMPSs/3.2.pr/ SLURM_JOB_UID=2952 SLURM_SUBMIT_DIR=/gpfs/home/bsc19/bsc19057/COMPSs-DP/tutorial_apps/java/sparseLU SLURM_JOB_NODELIST=s10r1b[54,56] SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48(x2) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=2 COMPSS_MASTER_NODE=s10r1b54 COMPSS_WORKER_NODES= s10r1b56" ; + schema1:endTime "2023-06-23T13:59:37+00:00" ; + schema1:instrument ; + schema1:name "COMPSs SparseLU.java execution at marenostrum4 with JOB_ID 29155949" ; + schema1:object , + , + , + , + , + , + , + , + , + , + , + ; + schema1:result , + , + , + , 
+ , + , + , + , + , + , + , + , + ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.1.rc2305" . + + a schema1:MediaObject ; + schema1:contentSize 1584 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 1935 ; + schema1:description "Auxiliary File" ; + schema1:name "Readme" . + + a schema1:MediaObject ; + schema1:contentSize 28758 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "application/java-archive" ; + schema1:name "sparseLU.jar" . + + a schema1:MediaObject ; + schema1:contentSize 4454 ; + schema1:description "Auxiliary File" ; + schema1:name "pom.xml" . + + a schema1:MediaObject ; + schema1:contentSize 2628 ; + schema1:description "Auxiliary File" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:MediaObject ; + schema1:contentSize 3304 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLU.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 4840 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "SparseLU.java" . + + a schema1:MediaObject ; + schema1:contentSize 2430 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLUImpl.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 4114 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "SparseLUImpl.java" . 
+ + a schema1:MediaObject ; + schema1:contentSize 808 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLUItf.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1899 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "SparseLUItf.java" . + + a schema1:MediaObject ; + schema1:contentSize 4135 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "Block.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 5589 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Block.java" . + + a schema1:MediaObject ; + schema1:contentSize 4682 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLU.class" . + + a schema1:MediaObject ; + schema1:contentSize 1310 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLUImpl.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 2431 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "SparseLUImpl.java" . + + a schema1:MediaObject ; + schema1:contentSize 904 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLUItf.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1808 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "SparseLUItf.java" . + + a schema1:MediaObject ; + schema1:contentSize 2991 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "Block.class" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 4345 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Block.java" . + + a schema1:MediaObject ; + schema1:contentSize 3403 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLU.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 4740 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "SparseLU.java" . + + a schema1:MediaObject ; + schema1:contentSize 816 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLUItf.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1529 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "SparseLUItf.java" . + + a schema1:MediaObject ; + schema1:contentSize 289 ; + schema1:description "Auxiliary File" ; + schema1:name "project.xml" . + + a schema1:MediaObject ; + schema1:contentSize 983 ; + schema1:description "Auxiliary File" ; + schema1:name "resources.xml" . + + a schema1:MediaObject ; + schema1:contentSize 695 ; + schema1:description "COMPSs command line execution command (runcompss), including flags and parameters passed" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_command_line_arguments.txt" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_sander_mdrun_eq3_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_sander_mdrun_eq3_input_mdin_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_sander_mdrun_eq3_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_sander_mdrun_eq3_output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_sander_mdrun_eq3_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_sander_mdrun_eq3_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_process_minout_eq3_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_process_minout_eq3_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_sander_mdrun_eq4_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_sander_mdrun_eq4_input_mdin_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_sander_mdrun_eq4_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_sander_mdrun_eq4_output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_sander_mdrun_eq4_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_sander_mdrun_eq4_output_traj_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_process_minout_eq4_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_process_minout_eq4_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_sander_mdrun_eq5_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_sander_mdrun_eq5_input_mdin_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_sander_mdrun_eq5_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_sander_mdrun_eq5_output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_sander_mdrun_eq5_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_sander_mdrun_eq5_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_process_minout_eq5_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_process_minout_eq5_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_sander_mdrun_eq6_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_sander_mdrun_eq6_input_mdin_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_sander_mdrun_eq6_output_log_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_sander_mdrun_eq6_output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_sander_mdrun_eq6_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_sander_mdrun_eq6_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_process_mdout_eq6_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_process_mdout_eq6_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_sander_mdrun_eq7_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_sander_mdrun_eq7_input_mdin_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_sander_mdrun_eq7_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_sander_mdrun_eq7_output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_sander_mdrun_eq7_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_sander_mdrun_eq7_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_process_mdout_eq7_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_process_mdout_eq7_output_dat_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_leap_gen_top_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_leap_gen_top_input_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_leap_gen_top_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_leap_gen_top_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_leap_gen_top_output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_sander_mdrun_eq8_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_sander_mdrun_eq8_input_mdin_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_sander_mdrun_eq8_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_sander_mdrun_eq8_output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_sander_mdrun_eq8_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_sander_mdrun_eq8_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_process_mdout_eq8_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_process_mdout_eq8_output_dat_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_sander_mdrun_eq9_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_sander_mdrun_eq9_input_mdin_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_sander_mdrun_eq9_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_sander_mdrun_eq9_output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_sander_mdrun_eq9_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_sander_mdrun_eq9_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_process_mdout_eq9_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_process_mdout_eq9_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_sander_mdrun_eq10_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_sander_mdrun_eq10_input_mdin_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_sander_mdrun_eq10_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_sander_mdrun_eq10_output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_sander_mdrun_eq10_output_rst_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_sander_mdrun_eq10_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step25_process_mdout_eq10_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step25_process_mdout_eq10_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step26_sander_mdrun_md_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step26_sander_mdrun_md_input_mdin_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step26_sander_mdrun_md_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step26_sander_mdrun_md_output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step26_sander_mdrun_md_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step26_sander_mdrun_md_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_leap_solvate_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_leap_solvate_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_leap_solvate_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_leap_solvate_output_top_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_leap_add_ions_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_leap_add_ions_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_leap_add_ions_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_leap_add_ions_output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_cpptraj_randomize_ions_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_cpptraj_randomize_ions_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_cpptraj_randomize_ions_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_parmed_hmassrepartition_output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_sander_mdrun_eq1_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_sander_mdrun_eq1_input_mdin_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_sander_mdrun_eq1_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_sander_mdrun_eq1_output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_sander_mdrun_eq1_output_rst_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_sander_mdrun_eq1_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_process_minout_eq1_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_process_minout_eq1_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_sander_mdrun_eq2_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_sander_mdrun_eq2_input_mdin_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_sander_mdrun_eq2_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_sander_mdrun_eq2_output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_sander_mdrun_eq2_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_sander_mdrun_eq2_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_process_mdout_eq2_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_process_mdout_eq2_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:name "Tim Dudgeon" . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:Person ; + schema1:name "Gianmauro Cuccuru" . + + a schema1:Person ; + schema1:name "Björn Grüning" . + + a schema1:Person ; + schema1:name "Rachael Skyner" . + + a schema1:Person ; + schema1:name "Jack Scantlebury" . + + a schema1:Person ; + schema1:name "Susan Leung" . + + a schema1:Person ; + schema1:name "Frank von Delft" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . + + a schema1:Person ; + schema1:name "Dannon Baker" . + + a schema1:Person ; + schema1:name "Björn Grüning" . + + a schema1:Person ; + schema1:name "Delphine Larivière" . + + a schema1:Person ; + schema1:name "Gildas Le Corguillé" . + + a schema1:Person ; + schema1:name "Andrew Lonie" . + + a schema1:Person ; + schema1:name "Nicholas Keener" . + + a schema1:Person ; + schema1:name "Sergei Kosakovsky Pond" . + + a schema1:Person ; + schema1:name "Wolfgang Maier" . + + a schema1:Person ; + schema1:name "Anton Nekrutenko" . + + a schema1:Person ; + schema1:name "James Taylor" . + + a schema1:Person ; + schema1:name "Steven Weaver" . + + a schema1:Person ; + schema1:name "Marius van den Beek" . + + a schema1:Person ; + schema1:name "Dave Bouvier" . + + a schema1:Person ; + schema1:name "John Chilton" . + + a schema1:Person ; + schema1:name "Nate Coraor" . + + a schema1:Person ; + schema1:name "Frederik Coppens" . + + a schema1:Person ; + schema1:name "Bert Droesbeke" . + + a schema1:Person ; + schema1:name "Ignacio Eguinoa" . + + a schema1:Person ; + schema1:name "Simon Gladman" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/141" ; + schema1:name "Tracy Chew" . + + a schema1:ComputerLanguage ; + schema1:name "Shell Script" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bamqc_report_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "combined_coverage" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "combined_multifasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ivar_consensus_genome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ivar_variants_tabular" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "primer_trimmed_bam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "snpeff_annotated_vcf" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/560" ; + schema1:name "Akshay Akshay" . + + a schema1:ComputerLanguage ; + schema1:alternateName "WDL" ; + schema1:identifier ; + schema1:name "Workflow Description Language" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chromosome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq_files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gqb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "known_indels_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "known_sites_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "readgroup_str" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_genome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sample_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gvcf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "metrics" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-sr/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "ABRomics" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/quality-and-contamination-control/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/atacseq/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Peter van Heusden" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/galaxyproject/iwc/actions/workflows/workflow_test.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/galaxyproject/iwc/actions/workflows/workflow_test.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/assembly-with-flye/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/galaxyproject/iwc/actions/workflows/workflow_test.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/cutandrun/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BridgeDB cache" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Differential gene expression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Differential miRNA expression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING identifier mapping" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mRNA expression correlation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of correlation edges to use, USE FOR TESTING ONLY" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "miRNA mRNA correlation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "miRTarBase data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of miRTarBase edges to use, USE FOR TESTING ONLY" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of cores to let MOGAMUN use" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of generation to let the genetic algorithm in MOGAMUN evolve" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The maximum size of subnetworks during postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "the minimum Jaccard Index overlap between two subnetworks to allow them to be merged" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The minimum size of subnetworks during postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of parallel runs to let MOGAMUN do, these parallel runs are combined in postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The minimum score for a STRING edge to be included in the analysis" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of STRING edges to use, USE FOR TESTING ONLY" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Variant Burden" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "full_graph" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "subnetworks" . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:Person ; + schema1:name "ERGA" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/445" ; + schema1:name "Diego De Panis" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_molecule_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bits_set" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blacklist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bwt_algorithm" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chromosome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_lratio" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_exomeDepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_manta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_bf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_exome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_q" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_mapping_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "read_group" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_bwa_mem" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastqc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_samtools" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "html_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "json_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_all" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_bam_filtering" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_exomedepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_manta" . + + a schema1:Person ; + schema1:name "Daniel López-López" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "List of mutations" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Collected Simulation Data" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Gareth Price" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-pe-illumina-wgs-variant-calling/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/406" ; + schema1:name "Andrey Prjibelski" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/atacseq/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rRNA filtering" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "forward reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gzip compression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "identifier used" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken2 database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "memory usage (mb)" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pacbio reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reverse reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "number of threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BAM files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "FASTQC" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filtered reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "KRAKEN2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MetaBat2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "QUAST" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "SPADES" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/atacseq/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-variation-reporting/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Scaffolding-HiC-VGP8/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Payam Emami" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sra-manifest-to-concatenated-fastqs/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:name "Gareth Price" . + + a schema1:MediaObject ; + schema1:description "Workflow documentation" . + + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . 
+ + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/203" ; + schema1:name "Luiz Gadelha" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/371" ; + schema1:name "Lucas Cruz" . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux gs05r2b06 5.14.0-284.30.1.el9_2.x86_64 #1 SMP PREEMPT_DYNAMIC Fri Aug 25 09:13:12 EDT 2023 x86_64 x86_64 x86_64 GNU/Linux" ; + schema1:endTime "2024-06-18T13:22:31+00:00" ; + schema1:instrument ; + schema1:name "COMPSs SparseLU.java execution at marenostrum5 with JOB_ID 3166653" ; + schema1:object , + , + , + , + , + , + , + , + , + , + , + ; + schema1:result , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:startTime "2024-06-18T13:21:59+00:00" ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 2208 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 1935 ; + schema1:description "Auxiliary File" ; + schema1:name "Readme" . + + a schema1:MediaObject ; + schema1:contentSize 28758 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "application/java-archive" ; + schema1:name "sparseLU.jar" . + + a schema1:MediaObject ; + schema1:contentSize 4454 ; + schema1:description "Auxiliary File" ; + schema1:name "pom.xml" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3304 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLU.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 4840 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "SparseLU.java" . + + a schema1:MediaObject ; + schema1:contentSize 2430 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLUImpl.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 4114 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "SparseLUImpl.java" . + + a schema1:MediaObject ; + schema1:contentSize 808 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLUItf.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1899 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "SparseLUItf.java" . + + a schema1:MediaObject ; + schema1:contentSize 4135 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "Block.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 5589 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Block.java" . + + a schema1:MediaObject ; + schema1:contentSize 4682 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLU.class" . + + a schema1:MediaObject ; + schema1:contentSize 1310 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLUImpl.class" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 2431 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "SparseLUImpl.java" . + + a schema1:MediaObject ; + schema1:contentSize 904 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLUItf.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1808 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "SparseLUItf.java" . + + a schema1:MediaObject ; + schema1:contentSize 2991 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "Block.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 4345 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Block.java" . + + a schema1:MediaObject ; + schema1:contentSize 3403 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLU.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 4740 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "SparseLU.java" . + + a schema1:MediaObject ; + schema1:contentSize 816 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLUItf.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1529 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "SparseLUItf.java" . + + a schema1:MediaObject ; + schema1:contentSize 289 ; + schema1:description "Auxiliary File" ; + schema1:name "project.xml" . + + a schema1:MediaObject ; + schema1:contentSize 983 ; + schema1:description "Auxiliary File" ; + schema1:name "resources.xml" . 
+ + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_marenostrum5_SLURM_JOB_ID_3166653" ; + schema1:contentSize 100 ; + schema1:description "COMPSs console standard error log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-3166653.err" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_marenostrum5_SLURM_JOB_ID_3166653" ; + schema1:contentSize 32821 ; + schema1:description "COMPSs console standard output log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-3166653.out" . + + a schema1:MediaObject ; + schema1:contentSize 192 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 2474 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rnaseq_left_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rnaseq_right_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sampleName" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sars_cov_2_reference_2bit_genome" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sars_cov_2_reference_genome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "indels" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "snps" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/26" ; + schema1:name "Ambarish Kumar" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-cellplex_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-cellplex_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/fastq-to-matrix-10x/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-v3_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-v3_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/fastq-to-matrix-10x/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/cutandrun/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-cellplex_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-cellplex_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/fastq-to-matrix-10x/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-v3_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-v3_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/fastq-to-matrix-10x/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "S2B_MSIL2A_20200626T095029_N0214_R079_T34VFN_20200626T123234_tar" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sentinel2_tiles_world" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "test_parcels_32635" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/49" ; + schema1:name "Anne Fouilloux" . + + a schema1:Person ; + schema1:name "Tim Dudgeon" . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/galaxyproject/iwc/actions/workflows/workflow_test.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s02r1b45 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=31507118 SLURM_JOB_USER=bsc19057 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=2952 SLURM_SUBMIT_DIR=/gpfs/home/bsc19/bsc19057/Tutorial_2024/lysozyme_in_water SLURM_JOB_NODELIST=s02r1b[45-46] SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48(x2) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=2 COMPSS_MASTER_NODE=s02r1b45 COMPSS_WORKER_NODES= s02r1b46" ; + schema1:endTime "2024-01-24T14:28:04+00:00" ; + schema1:instrument ; + schema1:name "COMPSs lysozyme_in_water.py execution at marenostrum4 with JOB_ID 31507118" ; + schema1:object , + , + , + , + , + , + ; + schema1:result , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 2295 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1565 ; + schema1:description "Auxiliary File" ; + schema1:name "launch_full.sh" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 8957 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water_full.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 8868 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water_full_no_mpi.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 10194 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water_full_singularity.py" . + + a schema1:MediaObject ; + schema1:contentSize 755 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 651 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s21r1b48 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=31494645 SLURM_JOB_USER=bsc19057 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=2952 SLURM_SUBMIT_DIR=/gpfs/home/bsc19/bsc19057/Tutorial_2024/clustering_comparison SLURM_JOB_NODELIST=s21r1b48,s23r1b70 SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48(x2) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=2 COMPSS_MASTER_NODE=s21r1b48 COMPSS_WORKER_NODES= s23r1b70" ; + schema1:endTime "2024-01-22T16:06:45+00:00" ; + schema1:instrument ; + schema1:name "COMPSs cc.py execution at marenostrum4 with JOB_ID 31494645" ; + schema1:result ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 8534 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 918 ; + schema1:description "Auxiliary File" ; + schema1:name "launch_cc.sh" . + + a schema1:MediaObject ; + schema1:contentSize 555 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . 
+ + a schema1:MediaObject ; + schema1:contentSize 779 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:Person ; + schema1:name "Milad Miladi" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "draft.fa" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fast5_files.tar.gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reads.fasta" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GROMACS grompp configuration dictionary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GROMACS grompp configuration dictionary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GROMACS mdrun configuration dictionary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Editconf configuration dictionary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GROMACS grompp configuration dictionary" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Genion configuration dictionary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GROMACS grompp configuration dictionary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GROMACS make_ndx configuration dictionary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GROMACS grompp configuration dictionary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "whole workflow output" . + + a schema1:Person ; + schema1:name "Payam Emami" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Scipion" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:Person ; + schema1:name "The workflow is based on the Galaxy Training tutorial Analyses of metagenomics data. Thank you to the Galaxy Australia team" . + + a schema1:Person ; + schema1:name "Igor Makunin and Mike Thang for help with the workflow" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Darwin MacBook-Pro-Raul-2018.local 23.5.0 Darwin Kernel Version 23.5.0: Wed May 1 20:09:52 PDT 2024; root:xnu-10063.121.3~5/RELEASE_X86_64 x86_64" ; + schema1:endTime "2024-06-18T13:37:29+00:00" ; + schema1:instrument ; + schema1:name "COMPSs matmul_directory.py execution at MacBook-Pro-Raul-2018.local" ; + schema1:object , + , + ; + schema1:result , + ; + schema1:startTime "2024-06-18T13:37:20+00:00" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3.rc2402" . + + a schema1:MediaObject ; + schema1:contentSize 244 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 628 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "DIRECTORY_example.yaml" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1966 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "matmul_tasks.py" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.0" . + + a schema1:MediaObject ; + schema1:contentSize 11372 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 918 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "AsyncPlotter.py" . + + a schema1:MediaObject ; + schema1:contentSize 559 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Config.py" . + + a schema1:MediaObject ; + schema1:contentSize 2166 ; + schema1:description "Auxiliary File" ; + schema1:name "IA_Ealloc.c" . + + a schema1:MediaObject ; + schema1:contentSize 12895 ; + schema1:description "Auxiliary File" ; + schema1:name "IA_Kdiag.c" . + + a schema1:MediaObject ; + schema1:contentSize 4171 ; + schema1:description "Auxiliary File" ; + schema1:name "IA_Kdiag.h" . + + a schema1:MediaObject ; + schema1:contentSize 6634 ; + schema1:description "Auxiliary File" ; + schema1:name "IA_R2upd.c" . + + a schema1:MediaObject ; + schema1:contentSize 1699 ; + schema1:description "Auxiliary File" ; + schema1:name "LICENSE" . + + a schema1:MediaObject ; + schema1:contentSize 819 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "LocalCC.py" . + + a schema1:MediaObject ; + schema1:contentSize 29992 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "NLLGrid.py" . + + a schema1:MediaObject ; + schema1:contentSize 3411 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "RosenbergerAlgorithm.py" . 
+ + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "__init__.py" . + + a schema1:MediaObject ; + schema1:contentSize 44 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "_version.py" . + + a schema1:MediaObject ; + schema1:contentSize 9838 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "bp_types.py" . + + a schema1:MediaObject ; + schema1:contentSize 4798 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "bt2eventdata.py" . + + a schema1:MediaObject ; + schema1:contentSize 10085 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "btbb.py" . + + a schema1:MediaObject ; + schema1:contentSize 18148 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "btbb_distrostream.py" . + + a schema1:MediaObject ; + schema1:contentSize 2155 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "btbb_utils.py" . + + a schema1:MediaObject ; + schema1:contentSize 319 ; + schema1:description "Parameters passed as arguments to the COMPSs application through the command line" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_command_line_arguments.txt" . + + a schema1:MediaObject ; + schema1:contentSize 3586 ; + schema1:description "Auxiliary File" ; + schema1:name "configspec.conf" . + + a schema1:MediaObject ; + schema1:contentSize 13075 ; + schema1:description "Auxiliary File" ; + schema1:name "coord_convert.c" . + + a schema1:MediaObject ; + schema1:contentSize 2224 ; + schema1:description "Auxiliary File" ; + schema1:name "coord_convert.h" . 
+ + a schema1:MediaObject ; + schema1:contentSize 4168 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ellipsoid.py" . + + a schema1:MediaObject ; + schema1:contentSize 2419 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "generate_signal.py" . + + a schema1:MediaObject ; + schema1:contentSize 1055 ; + schema1:description "Auxiliary File" ; + schema1:name "geo.h" . + + a schema1:MediaObject ; + schema1:contentSize 1165 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "grid_projection.py" . + + a schema1:MediaObject ; + schema1:contentSize 1259 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "group_triggers.py" . + + a schema1:MediaObject ; + schema1:contentSize 1232 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "init_filter.py" . + + a schema1:MediaObject ; + schema1:contentSize 171 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "input_parameters.py" . + + a schema1:MediaObject ; + schema1:contentSize 519 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lib_names.py" . + + a schema1:MediaObject ; + schema1:contentSize 4049 ; + schema1:description "Auxiliary File" ; + schema1:name "lib_rec_cc.c" . + + a schema1:MediaObject ; + schema1:contentSize 2512 ; + schema1:description "Auxiliary File" ; + schema1:name "lib_rec_filter.c" . + + a schema1:MediaObject ; + schema1:contentSize 1786 ; + schema1:description "Auxiliary File" ; + schema1:name "lib_rec_hos.c" . + + a schema1:MediaObject ; + schema1:contentSize 892 ; + schema1:description "Auxiliary File" ; + schema1:name "lib_rec_rms.c" . + + a schema1:MediaObject ; + schema1:contentSize 9038 ; + schema1:description "Auxiliary File" ; + schema1:name "map_project.c" . 
+ + a schema1:MediaObject ; + schema1:contentSize 269 ; + schema1:description "Auxiliary File" ; + schema1:name "map_project.h" . + + a schema1:MediaObject ; + schema1:contentSize 2282 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "map_project.py" . + + a schema1:MediaObject ; + schema1:contentSize 6658 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "mbf_plot.py" . + + a schema1:MediaObject ; + schema1:contentSize 14692 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "mod_btbb.py" . + + a schema1:MediaObject ; + schema1:contentSize 12605 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "mod_btbb_orig.py" . + + a schema1:MediaObject ; + schema1:contentSize 11696 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "mod_filter_picker.py" . + + a schema1:MediaObject ; + schema1:contentSize 1286 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "mod_group_trigs.py" . + + a schema1:MediaObject ; + schema1:contentSize 7422 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "mod_setup.py" . + + a schema1:MediaObject ; + schema1:contentSize 2493 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "mod_utils.py" . + + a schema1:MediaObject ; + schema1:contentSize 18536 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "plot.py" . + + a schema1:MediaObject ; + schema1:contentSize 2556 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "readNLL_grid.py" . 
+ + a schema1:MediaObject ; + schema1:contentSize 966 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "read_grids.py" . + + a schema1:MediaObject ; + schema1:contentSize 7812 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "read_traces.py" . + + a schema1:MediaObject ; + schema1:contentSize 2053 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "rec_cc.py" . + + a schema1:MediaObject ; + schema1:contentSize 4537 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "rec_filter.py" . + + a schema1:MediaObject ; + schema1:contentSize 1115 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "rec_gauss_filter.py" . + + a schema1:MediaObject ; + schema1:contentSize 2526 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "rec_hos.py" . + + a schema1:MediaObject ; + schema1:contentSize 973 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "rec_memory.py" . + + a schema1:MediaObject ; + schema1:contentSize 1562 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "rec_rms.py" . + + a schema1:MediaObject ; + schema1:contentSize 4069 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "recursive_cc.py" . + + a schema1:MediaObject ; + schema1:contentSize 845 ; + schema1:description "Auxiliary File" ; + schema1:name "rosenberger.c" . + + a schema1:MediaObject ; + schema1:contentSize 4750 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "rosenberger.py" . 
+ + a schema1:MediaObject ; + schema1:contentSize 4202 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "summary_cf.py" . + + a schema1:MediaObject ; + schema1:contentSize 3466 ; + schema1:description "Auxiliary File" ; + schema1:name "util.c" . + + a schema1:MediaObject ; + schema1:contentSize 1896 ; + schema1:description "Auxiliary File" ; + schema1:name "util.h" . + + a schema1:MediaObject ; + schema1:contentSize 47273 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "validate.py" . + + a schema1:MediaObject ; + schema1:contentSize 3605 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "version.py" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "daily_barchart" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "daily_mean_timeseries" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "monthly_mean_timeseries" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stripes_daily_temperatures" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stripes_monthly_temperatures" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bed" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bits_set" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blacklist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bwt_algorithm" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_lratio" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_exomeDepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_manta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_bf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "generate_bwa_indexes" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_exome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_mapping_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "read_group" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_amb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_ann" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_bwt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fai" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_pac" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_sa" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_bwa_mem" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastqc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_samtools" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "html_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "json_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_all" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_bam_filtering" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_exomedepth" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_manta" . + + a schema1:Person ; + schema1:name "Daniel López-López" . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "contamination reference file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "forward reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "identifier used" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "memory usage (mb)" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pacbio reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reverse reads" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filer rRNA" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "number of threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BAM files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CheckM" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "FASTQC" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filtered reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MetaBat2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "QUAST" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "SPADES" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "FilterVariantTranches_resource_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "FilterVariantTranches_resource_2" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "FilterVariantTranches_resource_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "VariantFiltration_cluster" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "VariantFiltration_filter_indel" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "VariantFiltration_filter_name_indel" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "VariantFiltration_filter_name_snp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "VariantFiltration_filter_snp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "VariantFiltration_window" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bcftools_norm_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bcftools_view_include_CNN_filters" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bcftools_view_include_hard_filters" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bcftools_view_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bcftoomls_norm_multiallelics" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bwa_mem_num_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bwa_mem_sec_shorter_split_hits" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gatk_splitintervals_exclude_intervalList" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gatk_splitintervals_include_intervalList" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gatk_splitintervals_scatter_count" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_file_split" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_file_split_fwd_single" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_file_split_rev" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_qc_check" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_trimming_check" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "picard_addorreplacereadgroups_rgpl" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "raw_files_directory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_genome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_fixmate_output_format" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_fixmate_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_flagstat_threads" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_sort_compression_level" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_sort_memory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_sort_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_cigar" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_collapsecigar" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_count" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_fastcompression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_iscram" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_randomseed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readsingroup" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readsinlibrary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readsquality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readswithbits" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readswithoutbits" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readtagtostrip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_region" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_samheader" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_uncompressed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sub_bqsr_interval_padding" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sub_bqsr_known_sites_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sub_bqsr_known_sites_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sub_bqsr_known_sites_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sub_hc_java_options" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sub_hc_native_pairHMM_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_build_over" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_convert_arg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_database_location" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_na_string" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_operation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_otherinfo" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_protocol" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_remove" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_vcfinput" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_compression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_do_not_compress" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_length" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_strigency" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_trim_suffix" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_bcftools_concat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_bcftools_norm_cnn" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_bcftools_norm_hard_filter" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_bcftools_view_filter_cnn" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_bcftools_view_hard_filter" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_paired_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_paired_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_raw_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_single_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_single_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gather_bwa_sam_files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_ApplyBQSR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_CNNScoreVariants" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_FilterVariantTranches" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_HaplotypeCaller" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_bqsr_subworkflow" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_splitintervals" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_picard_addorreplacereadgroups" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_picard_markduplicates" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_picard_markduplicates_metrics" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_fixmate" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_flagstat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_index" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_index_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_sort" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_sort_by_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_view_conversion" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_view_count_total" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_tabix_indels" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_tabix_snps" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_table_annovar_cnn_filtered_avinput" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_table_annovar_cnn_filtered_multianno_txt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_table_annovar_cnn_filtered_multianno_vcf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_table_annovar_hard_filtered_avinput" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_table_annovar_hard_filtered_multianno_txt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_table_annovar_hard_filtered_multianno_vcf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trim_galore_paired_fq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trim_galore_paired_reports" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trim_galore_single_fq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trim_galore_single_reports" . + + a schema1:Person ; + schema1:name "Miguel Roncoroni" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/145" ; + schema1:name "Alexandre Cormier" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/146" ; + schema1:name "Patrick Durand" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/148" ; + schema1:name "Laura Leroi" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/150" ; + schema1:name "Pierre Cuzin" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-atac-cutandrun_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-atac-cutandrun_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-pe_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-pe_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-sr_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-sr_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bits_set" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blacklist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chromosome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_lratio" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_exomeDepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_manta" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_bf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_exome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_mapping_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "read_group" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samples" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_bwa_mem" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastqc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_samtools" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "html_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "json_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_all" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_bam_filtering" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_exomedepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gridss" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_manta" . + + a schema1:Person ; + schema1:name "Daniel López-López" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_34" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fast" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Leah W Roberts" . + + a schema1:Person ; + schema1:name "Scott A Beatson " . + + a schema1:Person ; + schema1:name " Brian M Forde" . + + a schema1:Person ; + schema1:name "Minh-Duy Phan" . + + a schema1:Person ; + schema1:name "Nguyen Thi Khanh Nhu" . + + a schema1:Person ; + schema1:name "Adam D Irwin" . + + a schema1:Person ; + schema1:name "Patrick N A Harris" . + + a schema1:Person ; + schema1:name "David L Paterson" . + + a schema1:Person ; + schema1:name "Mark A Schembri" . + + a schema1:Person ; + schema1:name "David M Whiley" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Alignment score from Kpax to analyse structures" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CATH family ids" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Obsolete and inconsistent CATH domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename for residue-mapped CATH domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Core domain structure (.pdb)" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Database to select to compute core average structure" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "To store all the domain-like StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "To store all failed domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename to store family ids per iteration" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Iteration number starting from 0" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Threshold for minimum domain length" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Parameter file for current iteration" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The directory for storing all PDB files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pfam family ids" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Obsolete and inconsistent Pfam domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename for residue-mapped Pfam domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CATH cross-mapped domain StIs from previous iteration" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pfam cross-mapped domain StIs from previous iteration" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Score threshold for given alignment score from Kpax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Directory for storing all SIFTS files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "To store all the true domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename to store unmapped and not properly aligned instances from CATH" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename to store unmapped but structurally well aligned instances from CATH" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename to store unmapped and not properly aligned instances from Pfam" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename to store unmapped but structurally well aligned instances from Pfam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename with alignment scores for unmapped instances" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Alignment results for CATH unmapped instances" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Alignment results for Pfam unmapped instances" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Domain-like StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Failed domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "All CATH cross-mapped domin StIs family-wise together" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "All Pfam domain StIs cross-mapped to CATH family-wise" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Alignment results from Kpax for all cross-mapped families" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Average structures per cross-mapped Pfam family for CATH StIs at family level" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Core domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Core domain structure (.pdb)" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CATH domain StIs cross-mapped to Pfam family-wise" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pfam domin StIs cross-mapped to CATH family-wise" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Cross-mapped families with CATH domain StIs passing the threshold" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Cross-mapped families with Pfam domain StIs passing the threshold" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Merged cross-mapped and residue-mapped domain StIs from CATH" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Merged cross-mapped and residue-mapped domain StIs from Pfam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Family ids per iteration" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Parameter file for next iteration of the workflow" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Average structures per cross-mapped CATH family for Pfam StIs at family level" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Obsolete and inconsistent domain StIs from CATH" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Obsolete and inconsistent domain StIs from Pfam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "All CATH residue-mapped domain StIs with domain labels" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "All Pfam residue-mapped domain StIs with domain labels" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "True domain StIs per iteration" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "All un-mapped domin StIs from CATH" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Failed domain StIs from CATH" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Domain-like StIs from CATH" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "All Pfam un-mapped domin StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Failed domain StIs from Pfam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Domain-like StIs from Pfam" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/kmer-profiling-hifi-trio-VGP2/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-atac-cutandrun_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-atac-cutandrun_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-pe_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-pe_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-sr_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-sr_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Lucille Delisle" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/rnaseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Research Infrastructure RECETOX RI (No LM2018121) financed by the Ministry of Education" . + + a schema1:Person ; + schema1:name "Youth and Sports" . + + a schema1:Person ; + schema1:name "and Operational Programme Research" . + + a schema1:Person ; + schema1:name "Development and Innovation - project CETOCOEN EXCELLENCE (No CZ.02.1.01/0.0/0.0/17_043/0009632)." . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/753" ; + schema1:name "Zargham Ahmad" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sra-manifest-to-concatenated-fastqs/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "contour_levels" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "dec" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "do_cone_search" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "level_threshold" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ra" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "radius" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "t1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "t2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "asciicat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "contours" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "image" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "skymap_files" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Martin Hölzer" . + + a schema1:Person ; + schema1:name "Alexandre Almeida" . + + a schema1:Person ; + schema1:name "Guillermo Rangel-Pineros and Ekaterina Sakharova" . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "add_hmms_tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hmmscan_database_dir" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "img_blast_database_dir" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_fasta_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mashmap_reference_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ncbi_tax_db_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pprmeta_simg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "virsorter_data_dir" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "virsorter_virome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blast_merged_tsvs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blast_result_filtereds" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blast_results" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "filtered_contigs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "high_confidence_contigs" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "high_confidence_faa" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "krona_plot_all" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "krona_plots" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "low_confidence_contigs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "low_confidence_faa" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mashmap_hits" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "parse_prophages_contigs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "prophages_faa" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "taxonomy_assignations" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "virfinder_output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "virsorter_output_fastas" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "KNIME" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux nvb4 2.6.32-642.6.2.el6.x86_64 #1 SMP Mon Oct 24 10:22:33 EDT 2016 x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs SLURM_JOB_QOS=bsc_cs SLURM_JOB_GPUS=0,1,2,3 SLURM_MEM_PER_CPU=8000 SLURM_JOB_ID=1930260 SLURM_JOB_USER=bsc19959 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=4373 SLURM_SUBMIT_DIR=/gpfs/scratch/bsc19/bsc19959/cache SLURM_JOB_NODELIST=nvb[4-9,12-21] SLURM_JOB_CPUS_PER_NODE=16(x16) SLURM_SUBMIT_HOST=nvblogin1 SLURM_JOB_PARTITION=projects SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=16 COMPSS_MASTER_NODE=nvb4 COMPSS_WORKER_NODES= nvb5 nvb6 nvb7 nvb8 nvb9 nvb12 nvb13 nvb14 nvb15 nvb16 nvb17 nvb18 nvb19 nvb20 nvb21" ; + schema1:endTime "2024-03-22T17:53:30+00:00" ; + schema1:instrument ; + schema1:name "COMPSs cch_kmeans_test.py execution at bsc_nvidia with JOB_ID 1930260" ; + schema1:result , + ; + schema1:startTime "2024-03-22T17:29:38+00:00" ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:ComputerLanguage ; + schema1:alternateName "Dislib" ; + schema1:name "Dislib" ; + schema1:url "https://dislib.readthedocs.io/en/latest/" ; + schema1:version "0.9" . + + a schema1:MediaObject ; + schema1:contentSize 6201 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_bsc_nvidia_SLURM_JOB_ID_1930260" ; + schema1:contentSize 64 ; + schema1:description "COMPSs console standard error log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-1930260.err" . 
+ + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_bsc_nvidia_SLURM_JOB_ID_1930260" ; + schema1:contentSize 2835 ; + schema1:description "COMPSs console standard output log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-1930260.out" . + + a schema1:MediaObject ; + schema1:contentSize 731 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 1009 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/atacseq/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "energy_min.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "energy_npt.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "energy_nvt.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myeditconf.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfix_side_chain.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygenion.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygenion.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_image.xtc" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_rgyr.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_trjconv_str.xtc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygrompp.tpr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.cpt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.edr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.xtc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb2gmx.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb2gmx.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysolvate.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysolvate.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rmsd_exp.xvg" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rmsd_first.xvg" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_fpocket_select_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_fpocket_select_input_pockets_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_fpocket_select_output_pocket_pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_fpocket_select_output_pocket_pqr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_box_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_box_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_babel_convert_prep_lig_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_babel_convert_prep_lig_input_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_babel_convert_prep_lig_output_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_str_check_add_hydrogens_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_str_check_add_hydrogens_input_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_str_check_add_hydrogens_output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_autodock_vina_run_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_autodock_vina_run_output_pdbqt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_babel_convert_pose_pdb_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_babel_convert_pose_pdb_output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pocket_pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pocket_pqr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdbqt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pedigree" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "html_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "list_output_txt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "out_file1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/159" ; + schema1:name "Oliver Woolland" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/89" ; + schema1:name "Paul Brack" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/parallel-accession-download/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "barcodes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "block_size" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "features" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "filterDataR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "loadDataR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "margin" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "matrix" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "minCells" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "minFeatures" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "nCountRNAmax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "nCountRNAmin" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "nFeatureRNAmax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "nFeatureRNAmin" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "normalization_method" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "normalizeDataR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pattern" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "percentMTmax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "percentMTmin" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "projectName" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "scale_factor" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "verbose" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11utput" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1Output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22Output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2Output" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3Output" . + + a schema1:Person ; + schema1:name "Lucille Delisle" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/rnaseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CAMS-PM2_5-20211222_netcdf" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "COMPSs" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_babel_minimize_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_babel_minimize_input_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_babel_minimize_output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_acpype_params_gmx_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_acpype_params_gmx_output_path_gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_acpype_params_gmx_output_path_itp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_acpype_params_gmx_output_path_top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path_gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path_itp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path_top" . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sra-manifest-to-concatenated-fastqs/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Dannon Baker" . + + a schema1:Person ; + schema1:name "Björn Grüning" . + + a schema1:Person ; + schema1:name "Delphine Larivière" . + + a schema1:Person ; + schema1:name "Gildas Le Corguillé" . + + a schema1:Person ; + schema1:name "Andrew Lonie" . + + a schema1:Person ; + schema1:name "Nicholas Keener" . + + a schema1:Person ; + schema1:name "Sergei Kosakovsky Pond" . + + a schema1:Person ; + schema1:name "Wolfgang Maier" . + + a schema1:Person ; + schema1:name "Anton Nekrutenko" . + + a schema1:Person ; + schema1:name "James Taylor" . + + a schema1:Person ; + schema1:name "Steven Weaver" . + + a schema1:Person ; + schema1:name "Marius van den Beek" . + + a schema1:Person ; + schema1:name "Dave Bouvier" . + + a schema1:Person ; + schema1:name "John Chilton" . + + a schema1:Person ; + schema1:name "Nate Coraor" . + + a schema1:Person ; + schema1:name "Frederik Coppens" . + + a schema1:Person ; + schema1:name "Bert Droesbeke" . + + a schema1:Person ; + schema1:name "Ignacio Eguinoa" . + + a schema1:Person ; + schema1:name "Simon Gladman" . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Alex L Mitchell" . + + a schema1:Person ; + schema1:name "Lorna J Richardson" . + + a schema1:Person ; + schema1:name "Ekaterina Sakharova" . + + a schema1:Person ; + schema1:name "Maxim Scheremetjew" . + + a schema1:Person ; + schema1:name "Anton Korobeynikov" . + + a schema1:Person ; + schema1:name "Alex Shlemov" . + + a schema1:Person ; + schema1:name "Olga Kunyavskaya" . + + a schema1:Person ; + schema1:name "Alla Lapidus" . + + a schema1:Person ; + schema1:name "Robert D Finn" . + + a schema1:Person ; + schema1:name "Alexandre Almeida" . + + a schema1:Person ; + schema1:name "Martin Beracochea" . + + a schema1:Person ; + schema1:name "Miguel Boland" . + + a schema1:Person ; + schema1:name "Josephine Burgin" . + + a schema1:Person ; + schema1:name "Guy Cochrane" . + + a schema1:Person ; + schema1:name "Michael R Crusoe" . + + a schema1:Person ; + schema1:name "Varsha Kale" . + + a schema1:Person ; + schema1:name "Simon C Potter" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "5.8s_pattern" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "5s_pattern" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CGC_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CGC_postfixes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "EggNOG_data_dir" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "EggNOG_db" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "EggNOG_diamond_db" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "HMM_gathering_bit_score" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "HMM_name_database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "HMM_omit_alignment" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "InterProScan_applications" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "InterProScan_databases" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "InterProScan_outputFormat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Uniref90_db_txt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cgc_chunk_size" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "clusters_glossary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "contig_min_length" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "contigs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "diamond_databaseFile" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "diamond_header" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "diamond_maxTargetSeqs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "func_ann_names_hmmer" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "func_ann_names_ips" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "go_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gp_flatfiles_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "graphs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hmmsearch_header" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ips_header" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ko_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_db" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_label" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_otus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_tax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "other_ncrna_models" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pathways_classes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pathways_names" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "protein_chunk_size_IPS" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "protein_chunk_size_eggnog" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "protein_chunk_size_hmm" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rfam_model_clans" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rfam_models" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_db" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_label" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_otus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_tax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bgzip_fasta_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bgzip_index" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chunking_nucleotides" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chunking_proteins" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "completed_flag_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "compressed_files" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "functional_annotation_folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hashsum_input" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "index_fasta_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_cds_flag_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_tax_flag_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pathways_systems_folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pathways_systems_folder_antismash" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pathways_systems_folder_antismash_summary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc-statistics_folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc-status" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc_summary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rna-count" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sequence-categorisation_folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stats" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "taxonomy-summary_folder" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-se-illumina-wgs-variant-calling/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_energy_min_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_energy_min_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_gppnvt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_energy_nvt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_energy_nvt_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_gppnpt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_energy_npt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_energy_npt_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_gppmd_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_rmsfirst_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_rmsfirst_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_pdb_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_pdb_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_rmsexp_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_rmsexp_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_rgyr_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_image_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_dry_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_editconf_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_gppion_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_genion_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_gppmin_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Checkpoint file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Structures - Raw structure" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Resulting protein structure" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GROMACS topology file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Topologies GROMACS portable binary run" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Trajectories - Raw trajectory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Trajectories - Post-processed trajectory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "System Setup Observables - Potential Energy" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "System Setup Observables - Pressure and density" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "System Setup Observables - Temperature" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Simulation Analysis" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Simulation Analysis" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Simulation Analysis" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/kmer-profiling-hifi-trio-VGP2/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "Ruby bioinformatics toolkit" ; + schema1:name "Rbbt" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Adrián Muñoz-Civico" . + + a schema1:Person ; + schema1:name "Daniel López-López" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Scaffolding-HiC-VGP8/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_36" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_37" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_40" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_41" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_42" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_43" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_44" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_45" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_46" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_47" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_48" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_49" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_50" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_51" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_52" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_53" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_54" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_55" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_56" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_57" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_58" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_59" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_60" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_61" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_62" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_63" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_64" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_65" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_66" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_67" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_68" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_randomize_ions.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_randomize_ions.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.parmtop" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.parmtop" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myparmed_hmassrepartition.top" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_mdout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_minout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cpout" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cprst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.mdinfo" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.nc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.ncrst" . + + a schema1:Person ; + schema1:name "Sophie Alain" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/146" ; + schema1:name "Patrick Durand" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/148" ; + schema1:name "Laura Leroi" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/150" ; + schema1:name "Pierre Cuzin" . + + a schema1:Person ; + schema1:name "Engy Nasr" . + + a schema1:Person ; + schema1:name "Bérénice Batut" . + + a schema1:Person ; + schema1:name "Paul Zierep" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/nanopore-pre-processing/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_ens_mean_0_1deg_reg_v20_0e_Paris_daily_csv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ts_cities_csv" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/49" ; + schema1:name "Anne Fouilloux" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-decontamination-VGP9/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/445" ; + schema1:name "Diego De Panis" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pocket_pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pocket_pqr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdbqt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "workdir_array" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "final_result" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Deduplicate reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output Destination" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filter reference file(s)" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "filter rRNA" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Forward reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "identifier used" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Keep mapped reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken2 confidence threshold" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken2 database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken2 standard report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Maximum memory in MB" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Prepare references" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reverse reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Skip QC filtered" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Skip QC unfiltered" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output Step number" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Number of threads" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filtered forward read" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filtered reverse read" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken2 folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filtering reports folder" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.itp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myappend_ligand.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybabel_minimize.mol2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycat_pdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myeditconf.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_heteroatoms.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_molecule.pdb" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfix_side_chain.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygenion.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygenion.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygenrestr.itp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_energy.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_image.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_rgyr.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_rms_exp.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_trjconv_str_lig.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_trjconv_str_prot.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygrompp_ion.tpr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymake_ndx.ndx" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.cpt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.edr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.gro" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.xtc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb2gmx.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb2gmx.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb_prot.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myreduce_add_hydrogens.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysolvate.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysolvate.zip" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:Person ; + schema1:name "ERGA" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lineage" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/445" ; + schema1:name "Diego De Panis" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Shell Script" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/kmer-profiling-hifi-trio-VGP2/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "run_idw_interpolation_test_input1.csv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "run_idw_interpolation_test_input2.geojson" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Andrey Bliznyuk" . + + a schema1:Person ; + schema1:name "Ben Menadue" . + + a schema1:Person ; + schema1:name "Rika Kobayashi" . + + a schema1:Person ; + schema1:name "Matthew Downton" . + + a schema1:Person ; + schema1:name "Yue Sun" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/141" ; + schema1:name "Tracy Chew" . + + a schema1:ComputerLanguage ; + schema1:name "Shell Script" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_Velocyto-on10X-filtered-barcodes_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_Velocyto-on10X-filtered-barcodes_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/velocyto/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_Velocyto-on10X-from-bundled_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_Velocyto-on10X-from-bundled_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/velocyto/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/cutandrun/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Assembly fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Bam file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BUSCO dataset" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output destination (not used in the workflow itself)" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gtdbtk data directory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Identifier used" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "memory usage (MB)" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Run SemiBin" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "SemiBin Environment" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CWL base step number" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Sub workflow Run" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Bin files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Assembly/Bin read stats" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Bins summary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BUSCO" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CheckM" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DAS Tool" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "EukRep fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "EukRep stats" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GTDB-Tk" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MaxBin2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MetaBAT2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "SemiBin" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "endpoint" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "query_file" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux nvb4 2.6.32-642.6.2.el6.x86_64 #1 SMP Mon Oct 24 10:22:33 EDT 2016 x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs SLURM_JOB_QOS=bsc_cs SLURM_JOB_GPUS=0,1,2,3 SLURM_MEM_PER_CPU=8000 SLURM_JOB_ID=1930224 SLURM_JOB_USER=bsc19959 COMPSS_HOME=/apps/COMPSs/TrunkCTCache/ SLURM_JOB_UID=4373 SLURM_SUBMIT_DIR=/gpfs/scratch/bsc19/bsc19959/cache SLURM_JOB_NODELIST=nvb[4-9,12-21] SLURM_JOB_CPUS_PER_NODE=16(x16) SLURM_SUBMIT_HOST=nvblogin1 SLURM_JOB_PARTITION=projects SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=16 COMPSS_MASTER_NODE=nvb4 COMPSS_WORKER_NODES= nvb5 nvb6 nvb7 nvb8 nvb9 nvb12 nvb13 nvb14 nvb15 nvb16 nvb17 nvb18 nvb19 nvb20 nvb21" ; + schema1:endTime "2024-03-22T11:39:26+00:00" ; + schema1:instrument ; + schema1:name "COMPSs cch_matmul_test.py execution at bsc_nvidia with JOB_ID 1930224" ; + schema1:result , + , + , + ; + schema1:startTime "2024-03-22T11:31:38+00:00" ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:SoftwareApplication ; + schema1:name "Dislib" ; + schema1:version "0.9.x" . + + a schema1:MediaObject ; + schema1:contentSize 4112 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . 
+ + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_bsc_nvidia_SLURM_JOB_ID_1930224" ; + schema1:contentSize 64 ; + schema1:description "COMPSs console standard error log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-1930224.err" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_bsc_nvidia_SLURM_JOB_ID_1930224" ; + schema1:contentSize 2864 ; + schema1:description "COMPSs console standard output log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-1930224.out" . + + a schema1:MediaObject ; + schema1:contentSize 708 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 1203 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-only-VGP3/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bits_set" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blacklist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bwt_algorithm" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chromosome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_lratio" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_exomeDepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_manta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_bf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_len" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "generate_bwa_indexes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_exome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_mapping_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "read_group" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_amb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_ann" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_bwt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fai" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_pac" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_sa" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_bwa_mem" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastqc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_samtools" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "html_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "json_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_all" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_bam_filtering" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_exomedepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_manta" . + + a schema1:Person ; + schema1:name "Daniel López-López" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Scipion" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "ABRomics" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/bacterial-genome-assembly/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "auto_kmer_choice" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "careful" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cov_cutoff" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cov_state" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq1_type" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq2_type" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq_file_type" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "iontorrent" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "kmers" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "libraries_fwd_rev" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "libraries_metadata" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "libraries_mono" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mode" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "nanopore_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "onlyassembler" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pacbio_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sanger_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trusted_contigs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "untrusted_contigs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "all_log_spades" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "assembly_graph_spades" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "assembly_graph_unicycler" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "assembly_graph_with_scaffolds_spades" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "assembly_image_spades" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "assembly_image_unicycler" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "assembly_info_spades" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "assembly_info_unicycler" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "assembly_unicycler" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "out_contig_stats_spades" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "out_contigs_spades" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "out_scaffold_stats_spades" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "out_scaffolds_spades" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bits_set" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blacklist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bwt_algorithm" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_lratio" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_codex" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_exomeDepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_manta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_bf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "generate_bwa_indexes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_exome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_q" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_mapping_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "read_group" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_amb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_ann" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_bwt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fai" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_pac" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_sa" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_bwa_mem" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastqc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_samtools" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_html" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "html_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "json_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_all" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_bam_filtering" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_exomedepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_manta" . + + a schema1:Person ; + schema1:name "Daniel López-López" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject ; + schema1:contentSize 183 . + + a schema1:MediaObject ; + schema1:contentSize 90 . + + a schema1:MediaObject ; + schema1:contentSize 558 . + + a schema1:MediaObject ; + schema1:contentSize 1289 . + + a schema1:Dataset . 
+ + a schema1:MediaObject ; + schema1:contentSize 44006 . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/445" ; + schema1:name "Diego De Panis" . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux nvb4 2.6.32-642.6.2.el6.x86_64 #1 SMP Mon Oct 24 10:22:33 EDT 2016 x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs SLURM_JOB_QOS=bsc_cs SLURM_JOB_GPUS=0,1,2,3 SLURM_MEM_PER_CPU=8000 SLURM_JOB_ID=1930568 SLURM_JOB_USER=bsc19959 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=4373 SLURM_SUBMIT_DIR=/gpfs/scratch/bsc19/bsc19959/cache/Imagenet SLURM_JOB_NODELIST=nvb[4-9,12-21] SLURM_JOB_CPUS_PER_NODE=16(x16) SLURM_SUBMIT_HOST=nvblogin1 SLURM_JOB_PARTITION=projects SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=16 COMPSS_MASTER_NODE=nvb4 COMPSS_WORKER_NODES= nvb5 nvb6 nvb7 nvb8 nvb9 nvb12 nvb13 nvb14 nvb15 nvb16 nvb17 nvb18 nvb19 nvb20 nvb21" ; + schema1:endTime "2024-03-25T11:02:59+00:00" ; + schema1:instrument ; + schema1:name "COMPSs main_pytorch_sync_5_nodes.py execution at bsc_nvidia with JOB_ID 1930568" ; + schema1:result , + ; + schema1:startTime "2024-03-25T10:49:35+00:00" ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:SoftwareApplication ; + schema1:name "Dislib" ; + schema1:version "0.9.x" . + + a schema1:SoftwareApplication ; + schema1:name "Pytorch" ; + schema1:version "1.7.1+cu101" . 
+ + a schema1:MediaObject ; + schema1:contentSize 4515 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_bsc_nvidia_SLURM_JOB_ID_1930568" ; + schema1:contentSize 64 ; + schema1:description "COMPSs console standard error log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-1930568.err" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_bsc_nvidia_SLURM_JOB_ID_1930568" ; + schema1:contentSize 3012 ; + schema1:description "COMPSs console standard output log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-1930568.out" . + + a schema1:MediaObject ; + schema1:contentSize 769 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 928 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Engy Nasr" . + + a schema1:Person ; + schema1:name "Bérénice Batut" . + + a schema1:Person ; + schema1:name "Paul Zierep" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/allele-based-pathogen-identification/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "metrics" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/parallel-accession-download/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/brew3r/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_36" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_37" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_40" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_41" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_42" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_43" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_44" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_45" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_46" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_47" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_48" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_49" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_50" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_51" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_52" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_53" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_54" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_55" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_56" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_57" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_58" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fast" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Scaffolding-Bionano-VGP7/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_36" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_37" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_40" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_41" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_ac.frcmod" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_ac.inpcrd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_ac.lib" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_ac.prmtop" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myamber_to_pdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybabel_minimize.mol2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_image.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_rgyr.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_rms.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_heteroatoms.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.parmtop" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.parmtop" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb4amber_run.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_mdout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_minout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myreduce_add_hydrogens.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myremove_ligand.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myremove_pdb_water.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cpout" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cprst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.mdinfo" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.netcdf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.rst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.trj" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sra-manifest-to-concatenated-fastqs/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Abromics" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/bacterial-genome-assembly/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/atacseq/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myeditconf.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfix_side_chain.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygenion.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygenion.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_energy.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_image.xtc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_rgyr.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_rms.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_trjconv_str.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygrompp.tpr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.cpt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.edr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.gro" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.xtc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb2gmx.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb2gmx.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysolvate.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysolvate.zip" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sra-manifest-to-concatenated-fastqs/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ChIPQC_annotation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ChIPQC_bCount" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ChIPQC_blacklist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ChIPQC_consensus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ChIPQC_facetBy" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_bParallel" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_background" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_blacklist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_consensus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_cores" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_design" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_filterFun" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_greylist" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_library" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_low_read_count_filter" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_minOverlap" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_normalization" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_reorderMeta_factor" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_reorderMeta_value" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_retrieve_consensus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bamCoverage_effective_genome_size" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bamCoverage_extendReads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bamCoverage_normalizeUsing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bamCoverage_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blackListFile" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "computeMatrix_downstream" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "computeMatrix_outFileSortedRegions" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "computeMatrix_outputFile" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "computeMatrix_regions" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "computeMatrix_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "computeMatrix_upstream" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hisat2_idx_basename" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hisat2_idx_directory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hisat2_num_of_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_control_samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_file_split" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_file_split_fwd_single" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_file_split_rev" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_qc_check" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_treatment_samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_trimming_check" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "macs2_callpeak_bdg" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "macs2_callpeak_broad" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "macs2_callpeak_format" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "macs2_callpeak_gsize" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "macs2_callpeak_nomodel" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "macs2_extsize" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "macs2_pvalue" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "macs2_qvalue" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "macs2_shift" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "metadata_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "multiBamSummary_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "plotCorrelation_color" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "plotCorrelation_method" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "plotCorrelation_numbers" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "plotCorrelation_outFileName" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "plotCorrelation_plotType" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "plotCorrelation_title" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "plotCoverage_outFileName" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "plotCoverage_plotFileFormat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "plotCoverage_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "plotFingerprint_outFileName" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "plotFingerprint_plotFileFormat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "plotFingerprint_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "plotHeatmap_outputFile" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "plotHeatmap_plotFileFormat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "raw_files_directory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rose_genome_build" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rose_stitch_distance" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rose_tss_distance" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_fixmate_output_format" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_fixmate_threads" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_markdup_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_readswithoutbits" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_sort_compression_level" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_sort_memory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_sort_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmomatic_pe_illuminaClip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmomatic_pe_leading" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmomatic_pe_minlen" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmomatic_pe_slidingWindow" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmomatic_pe_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmomatic_pe_trailing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmomatic_se_illuminaClip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmomatic_se_leading" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmomatic_se_minlen" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmomatic_se_slidingWindow" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmomatic_se_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmomatic_se_trailing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_ChIPQC_macs_ChIPQCexperiment" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_ChIPQC_macs_ChIPQCreport" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_ChIPQC_macs_outdir" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_ChIPQC_rose_ChIPQCexperiment" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_ChIPQC_rose_ChIPQCreport" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_ChIPQC_rose_outdir" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_DiffBind_macs_correlation_heatmap" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_DiffBind_macs_diffbind_consensus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_DiffBind_macs_diffbind_dba_object" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_DiffBind_macs_diffbind_normalized_counts" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_DiffBind_macs_diffbind_results" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_DiffBind_rose_correlation_heatmap" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_DiffBind_rose_diffbind_consensus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_DiffBind_rose_diffbind_dba_object" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_DiffBind_rose_diffbind_normalized_counts" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_DiffBind_rose_diffbind_results" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_append_files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_bamCoverage_norm" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_bed_to_rose_gff_conversion" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_bedtools_coverage" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_bedtools_intersect" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_bedtools_merge" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_computeMatrix_matrix" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_computeMatrix_regions" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_enhancer_bed_processing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_exclude_black_list_regions" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_exclude_black_list_regions_narrowPeak" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_paired_html_fwd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_paired_html_rev" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_paired_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_raw_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_single_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_single_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_hisat2_for_paired_reads_sam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_hisat2_for_paired_reads_stderr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_hisat2_for_single_reads_sam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_hisat2_for_single_reads_stderr" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_macs2_call_peaks_bed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_macs2_call_peaks_broadPeak" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_macs2_call_peaks_cutoff" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_macs2_call_peaks_gappedPeak" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_macs2_call_peaks_lambda" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_macs2_call_peaks_model_r" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_macs2_call_peaks_narrowPeak" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_macs2_call_peaks_pileup" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_macs2_call_peaks_xls" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_multiBamSummary_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_paste_content_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_paste_content_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_plotCorrelation_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_plotCoverage_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_plotFingerprint_file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_plotHeatmap" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_printf_header_samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_rose_main_AllEnhancers_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_rose_main_Enhancers_withSuper" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_rose_main_Plot_points" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_rose_main_STITCHED_ENHANCER_REGION_MAP" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_rose_main_SuperEnhancers_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_rose_main_gff_dir_outputs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_rose_main_mappedGFF_dir_outputs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_fixmate" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_index" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_markdup" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_sort" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_sort_by_name" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_sort_peaks_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_total_peaks_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trimmomatic_paired_end_fwd_paired" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trimmomatic_paired_end_fwd_unpaired" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trimmomatic_paired_end_rev_paired" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trimmomatic_paired_end_rev_unpaired" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trimmomatic_paired_end_stderr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trimmomatic_single_end_fastq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trimmomatic_single_end_stderr" . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux nvb4 2.6.32-642.6.2.el6.x86_64 #1 SMP Mon Oct 24 10:22:33 EDT 2016 x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs SLURM_JOB_QOS=bsc_cs SLURM_JOB_GPUS=0,1,2,3 SLURM_MEM_PER_CPU=8000 SLURM_JOB_ID=1930560 SLURM_JOB_USER=bsc19959 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=4373 SLURM_SUBMIT_DIR=/gpfs/scratch/bsc19/bsc19959/cache/Imagenet SLURM_JOB_NODELIST=nvb[4-9,12-21] SLURM_JOB_CPUS_PER_NODE=16(x16) SLURM_SUBMIT_HOST=nvblogin1 SLURM_JOB_PARTITION=projects SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=16 COMPSS_MASTER_NODE=nvb4 COMPSS_WORKER_NODES= nvb5 nvb6 nvb7 nvb8 nvb9 nvb12 nvb13 nvb14 nvb15 nvb16 nvb17 nvb18 nvb19 nvb20 nvb21" ; + schema1:endTime "2024-03-25T10:49:09+00:00" ; + schema1:instrument ; + schema1:name "COMPSs main_pytorch_sync_5_nodes.py execution at bsc_nvidia with JOB_ID 1930560" ; + schema1:result , + ; + schema1:startTime "2024-03-25T10:30:21+00:00" ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:SoftwareApplication ; + schema1:name "Dislib" ; + schema1:version "0.9.x" . + + a schema1:SoftwareApplication ; + schema1:name "Pytorch" ; + schema1:version "1.7.1+cu101" . + + a schema1:MediaObject ; + schema1:contentSize 4523 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_bsc_nvidia_SLURM_JOB_ID_1930560" ; + schema1:contentSize 64 ; + schema1:description "COMPSs console standard error log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-1930560.err" . 
+ + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_bsc_nvidia_SLURM_JOB_ID_1930560" ; + schema1:contentSize 3010 ; + schema1:description "COMPSs console standard output log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-1930560.out" . + + a schema1:MediaObject ; + schema1:contentSize 765 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 928 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step00_cat_pdb_input_structure2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step00_cat_pdb_output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_reduce_remove_hydrogens_input_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_reduce_remove_hydrogens_output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_genrestr_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_genrestr_input_structure_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_genrestr_output_itp_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_gmx_trjconv_str_protein_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_gmx_trjconv_str_protein_output_str_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_gmx_trjconv_str_ligand_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_gmx_trjconv_str_ligand_input_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_gmx_trjconv_str_ligand_input_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_gmx_trjconv_str_ligand_output_str_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_cat_pdb_hydrogens_output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_append_ligand_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_append_ligand_input_itp_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_append_ligand_output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_editconf_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_editconf_output_gro_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_solvate_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_solvate_output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_grompp_genion_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_grompp_genion_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_genion_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_genion_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_genion_output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_grompp_min_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_grompp_min_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_mdrun_min_output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_mdrun_min_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_mdrun_min_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_mdrun_min_output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_gmx_energy_min_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_gmx_energy_min_output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_make_ndx_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_make_ndx_output_ndx_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_grompp_nvt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_grompp_nvt_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_mdrun_nvt_output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_mdrun_nvt_output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_mdrun_nvt_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_mdrun_nvt_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_mdrun_nvt_output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step25_gmx_energy_nvt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step25_gmx_energy_nvt_output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step26_grompp_npt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step26_grompp_npt_output_tpr_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step27_mdrun_npt_output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step27_mdrun_npt_output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step27_mdrun_npt_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step27_mdrun_npt_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step27_mdrun_npt_output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step28_gmx_energy_npt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step28_gmx_energy_npt_output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step29_grompp_md_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step29_grompp_md_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_extract_molecule_output_molecule_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step30_mdrun_md_output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step30_mdrun_md_output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step30_mdrun_md_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step30_mdrun_md_output_log_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step30_mdrun_md_output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step31_rmsd_first_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step31_rmsd_first_output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step32_rmsd_exp_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step32_rmsd_exp_output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step33_gmx_rgyr_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step33_gmx_rgyr_output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step34_gmx_image_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step34_gmx_image_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step34b_gmx_image2_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step34b_gmx_image2_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step35_gmx_trjconv_str_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step35_gmx_trjconv_str_output_str_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step36_grompp_md_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step36_grompp_md_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_fix_side_chain_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_pdb2gmx_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_pdb2gmx_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_pdb2gmx_output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_make_ndx_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_make_ndx_input_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_make_ndx_output_ndx_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_itp_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_str_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_str_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_ndx_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_molecule_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_str_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_ndx_path" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-decontamination-VGP9/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Engy Nasr" . + + a schema1:Person ; + schema1:name "Bérénice Batut" . + + a schema1:Person ; + schema1:name "Paul Zierep" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/taxonomy-profiling-and-visualization-with-krona/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bits_set" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blacklist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chromosome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_lratio" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_exomeDepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_manta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_bf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_len" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_exome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_mapping_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "read_group" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_bwa_mem" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastqc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_samtools" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_zip" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "html_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "json_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_all" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_bam_filtering" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_exomedepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_manta" . + + a schema1:Person ; + schema1:name "Daniel López-López" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/cutandrun/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/732" ; + schema1:name "Andrii Neronov" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BridgeDB cache" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Differential gene expression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Differential miRNA expression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING identifier mapping" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mRNA expression correlation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of correlation edges to use, USE FOR TESTING ONLY" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "miRNA mRNA correlation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "miRTarBase data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of miRTarBase edges to use, USE FOR TESTING ONLY" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of cores to let MOGAMUN use" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of generation to let the genetic algorithm in MOGAMUN evolve" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The maximum size of subnetworks during postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "the minimum Jaccard Index overlap between two subnetworks to allow them to be merged" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The minimum size of subnetworks during postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of parallel runs to let MOGAMUN do, these parallel runs are combined in postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The minimum score for a STRING edge to be included in the analysis" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of STRING edges to use, USE FOR TESTING ONLY" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Variant Burden" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "full_graph" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "subnetworks" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "COMPSs" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_COVID-19-PE-ARTIC-ILLUMINA_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_COVID-19-PE-ARTIC-ILLUMINA_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CPAT_header_tab" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GRCh38_p13_genome_fa_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pfam-A_hmm_dat_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pfam-A_hmm_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "active_site_dat_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_annotation_gtf_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_lncRNA_transcripts_fa_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_pc_transcripts_fa_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_transcripts_fa_gz" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/231" ; + schema1:name "Ryan Patterson-Cross" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject ; + schema1:contentSize 200 . + + a schema1:MediaObject ; + schema1:contentSize 1322 . + + a schema1:MediaObject ; + schema1:contentSize 1297 . + + a schema1:MediaObject ; + schema1:contentSize 718 . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 262005 . + + a schema1:MediaObject ; + schema1:contentSize 21291 . + + a schema1:MediaObject ; + schema1:contentSize 30406 . + + a schema1:ComputerLanguage ; + schema1:name "Shell Script" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/parallel-accession-download/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Shell Script" . + + a schema1:Person ; + schema1:name "Lucille Delisle" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/rnaseq-sr/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . 
+ + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Scaffolding-HiC-VGP8/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "ABRomics" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/quality-and-contamination-control/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/gromacs-dctmd/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . 
+ + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Daniel López-López" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/357" ; + schema1:name "Tatiana Gurbich" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-sr/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Scaffolding-Bionano-VGP7/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s11r1b48 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=29726926 SLURM_JOB_USER=bsc19776 COMPSS_HOME=/apps/COMPSs/Trunk/ SLURM_JOB_UID=7363 SLURM_SUBMIT_DIR=/gpfs/home/bsc19/bsc19776/PyCOMPSs/Galileo_school/example_egu11_v04 SLURM_JOB_NODELIST=s11r1b48 SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48 COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login1 SLURM_JOB_PARTITION=sequential SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=1 COMPSS_MASTER_NODE=s11r1b48 COMPSS_WORKER_NODES=" ; + schema1:endTime "2023-07-20T15:48:28+00:00" ; + schema1:instrument ; + schema1:name "COMPSs my_workflow_multiple.py execution at marenostrum4 with JOB_ID 29726926" ; + schema1:result , + , + , + , + , + , + , + ; + schema1:subjectOf "https://userportal.bsc.es/" . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.2.rc2307" . + + a schema1:MediaObject ; + schema1:contentSize 839 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 512 ; + schema1:description "Auxiliary File" ; + schema1:name "launch.sh" . + + a schema1:MediaObject ; + schema1:contentSize 1827 ; + schema1:description "Auxiliary File" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:MediaObject ; + schema1:contentSize 12520 ; + schema1:description "Auxiliary File" ; + schema1:name "my_analytic" . + + a schema1:MediaObject ; + schema1:contentSize 770 ; + schema1:description "Auxiliary File" ; + schema1:name "my_analytic.c" . + + a schema1:MediaObject ; + schema1:contentSize 12392 ; + schema1:description "Auxiliary File" ; + schema1:name "my_sim" . + + a schema1:MediaObject ; + schema1:contentSize 355 ; + schema1:description "Auxiliary File" ; + schema1:name "my_sim.c" . + + a schema1:MediaObject ; + schema1:contentSize 690 ; + schema1:description "COMPSs command line execution command (runcompss), including flags and parameters passed" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_command_line_arguments.txt" . + + a schema1:Person ; + schema1:name "Lucille Delisle" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/rnaseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . 
+ + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/422" ; + schema1:name "Zavolan Lab" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Matthew Downton" . + + a schema1:Person ; + schema1:name "Andrey Bliznyuk" . + + a schema1:Person ; + schema1:name "Rika Kobayashi" . + + a schema1:Person ; + schema1:name "Ben Menadue" . + + a schema1:Person ; + schema1:name "Ben Evans" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/141" ; + schema1:name "Tracy Chew" . + + a schema1:ComputerLanguage ; + schema1:name "Shell Script" . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pcz_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pcz_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pcz_path" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:Person ; + schema1:name "Miguel Roncoroni" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Proteinortho_extract_by_orthogroup" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "extracted_ORFs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fasta_header_cleaned" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "funannotate_predicted_proteins" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "headers_shortened" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "proteomes_to_one_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "repeat_masked" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sample_names_to_headers" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/atacseq/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Sample" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/atacseq/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:name "Tim Dudgeon" . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:Person ; + schema1:name "Gianmauro Cuccuru" . + + a schema1:Person ; + schema1:name "Björn Grüning" . + + a schema1:Person ; + schema1:name "Rachael Skyner" . + + a schema1:Person ; + schema1:name "Jack Scantlebury" . + + a schema1:Person ; + schema1:name "Susan Leung" . + + a schema1:Person ; + schema1:name "Frank von Delft" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-sr/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-variation-reporting/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "WDL" ; + schema1:identifier ; + schema1:name "Workflow Description Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_extract_model_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_extract_model_input_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_extract_model_output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_cpptraj_convert_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_cpptraj_convert_output_cpptraj_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_bd_run_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_bd_run_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_bd_run_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_cpptraj_rms_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_cpptraj_rms_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_dmd_run_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_dmd_run_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_dmd_run_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_cpptraj_rms_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_cpptraj_rms_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_nma_run_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_nma_run_output_crd_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_nma_run_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_cpptraj_rms_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_cpptraj_convert_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_cpptraj_convert_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_nolb_nma_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_nolb_nma_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_cpptraj_rms_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_extract_chain_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_extract_chain_output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_cpptraj_convert_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_cpptraj_convert_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_imod_imode_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_imod_imode_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_imod_imc_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_imod_imc_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_cpptraj_rms_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_cpptraj_convert_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_cpptraj_convert_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step26_make_ndx_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step26_make_ndx_output_ndx_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step27_gmx_cluster_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step27_gmx_cluster_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step28_cpptraj_rms_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step28_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step28_cpptraj_rms_output_traj_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step29_pcz_zip_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step29_pcz_zip_output_pcz_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_cpptraj_mask_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_cpptraj_mask_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step30_pcz_zip_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step30_pcz_zip_output_pcz_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step31_pcz_info_output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step32_pcz_evecs_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step32_pcz_evecs_output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step33_pcz_animate_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step33_pcz_animate_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step34_cpptraj_convert_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step34_cpptraj_convert_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step35_pcz_bfactor_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step35_pcz_bfactor_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step35_pcz_bfactor_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step36_pcz_hinges_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step36_pcz_hinges_output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step37_pcz_hinges_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step37_pcz_hinges_output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step38_pcz_hinges_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step38_pcz_hinges_output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step39_pcz_stiffness_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step39_pcz_stiffness_output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_cpptraj_mask_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_cpptraj_mask_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step40_pcz_collectivity_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step40_pcz_collectivity_output_json_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_concoord_disco_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_concoord_dist_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_concoord_dist_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_concoord_dist_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_concoord_dist_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_concoord_disco_output_bfactor_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_concoord_disco_output_rmsd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_concoord_disco_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_cpptraj_rms_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_cpptraj_convert_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_cpptraj_convert_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_prody_anm_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_prody_anm_output_pdb_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_cpptraj_rms_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_ndx_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pcz_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pcz_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rmsd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_bfactor_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Heatmap" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-consensus-from-variation/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/cutandrun/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/atacseq/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . 
+ + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CPAT_header_tab" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GRCh38_p13_genome_fa_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pfam-A_hmm_dat_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pfam-A_hmm_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "active_site_dat_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_annotation_gtf_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_lncRNA_transcripts_fa_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_pc_transcripts_fa_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_transcripts_fa_gz" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "COMPSs" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/141" ; + schema1:name "Tracy Chew" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Basecalling model" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Run binning workflow" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Deduplicate reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Contamination reference file(s)" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Identifier used" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "illumina forward reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "illumina reverse reads" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken2 database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Maximum memory in MB" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "When working with metagenomes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Nanopore reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pilon fix list" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Number of threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Use mapped reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Assembly output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Binning output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filtered statistics" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken2 reports" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Read quality and filtering reports" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/141" ; + schema1:name "Tracy Chew" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Cali Willet" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/141" ; + schema1:name "Tracy Chew" . + + a schema1:ComputerLanguage ; + schema1:name "Shell Script" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_chic-fastq-to-cool-hicup-cooler_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_chic-fastq-to-cool-hicup-cooler_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/hic-hicup-cooler/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_hic-fastq-to-cool-hicup-cooler_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_hic-fastq-to-cool-hicup-cooler_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/hic-hicup-cooler/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_hic-fastq-to-pairs-hicup_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_hic-fastq-to-pairs-hicup_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/hic-hicup-cooler/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_hic-juicermediumtabix-to-cool-cooler_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_hic-juicermediumtabix-to-cool-cooler_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/hic-hicup-cooler/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "KNIME" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Scaffolding-Bionano-VGP7/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/gromacs-mmgbsa/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/159" ; + schema1:name "Oliver Woolland" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/89" ; + schema1:name "Paul Brack" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Andrea Zaliani" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject ; + schema1:contentSize 180 . + + a schema1:MediaObject ; + schema1:contentSize 88 . + + a schema1:MediaObject ; + schema1:contentSize 274 . + + a schema1:MediaObject ; + schema1:contentSize 708 . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 75358 . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/average-bigwig-between-replicates/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-only-VGP3/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myautodock_vina_run.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myautodock_vina_run.pdbqt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybabel_convert.ent" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybox.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycat_pdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_model_pdbqt.pdbqt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_molecule.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_filter.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_run.json" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_run.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_select.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_select.pqr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myideal_sdf.sdf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb.pdb" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mystr_check_add_hydrogens.pdb" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_chic-fastq-to-cool-hicup-cooler_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_chic-fastq-to-cool-hicup-cooler_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/hic-hicup-cooler/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_hic-fastq-to-cool-hicup-cooler_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_hic-fastq-to-cool-hicup-cooler_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/hic-hicup-cooler/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_hic-fastq-to-pairs-hicup_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_hic-fastq-to-pairs-hicup_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/hic-hicup-cooler/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_hic-juicermediumtabix-to-cool-cooler_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_hic-juicermediumtabix-to-cool-cooler_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/hic-hicup-cooler/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/atacseq/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject ; + schema1:contentSize 213 . + + a schema1:MediaObject ; + schema1:contentSize 115 . + + a schema1:MediaObject ; + schema1:contentSize 1240 . + + a schema1:MediaObject ; + schema1:contentSize 1037 . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Lucille Delisle" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/rnaseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a ; + dct:conformsTo ; + schema1:description "Note: The synthetic dataset is saved as a CSV file, UTF-8 format, pipe-separated (|)" ; + schema1:name "vaccines_synthetic_1M.csv" . + + a schema1:CreateAction ; + schema1:endTime "2022-07-27" ; + schema1:instrument ; + schema1:name "dataspice JSON-LD created from CSV templates" ; + schema1:object , + , + , + , + ; + schema1:result , + . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:endTime "2023-01-06" ; + schema1:name "RO-Crate metadata created based on README and dataspice JSON-LD" ; + schema1:object , + ; + schema1:result . + + a schema1:CreateAction ; + schema1:endTime "2022-05-24" ; + schema1:instrument ; + schema1:name "Execution of Jupyter Notebook to generate 10k synthetic dataset" ; + schema1:result . + + a schema1:CreateAction ; + schema1:description "Note that this CSV has only 650k entries, but the Notebook code is meant to crate 1M entries. A modified notebook must have been executed." ; + schema1:endTime "2022-05-24" ; + schema1:instrument ; + schema1:name "Second (?) execution of Jupyter Notebook to generate 650k synthetic dataset" ; + schema1:result . + + a schema1:CreateAction ; + schema1:endTime "2022-05-27T09:52:32.926250" ; + schema1:instrument ; + schema1:name "Execution of pandas-profiling for exploratory data analysis" ; + schema1:object , + ; + schema1:result , + ; + schema1:startTime "2022-05-27T09:52:29.847284" . + + a schema1:CreateAction ; + schema1:endTime "2022-07-27T12:00:00+01:00" ; + schema1:instrument ; + schema1:name "Generating HTML from QMD" ; + schema1:object ; + schema1:result . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about ; + schema1:author ; + schema1:isBasedOn . 
+ + a schema1:Dataset ; + schema1:author , + , + , + , + ; + schema1:datePublished "2023-04-26" ; + schema1:description "Reproducable analytical pipeline to apply causal inference techniques using distributed observational data" ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:name "BY-COVID - WP5 - Baseline Use Case: SARS-CoV-2 vaccine effectiveness assessment - Analytical pipeline" ; + schema1:url "https://github.com/by-covid/BY-COVID_WP5_T5.2_baseline-use-case/tree/main/vaccine_effectiveness_analytical_pipeline" ; + schema1:version "1.0.0" . + + a schema1:MediaObject ; + schema1:contributor , + , + , + ; + schema1:creator , + , + , + , + ; + schema1:datePublished "2023-01-26" ; + schema1:description "Causal model responding to the research question, using a Directed Acyclic Graph" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.6913045" ; + schema1:name "BY-COVID - WP5 - Baseline Use Case: SARS-CoV-2 vaccine effectiveness assessment - Causal Model" ; + schema1:url "https://zenodo.org/record/7572373" ; + schema1:version "1.1.0" . + + a schema1:Dataset ; + schema1:creator , + , + , + , + ; + schema1:datePublished "2023-02-09" ; + schema1:description "Data management plan" ; + schema1:hasPart , + , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.7625783" ; + schema1:name "BY-COVID - WP5 - Baseline Use Case: SARS-CoV-2 vaccine effectiveness assessment - Data Management Plan" ; + schema1:url "https://zenodo.org/record/7625784" ; + schema1:version "0.0.1" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Geofile" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/159" ; + schema1:name "Oliver Woolland" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Data_R1.fastq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Data_R2.fastq" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Contigs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Read_mapping_alignement" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mapped_read" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fasta" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/atacseq/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:Person ; + schema1:name "ERGA" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lineage" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/445" ; + schema1:name "Diego De Panis" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Scaffolding-HiC-VGP8/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "WDL" ; + schema1:identifier ; + schema1:name "Workflow Description Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Bpipe" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-ont-artic-variant-calling/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Egon Willighagen" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/262" ; + schema1:name "Marvin Martens" . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Mpro-x0195_0_apo-desolv_pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hits_frankenstein_17_sdf" . + + a schema1:Person ; + schema1:name "Tim Dudgeon" . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:Person ; + schema1:name "Gianmauro Cuccuru" . + + a schema1:Person ; + schema1:name "Björn Grüning" . + + a schema1:Person ; + schema1:name "Rachael Skyner" . + + a schema1:Person ; + schema1:name "Jack Scantlebury" . + + a schema1:Person ; + schema1:name "Susan Leung" . + + a schema1:Person ; + schema1:name "Frank von Delft" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Mar Batlle" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "LFC" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Molti_Louvain" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Molti_modularity" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "approach" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "control_id" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "counts" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "layers" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "metadata" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_nodes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "multiXrank_r" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "multiXrank_selfloops" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dir" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "padj" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_extract_molecule_input_structure_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_extract_molecule_output_molecule_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_sander_mdrun_energy_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_sander_mdrun_energy_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_sander_mdrun_energy_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_sander_mdrun_energy_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_process_minout_energy_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_process_minout_energy_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_sander_mdrun_warm_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_sander_mdrun_warm_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_sander_mdrun_warm_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_sander_mdrun_warm_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_process_mdout_warm_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_process_mdout_warm_output_dat_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_sander_mdrun_nvt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_sander_mdrun_nvt_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_sander_mdrun_nvt_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_sander_mdrun_nvt_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_process_mdout_nvt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_process_mdout_nvt_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_sander_mdrun_npt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_sander_mdrun_npt_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_sander_mdrun_npt_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_sander_mdrun_npt_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_process_mdout_npt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_process_mdout_npt_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_sander_mdrun_md_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_sander_mdrun_md_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_sander_mdrun_md_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_sander_mdrun_md_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_rmsd_first_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_rmsd_first_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_pdb4amber_run_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_rmsd_exp_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_rmsd_exp_input_exp_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_rmsd_exp_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_cpptraj_rgyr_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_cpptraj_rgyr_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_cpptraj_image_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_cpptraj_image_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_leap_gen_top_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_leap_gen_top_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_leap_gen_top_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_leap_gen_top_output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_sander_mdrun_minH_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_sander_mdrun_minH_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_sander_mdrun_minH_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_sander_mdrun_minH_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_process_minout_minH_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_process_minout_minH_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_sander_mdrun_min_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_sander_mdrun_min_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_sander_mdrun_min_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_sander_mdrun_min_output_traj_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_process_minout_min_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_process_minout_min_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_amber_to_pdb_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_leap_solvate_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_leap_solvate_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_leap_solvate_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_leap_solvate_output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_leap_add_ions_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_leap_add_ions_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_leap_add_ions_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_leap_add_ions_output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_molecule_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux gs06r3b72 5.14.0-284.30.1.el9_2.x86_64 #1 SMP PREEMPT_DYNAMIC Fri Aug 25 09:13:12 EDT 2023 x86_64 x86_64 x86_64 GNU/Linux" ; + schema1:endTime "2024-04-30T13:04:29+00:00" ; + schema1:instrument ; + schema1:name "COMPSs matmul_files.py execution at marenostrum5 with JOB_ID 1236485" ; + schema1:object , + , + , + , + , + , + , + , + , + , + , + ; + schema1:result , + , + , + , + ; + schema1:startTime "2024-04-30T13:04:01+00:00" ; + schema1:subjectOf "https://userportal.bsc.es/" . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 248 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1549 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "matmul_tasks.py" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_marenostrum5_SLURM_JOB_ID_1236485" ; + schema1:contentSize 103 ; + schema1:description "COMPSs console standard error log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-1236485.err" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_marenostrum5_SLURM_JOB_ID_1236485" ; + schema1:contentSize 37369 ; + schema1:description "COMPSs console standard output log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-1236485.out" . + + a schema1:MediaObject ; + schema1:contentSize 287 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 2394 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "matmul_reproducibility_no_persistence.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Payam Emami" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/cutandrun/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.itp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybabel_minimize.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myligand.pdb" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.itp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybabel_minimize.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myligand.pdb" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_36" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_37" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_40" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_41" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_ac.frcmod" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_ac.inpcrd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_ac.lib" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_ac.prmtop" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myamber_to_pdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybabel_minimize.mol2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_image.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_rgyr.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_rms.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_heteroatoms.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.crd" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.parmtop" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.parmtop" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb4amber_run.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_mdout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_minout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myreduce_add_hydrogens.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myremove_ligand.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myremove_pdb_water.pdb" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cpout" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cprst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.mdinfo" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.netcdf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.rst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.trj" . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux gs10r3b56 5.14.0-284.30.1.el9_2.x86_64 #1 SMP PREEMPT_DYNAMIC Fri Aug 25 09:13:12 EDT 2023 x86_64 x86_64 x86_64 GNU/Linux" ; + schema1:endTime "2024-07-04T13:38:20+00:00" ; + schema1:instrument ; + schema1:name "COMPSs random_svd_compss.py execution at marenostrum5 with JOB_ID 3618551" ; + schema1:object , + ; + schema1:result ; + schema1:startTime "2024-07-04T13:34:27+00:00" ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 7154 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . 
+ + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_marenostrum5_SLURM_JOB_ID_3618551" ; + schema1:contentSize 100 ; + schema1:description "COMPSs console standard error log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-3618551.err" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_marenostrum5_SLURM_JOB_ID_3618551" ; + schema1:contentSize 8696 ; + schema1:description "COMPSs console standard output log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-3618551.out" . + + a schema1:MediaObject ; + schema1:contentSize 623 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 1046 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rRNA filtering" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "forward reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gzip compression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken2 database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "memory usage (mb)" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pacbio reads" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reverse reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "number of threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BAM files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "FASTQC" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filtered reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "KRAKEN2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MetaBat2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "QUAST" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "SPADES" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/269" ; + schema1:name "Marlene Rezk" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "API key for CDS service" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Longitude for right-edge of domain" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "emepcores" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Day for end date" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Month for end date" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Year for end date" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "generate_metdir" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "generate_rundir" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Geographic inputs for geogrid" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Geogrid data table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "HTTPS proxy information, if needed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "EMEP Input Files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Directory name for WRF input Files, should match 'meteo' base-directory in namelist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "EMEP configuration file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Geogrid namelist" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "metgrid configuration" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Real preprocessor Configuration File" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Configuration File" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Configuration File" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "WRF Configuration File" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Latitude for top of domain" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "outname_atm" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "outname_sfc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "realcores" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "EMEP run label, for output files, should match 'runlabel1' in namelist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Latitude for bottom of domain" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Day for starting date" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Month for starting date" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Year for starting date" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "grib variable table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "grib variable table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Longitude for left-edge of domain" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "wrfcores" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output files" . + + a schema1:Person ; + schema1:name "ABRomics" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/amr_gene_detection/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ; + dct:conformsTo ; + schema1:description "Note: The synthetic dataset is saved as a CSV file, UTF-8 format, pipe-separated (|)" ; + schema1:name "vaccines_synthetic_1M.csv" . + + a schema1:CreateAction ; + schema1:endTime "2022-07-27" ; + schema1:instrument ; + schema1:name "dataspice JSON-LD created from CSV templates" ; + schema1:object , + , + , + , + ; + schema1:result , + . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:endTime "2023-01-06" ; + schema1:name "RO-Crate metadata created based on README and dataspice JSON-LD" ; + schema1:object , + ; + schema1:result . 
+ + a schema1:CreateAction ; + schema1:endTime "2022-05-24" ; + schema1:instrument ; + schema1:name "Execution of Jupyter Notebook to generate 10k synthetic dataset" ; + schema1:result . + + a schema1:CreateAction ; + schema1:description "Note that this CSV has only 650k entries, but the Notebook code is meant to crate 1M entries. A modified notebook must have been executed." ; + schema1:endTime "2022-05-24" ; + schema1:instrument ; + schema1:name "Second (?) execution of Jupyter Notebook to generate 650k synthetic dataset" ; + schema1:result . + + a schema1:CreateAction ; + schema1:endTime "2022-05-27T09:52:32.926250" ; + schema1:instrument ; + schema1:name "Execution of pandas-profiling for exploratory data analysis" ; + schema1:object , + ; + schema1:result , + ; + schema1:startTime "2022-05-27T09:52:29.847284" . + + a schema1:CreateAction ; + schema1:endTime "2022-07-27T12:00:00+01:00" ; + schema1:instrument ; + schema1:name "Generating HTML from QMD" ; + schema1:object ; + schema1:result . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about ; + schema1:author ; + schema1:isBasedOn . + + a schema1:Dataset ; + schema1:author , + , + , + , + ; + schema1:datePublished "2023-04-26" ; + schema1:description "Reproducable analytical pipeline to apply causal inference techniques using distributed observational data" ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:name "BY-COVID - WP5 - Baseline Use Case: SARS-CoV-2 vaccine effectiveness assessment - Analytical pipeline" ; + schema1:url "https://github.com/by-covid/BY-COVID_WP5_T5.2_baseline-use-case/tree/main/vaccine_effectiveness_analytical_pipeline" ; + schema1:version "1.0.0" . 
+ + a schema1:MediaObject ; + schema1:contributor , + , + , + ; + schema1:creator , + , + , + , + ; + schema1:datePublished "2023-01-26" ; + schema1:description "Causal model responding to the research question, using a Directed Acyclic Graph" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.6913045" ; + schema1:name "BY-COVID - WP5 - Baseline Use Case: SARS-CoV-2 vaccine effectiveness assessment - Causal Model" ; + schema1:url "https://zenodo.org/record/7572373" ; + schema1:version "1.1.0" . + + a schema1:Dataset ; + schema1:creator , + , + , + , + ; + schema1:datePublished "2023-02-09" ; + schema1:description "Data management plan" ; + schema1:hasPart , + , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.7625783" ; + schema1:name "BY-COVID - WP5 - Baseline Use Case: SARS-CoV-2 vaccine effectiveness assessment - Data Management Plan" ; + schema1:url "https://zenodo.org/record/7625784" ; + schema1:version "0.0.1" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MaxMTpc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MinCountPerCell" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MinGenesPerCell" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "genecount_qc_plot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mito_qc_plot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc_anndata_object" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "top_genes_plot" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Darwin bsccs742.int.bsc.es 23.1.0 Darwin Kernel Version 23.1.0: Mon Oct 9 21:27:27 PDT 2023; root:xnu-10002.41.9~6/RELEASE_X86_64 x86_64 COMPSS_HOME=/Users/rsirvent/opt/COMPSs/" ; + schema1:endTime "2023-11-16T14:25:14+00:00" ; + schema1:instrument ; + schema1:name "COMPSs simple.py execution at bsccs742.int.bsc.es" ; + schema1:object ; + schema1:result , + . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 232 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 639 ; + schema1:description "Auxiliary File" ; + schema1:name "README" . + + a schema1:MediaObject ; + schema1:contentSize 4467 ; + schema1:description "Auxiliary File" ; + schema1:name "pom.xml" . + + a schema1:MediaObject ; + schema1:contentSize 121 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1312 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sra-manifest-to-concatenated-fastqs/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "basecalling model" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "identifier used" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "kraken_database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "nanopore FASTQ reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "number of threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Flye de novo assembler for single-molecule reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken2 reports" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Krona taxonomy visualization" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Medaka polisher" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "QUAlity assessment" . + + a schema1:ComputerLanguage ; + schema1:name "Scipion" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "collision_info" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "file_id" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gnps_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hmdb_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mbank_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mzml_files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ppmx" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "python_script" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "r_script" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "candidate_files" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "result" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Matthias Bernt" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/openms-metaprosip/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Matthias Bernt" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/openms-metaprosip/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Andreas Wilm and October SESSIONS and Paola Florez DE SESSIONS and ZHU Yuan and Shuzhen SIM and CHU Wenhan Collins" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/parallel-accession-download/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux 663d7a747723 5.15.0-101-generic #111~20.04.1-Ubuntu SMP Mon Mar 11 15:44:43 UTC 2024 x86_64 x86_64 x86_64 GNU/Linux COMPSS_HOME=/opt/COMPSs/" ; + schema1:endTime "2024-03-28T09:14:42+00:00" ; + schema1:instrument ; + schema1:name "COMPSs fibonacci.py execution at 663d7a747723" ; + schema1:result . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3.rc2401" . + + a schema1:MediaObject ; + schema1:contentSize 244 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 26 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 340 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:Person ; + schema1:name "All the Sarek team" . + + a schema1:Person ; + schema1:name "nf-core comunity and people in the IMPaCT-Data project." . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "WDL" ; + schema1:identifier ; + schema1:name "Workflow Description Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . 
+ + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject ; + schema1:contentSize 183 . + + a schema1:MediaObject ; + schema1:contentSize 2039 . + + a schema1:MediaObject ; + schema1:contentSize 572 . + + a schema1:MediaObject ; + schema1:contentSize 1442 . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 44222 . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/370" ; + schema1:name "Peter Menzel" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:name "Eleni Mina" . + + a schema1:Person ; + schema1:name "Daniël Wijnbergen" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Egon Willighagen" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/262" ; + schema1:name "Marvin Martens" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/polish-with-long-reads/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "All the Sarek team" . + + a schema1:Person ; + schema1:name "nf-core comunity and people in the IMPaCT-data project." . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/gromacs-mmgbsa/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Occurrence_southpacific.csv" . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "KNIME" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-only-VGP3/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject ; + schema1:contentSize 178 . + + a schema1:MediaObject ; + schema1:contentSize 106 . + + a schema1:MediaObject ; + schema1:contentSize 268 . + + a schema1:MediaObject ; + schema1:contentSize 846 . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 1237 . + + a schema1:MediaObject ; + schema1:contentSize 650 . + + a schema1:MediaObject ; + schema1:contentSize 650 . + + a schema1:MediaObject ; + schema1:contentSize 12 . + + a schema1:MediaObject ; + schema1:contentSize 10 . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-atac-cutandrun_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-atac-cutandrun_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-pe_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-pe_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-sr_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-sr_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "body" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_host" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_password" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_user" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "is_availability" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "result_modifiers" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "results" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_file" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/476" ; + schema1:name "Vasiliki Panagi" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/499" ; + schema1:name "Elisabetta Spinazzola" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "ERGA" . 
+ + a schema1:Person ; + schema1:name "ERGA" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lineage" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/445" ; + schema1:name "Diego De Panis" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reverse read length" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Forward primer" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "forward reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Subfragment name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Primers are removed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reference database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reverse read length" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reverse primer" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reverse reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Sample name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "files_to_folder_fastqc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "files_to_folder_ngtax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "files_to_folder_phyloseq" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "files_to_folder_picrust2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "turtle" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "WDL" ; + schema1:identifier ; + schema1:name "Workflow Description Language" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-only-VGP3/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Alex L Mitchell" . + + a schema1:Person ; + schema1:name "Lorna J Richardson" . + + a schema1:Person ; + schema1:name "Ekaterina Sakharova" . + + a schema1:Person ; + schema1:name "Maxim Scheremetjew" . + + a schema1:Person ; + schema1:name "Anton Korobeynikov" . + + a schema1:Person ; + schema1:name "Alex Shlemov" . + + a schema1:Person ; + schema1:name "Olga Kunyavskaya" . + + a schema1:Person ; + schema1:name "Alla Lapidus" . + + a schema1:Person ; + schema1:name "Robert D Finn" . + + a schema1:Person ; + schema1:name "Alexandre Almeida" . + + a schema1:Person ; + schema1:name "Martin Beracochea" . + + a schema1:Person ; + schema1:name "Miguel Boland" . + + a schema1:Person ; + schema1:name "Josephine Burgin" . + + a schema1:Person ; + schema1:name "Guy Cochrane" . + + a schema1:Person ; + schema1:name "Michael R Crusoe" . + + a schema1:Person ; + schema1:name "Varsha Kale" . 
+ + a schema1:Person ; + schema1:name "Simon C Potter" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "5.8s_pattern" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "5s_pattern" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "forward_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "itsonedb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "itsonedb_label" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "itsonedb_otu_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "itsonedb_tax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_db" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_label" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_otus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_tax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc_min_length" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reverse_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rfam_model_clans" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rfam_models" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "single_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_db" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_label" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_otus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_tax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stats_file_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "unite_db" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "unite_label" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "unite_otu_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "unite_tax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ITS-length" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "completed_flag_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastp_filtering_json_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gz_files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hashsum_paired" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hashsum_single" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_tax_flag_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc-statistics" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc-status" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc_summary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rna-count" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sequence-categorisation_folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "suppressed_upload" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "taxonomy-summary_folder" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Scaffolding-Bionano-VGP7/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input File" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Select One Gene" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "File URL" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MetGENE metabolite table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MetGENE Reaction Table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Regulatory Element Set" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GlyGen Protein Products" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Gene Count Matrix" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Scored Genes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Gene" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "LINCS L1000 Reverse Search Dashboard" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Scored Drugs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Scored Genes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Scored Genes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MetGENE Summary" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-atac-cutandrun_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-atac-cutandrun_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-pe_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-pe_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-sr_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-sr_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s14r2b13 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=svd_lanczos COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=31727336 SLURM_JOB_USER=bsc19756 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=4486 SLURM_SUBMIT_DIR=/gpfs/projects/bsc19/bsc19756/RF_Example SLURM_JOB_NODELIST=s14r2b[13,15-16] SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48(x3) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login1 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=3 COMPSS_MASTER_NODE=s14r2b13 COMPSS_WORKER_NODES= s14r2b15 s14r2b16" ; + schema1:endTime "2024-02-14T13:47:39+00:00" ; + schema1:instrument ; + schema1:name "COMPSs main_rf.py execution at marenostrum4 with JOB_ID 31727336" ; + schema1:result ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 6801 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 941 ; + schema1:description "Auxiliary File" ; + schema1:name "launch_provenance.sh" . + + a schema1:MediaObject ; + schema1:contentSize 445 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1032 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MaxMTpc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MinCountPerCell" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MinGenesPerCell" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "genecount_qc_plot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mito_qc_plot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc_anndata_object" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "top_genes_plot" . + + a schema1:Person ; + schema1:name "Lucille Delisle" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/rnaseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/average-bigwig-between-replicates/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Adrián Muñoz-Civico" . + + a schema1:Person ; + schema1:name "Daniel López-López" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_36" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_37" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_40" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_41" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_42" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_43" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fasta" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_36" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_37" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_40" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myamber_to_pdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_image.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_rgyr.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_rms.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.pdb" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb4amber_run.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_mdout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_minout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cpout" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cprst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.mdinfo" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.netcdf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.rst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.trj" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_baredSC-1d-logNorm_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_baredSC-1d-logNorm_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/baredsc/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_baredSC-2d-logNorm_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_baredSC-2d-logNorm_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/baredsc/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bits_set" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blacklist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bwt_algorithm" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_max_len" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_lratio" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_exomeDepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_manta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_bf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "generate_bwa_indexes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_exome" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_mapping_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "read_group" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_amb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_ann" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_bwt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fai" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_pac" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_sa" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_bwa_mem" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastqc" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_samtools" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "html_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "json_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_all" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_bam_filtering" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_exomedepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_manta" . + + a schema1:Person ; + schema1:name "Daniel López-López" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:Person ; + schema1:name "Milad Miladi" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "PkorrP19E3_ONT_fast5.tar.gz" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Directory with cached BridgeDB data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output of differential mRNA expression testing from DESeq2, with Ensembl gene ID's concatened with gene symbols with \";\" inbetween in the first column" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output of differential miRNA expression testing from DESeq2, with miRBase ID's in the first column" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Table with entrez mappings from STRING protein idenfitiers as downloaded from STRING" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Tab separated edge list with Ensembl gene ID's in the first two columns, and their bi-weight midcorrelation as defined by Langelder et al. in the third column" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of correlation edges to use, USE FOR TESTING ONLY" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Tab separated edge list with Ensembl gene ID's in the first column, miRBase ID's in the second column. and their bi-weight midcorrelation as defined by Langelder et al. in the third column" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Table with miRNA - mRNA target data as downloaded from miRTarBase" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of miRTarBase edges to use, USE FOR TESTING ONLY" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of cores to let MOGAMUN use" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of generation to let the genetic algorithm in MOGAMUN evolve" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The maximum size of subnetworks during postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "the minimum Jaccard Index overlap between two subnetworks to allow them to be merged" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The minimum size of subnetworks during postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of parallel runs to let MOGAMUN do, these parallel runs are combined in postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Table with Protein - protein interaction data as downloaded from STRING" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The minimum score for a STRING edge to be included in the analysis" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of STRING edges to use, USE FOR TESTING ONLY" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output of variant burden testing using SKAT in the rvtests package, with HGNC symbols in the first column" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "full_graph" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "subnetworks" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "annotation_metadata" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cgi_genes" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "civic_genes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "final_variants" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gene_cards_germline" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gene_cards_loh" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gene_cards_somatic" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gene_reports_tabular" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "germline_cancer_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "loh_cancer_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "loh_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "maf_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mutations_summary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "somatic_cancer_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "somatic_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "uniprot_cancer_genes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "variant_reports_tabular" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux a5026d668aa6 5.15.0-102-generic #112~20.04.1-Ubuntu SMP Thu Mar 14 14:28:24 UTC 2024 x86_64 x86_64 x86_64 GNU/Linux COMPSS_HOME=/opt/COMPSs/" ; + schema1:endTime "2024-04-19T08:05:56+00:00" ; + schema1:instrument ; + schema1:name "COMPSs increment_fibonacci.py execution at a5026d668aa6" ; + schema1:result . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3.rc2404" . + + a schema1:MediaObject ; + schema1:contentSize 454 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 48 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 1072 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:alternateName "RMD" ; + schema1:name "R markdown" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_36" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_37" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_39" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_40" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_41" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_ac.frcmod" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_ac.inpcrd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_ac.lib" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_ac.prmtop" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myamber_to_pdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybabel_minimize.mol2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_image.trr" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_rgyr.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_rms.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_heteroatoms.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.parmtop" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.parmtop" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb4amber_run.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_mdout.dat" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_minout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myreduce_add_hydrogens.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myremove_ligand.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myremove_pdb_water.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cpout" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cprst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.mdinfo" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.netcdf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.rst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.trj" . + + a schema1:ComputerLanguage ; + schema1:name "Bpipe" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:name "Payam Emami" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ApplyVQSR_ts_filter_level" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "VariantRecalibrator_trust_all_polymorphic" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "VariantRecalibrator_truth_sensitivity_trance_indels" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "VariantRecalibrator_truth_sensitivity_trance_snps" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "VariantRecalibrator_use_annotation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bcftools_norm_multiallelics" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bcftools_norm_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bcftools_view_include_VQSR_filters" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bcftools_view_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bwa_mem_num_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bwa_mem_sec_shorter_split_hits" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gatk_splitintervals_exclude_intervalList" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gatk_splitintervals_include_intervalList" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gatk_splitintervals_scatter_count" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_file_split" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_file_split_fwd_single" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_file_split_rev" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_qc_check" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_trimming_check" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "picard_addorreplacereadgroups_rgpl" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "raw_files_directory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_genome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_fixmate_output_format" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_fixmate_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_flagstat_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_sort_compression_level" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_sort_memory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_sort_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_cigar" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_collapsecigar" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_count" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_fastcompression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_iscram" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_randomseed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readsingroup" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readsinlibrary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readsquality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readswithbits" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readswithoutbits" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readtagtostrip" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_region" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_samheader" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_target_bed_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_uncompressed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sub_bqsr_hc_java_options" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sub_bqsr_hc_native_pairHMM_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sub_bqsr_interval_padding" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sub_bqsr_known_sites_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sub_bqsr_known_sites_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sub_bqsr_known_sites_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_build_over" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_convert_arg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_database_location" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_na_string" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_operation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_otherinfo" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_protocol" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_remove" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_vcfinput" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_compression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_do_not_compress" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_length" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_strigency" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_trim_suffix" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "vqsr_arguments_indels_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "vqsr_arguments_indels_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "vqsr_arguments_indels_3" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "vqsr_arguments_snps_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "vqsr_arguments_snps_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "vqsr_arguments_snps_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "vqsr_arguments_snps_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "vqsr_known_sites_indels_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "vqsr_known_sites_indels_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "vqsr_known_sites_indels_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "vqsr_known_sites_snps_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "vqsr_known_sites_snps_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "vqsr_known_sites_snps_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "vqsr_known_sites_snps_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_bcftools_norm_vqsr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_bcftools_view_filter_vqsr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_bwa_mem_paired" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_bwa_mem_single" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_paired_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_paired_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_raw_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_single_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_single_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gather_bwa_sam_files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_ApplyVQSR_indel" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_ApplyVQSR_snp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_CombineGVCFs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_GenotypeGVCFs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_MakeSitesOnlyVcf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_VQSR_MergeVCFs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_VariantRecalibrator_indel_recal" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_VariantRecalibrator_indel_tranches" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_VariantRecalibrator_snp_recal" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_VariantRecalibrator_snp_tranches" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_bqsr_subworkflowbqsr_bqsr_bam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_bqsr_subworkflowbqsr_hc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_bqsr_subworkflowbqsr_mergevcfs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_bqsr_subworkflowbqsr_tables" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_splitintervals" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_picard_addorreplacereadgroups" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_picard_markduplicates" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_picard_markduplicates_metrics" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_fixmate" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_flagstat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_index" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_sort" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_view_conversion" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_view_count_total" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_table_annovar_filtered_avinput" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_table_annovar_filtered_multianno_txt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_table_annovar_filtered_multianno_vcf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trim_galore_paired_fq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trim_galore_paired_reports" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trim_galore_single_fq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trim_galore_single_reports" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_sort_by_name" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_baredSC-1d-logNorm_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_baredSC-1d-logNorm_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/baredsc/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_baredSC-2d-logNorm_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_baredSC-2d-logNorm_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/baredsc/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name " test suite" ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "GitHub testing workflow for " ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/crs4/fair-crcc-send-data/actions/workflows/main.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:Dataset ; + schema1:description "Integration tests for the workflow" . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset ; + schema1:description "Configuration folder" . + + a schema1:MediaObject . + + a schema1:Dataset ; + schema1:description "Workflow folder" . + + a schema1:Dataset ; + schema1:description "Workflow rule module" . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset ; + schema1:description "Validation files" . + + a schema1:MediaObject . + + a schema1:Dataset ; + schema1:description "Scripts folder" . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Javier Garrayo-Ventas" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myeditconf.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfix_side_chain.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygenion.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygenion.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_energy.xvg" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_image.xtc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_rgyr.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_rms.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_trjconv_str.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygrompp.tpr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.cpt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.edr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.xtc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb2gmx.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb2gmx.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysolvate.gro" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysolvate.zip" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "ABRomics" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/bacterial_genome_annotation/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a ; + dct:conformsTo ; + schema1:description "Note: The synthetic dataset is saved as a CSV file, UTF-8 format, pipe-separated (|)" ; + schema1:name "vaccines_synthetic_1M.csv" . + + a schema1:CreateAction ; + schema1:endTime "2022-07-27" ; + schema1:instrument ; + schema1:name "dataspice JSON-LD created from CSV templates" ; + schema1:object , + , + , + , + ; + schema1:result , + . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:endTime "2023-01-06" ; + schema1:name "RO-Crate metadata created based on README and dataspice JSON-LD" ; + schema1:object , + ; + schema1:result . + + a schema1:CreateAction ; + schema1:endTime "2022-05-24" ; + schema1:instrument ; + schema1:name "Execution of Jupyter Notebook to generate 10k synthetic dataset" ; + schema1:result . + + a schema1:CreateAction ; + schema1:description "Note that this CSV has only 650k entries, but the Notebook code is meant to crate 1M entries. A modified notebook must have been executed." ; + schema1:endTime "2022-05-24" ; + schema1:instrument ; + schema1:name "Second (?) execution of Jupyter Notebook to generate 650k synthetic dataset" ; + schema1:result . 
+ + a schema1:CreateAction ; + schema1:endTime "2022-05-27T09:52:32.926250" ; + schema1:instrument ; + schema1:name "Execution of pandas-profiling for exploratory data analysis" ; + schema1:object , + ; + schema1:result , + ; + schema1:startTime "2022-05-27T09:52:29.847284" . + + a schema1:CreateAction ; + schema1:endTime "2022-07-27T12:00:00+01:00" ; + schema1:instrument ; + schema1:name "Generating HTML from QMD" ; + schema1:object ; + schema1:result . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about ; + schema1:author ; + schema1:isBasedOn . + + a schema1:Dataset ; + schema1:author , + , + , + , + ; + schema1:datePublished "2023-04-26" ; + schema1:description "Reproducable analytical pipeline to apply causal inference techniques using distributed observational data" ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:name "BY-COVID - WP5 - Baseline Use Case: SARS-CoV-2 vaccine effectiveness assessment - Analytical pipeline" ; + schema1:url "https://github.com/by-covid/BY-COVID_WP5_T5.2_baseline-use-case/tree/main/vaccine_effectiveness_analytical_pipeline" ; + schema1:version "1.0.0" . + + a schema1:MediaObject ; + schema1:contributor , + , + , + ; + schema1:creator , + , + , + , + ; + schema1:datePublished "2023-01-26" ; + schema1:description "Causal model responding to the research question, using a Directed Acyclic Graph" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.6913045" ; + schema1:name "BY-COVID - WP5 - Baseline Use Case: SARS-CoV-2 vaccine effectiveness assessment - Causal Model" ; + schema1:url "https://zenodo.org/record/7572373" ; + schema1:version "1.1.0" . 
+ + a schema1:Dataset ; + schema1:creator , + , + , + , + ; + schema1:datePublished "2023-02-09" ; + schema1:description "Data management plan" ; + schema1:hasPart , + , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.7625783" ; + schema1:name "BY-COVID - WP5 - Baseline Use Case: SARS-CoV-2 vaccine effectiveness assessment - Data Management Plan" ; + schema1:url "https://zenodo.org/record/7625784" ; + schema1:version "0.0.1" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s01r2b50 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=Increment COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=30894524 SLURM_JOB_USER=bsc19057 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=2952 SLURM_SUBMIT_DIR=/gpfs/home/bsc19/bsc19057/COMPSs-DP/tutorial_apps/python/increment SLURM_JOB_NODELIST=s01r2b50 SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48 COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=1 COMPSS_MASTER_NODE=s01r2b50 COMPSS_WORKER_NODES=" ; + schema1:endTime "2023-11-24T08:24:54+00:00" ; + schema1:instrument ; + schema1:name "COMPSs increment.py execution at marenostrum4 with JOB_ID 30894524" ; + schema1:object , + , + ; + schema1:result , + , + , + ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . 
+ + a schema1:MediaObject ; + schema1:contentSize 243 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 920 ; + schema1:description "Auxiliary File" ; + schema1:name "README" . + + a schema1:MediaObject ; + schema1:contentSize 4618 ; + schema1:description "Auxiliary File" ; + schema1:name "pom.xml" . + + a schema1:MediaObject ; + schema1:contentSize 207 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 1633 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:Person ; + schema1:name "Tim Dudgeon" . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/galaxyproject/iwc/actions/workflows/workflow_test.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Alex L Mitchell" . + + a schema1:Person ; + schema1:name "Lorna J Richardson" . + + a schema1:Person ; + schema1:name "Ekaterina Sakharova" . + + a schema1:Person ; + schema1:name "Maxim Scheremetjew" . + + a schema1:Person ; + schema1:name "Anton Korobeynikov" . + + a schema1:Person ; + schema1:name "Alex Shlemov" . + + a schema1:Person ; + schema1:name "Olga Kunyavskaya" . + + a schema1:Person ; + schema1:name "Alla Lapidus" . + + a schema1:Person ; + schema1:name "Robert D Finn" . + + a schema1:Person ; + schema1:name "Alexandre Almeida" . + + a schema1:Person ; + schema1:name "Martin Beracochea" . + + a schema1:Person ; + schema1:name "Miguel Boland" . + + a schema1:Person ; + schema1:name "Josephine Burgin" . + + a schema1:Person ; + schema1:name "Guy Cochrane" . + + a schema1:Person ; + schema1:name "Michael R Crusoe" . + + a schema1:Person ; + schema1:name "Varsha Kale" . + + a schema1:Person ; + schema1:name "Simon C Potter" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "5.8s_pattern" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "5s_pattern" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CGC_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CGC_postfixes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "EggNOG_data_dir" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "EggNOG_db" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "EggNOG_diamond_db" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "HMM_gathering_bit_score" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "HMM_name_database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "HMM_omit_alignment" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "InterProScan_applications" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "InterProScan_databases" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "InterProScan_outputFormat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Uniref90_db_txt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cgc_chunk_size" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "clusters_glossary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "contig_min_length" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "contigs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "diamond_databaseFile" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "diamond_header" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "diamond_maxTargetSeqs" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "func_ann_names_hmmer" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "func_ann_names_ips" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "go_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gp_flatfiles_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "graphs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hmmsearch_header" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ips_header" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ko_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_db" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_label" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_otus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_tax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "other_ncrna_models" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pathways_classes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pathways_names" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "protein_chunk_size_IPS" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "protein_chunk_size_eggnog" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "protein_chunk_size_hmm" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rfam_model_clans" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rfam_models" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_db" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_label" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_otus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_tax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bgzip_fasta_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bgzip_index" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chunking_nucleotides" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chunking_proteins" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "completed_flag_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "compressed_files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "functional_annotation_folder" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hashsum_input" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "index_fasta_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_cds_flag_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_tax_flag_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pathways_systems_folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pathways_systems_folder_antismash" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pathways_systems_folder_antismash_summary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc-statistics_folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc-status" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc_summary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rna-count" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sequence-categorisation_folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stats" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "taxonomy-summary_folder" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-only-VGP3/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s01r2b53 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=bsc_cs SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=31494577 SLURM_JOB_USER=bsc19057 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=2952 SLURM_SUBMIT_DIR=/gpfs/home/bsc19/bsc19057/Tutorial_2024/kmeans SLURM_JOB_NODELIST=s01r2b[53-54] SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48(x2) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=2 COMPSS_MASTER_NODE=s01r2b53 COMPSS_WORKER_NODES= s01r2b54" ; + schema1:endTime "2024-01-22T15:53:19+00:00" ; + schema1:instrument ; + schema1:name "COMPSs kmeans.py execution at marenostrum4 with JOB_ID 31494577" ; + schema1:result ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1137 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 1017 ; + schema1:description "Auxiliary File" ; + schema1:name "launch_kmeans.sh" . + + a schema1:MediaObject ; + schema1:contentSize 471 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 761 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject ; + schema1:contentSize 177 . + + a schema1:MediaObject ; + schema1:contentSize 422 . + + a schema1:MediaObject ; + schema1:contentSize 724 . + + a schema1:MediaObject ; + schema1:contentSize 1056 . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 6619 . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_molecules_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_aln_orig_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_aln_target_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_ene_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Dannon Baker" . + + a schema1:Person ; + schema1:name "Björn Grüning" . 
+ + a schema1:Person ; + schema1:name "Delphine Larivière" . + + a schema1:Person ; + schema1:name "Gildas Le Corguillé" . + + a schema1:Person ; + schema1:name "Andrew Lonie" . + + a schema1:Person ; + schema1:name "Nicholas Keener" . + + a schema1:Person ; + schema1:name "Sergei Kosakovsky Pond" . + + a schema1:Person ; + schema1:name "Wolfgang Maier" . + + a schema1:Person ; + schema1:name "Anton Nekrutenko" . + + a schema1:Person ; + schema1:name "James Taylor" . + + a schema1:Person ; + schema1:name "Steven Weaver" . + + a schema1:Person ; + schema1:name "Marius van den Beek" . + + a schema1:Person ; + schema1:name "Dave Bouvier" . + + a schema1:Person ; + schema1:name "John Chilton" . + + a schema1:Person ; + schema1:name "Nate Coraor" . + + a schema1:Person ; + schema1:name "Frederik Coppens" . + + a schema1:Person ; + schema1:name "Bert Droesbeke" . + + a schema1:Person ; + schema1:name "Ignacio Eguinoa" . + + a schema1:Person ; + schema1:name "Simon Gladman" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux archit-Alpha-15-B5EEK 6.5.0-35-generic #35~22.04.1-Ubuntu SMP PREEMPT_DYNAMIC Tue May 7 09:00:52 UTC 2 x86_64 x86_64 x86_64 GNU/Linux COMPSS_HOME=/opt/COMPSs/" ; + schema1:endTime "2024-06-06T10:12:50+00:00" ; + schema1:instrument ; + schema1:name "COMPSs monte_carlo_pi.py execution at archit-Alpha-15-B5EEK" ; + schema1:result , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3.rc2404" . + + a schema1:MediaObject ; + schema1:contentSize 480 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 44 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 727 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "RMD" ; + schema1:name "R markdown" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s11r2b54 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=31521170 SLURM_JOB_USER=nct00014 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=9214 SLURM_SUBMIT_DIR=/gpfs/home/nct00/nct00014/lysozyme_in_water SLURM_JOB_NODELIST=s11r2b[54-56,62] SLURM_JOB_GID=16440 SLURM_JOB_CPUS_PER_NODE=48(x4) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=nct_290 SLURM_JOB_NUM_NODES=4 COMPSS_MASTER_NODE=s11r2b54 COMPSS_WORKER_NODES= s11r2b55 s11r2b56 s11r2b62" ; + schema1:endTime "2024-01-25T16:09:26+00:00" ; + schema1:instrument ; + schema1:name "COMPSs lysozyme_in_water.py execution at marenostrum4 with JOB_ID 31521170" ; + schema1:object , + , + , + , + , + , + ; + schema1:result , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 4169 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1584 ; + schema1:description "Auxiliary File" ; + schema1:name "launch.sh" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 8957 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water_full.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 8868 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water_full_no_mpi.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 10194 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water_full_singularity.py" . + + a schema1:MediaObject ; + schema1:contentSize 629 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 600 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Scaffolding-HiC-VGP8/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/cutandrun/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Lucille Delisle" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/rnaseq-sr/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "QCFilteredAnnDataObject" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "1k_cell_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "1k_gene_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "marker_dot_plot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "marker_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "processed_anndata_object" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "umap_cluster_plot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "umap_sample_plot" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/141" ; + schema1:name "Tracy Chew" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MaxMTpc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MinCountPerCell" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MinGenesPerCell" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "genecount_qc_plot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mito_qc_plot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc_anndata_object" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "top_genes_plot" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sra-manifest-to-concatenated-fastqs/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Cali Willet" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/141" ; + schema1:name "Tracy Chew" . + + a schema1:ComputerLanguage ; + schema1:name "Shell Script" . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "indices_folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rnaseq_left_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rnaseq_right_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sars_cov_2_reference_genome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "indels" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "snps" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/26" ; + schema1:name "Ambarish Kumar" . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux nvb4 2.6.32-642.6.2.el6.x86_64 #1 SMP Mon Oct 24 10:22:33 EDT 2016 x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs SLURM_JOB_QOS=bsc_cs SLURM_JOB_GPUS=0,1,2,3 SLURM_MEM_PER_CPU=8000 SLURM_JOB_ID=1930260 SLURM_JOB_USER=bsc19959 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=4373 SLURM_SUBMIT_DIR=/gpfs/scratch/bsc19/bsc19959/cache SLURM_JOB_NODELIST=nvb[4-9,12-21] SLURM_JOB_CPUS_PER_NODE=16(x16) SLURM_SUBMIT_HOST=nvblogin1 SLURM_JOB_PARTITION=projects SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=16 COMPSS_MASTER_NODE=nvb4 COMPSS_WORKER_NODES= nvb5 nvb6 nvb7 nvb8 nvb9 nvb12 nvb13 nvb14 nvb15 nvb16 nvb17 nvb18 nvb19 nvb20 nvb21" ; + schema1:endTime "2024-03-22T17:53:30+00:00" ; + schema1:instrument ; + schema1:name "COMPSs cch_kmeans_test.py execution at bsc_nvidia with JOB_ID 1930260" ; + schema1:result , + ; + schema1:startTime "2024-03-22T17:29:38+00:00" ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:ComputerLanguage ; + schema1:alternateName "Dislib" ; + schema1:name "Dislib" ; + schema1:url "https://dislib.readthedocs.io/en/latest/" ; + schema1:version "0.9" . + + a schema1:MediaObject ; + schema1:contentSize 6201 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_bsc_nvidia_SLURM_JOB_ID_1930260" ; + schema1:contentSize 64 ; + schema1:description "COMPSs console standard error log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-1930260.err" . 
+ + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_bsc_nvidia_SLURM_JOB_ID_1930260" ; + schema1:contentSize 2835 ; + schema1:description "COMPSs console standard output log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-1930260.out" . + + a schema1:MediaObject ; + schema1:contentSize 731 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 1009 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s02r2b29 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=31494702 SLURM_JOB_USER=bsc19057 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=2952 SLURM_SUBMIT_DIR=/gpfs/home/bsc19/bsc19057/Tutorial_2024/wordcount SLURM_JOB_NODELIST=s02r2b[29,45] SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48(x2) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=2 COMPSS_MASTER_NODE=s02r2b29 COMPSS_WORKER_NODES= s02r2b45" ; + schema1:endTime "2024-01-22T16:19:50+00:00" ; + schema1:instrument ; + schema1:name "COMPSs wc_reduce.py execution at marenostrum4 with JOB_ID 31494702" ; + schema1:object ; + schema1:result ; + schema1:subjectOf "https://userportal.bsc.es/" . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 972 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 994 ; + schema1:description "Auxiliary File" ; + schema1:name "launch_wordcount.sh" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 2782 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "wc_merge.py" . + + a schema1:MediaObject ; + schema1:contentSize 600 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 854 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/parallel-accession-download/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a schema1:MediaObject ; + schema1:contentSize 177 . + + a schema1:MediaObject ; + schema1:contentSize 167 . + + a schema1:MediaObject ; + schema1:contentSize 274 . + + a schema1:MediaObject ; + schema1:contentSize 994 . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 21291 . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Sample" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "barcodes.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "genes.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "matrix.mtx" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_baredSC-1d-logNorm_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_baredSC-1d-logNorm_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/baredsc/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_baredSC-2d-logNorm_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_baredSC-2d-logNorm_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/baredsc/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/kmer-profiling-hifi-VGP1/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_summary_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_summary_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_molecules_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_molecules_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a schema1:ComputerLanguage ; + schema1:name "Pi" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BridgeDB cache" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Differential gene expression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Differential miRNA expression" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING identifier mapping" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mRNA expression correlation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Correlation limit" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "miRNA mRNA correlation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "miRTarBase data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "miRTarBase limit" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MOGAMUN cores" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MOGAMUN generations" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Maximum subnetwork size" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Subnetwork merge threshold" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Minimum subnetwork size" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MOGAMUN runs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING filter" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING limit" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Variant Burden" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Full network" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Subnetworks" . + + a schema1:Person ; + schema1:name "Scott Handley" . + + a schema1:Person ; + schema1:name "Rob Edwards" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reverse read length" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Forward primer" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "forward reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Subfragment name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Metadata file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Primers are removed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reference database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reverse read length" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reverse primer" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reverse reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Sample name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "number of threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "files_to_folder_fastqc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "files_to_folder_ngtax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "files_to_folder_phyloseq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "files_to_folder_picrust2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "turtle" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myautodock_vina_run.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myautodock_vina_run.pdbqt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybabel_convert.ent" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybox.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycat_pdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_model_pdbqt.pdbqt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_molecule.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_filter.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_run.json" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_run.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_select.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_select.pqr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myideal_sdf.sdf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mystr_check_add_hydrogens.pdb" . + + a schema1:Person ; + schema1:name "Fernando Cruz (CNAG)" . + + a schema1:Person ; + schema1:name "Francisco Camara (CNAG)" . + + a schema1:Person ; + schema1:name "Tyler Alioto (CNAG)" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "collision_info" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gnps_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hmdb_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mbank_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mzml_files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ppmx" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "python_script" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "r_script" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "candidate_files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "result" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/galaxyproject/iwc/actions/workflows/workflow_test.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Marc Del Pino" . + + a schema1:Person ; + schema1:name "Laia Codo" . + + a schema1:Person ; + schema1:name "Luis Jordá" . + + a schema1:SoftwareApplication ; + schema1:citation ; + schema1:description "AmberTools is a set of programs for biomolecular simulation and analysis" ; + schema1:installUrl "https://anaconda.org/conda-forge/ambertools" ; + schema1:name "AmberTools20" ; + schema1:softwareHelp "http://ambermd.org/doc12/Amber20.pdf" ; + schema1:url "http://ambermd.org/AmberTools.php" ; + schema1:version "20.0" . + + a schema1:Person ; + schema1:name "Frank Kauff" . + + a schema1:Person ; + schema1:name "Servaas Michielssens" . + + a schema1:Dataset ; + schema1:dateModified "2021-09-28T10:02:17.833Z" ; + schema1:description "HPC launchers for PyCOMPSs workflows" ; + schema1:hasPart , + ; + schema1:name "launchers" . + + a schema1:Dataset ; + schema1:dateModified "2021-09-28T10:02:17.833Z" ; + schema1:description "HPC workflows using PyCOMPSs" ; + schema1:hasPart ; + schema1:name "workflows" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "folder where the STAR indices are" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "filter_rrna" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "forward reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gtf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "folder where the kallisto indices are" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "maximum memory usage in megabytes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "prefix_id" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "quantMode" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reverse reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "number of threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STAR output folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "FASTQC" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "FeatureCounts output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filtered reads folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "kallisto output" . + + a schema1:Person ; + schema1:name "ABRomics" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/amr_gene_detection/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_chic-fastq-to-cool-hicup-cooler_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_chic-fastq-to-cool-hicup-cooler_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/hic-hicup-cooler/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_hic-fastq-to-cool-hicup-cooler_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_hic-fastq-to-cool-hicup-cooler_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/hic-hicup-cooler/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_hic-fastq-to-pairs-hicup_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_hic-fastq-to-pairs-hicup_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/hic-hicup-cooler/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_hic-juicermediumtabix-to_cool-cooler_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_hic-juicermediumtabix-to_cool-cooler_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/hic-hicup-cooler/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_36" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_37" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_40" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myamber_to_pdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_image.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_rgyr.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_rms.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.pdb" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb4amber_run.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_mdout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_minout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cpout" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cprst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.mdinfo" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.netcdf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.rst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.trj" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-sr/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Delphine Lariviere" . + + a schema1:Organization ; + schema1:name "VGP" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Plot-Nx-Size/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/galaxyproject/iwc/actions/workflows/workflow_test.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sra-manifest-to-concatenated-fastqs/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "W1_oversample_tr_features.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "W1_oversample_tr_labels.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "W1_te_features.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "W1_te_labels.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "W1_val_labels.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "labels.tsv" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "transdecoder-nucleotides.fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "transdecoder-peptides.fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_baredSC-1d-logNorm_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_baredSC-1d-logNorm_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/baredsc/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_baredSC-2d-logNorm_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_baredSC-2d-logNorm_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/baredsc/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:name "Research Infrastructure RECETOX RI (No LM2018121) financed by the Ministry of Education" . + + a schema1:Person ; + schema1:name "Youth and Sports" . + + a schema1:Person ; + schema1:name "and Operational Programme Research" . + + a schema1:Person ; + schema1:name "Development and Innovation - project CETOCOEN EXCELLENCE (No CZ.02.1.01/0.0/0.0/17_043/0009632)." . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/753" ; + schema1:name "Zargham Ahmad" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "converted_library" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/fluorescence-nuclei-segmentation-and-counting/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bin_widths" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cells_per_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exclude" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "inputReads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "metadata" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "metadataSchema" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "subjects" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "colinear_components" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "indexed_paths" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mergedMetadata" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "odgiGraph" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "odgiPNG" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "odgiRDF" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "readsMergeDedup" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "seqwishGFA" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pedigree" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:Person ; + schema1:name "Lucille Delisle" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/rnaseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "inputdata_cesm_2_1_3_B1850_f19_g17_tar" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "user_nl_cam_rs" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-consensus-from-variation/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/445" ; + schema1:name "Diego De Panis" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/476" ; + schema1:name "Vasiliki Panagi" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "body" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_host" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_password" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_user" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "is_availability" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "result_modifiers" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "results" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_file" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Lucille Delisle" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/rnaseq-sr/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Clinical Bioinformatics Unit" . + + a schema1:Person ; + schema1:name "Pathology Department" . + + a schema1:Person ; + schema1:name "Eramus Medical Center" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "factordata" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "count_data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "limma_report" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "minerva_table" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_itp_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_str_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_str_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_ndx_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_molecule_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_str_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_ndx_path" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rnaseq_left_reads" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rnaseq_right_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sample_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sars_cov_2_reference_genome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "indel" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "snp" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Lucille Delisle" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/rnaseq-sr/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s01r1b31 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=31494609 SLURM_JOB_USER=bsc19057 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=2952 SLURM_SUBMIT_DIR=/gpfs/home/bsc19/bsc19057/Tutorial_2024/cholesky SLURM_JOB_NODELIST=s01r1b[31,33] SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48(x2) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=2 COMPSS_MASTER_NODE=s01r1b31 COMPSS_WORKER_NODES= s01r1b33" ; + schema1:endTime "2024-01-22T15:55:23+00:00" ; + schema1:instrument ; + schema1:name "COMPSs cholesky.py execution at marenostrum4 with JOB_ID 31494609" ; + schema1:result ; + schema1:subjectOf "https://userportal.bsc.es/" . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 1164 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 1021 ; + schema1:description "Auxiliary File" ; + schema1:name "launch_cholesky.sh" . + + a schema1:MediaObject ; + schema1:contentSize 501 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 765 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s23r2b61 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs COMPSS_PYTHON_VERSION=3.8.2 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=30503085 SLURM_JOB_USER=bsc19959 COMPSS_HOME=/apps/COMPSs/3.3.pr/ SLURM_JOB_UID=4373 SLURM_SUBMIT_DIR=/gpfs/scratch/bsc19/bsc19959/randomsvd SLURM_JOB_NODELIST=s23r2b[61,68] SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48(x2) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=2 COMPSS_MASTER_NODE=s23r2b61 COMPSS_WORKER_NODES= s23r2b68" ; + schema1:endTime "2023-11-03T08:10:10+00:00" ; + schema1:instrument ; + schema1:name "COMPSs random_svd_compss.py execution at marenostrum4 with JOB_ID 30503085" ; + schema1:object ; + schema1:result ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.2.rc2310" . + + a schema1:MediaObject ; + schema1:contentSize 2515 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 502 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 1451 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "WDL" ; + schema1:identifier ; + schema1:name "Workflow Description Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/galaxyproject/iwc/actions/workflows/workflow_test.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s12r2b30 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=31494751 SLURM_JOB_USER=bsc19057 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=2952 SLURM_SUBMIT_DIR=/gpfs/home/bsc19/bsc19057/Tutorial_2024/wordcount SLURM_JOB_NODELIST=s12r2b[30,34] SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48(x2) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=2 COMPSS_MASTER_NODE=s12r2b30 COMPSS_WORKER_NODES= s12r2b34" ; + schema1:endTime "2024-01-22T16:32:29+00:00" ; + schema1:instrument ; + schema1:name "COMPSs wc_merge.py execution at marenostrum4 with JOB_ID 31494751" ; + schema1:object , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:result ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 878 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 1004 ; + schema1:description "Auxiliary File" ; + schema1:name "launch_wordcount_merge.sh" . 
+ + a schema1:MediaObject ; + schema1:contentSize 600 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 873 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-sr/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "ABRomics" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/bacterial_genome_annotation/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-atac-cutandrun_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-atac-cutandrun_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-pe_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-pe_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-sr_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-sr_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.itp" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybabel_minimize.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myligand.pdb" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_36" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_37" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_40" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_41" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_42" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_43" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_44" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_45" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_46" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_47" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_48" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_49" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_50" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_51" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_52" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_53" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_54" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_55" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_56" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_57" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_58" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_59" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_60" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_61" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_62" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_63" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_64" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_65" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_66" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_67" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_68" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_randomize_ions.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_randomize_ions.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.parmtop" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.parmtop" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myparmed_hmassrepartition.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_mdout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_minout.dat" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cpout" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cprst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.mdinfo" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.nc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.ncrst" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:MediaObject . 
+ + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-atac-cutandrun_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-atac-cutandrun_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-pe_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-pe_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-sr_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-sr_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sra-manifest-to-concatenated-fastqs/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Gareth Price" . + + a schema1:MediaObject ; + schema1:description "Workflow documentation" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/gromacs-mmgbsa/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Delphine Lariviere" . + + a schema1:Organization ; + schema1:name "VGP" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Plot-Nx-Size/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bits_set" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blacklist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bwt_algorithm" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_lratio" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_exomeDepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_manta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_bf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_len" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "generate_bwa_indexes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_exome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_mapping_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "read_group" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_amb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_ann" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_bwt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fai" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_pac" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_sa" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_bwa_mem" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastqc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_samtools" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "html_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "json_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_all" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_bam_filtering" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_exomedepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_manta" . + + a schema1:Person ; + schema1:name "Daniel López-López" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/atacseq/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sra-manifest-to-concatenated-fastqs/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/VGP-meryldb-creation/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s02r1b59 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=31507350 SLURM_JOB_USER=bsc19057 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=2952 SLURM_SUBMIT_DIR=/gpfs/home/bsc19/bsc19057/Tutorial_2024/lysozyme_in_water SLURM_JOB_NODELIST=s02r1b[59,66] SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48(x2) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=2 COMPSS_MASTER_NODE=s02r1b59 COMPSS_WORKER_NODES= s02r1b66" ; + schema1:endTime "2024-01-24T15:00:23+00:00" ; + schema1:instrument ; + schema1:name "COMPSs lysozyme_in_water.py execution at marenostrum4 with JOB_ID 31507350" ; + schema1:object , + , + , + , + , + , + ; + schema1:result , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 2295 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 1565 ; + schema1:description "Auxiliary File" ; + schema1:name "launch_full.sh" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 8957 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water_full.py" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 8868 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water_full_no_mpi.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 10194 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water_full_singularity.py" . + + a schema1:MediaObject ; + schema1:contentSize 755 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 676 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:Person ; + schema1:name "Peter van Heusden" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/galaxyproject/iwc/actions/workflows/workflow_test.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject ; + schema1:contentSize 177 . + + a schema1:MediaObject ; + schema1:contentSize 329 . + + a schema1:MediaObject ; + schema1:contentSize 724 . + + a schema1:MediaObject ; + schema1:contentSize 1056 . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 6619 . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_34" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_36" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_37" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_40" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_41" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_42" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_43" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_44" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_45" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_46" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_47" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_48" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_49" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_50" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_51" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_52" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_53" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_54" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_55" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_56" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_57" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_58" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_59" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_60" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_61" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_62" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_63" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_64" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_65" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_66" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_67" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_68" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_randomize_ions.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_randomize_ions.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.parmtop" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.pdb" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.parmtop" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myparmed_hmassrepartition.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_mdout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_minout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cpout" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cprst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.mdinfo" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.nc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.ncrst" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Darwin MacBook-Pro-Raul-2018.local 23.1.0 Darwin Kernel Version 23.1.0: Mon Oct 9 21:27:27 PDT 2023; root:xnu-10002.41.9~6/RELEASE_X86_64 x86_64 COMPSS_HOME=/Users/rsirvent/opt/COMPSs/" ; + schema1:endTime "2023-10-27T13:17:52+00:00" ; + schema1:instrument ; + schema1:name "COMPSs matmul_objects.py execution at MacBook-Pro-Raul-2018.local" ; + schema1:result . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.2.rc2310" . + + a schema1:MediaObject ; + schema1:contentSize 252 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 1442 ; + schema1:description "Auxiliary File" ; + schema1:name "README" . + + a schema1:MediaObject ; + schema1:contentSize 4553 ; + schema1:description "Auxiliary File" ; + schema1:name "pom.xml" . + + a schema1:MediaObject ; + schema1:contentSize 122 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 2246 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:name "Lucille Delisle" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/rnaseq-sr/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . + + a schema1:Person ; + schema1:name "Krisztian Papp" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bg_confounders" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bg_custom_model" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bg_feature" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bg_measure" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bg_mod" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bg_mod0" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bg_phenotype" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bg_phenotype_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bg_samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bg_timecourse" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_alpha" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_blind" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_contrast" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_cores" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_denominator" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_design" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_hidden_batch_effects" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_hidden_batch_method" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_hidden_batch_row_means" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_hypothesis" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_lfcThreshold" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_metadata" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_min_sum_of_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_numerator" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_pAdjustMethod" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_parallelization" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_phenotype" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_reduced" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_reference_level" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_transformation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_variables" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastx_first_base_to_keep" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastx_last_base_to_keep" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "featureCounts_annotation_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "featureCounts_number_of_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "featureCounts_output_file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "featureCounts_read_meta_feature_overlap" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hisat2_alignments_tailored_trans_assemb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hisat2_idx_basename" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hisat2_idx_directory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hisat2_known_splicesite_infile" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hisat2_num_of_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_file_split" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_file_split_fwd_single" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_file_split_rev" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_qc_check" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_trimming_check" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "premapping_input_check" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "raw_files_directory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_sort_compression_level" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_sort_memory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_sort_sort_by_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_sort_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_cigar" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_collapsecigar" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_count" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_fastcompression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_isbam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_iscram" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_randomseed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readsingroup" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readsinlibrary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readsquality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readswithbits" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readswithoutbits" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readtagtostrip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_region" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_samheader" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_uncompressed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stringtie_ballgown_table_files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stringtie_conservative_mode" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stringtie_cpus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stringtie_expression_estimation_mode" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stringtie_guide_gff" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stringtie_junction_coverage" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stringtie_min_isoform_abundance" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stringtie_min_read_coverage" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stringtie_out_gtf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stringtie_transcript_merge_mode" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stringtie_verbose" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_compression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_do_not_compress" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_length" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_strigency" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_trim_suffix" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_ballgown_de_custom_model" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_ballgown_de_results" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_ballgown_object" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_collect_hisat2_sam_files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_deseq2_dds_object" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_deseq2_de_results" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_deseq2_res_lfcShrink_object" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_deseq2_transformed_object" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_paired_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_paired_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_raw_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_single_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_single_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastx_trimmer_paired" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastx_trimmer_single" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_featureCounts" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_hisat2_for_paired_reads_reports" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_hisat2_for_single_reads_reports" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_sort" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_view" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_stringtie_expression_gtf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_stringtie_expression_outdir" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_stringtie_merge" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_stringtie_transcript_assembly_gtf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trim_galore_paired_fq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trim_galore_paired_reports" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trim_galore_single_fq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trim_galore_single_reports" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s04r2b44 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=wordcount_blocks COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 COMPSS_BINDINGS_DEBUG=1 SLURM_JOB_ID=31176032 SLURM_JOB_USER=bsc19057 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=2952 SLURM_SUBMIT_DIR=/gpfs/home/bsc19/bsc19057/COMPSs-DP/tutorial_apps/python/wordcount_blocks SLURM_JOB_NODELIST=s04r2b44 SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48 COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=sequential SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=1 COMPSS_MASTER_NODE=s04r2b44 COMPSS_WORKER_NODES=" ; + schema1:endTime "2023-12-15T14:53:21+00:00" ; + schema1:instrument ; + schema1:name "COMPSs wordcount_blocks.py execution at marenostrum4 with JOB_ID 31176032" ; + schema1:object ; + schema1:result , + ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 447 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 961 ; + schema1:description "Auxiliary File" ; + schema1:name "README" . + + a schema1:MediaObject ; + schema1:contentSize 4649 ; + schema1:description "Auxiliary File" ; + schema1:name "pom.xml" . 
+ + a schema1:MediaObject ; + schema1:contentSize 331 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 2207 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "contamination reference file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "forward reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "identifier used" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "memory usage (mb)" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pacbio reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reverse reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Run GTDB-Tk" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "number of threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BAM files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CheckM" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "FASTQC" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filtered reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GTDB-Tk" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MetaBat2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "QUAST" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "SPADES" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "WDL" ; + schema1:identifier ; + schema1:name "Workflow Description Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-variation-reporting/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:name "ABRomics" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/quality-and-contamination-control/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BridgeDB cache" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Differential gene expression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Differential miRNA expression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING identifier mapping" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mRNA expression correlation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of correlation edges to use, USE FOR TESTING ONLY" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "miRNA mRNA correlation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "miRTarBase data" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of miRTarBase edges to use, USE FOR TESTING ONLY" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of cores to let MOGAMUN use" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of generation to let the genetic algorithm in MOGAMUN evolve" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The maximum size of subnetworks during postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "the minimum Jaccard Index overlap between two subnetworks to allow them to be merged" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The minimum size of subnetworks during postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of parallel runs to let MOGAMUN do, these parallel runs are combined in postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The minimum score for a STRING edge to be included in the analysis" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of STRING edges to use, USE FOR TESTING ONLY" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Variant Burden" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "full_graph" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "subnetworks" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-decontamination-VGP9/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/cutandrun/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_ndx_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_str_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_molecule_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s23r2b61 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs COMPSS_PYTHON_VERSION=3.8.2 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=30503085 SLURM_JOB_USER=bsc19959 COMPSS_HOME=/apps/COMPSs/3.3.pr/ SLURM_JOB_UID=4373 SLURM_SUBMIT_DIR=/gpfs/scratch/bsc19/bsc19959/randomsvd SLURM_JOB_NODELIST=s23r2b[61,68] SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48(x2) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=2 COMPSS_MASTER_NODE=s23r2b61 COMPSS_WORKER_NODES= s23r2b68" ; + schema1:endTime "2023-11-03T08:10:10+00:00" ; + schema1:instrument ; + schema1:name "COMPSs random_svd_compss.py execution at marenostrum4 with JOB_ID 30503085" ; + schema1:object ; + schema1:result ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.2.rc2310" . + + a schema1:MediaObject ; + schema1:contentSize 2515 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 502 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 1451 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Cali Willet" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/141" ; + schema1:name "Tracy Chew" . + + a schema1:ComputerLanguage ; + schema1:name "Shell Script" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/370" ; + schema1:name "Peter Menzel" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:Person ; + schema1:name "Wolfgang Maier" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "af_recalculated" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "all_variants_all_samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "by_variant_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cleaned_header" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "collapsed_effects" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "combined_variant_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "filtered_extracted_variants" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "filtered_variants" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "highest_impact_effects" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "prefiltered_variants" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "processed_variants_collection" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "variants_for_plotting" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Galaxy" . + + a schema1:Person ; + schema1:name "VGP" . + + a schema1:Person ; + schema1:name "VGP" . + + a schema1:Person ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Lineage" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "assembly.fasta" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step00_cat_pdb_input_structure2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step00_cat_pdb_output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_reduce_remove_hydrogens_input_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_reduce_remove_hydrogens_output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step100_make_ndx_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step100_make_ndx_output_ndx_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_mdrun_min_output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_mdrun_min_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_mdrun_min_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_mdrun_min_output_trr_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_grompp_nvt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_grompp_nvt_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_mdrun_nvt_output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_mdrun_nvt_output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_mdrun_nvt_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_mdrun_nvt_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_mdrun_nvt_output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_grompp_npt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_grompp_npt_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_mdrun_npt_output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_mdrun_npt_output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_mdrun_npt_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_mdrun_npt_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_mdrun_npt_output_trr_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_grompp_md_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_grompp_md_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_mdrun_md_output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_mdrun_md_output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_mdrun_md_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_mdrun_md_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_mdrun_md_output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_gmx_image1_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_gmx_image1_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_gmx_image2_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_gmx_image2_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_gmx_trjconv_str_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_gmx_trjconv_str_output_str_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_extract_molecule_output_molecule_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_gmx_energy_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_gmx_energy_output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_gmx_rgyr_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_gmx_rgyr_output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_rmsd_first_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_rmsd_first_output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_rmsd_exp_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_rmsd_exp_output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_grompp_md_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_grompp_md_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_fix_side_chain_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_pdb2gmx_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_pdb2gmx_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_pdb2gmx_output_top_zip_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_editconf_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_editconf_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_solvate_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_solvate_output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_grompp_genion_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_grompp_genion_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_genion_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_genion_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_genion_output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_grompp_min_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_grompp_min_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_ndx_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpt_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_str_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_molecule_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/galaxyproject/iwc/actions/workflows/workflow_test.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Tim Dudgeon" . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:Person ; + schema1:name "Gianmauro Cuccuru" . + + a schema1:Person ; + schema1:name "Björn Grüning" . 
+ + a schema1:Person ; + schema1:name "Rachael Skyner" . + + a schema1:Person ; + schema1:name "Jack Scantlebury" . + + a schema1:Person ; + schema1:name "Susan Leung" . + + a schema1:Person ; + schema1:name "Frank von Delft" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "NFL" ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url ; + schema1:version "21.10.3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ceamarc_env.csv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cnidaria_filtered.csv" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject ; + schema1:contentSize 155 ; + schema1:dateModified "2023-11-03T17:21:39" ; + schema1:name "platforms.yml" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 114 ; + schema1:dateModified "2023-11-03T22:41:31" ; + schema1:name "include.yml" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 996 ; + schema1:dateModified "2023-11-03T22:41:31" ; + schema1:name "experiment.yml" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2478 ; + schema1:dateModified "2023-11-03T22:41:31" ; + schema1:name "jobs.yml" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1019 ; + schema1:dateModified "2023-11-03T22:41:31" ; + schema1:name "mhm.yml" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1459 ; + schema1:dateModified "2023-11-03T22:41:31" ; + schema1:name "platforms.yml" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1679 ; + schema1:dateModified "2023-11-03T22:41:31" ; + schema1:name "rocrate.yaml" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:ContactPoint ; + schema1:name "biobb forum at ask.bioexcel.eu" ; + schema1:url "https://ask.bioexcel.eu/c/BioExcel-Building-Blocks-library/23" . + + a schema1:SoftwareApplication ; + schema1:citation ; + schema1:installUrl "https://anaconda.org/bioconda/biopython" ; + schema1:license , + ; + schema1:name "BioPython" ; + schema1:url "https://biopython.org/" ; + schema1:version "1.70" . + + a schema1:ScholarlyArticle ; + schema1:author , + , + , + ; + schema1:datePublished "2015-01-01T00:00:00Z" ; + schema1:name "pmx: Automated protein structure and topology" . + + a schema1:ScholarlyArticle ; + schema1:datePublished "1995-09-02T00:00:00.000Z" ; + schema1:name "AMBER, a package of computer programs for applying molecular mechanics, normal mode analysis, molecular dynamics and free energy calculations to simulate the structural and energetic properties of molecules" . + + a schema1:ScholarlyArticle ; + schema1:author , + ; + schema1:datePublished "2010-01-01T00:00:00Z" ; + schema1:name "Protein Thermostability Calculations Using Alchemical Free Energy Simulations" . + + a schema1:ScholarlyArticle ; + schema1:author , + , + , + , + , + , + ; + schema1:name "GROMACS: High performance molecular simulations through multi-level parallelism from laptops to supercomputers" . 
+ + a schema1:ScholarlyArticle ; + schema1:author , + , + , + , + , + , + , + , + , + , + , + ; + schema1:copyrightYear "2019-09-10T00:00:00.000Z" ; + schema1:datePublished "2019-09-10T00:00:00.000Z" ; + schema1:description "In the recent years, the improvement of software and hardware performance has made biomolecular simulations a mature tool for the study of biological processes. Simulation length and the size and complexity of the analyzed systems make simulations both complementary and compatible with other bioinformatics disciplines. However, the characteristics of the software packages used for simulation have prevented the adoption of the technologies accepted in other bioinformatics fields like automated deployment systems, workflow orchestration, or the use of software containers. We present here a comprehensive exercise to bring biomolecular simulations to the “bioinformatics way of working”. The exercise has led to the development of the BioExcel Building Blocks (BioBB) library. BioBB’s are built as Python wrappers to provide an interoperable architecture. BioBB’s have been integrated in a chain of usual software management tools to generate data ontologies, documentation, installation packages, software containers and ways of integration with workflow managers, that make them usable in most computational environments." ; + schema1:license ; + schema1:name "BioExcel Building Blocks, a software library for interoperable biomolecular simulation workflows" . + + a schema1:ScholarlyArticle ; + schema1:author , + , + , + , + , + , + , + , + , + , + ; + schema1:datePublished "2009-03-20T00:00:00.000Z" ; + schema1:name "Biopython: freely available Python tools for computational molecular biology and bioinformatics" . 
+ + a schema1:Dataset, + ; + schema1:distribution ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.120.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_md_setup" ; + schema1:name "Research Object Crate for Protein MD Setup tutorial using BioExcel Building Blocks (biobb)" ; + schema1:url "https://workflowhub.eu/workflows/120/ro_crate?version=2" . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . 
+ + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . 
+ + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . 
+ + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . 
+ + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:CreativeWork ; + schema1:description """Permission to use, copy, modify, and distribute this software and its documentation with or without modifications and for any purpose and without fee is hereby granted, provided that any copyright notices appear in all copies and that both those copyright notices and this permission notice appear in supporting documentation, and that the names of the contributors or copyright holders not be used in advertising or publicity pertaining to distribution of the software without specific prior permission. + + THE CONTRIBUTORS AND COPYRIGHT HOLDERS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE""" ; + schema1:name "Biopython License Agreement" . + + a schema1:SoftwareSourceCode ; + schema1:abstract """The Autosubmit project. It contains the templates used +by Autosubmit for the scripts used in the workflow, as well as any other +source code used by the scripts (i.e. 
any files sourced, or other source +code compiled or executed in the workflow).""" ; + schema1:codeRepository "https://github.com/kinow/auto-mhm-test-domains.git" ; + schema1:codeSampleType "template" ; + schema1:name "https://github.com/kinow/auto-mhm-test-domains.git" ; + schema1:programmingLanguage "Any" ; + schema1:runtimePlatform "Autosubmit 4.0.98" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" ; + schema1:targetProduct "Autosubmit" ; + schema1:version "985863e5b257fa36c6ede7e73ac4fc9fe1ff4c73" . + + a schema1:Person ; + schema1:name "Daniele Lezzi" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Sebastian Ares de Parga Regalado" . + + a schema1:Person ; + schema1:name "Adam M. Novak" . + + a schema1:Person ; + schema1:name "Mark James Abraham" . + + a schema1:Person ; + schema1:name "Fani Hatjina" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Justin A. Lemkul" . + + a schema1:Person ; + schema1:name "Kamshat Temirbayeva" . + + a schema1:Person ; + schema1:name "Slađan Rašić" . + + a schema1:Person ; + schema1:name "Tim Dudgeon" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-7036-9987" ; + schema1:name "Wudmir Rojas" . + + a schema1:Person ; + schema1:name "Maria Bouga" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-8197-3303" ; + schema1:name "Stevie Pederson" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-8875-7304" ; + schema1:name "Thomas Roetzer-Pejrimovsky" . + + a schema1:Person ; + schema1:name "Andrew Dalke" . + + a schema1:Person ; + schema1:name "Rutger Vos" . + + a schema1:Person ; + schema1:name "Peter J. A. Cock" . + + a schema1:Person ; + schema1:name "Zlatko Puškadija" . + + a schema1:Person ; + schema1:name "M. Alice Pinto" . + + a schema1:Person ; + schema1:name "Andrea Guarracino" . 
+ + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-0320-0214" ; + schema1:name "Matthias Haimel" . + + a schema1:Person ; + schema1:name "Andrzej Oleksa" . + + a schema1:Person ; + schema1:name "Pedro João Rodrigues" . + + a schema1:Person ; + schema1:name "Josep Ll. Gelpi" . + + a schema1:Person ; + schema1:name "Franck Dedeine" . + + a schema1:Person ; + schema1:name "Usman Rashid" . + + a schema1:Person ; + schema1:name "Leonidas Charistos" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-1640-3045" ; + schema1:name "Justin Sonneck" . + + a schema1:Person ; + schema1:name "Iddo Friedberg" . + + a schema1:Person ; + schema1:name "Max Schubach" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-2319-2960" ; + schema1:name "Woosub Shin" . + + a schema1:Person ; + schema1:name "Erik Lindahl" . + + a schema1:Person ; + schema1:name "Irfan Kandemir" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-2937-9259" ; + schema1:name "Anton Korobeynikov" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-2977-5032" ; + schema1:name "Johan Gustafsson" . + + a schema1:Person ; + schema1:name "Jeremy C. Smith" . + + a schema1:Person ; + schema1:name "Brad A. Chapman" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Claudio Satriano" . + + a schema1:Person ; + schema1:name "Fabian Deister" . + + a schema1:Person ; + schema1:name "Aibyn Torekhanov" . + + a schema1:Person ; + schema1:name "Luc Cornet" . + + a schema1:Person ; + schema1:name "Vincent Hervé" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-3668-8711" ; + schema1:name "Davide Gurnari" . + + a schema1:Person ; + schema1:name "Marin Kovačić" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-3811-4527" ; + schema1:name "Rafael Terra" . 
+ + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-3935-2279" ; + schema1:name "Sarah Beecroft" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-4416-5224" ; + schema1:name "Ian Brennan" . + + a schema1:Person ; + schema1:name "Zhanar Sheralieva" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Jose Raul Bravo Martinez" . + + a schema1:Person ; + schema1:name "Eliza Căuia" . + + a schema1:Person ; + schema1:name "Jeffrey T. Chang" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-4594-8423" ; + schema1:name "Willem de Koning" . + + a schema1:Person ; + schema1:name "Cymon J. Cox" . + + a schema1:Person ; + schema1:name "Bonface Munyoki" . + + a schema1:Person ; + schema1:name "Szilvia Kusza" . + + a schema1:Person ; + schema1:name "Anton Nekrutenko" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Natalia Poiata" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-6435-7100" ; + schema1:name "Richard Lupat" . + + a schema1:Person ; + schema1:name "Vytautas Gapsys" . + + a schema1:Person ; + schema1:name "Ulzhan Nuralieva" . + + a schema1:Person ; + schema1:name "Tiago Antao" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-7490-8260" ; + schema1:name "Dominik Lux" . + + a schema1:Person ; + schema1:name "Berk Hess" . + + a schema1:Person ; + schema1:name "Pjotr Prins" . + + a schema1:Person ; + schema1:name "Bartek Wilczynski" . + + a schema1:Person ; + schema1:name "Modesto Orozco" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-8639-8325" ; + schema1:name "Carlos Classen" . + + a schema1:Person ; + schema1:name "Michael Heuer" . + + a schema1:Person ; + schema1:name "Kateřina Storchmannová" . + + a schema1:Person ; + schema1:name "Michiel J. L. de Hoon" . 
+ + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Riccardo Rossi Bernecoli" . + + a schema1:Person ; + schema1:name "Szilárd Páll" . + + a schema1:Person ; + schema1:name "Michael Lloyd" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-1075-1628" ; + schema1:name "David Yuan" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-1157-9030" ; + schema1:name "Camille Juigné" . + + a schema1:Person ; + schema1:name "Carole Goble" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-1302-3909" ; + schema1:name "Felicita Gernhardt" . + + a schema1:Person ; + schema1:name "Leonid Kostrykin" . + + a schema1:Person ; + schema1:name "Adrian Siceanu" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-1488-5148" ; + schema1:name "Michael Roach" . + + a schema1:Person ; + schema1:name "Diego Carvalho" . + + a schema1:Person ; + schema1:name "Roland Schulz" . + + a schema1:Person ; + schema1:name "Nachida Tadrent" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-2470-9519" ; + schema1:name "Tim Booth" . + + a schema1:Person ; + schema1:name "Janez Prešern" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-2739-1315" ; + schema1:name "Pablo Riesgo Ferreiro" . + + a schema1:Person ; + schema1:name "Leandro Liborio" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-2878-4049" ; + schema1:name "Aishwarya Iyer" . + + a schema1:Person ; + schema1:name "Thomas Hamelryck" . + + a schema1:Person ; + schema1:name "Teemu Murtola" . + + a schema1:Person ; + schema1:name "Thomas Liener" . + + a schema1:Person ; + schema1:name "Marina Kennerson" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-3549-9115" ; + schema1:name "Alejandra Escobar" . + + a schema1:Person ; + schema1:name "Peter Amstutz" . 
+ + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-3777-5945" ; + schema1:name "Tazro Inutano" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-3876-6581" ; + schema1:name "Agata Kilar" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-4684-317X" ; + schema1:name "Xiaokang Zhang" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-4771-6113" ; + schema1:name "Sagane Joye-Dind" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0009-0007-9672-6728" ; + schema1:name "Xiaolong Luo" . + + a schema1:MediaObject . + + a schema1:SoftwareApplication ; + schema1:citation , + ; + schema1:description """pmx is a python library that allows users to setup and analyse molecular dynamics simulations with the Gromacs package. Among its main features are the setup and analysis of alchemical free energy calculations for protein, nucleic acid, and small molecule mutations. +""" ; + schema1:license ; + schema1:name "pmx-biobb" ; + schema1:softwareRequirements , + ; + schema1:url "https://degrootlab.github.io/pmx/" . + + a schema1:SoftwareApplication ; + schema1:license ; + schema1:name "SciPy" ; + schema1:url "https://www.scipy.org/" ; + schema1:version "1.7.1." . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject ; + schema1:name "sdrf" . + + a schema1:MediaObject ; + schema1:name "fasta" . + + a schema1:MediaObject ; + schema1:name "parameters" . + + a schema1:Organization ; + schema1:alternateName "DANS" ; + schema1:legalName "Nederlands Instituut voor Permanente Toegang tot Digitale Onderzoeksgegevens" ; + schema1:name "Data Archiving and Networked Services" ; + schema1:url "https://dans.knaw.nl/" . + + a schema1:CreativeWork ; + schema1:identifier "CC0-1.0" ; + schema1:name "Creative Commons Zero v1.0 Universal" ; + schema1:url "https://creativecommons.org/publicdomain/zero/1.0/" . 
+ + a schema1:CreativeWork ; + schema1:identifier "LGPL-2.1-or-later" ; + schema1:name "GNU Lesser General Public License v2.1 or later" ; + schema1:url "https://www.gnu.org/licenses/old-licenses/lgpl-2.1-standalone.html" . + + a schema1:CreativeWork ; + schema1:identifier "LGPL-3.0-only" ; + schema1:name "GNU Lesser General Public License v3.0 only" ; + schema1:url "https://www.gnu.org/licenses/lgpl-3.0-standalone.html" . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url ; + schema1:version "6.5.0" . + + a schema1:Organization ; + schema1:name "workflow4metabolomics" . + + a schema1:Organization ; + schema1:name "workflow4metabolomics" . + + a schema1:Collection ; + schema1:name "Sanger Tree of Life Assembly method" . + + a schema1:Collection ; + schema1:name "BY-COVID related workflows" . + + a schema1:Person ; + schema1:name "Sarah Beecroft" . + + a schema1:Person ; + schema1:name "Camille Juigné" . + + a schema1:Person ; + schema1:name "Richard Lupat" . + + a schema1:Person ; + schema1:name "Johannes Köster" . + + a schema1:Person ; + schema1:name "Valentin Tilloy" . + + a schema1:Person ; + schema1:name "Michael Roach" . + + a schema1:Person ; + schema1:name "Elida Schneltzer" . + + a schema1:Person ; + schema1:name "Stian Soiland-Reyes" . + + a schema1:Person ; + schema1:name "Luiz Gadelha" . + + a schema1:Person ; + schema1:name "Woosub Shin" . + + a schema1:Person ; + schema1:name "Ryan Patterson-Cross" . + + a schema1:Person ; + schema1:name "Anthony Bretaudeau" . + + a schema1:Person ; + schema1:name "Pasi Korhonen" . + + a schema1:Person ; + schema1:name "Tazro Inutano" . + + a schema1:Person ; + schema1:name "Marlene Rezk" . + + a schema1:Person ; + schema1:name "Tim Booth" . + + a schema1:Person ; + schema1:name "Alejandra Escobar" . + + a schema1:Person ; + schema1:name "Lucas Cruz" . + + a schema1:Person ; + schema1:name "Pablo Riesgo Ferreiro" . 
+ + a schema1:Person ; + schema1:name "Felicita Gernhardt" . + + a schema1:Person ; + schema1:name "Andrey Prjibelski" . + + a schema1:Person ; + schema1:name "Xiaokang Zhang" . + + a schema1:Person ; + schema1:name "Zavolan Lab" . + + a schema1:Person ; + schema1:name "Stevie Pederson" . + + a schema1:Person ; + schema1:name "Anton Korobeynikov" . + + a schema1:Person ; + schema1:name "Willem de Koning" . + + a schema1:Person ; + schema1:name "Elisabetta Spinazzola" . + + a schema1:Person ; + schema1:name "Agata Kilar" . + + a schema1:Person ; + schema1:name "Sagane Joye-Dind" . + + a schema1:Person ; + schema1:name "Akshay Akshay" . + + a schema1:Person ; + schema1:name "Davide Gurnari" . + + a schema1:Person ; + schema1:name "Johan Gustafsson" . + + a schema1:Person ; + schema1:name "David Yuan" . + + a schema1:Person ; + schema1:name "Justin Sonneck" . + + a schema1:Person ; + schema1:name "Damon-Lee Pointon" . + + a schema1:Person ; + schema1:name "William Eagles" . + + a schema1:Person ; + schema1:name "Zafran Hussain Shah" . + + a schema1:Person ; + schema1:name "Nandan Deshpande" . + + a schema1:Person ; + schema1:name "Rafael Terra" . + + a schema1:Person ; + schema1:name "Ian Brennan" . + + a schema1:Person ; + schema1:name "Aishwarya Iyer" . + + a schema1:Person ; + schema1:name "Dominik Lux" . + + a schema1:Person ; + schema1:name "Matthias Haimel" . + + a schema1:Person ; + schema1:name "Andrii Neronov" . + + a schema1:Person ; + schema1:name "Thomas Roetzer-Pejrimovsky" . + + a schema1:Person ; + schema1:name "Xiaolong Luo" . + + a schema1:Person ; + schema1:name "Wudmir Rojas" . + + a schema1:Person ; + schema1:name "Carlos Classen" . + + a schema1:Person ; + schema1:name "Carlos Oscar Sorzano Sanchez" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Edinburgh Genomics" . + + a schema1:Organization, + schema1:Project ; + schema1:name "KircherLab" . + + a schema1:Organization, + schema1:Project ; + schema1:name "BCCM_ULC" . 
+ + a schema1:Organization, + schema1:Project ; + schema1:name "Computational Science at HZDR" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Big data in biomedicine" . + + a schema1:Organization, + schema1:Project ; + schema1:name "MISTIC" . + + a schema1:Organization, + schema1:Project ; + schema1:name "NMR Workflow" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Guigó lab" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Statistical genetics" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Delineating Regions-of-interest for Mass Spectrometry Imaging by Multimodally Corroborated Spatial Segmentation" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Bioinformatics Unit @ CRG" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Black Ochre Data Labs" . + + a schema1:Organization, + schema1:Project ; + schema1:name "IRRI Bioinformatics Group" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Zavolan Lab" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Bioinformatics workflows for life science" . + + a schema1:Organization, + schema1:Project ; + schema1:name "EOSC-Life WP3 OC Team, cross RI project" . + + a schema1:Organization, + schema1:Project ; + schema1:name "ARA-dev" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Mendel Centre for Plant Genomics and Proteomics" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Metagenomic tools" . + + a schema1:Organization, + schema1:Project ; + schema1:name "ERGA Annotation" . + + a schema1:Organization, + schema1:Project ; + schema1:name "MLme: Machine Learning Made Easy" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Dioscuri TDA" . + + a schema1:Organization, + schema1:Project ; + schema1:name "NIH CFDE Playbook Workflow Partnership" . + + a schema1:Organization, + schema1:Project ; + schema1:name "MMV-Lab" . 
+ + a schema1:Organization, + schema1:Project ; + schema1:name "EMBL-CBA" . + + a schema1:Organization, + schema1:Project ; + schema1:name "EBP-Nor" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Evaluation of Swin Transformer and knowledge transfer for denoising of super-resolution structured illumination microscopy data" . + + a schema1:Organization, + schema1:Project ; + schema1:name "COVID-19 PubSeq: Public SARS-CoV-2 Sequence Resource" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Chemical Data Lab" . + + a schema1:Organization, + schema1:Project ; + schema1:name "HP2NET - Framework for construction of phylogenetic networks on High Performance Computing (HPC) environment" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Building spatio-temporal workflows in Galaxy" . + + a schema1:Organization, + schema1:Project ; + schema1:name "CEMCOF" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Jackson Laboratory NGS-Ops" . + + a schema1:Organization, + schema1:Project ; + schema1:name "BRAIN - Biomedical Research on Adult Intracranial Neoplasms" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Italy-Covid-data-Portal" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Institute for Human Genetics and Genomic Medicine Aachen" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Plant-Food-Research-Open" . + + a schema1:Organization, + schema1:Project ; + schema1:name "WGGC" . + + a schema1:Organization, + schema1:Project ; + schema1:name "EOSC-Life WP3" . + + a schema1:Organization, + schema1:Project ; + schema1:name "MOLGENIS" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Janis" . + + a schema1:Organization, + schema1:Project ; + schema1:name "IAA-CSIC" . + + a schema1:Organization, + schema1:Project ; + schema1:name "FAME" . 
+ + a schema1:Organization, + schema1:Project ; + schema1:name "CHU Limoges - UF9481 Bioinformatique / CNR Herpesvirus" . + + a schema1:Organization, + schema1:Project ; + schema1:name "HecatombDevelopment" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Bioinformatics and Biostatistics (BIO2 ) Core" . + + a schema1:Organization, + schema1:Project ; + schema1:name "VIB Bioinformatics Core" . + + a schema1:Organization, + schema1:Project ; + schema1:name "CINECA" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Gyn Department" . + + a schema1:Organization, + schema1:Project ; + schema1:name "AGRF BIO" . + + a schema1:DataDownload ; + schema1:contentSize "11867" ; + schema1:encodingFormat "application/zip" . + + a schema1:Organization ; + schema1:name "Department of Computer Science" ; + schema1:parentOrganization ; + schema1:url "https://www.cs.manchester.ac.uk/" . + + a schema1:Organization ; + schema1:name "ICREA" ; + schema1:url "https://www.icrea.cat/" . + + a schema1:Organization ; + schema1:name "CECH" ; + schema1:url "https://www.imim.cat/programesrecerca/neurociencies/en_cech.htm" . + + a schema1:CreativeWork ; + schema1:identifier "BSD-3-Clause" ; + schema1:isBasedOn ; + schema1:url "https://www.scipy.org/scipylib/license.html" . + + a schema1:Organization ; + schema1:name "Universitat de Barcelona" ; + schema1:url "https://www.ub.edu/" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "Raul.Sirvent@bsc.es" ; + schema1:identifier "Raul.Sirvent@bsc.es" ; + schema1:url "https://orcid.org/0000-0003-0606-2512" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "Rosa.M.Badia@bsc.es" ; + schema1:identifier "Rosa.M.Badia@bsc.es" ; + schema1:url "https://orcid.org/0000-0003-2941-5499" . 
+ + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "adam.hospital@irbbarcelona.org" ; + schema1:identifier "adam.hospital@irbbarcelona.org" ; + schema1:url "https://orcid.org/0000-0002-8291-8071" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "alessandro.danca@cmcc.it" ; + schema1:identifier "alessandro.danca@cmcc.it" ; + schema1:url "https://orcid.org/0000-0002-0372-2530" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "architdabral1234567890@gmail.com" ; + schema1:identifier "architdabral1234567890@gmail.com" ; + schema1:url "https://orcid.org/0009-0009-6701-3547" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "castro@anamat.cie.uma.es" ; + schema1:identifier "castro@anamat.cie.uma.es" ; + schema1:url "https://orcid.org/0000-0003-3164-7715" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "cristian.tatu@bsc.es" ; + schema1:identifier "cristian.tatu@bsc.es" ; + schema1:url "https://orcid.org/0009-0003-8848-9436" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "csl@uma.es" ; + schema1:identifier "csl@uma.es" ; + schema1:url "https://orcid.org/0000-0002-5493-5982" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "fabrizio.bernardi@ingv.it" ; + schema1:identifier "fabrizio.bernardi@ingv.it" ; + schema1:url "https://orcid.org/0000-0002-0414-8411" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "fabrizio.romano@ingv.it" ; + schema1:identifier "fabrizio.romano@ingv.it" ; + schema1:url "https://orcid.org/0000-0003-2725-3596" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "fernando.vazquez@bsc.es" ; + schema1:identifier "fernando.vazquez@bsc.es" ; + schema1:url "https://orcid.org/0000-0001-5634-509X" . 
+ + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "finn.lovholt@ngi.no" ; + schema1:identifier "finn.lovholt@ngi.no" ; + schema1:url "https://orcid.org/0000-0003-1019-7321" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "francisco.conejero@bsc.es" ; + schema1:identifier "francisco.conejero@bsc.es" ; + schema1:url "https://orcid.org/0000-0001-6401-6229" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "jacopo.selva@unina.it" ; + schema1:identifier "jacopo.selva@unina.it" ; + schema1:url "https://orcid.org/0000-0001-6263-6934" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "jalemkul@vt.edu" ; + schema1:identifier "jalemkul@vt.edu" ; + schema1:url "https://orcid.org/0000-0001-6661-8653" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "javier.conejero@bsc.es" ; + schema1:identifier "javier.conejero@bsc.es" ; + schema1:url "https://orcid.org/0000-0001-6401-6229" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "jorge.ejarque@bsc.es" ; + schema1:identifier "jorge.ejarque@bsc.es" ; + schema1:url "https://orcid.org/0000-0003-4725-5097" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "jose.raul.bravo@upc.edu" ; + schema1:identifier "jose.raul.bravo@upc.edu" ; + schema1:url "https://orcid.org/0000-0002-4465-7536" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "louise.cordrie@ingv.it" ; + schema1:identifier "louise.cordrie@ingv.it" ; + schema1:url "https://orcid.org/0000-0003-2290-8637" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "macias@anamat.cie.uma.es" ; + schema1:identifier "macias@anamat.cie.uma.es" ; + schema1:url "https://orcid.org/0000-0002-3010-8050" . 
+ + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "manuela.volpe@ingv.it" ; + schema1:identifier "manuela.volpe@ingv.it" ; + schema1:url "https://orcid.org/0000-0003-4551-3339" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "marcah@uma.es" ; + schema1:identifier "marcah@uma.es" ; + schema1:url "https://orcid.org/0000-0003-3130-1335" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "raul.sirvent@bsc.es" ; + schema1:identifier "raul.sirvent@bsc.es" ; + schema1:url "https://orcid.org/0000-0003-0606-2512" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "roberto.tonini@ingv.it" ; + schema1:identifier "roberto.tonini@ingv.it" ; + schema1:url "https://orcid.org/0000-0001-7617-7206" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "rosa.m.badia@bsc.es" ; + schema1:identifier "rosa.m.badia@bsc.es" ; + schema1:url "https://orcid.org/0000-0003-2941-5499" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "rrossi@cimne.upc.edu" ; + schema1:identifier "rrossi@cimne.upc.edu" ; + schema1:url "https://orcid.org/0000-0003-0528-7074" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "sebastian.ares@upc.edu" ; + schema1:identifier "sebastian.ares@upc.edu" ; + schema1:url "https://orcid.org/0000-0001-5709-4683" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "sonia.scardigno@cmcc.it" ; + schema1:identifier "sonia.scardigno@cmcc.it" ; + schema1:url "https://orcid.org/0000-0003-2347-3698" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "stefano.lorito@ingv.it" ; + schema1:identifier "stefano.lorito@ingv.it" ; + schema1:url "https://orcid.org/0000-0002-1458-2131" . 
+ + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "steven.gibbons@ngi.no" ; + schema1:identifier "steven.gibbons@ngi.no" ; + schema1:url "https://orcid.org/0000-0002-7822-0244" . + + a schema1:Dataset ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , 
+ , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:name "outdir" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/978?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/demultiplex" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/demultiplex" ; + schema1:sdDatePublished "2024-07-12 13:21:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/978/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8275 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:47Z" ; + schema1:dateModified "2024-06-11T12:54:47Z" ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/978?version=7" ; + schema1:keywords "bases2fastq, bcl2fastq, demultiplexing, elementbiosciences, illumina" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/demultiplex" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/978?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# rquest-omop-worker-workflows\r +\r +Source for workflow definitions for the open source RQuest OMOP Worker tool developed for Hutch/TRE-FX\r +\r +Note: ARM workflows are currently broken. 
x86 ones work.\r +\r +## Inputs\r +\r +### Body\r +Sample input payload:\r +\r +```json\r +{\r + "task_id": "job-2023-01-13-14: 20: 38-",\r + "project": "",\r + "owner": "",\r + "cohort": {\r + "groups": [\r + {\r + "rules": [\r + {\r + "varname": "OMOP",\r + "varcat": "Person",\r + "type": "TEXT",\r + "oper": "=",\r + "value": "8507"\r + }\r + ],\r + "rules_oper": "AND"\r + }\r + ],\r + "groups_oper": "OR"\r + },\r + "collection": "",\r + "protocol_version": "",\r + "char_salt": "",\r + "uuid": ""\r +}\r +```\r +\r +### Database access\r +\r +Currently this workflow requires inputs for connecting to the database it will run queries against.\r +\r +In future this may be moved to environment variables.""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/471?version=3" ; + schema1:isBasedOn "https://github.com/HDRUK/rquest-omop-worker-workflows" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for rquest-omop-worker-workflow" ; + schema1:sdDatePublished "2024-07-12 13:27:10 +0100" ; + schema1:url "https://workflowhub.eu/workflows/471/ro_crate?version=3" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 5257 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 607 ; + schema1:creator ; + schema1:dateCreated "2023-10-23T13:58:11Z" ; + schema1:dateModified "2023-10-23T13:58:11Z" ; + schema1:description """# rquest-omop-worker-workflows\r +\r +Source for workflow definitions for the open source RQuest OMOP Worker tool developed for Hutch/TRE-FX\r +\r +Note: ARM workflows are currently broken. 
x86 ones work.\r +\r +## Inputs\r +\r +### Body\r +Sample input payload:\r +\r +```json\r +{\r + "task_id": "job-2023-01-13-14: 20: 38-",\r + "project": "",\r + "owner": "",\r + "cohort": {\r + "groups": [\r + {\r + "rules": [\r + {\r + "varname": "OMOP",\r + "varcat": "Person",\r + "type": "TEXT",\r + "oper": "=",\r + "value": "8507"\r + }\r + ],\r + "rules_oper": "AND"\r + }\r + ],\r + "groups_oper": "OR"\r + },\r + "collection": "",\r + "protocol_version": "",\r + "char_salt": "",\r + "uuid": ""\r +}\r +```\r +\r +### Database access\r +\r +Currently this workflow requires inputs for connecting to the database it will run queries against.\r +\r +In future this may be moved to environment variables.""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/471?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "rquest-omop-worker-workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/471?version=3" ; + schema1:version 3 ; + ns1:input , + , + , + ; + ns1:output . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Workflow for Creating a large disease network from various datasets and databases for IBM, and applying the active subnetwork identification method MOGAMUN." ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.681.7" ; + schema1:isBasedOn "https://github.com/jdwijnbergen/IBM_ASI_workflow.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Inclusion Body Myositis Active Subnetwork Identification Workflow" ; + schema1:sdDatePublished "2024-07-12 13:24:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/681/ro_crate?version=7" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 26610 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8257 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-27T16:19:23Z" ; + schema1:dateModified "2024-02-01T11:27:19Z" ; + schema1:description "Workflow for Creating a large disease network from various datasets and databases for IBM, and applying the active subnetwork identification method MOGAMUN." ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/681?version=6" ; + schema1:keywords "Bioinformatics, CWL, Genomics, Transcriptomics, Protein-Protein Interaction" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Inclusion Body Myositis Active Subnetwork Identification Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/681?version=7" ; + schema1:version 7 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Coprolite Identification" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/974?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/coproid" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/coproid" ; + schema1:sdDatePublished "2024-07-12 13:22:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/974/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4835 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:45Z" ; + schema1:dateModified "2024-06-11T12:54:45Z" ; + schema1:description "Coprolite Identification" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/974?version=2" ; + schema1:keywords "adna, ancient-dna, coprolite, microbiome" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/coproid" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/974?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/986?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/fetchngs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fetchngs" ; + schema1:sdDatePublished "2024-07-12 13:21:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/986/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6171 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:52Z" ; + schema1:dateModified "2024-06-11T12:54:52Z" ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/986?version=13" ; + schema1:keywords "ddbj, download, ena, FASTQ, geo, sra, synapse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fetchngs" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/986?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """## Summary\r +\r +The PPI information aggregation pipeline starts getting all the datasets in [GEO](https://www.ncbi.nlm.nih.gov/geo/) database whose material was generated using expression profiling by high throughput sequencing. From each database identifiers, it extracts the supplementary files that had the counts table. Once finishing the download step, it identifies those that were normalized or had the raw counts to normalize. It also identify and map the gene ids to uniprot (the ids found usually were from HGNC and Ensembl). For each normalized counts table belonging to some experiment, il filters those which have the proteins (already mapped from HGNC to Uniprot identifiers) in the pairs in evaluation. Then, it calculates the correlation matrix based on Pearson method in the tables and saves the respective pairs correlation value for each table. 
Finally, a repor is made for each pair in descending order of correlation value with the experiment identifiers.\r +\r +## Requirements:\r +* Python packages needed:\r + - os\r + - scipy\r + - pandas\r + - sklearn\r + - Bio python\r + - numpy\r +\r +## Usage Instructions\r +* Preparation:\r + 1. ````git clone https://github.com/YasCoMa/PipeAggregationInfo.git````\r + 2. ````cd PipeAggregationInfo````\r + 3. ````pip3 install -r requirements.txt````\r +\r +### Preprocessing pipeline\r +* Go to the ncbi [GDS database webpage](https://www.ncbi.nlm.nih.gov/gds), use the key words to filter your gds datasets of interest and save the results as file ("Send to" option), and choose "Summary (text)"\r +* Alternatively, we already saved the results concerning protein interactions, you may use them to run preprocessing in order to obtain the necessary files for the main pipeline\r +* Running preprocessing:\r + - ````cd preprocessing````\r + - ````python3 data_preprocessing.py ./workdir_preprocessing filter_files````\r + - ````cd ../````\r + - Copy the generated output folder "data_matrices_count" into the workflow folder: ````cp -R preprocessing/workdir_preprocessing/data_matrices_count .````\r +\r +### Main pipeline\r +\r +* Pipeline parameters:\r + - __-rt__ or __--running_type__
\r + Use to indicate the step you want to execute (it is desirable following the order):
\r + 1 - Make the process of finding the experiments and ranking them by correlation
\r + 2 - Select pairs that were already processed and ranked making a separated folder of interest\r +\r + - __-fo__ or __--folder__
\r + Folder to store the files (use the folder where the other required file can be found)\r + \r + - __-if__ or __--interactome_file__
\r + File with the pairs (two columns with uniprot identifiers in tsv format)
\r + \r + Example of this file: running_example/all_pairs.tsv\r +\r + - __-spf__ or __--selected_pairs_file__
\r + File with PPIs of interest (two columns with uniprot identifiers in tsv format)
\r + \r + Example of this file: running_example/selected_pairs.tsv\r +\r +* Running modes examples:\r + 1. Run step 1:
\r + ````python3 pipeline_expression_pattern.py -rt 1 -fo running_example/ -if all_pairs.tsv ````\r +\r + 2. Run step 2:
\r + ````python3 pipeline_expression_pattern.py -rt 2 -fo running_example/ -spf selected_pairs.tsv ````\r +\r +## Bug Report\r +Please, use the [Issue](https://github.com/YasCoMa/PipeAggregationInfo/issues) tab to report any bug.""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/619?version=1" ; + schema1:isBasedOn "https://github.com/YasCoMa/PipeAggregationInfo" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PipePatExp - Pipeline to aggregate gene expression correlation information for PPI" ; + schema1:sdDatePublished "2024-07-12 13:27:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/619/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 23984 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7332 ; + schema1:creator ; + schema1:dateCreated "2023-10-22T00:02:52Z" ; + schema1:dateModified "2023-10-22T00:02:52Z" ; + schema1:description """## Summary\r +\r +The PPI information aggregation pipeline starts getting all the datasets in [GEO](https://www.ncbi.nlm.nih.gov/geo/) database whose material was generated using expression profiling by high throughput sequencing. From each database identifiers, it extracts the supplementary files that had the counts table. Once finishing the download step, it identifies those that were normalized or had the raw counts to normalize. It also identify and map the gene ids to uniprot (the ids found usually were from HGNC and Ensembl). For each normalized counts table belonging to some experiment, il filters those which have the proteins (already mapped from HGNC to Uniprot identifiers) in the pairs in evaluation. Then, it calculates the correlation matrix based on Pearson method in the tables and saves the respective pairs correlation value for each table. 
Finally, a repor is made for each pair in descending order of correlation value with the experiment identifiers.\r +\r +## Requirements:\r +* Python packages needed:\r + - os\r + - scipy\r + - pandas\r + - sklearn\r + - Bio python\r + - numpy\r +\r +## Usage Instructions\r +* Preparation:\r + 1. ````git clone https://github.com/YasCoMa/PipeAggregationInfo.git````\r + 2. ````cd PipeAggregationInfo````\r + 3. ````pip3 install -r requirements.txt````\r +\r +### Preprocessing pipeline\r +* Go to the ncbi [GDS database webpage](https://www.ncbi.nlm.nih.gov/gds), use the key words to filter your gds datasets of interest and save the results as file ("Send to" option), and choose "Summary (text)"\r +* Alternatively, we already saved the results concerning protein interactions, you may use them to run preprocessing in order to obtain the necessary files for the main pipeline\r +* Running preprocessing:\r + - ````cd preprocessing````\r + - ````python3 data_preprocessing.py ./workdir_preprocessing filter_files````\r + - ````cd ../````\r + - Copy the generated output folder "data_matrices_count" into the workflow folder: ````cp -R preprocessing/workdir_preprocessing/data_matrices_count .````\r +\r +### Main pipeline\r +\r +* Pipeline parameters:\r + - __-rt__ or __--running_type__
\r + Use to indicate the step you want to execute (it is desirable following the order):
\r + 1 - Make the process of finding the experiments and ranking them by correlation
\r + 2 - Select pairs that were already processed and ranked making a separated folder of interest\r +\r + - __-fo__ or __--folder__
\r + Folder to store the files (use the folder where the other required file can be found)\r + \r + - __-if__ or __--interactome_file__
\r + File with the pairs (two columns with uniprot identifiers in tsv format)
\r + \r + Example of this file: running_example/all_pairs.tsv\r +\r + - __-spf__ or __--selected_pairs_file__
\r + File with PPIs of interest (two columns with uniprot identifiers in tsv format)
\r + \r + Example of this file: running_example/selected_pairs.tsv\r +\r +* Running modes examples:\r + 1. Run step 1:
\r + ````python3 pipeline_expression_pattern.py -rt 1 -fo running_example/ -if all_pairs.tsv ````\r +\r + 2. Run step 2:
\r + ````python3 pipeline_expression_pattern.py -rt 2 -fo running_example/ -spf selected_pairs.tsv ````\r +\r +## Bug Report\r +Please, use the [Issue](https://github.com/YasCoMa/PipeAggregationInfo/issues) tab to report any bug.""" ; + schema1:image ; + schema1:keywords "Bioinformatics, gene expression correlation, gene expression data wrangling, geo database mining" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "PipePatExp - Pipeline to aggregate gene expression correlation information for PPI" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/619?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r 
+![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.285.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_abc_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python ABC MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/285/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8001 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T08:44:03Z" ; + schema1:dateModified "2023-04-14T08:45:01Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 
2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/285?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python ABC MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_abc_md_setup/python/workflow.py" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Take an anndata file, and perform basic QC with scanpy. Produces a filtered AnnData object." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/467?version=4" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq_CellQC" ; + schema1:sdDatePublished "2024-07-12 13:22:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/467/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 21434 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-05-30T05:51:13Z" ; + schema1:dateModified "2024-05-30T05:51:13Z" ; + schema1:description "Take an anndata file, and perform basic QC with scanpy. Produces a filtered AnnData object." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/467?version=3" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "scRNAseq_CellQC" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/467?version=4" ; + schema1:version 4 ; + ns1:input , + , + , + , + ; + ns1:output , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Detects SNPs and INDELs using VARSCAN2." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/36?version=1" ; + schema1:isBasedOn "https://usegalaxy.eu/u/ambarishk/w/varscan" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for COVID-19: VARSCAN" ; + schema1:sdDatePublished "2024-07-12 13:37:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/36/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 31968 ; + schema1:dateCreated "2020-06-18T22:57:27Z" ; + schema1:dateModified "2023-02-13T14:06:46Z" ; + schema1:description "Detects SNPs and INDELs using VARSCAN2." ; + schema1:image ; + schema1:keywords "Galaxy, VARSCAN2, SNPs, INDELs" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "COVID-19: VARSCAN" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/36?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 221636 . 
+ + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:datePublished "2024-03-04T20:06:19+00:00" ; + schema1:description "PyCOMPSs implementation of Probabilistic Tsunami Forecast (PTF). PTF explicitly treats data- and forecast-uncertainties, enabling alert level definitions according to any predefined level of conservatism, which is connected to the average balance of missed-vs-false-alarms. Run of the Kos-Bodrum 2017 event test-case with 1000 scenarios, 8h tsunami simulation for each and forecast calculations for partial and full ensembles with focal mechanism and tsunami data updates." ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license 
"https://creativecommons.org/licenses/by-nc-nd/4.0/" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "PyCOMPSs Probabilistic Tsunami Forecast (PTF) - Kos-Bodrum 2017 earthquake and tsunami test-case" ; + schema1:publisher , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 1685175 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 243467 ; + schema1:dateModified "2023-10-19T13:01:10" ; + schema1:name "POIs.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 42945012 ; + schema1:dateModified "2023-10-19T13:01:10" ; + schema1:name "regional_domain.grd" ; + schema1:sdDatePublished "2024-03-04T20:06:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 37218 ; + schema1:dateModified "2023-10-19T13:01:10" ; + schema1:name "regional_domain_POIs_depth.dat" ; + schema1:sdDatePublished "2024-03-04T20:06:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 5895 ; + schema1:dateModified "2023-10-19T13:13:04" ; + schema1:name "Step1_config_template_mod.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2126 ; + schema1:dateModified "2023-10-19T12:59:46" ; + schema1:name "Step2_parfile_tmp.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 231 ; + schema1:dateModified "2024-03-04T14:23:49" ; + schema1:name "parfile_mod.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:19+00:00" . 
+ + a schema1:Dataset ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:name "Run" ; + schema1:sdDatePublished "2024-03-04T20:06:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name ".gitkeep" ; + schema1:sdDatePublished "2024-03-04T20:06:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101755 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "Step1_scenario_list_BS.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2061820 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "ptf_out.hdf5" ; + schema1:sdDatePublished "2024-03-04T20:06:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "Step2_BS_failed.log" ; + schema1:sdDatePublished "2024-03-04T20:06:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2086912 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "ptf_out.hdf5" ; + schema1:sdDatePublished "2024-03-04T20:06:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 6538 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "ptf_main.config" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 5142 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "sim_files.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 40 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulBS_lb_01level_01proc.bin" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS1.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS10.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS100.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS101.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS102.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS103.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS104.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS105.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS106.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS107.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS108.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS109.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS11.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS110.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS111.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS112.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS113.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS114.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS115.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS116.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS117.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS118.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS119.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS12.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS120.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS121.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS122.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS123.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS124.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS125.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS126.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS127.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS128.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS129.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS13.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS130.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS131.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS132.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS133.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS134.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS135.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS136.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS137.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS138.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS139.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS14.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS140.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS141.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS142.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS143.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS144.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS145.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS146.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS147.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS148.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS149.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS15.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS150.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS151.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS152.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS153.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS154.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS155.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS156.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS157.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS158.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS159.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS16.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS160.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS161.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS162.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS163.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS164.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS165.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS166.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS167.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS168.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS169.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS17.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS170.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS171.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS172.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS173.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS174.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS175.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS176.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS177.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS178.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS179.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS18.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS180.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS181.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS182.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS183.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS184.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS185.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS186.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS187.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS188.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS189.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS19.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS190.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS191.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS192.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS193.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS194.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS195.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS196.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS197.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS198.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS199.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS2.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS20.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS200.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS201.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS202.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS203.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS204.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS205.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS206.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS207.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS208.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS209.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS21.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS210.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS211.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS212.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS213.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS214.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS215.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS216.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS217.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS218.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS219.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS22.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS220.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS221.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS222.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS223.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS224.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS225.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS226.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS227.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS228.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS229.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS23.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS230.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS231.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS232.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS233.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS234.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS235.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS236.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS237.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS238.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS239.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS24.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS240.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS241.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS242.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS243.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS244.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS245.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS246.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS247.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS248.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS249.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS25.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS250.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS26.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS27.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS28.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS29.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS3.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS30.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS31.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS32.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS33.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS34.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS35.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS36.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS37.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS38.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS39.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS4.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS40.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS41.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS42.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS43.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS44.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS45.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS46.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS47.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS48.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS49.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS5.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS50.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS51.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS52.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS53.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS54.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS55.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS56.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS57.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS58.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS59.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS6.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS60.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS61.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS62.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS63.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS64.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS65.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS66.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS67.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS68.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS69.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS7.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS70.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS71.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS72.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS73.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS74.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS75.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS76.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS77.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS78.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS79.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS8.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS80.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS81.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS82.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS83.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS84.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS85.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS86.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS87.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS88.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS89.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS9.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS90.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS91.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS92.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS93.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS94.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS95.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS96.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS97.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS98.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS99.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "This workflow extracts 5 different time periods e.g. January- June 2019, 2020 and 2021, July-December 2019 and 2020 over a single selected location. Then statistics (mean, minimum, maximum) are computed. The final products are maximum, minimum and mean." ; + schema1:hasPart , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.251.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Investigation of lockdown effect on air quality between January 2019 to May 2021." ; + schema1:sdDatePublished "2024-07-12 13:36:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/251/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 20663 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 117910 ; + schema1:dateCreated "2021-12-20T09:43:13Z" ; + schema1:dateModified "2023-01-16T13:56:22Z" ; + schema1:description "This workflow extracts 5 different time periods e.g. January- June 2019, 2020 and 2021, July-December 2019 and 2020 over a single selected location. Then statistics (mean, minimum, maximum) are computed. The final products are maximum, minimum and mean." ; + schema1:image ; + schema1:keywords "RELIANCE, copernicus, air-quality" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Investigation of lockdown effect on air quality between January 2019 to May 2021." ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/251?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 61932 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# The Polygenic Score Catalog Calculator (`pgsc_calc`)\r +\r +[![Documentation Status](https://readthedocs.org/projects/pgsc-calc/badge/?version=latest)](https://pgsc-calc.readthedocs.io/en/latest/?badge=latest)\r +[![pgscatalog/pgsc_calc CI](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml/badge.svg)](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.5970794.svg)](https://doi.org/10.5281/zenodo.5970794)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-≥22.10.0-23aa62.svg?labelColor=000000)](https://www.nextflow.io/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +\r +## Introduction\r +\r +`pgsc_calc` is a bioinformatics best-practice analysis pipeline for calculating\r +polygenic [risk] scores on samples with imputed genotypes using existing scoring\r +files from the [Polygenic Score (PGS) Catalog](https://www.pgscatalog.org/)\r +and/or user-defined PGS/PRS.\r +\r +## Pipeline summary\r +\r +

\r + \r +

\r +\r +The workflow performs the following steps:\r +\r +* Downloading scoring files using the PGS Catalog API in a specified genome build (GRCh37 and GRCh38).\r +* Reading custom scoring files (and performing a liftover if genotyping data is in a different build).\r +* Automatically combines and creates scoring files for efficient parallel computation of multiple PGS\r + - Matching variants in the scoring files against variants in the target dataset (in plink bfile/pfile or VCF format)\r +* Calculates PGS for all samples (linear sum of weights and dosages)\r +* Creates a summary report to visualize score distributions and pipeline metadata (variant matching QC)\r +\r +And optionally:\r +\r +- Genetic Ancestry: calculate similarity of target samples to populations in a\r + reference dataset ([1000 Genomes (1000G)](http://www.nature.com/nature/journal/v526/n7571/full/nature15393.html)), using principal components analysis (PCA)\r +- PGS Normalization: Using reference population data and/or PCA projections to report\r + individual-level PGS predictions (e.g. percentiles, z-scores) that account for genetic ancestry\r +\r +See documentation for a list of planned [features under development](https://pgsc-calc.readthedocs.io/en/latest/index.html#Features-under-development).\r +\r +## Quick start\r +\r +1. Install\r +[`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation)\r +(`>=22.10.0`)\r +\r +2. Install [`Docker`](https://docs.docker.com/engine/installation/) or\r +[`Singularity (v3.8.3 minimum)`](https://www.sylabs.io/guides/3.0/user-guide/)\r +(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort)\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile test,\r + ```\r +\r +4. 
Start running your own analysis!\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile --input samplesheet.csv --pgs_id PGS001229\r + ```\r +\r +See [getting\r +started](https://pgsc-calc.readthedocs.io/en/latest/getting-started.html) for more\r +details.\r +\r +## Documentation\r +\r +[Full documentation is available on Read the Docs](https://pgsc-calc.readthedocs.io/)\r +\r +## Credits\r +\r +pgscatalog/pgsc_calc is developed as part of the PGS Catalog project, a\r +collaboration between the University of Cambridge’s Department of Public Health\r +and Primary Care (Michael Inouye, Samuel Lambert) and the European\r +Bioinformatics Institute (Helen Parkinson, Laura Harris).\r +\r +The pipeline seeks to provide a standardized workflow for PGS calculation and\r +ancestry inference implemented in nextflow derived from an existing set of\r +tools/scripts developed by Inouye lab (Rodrigo Canovas, Scott Ritchie, Jingqin\r +Wu) and PGS Catalog teams (Samuel Lambert, Laurent Gil).\r +\r +The adaptation of the codebase, nextflow implementation, and PGS Catalog features\r +are written by Benjamin Wingfield, Samuel Lambert, Laurent Gil with additional input\r +from Aoife McMahon (EBI). Development of new features, testing, and code review\r +is ongoing including Inouye lab members (Rodrigo Canovas, Scott Ritchie) and others. A\r +manuscript describing the tool is *in preparation*. In the meantime if you use the\r +tool we ask you to cite the repo and the paper describing the PGS Catalog\r +resource:\r +\r +- >PGS Catalog Calculator _(in preparation)_. PGS Catalog\r + Team. [https://github.com/PGScatalog/pgsc_calc](https://github.com/PGScatalog/pgsc_calc)\r +- >Lambert _et al._ (2021) The Polygenic Score Catalog as an open database for\r +reproducibility and systematic evaluation. Nature Genetics. 
53:420–425\r +doi:[10.1038/s41588-021-00783-5](https://doi.org/10.1038/s41588-021-00783-5).\r +\r +This pipeline is distrubuted under an [Apache License](LICENSE) amd uses code and \r +infrastructure developed and maintained by the [nf-core](https://nf-co.re) community \r +(Ewels *et al. Nature Biotech* (2020) doi:[10.1038/s41587-020-0439-x](https://doi.org/10.1038/s41587-020-0439-x)), \r +reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +Additional references of open-source tools and data used in this pipeline are described in\r +[`CITATIONS.md`](CITATIONS.md).\r +\r +This work has received funding from EMBL-EBI core funds, the Baker Institute,\r +the University of Cambridge, Health Data Research UK (HDRUK), and the European\r +Union’s Horizon 2020 research and innovation programme under grant agreement No\r +101016775 INTERVENE.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/556?version=2" ; + schema1:isBasedOn "https://github.com/PGScatalog/pgsc_calc" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for The Polygenic Score Catalog Calculator" ; + schema1:sdDatePublished "2024-07-12 13:27:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/556/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1673 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-08-10T09:09:35Z" ; + schema1:dateModified "2023-08-10T09:09:35Z" ; + schema1:description """# The Polygenic Score Catalog Calculator (`pgsc_calc`)\r +\r +[![Documentation Status](https://readthedocs.org/projects/pgsc-calc/badge/?version=latest)](https://pgsc-calc.readthedocs.io/en/latest/?badge=latest)\r +[![pgscatalog/pgsc_calc CI](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml/badge.svg)](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.5970794.svg)](https://doi.org/10.5281/zenodo.5970794)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-≥22.10.0-23aa62.svg?labelColor=000000)](https://www.nextflow.io/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +\r +## Introduction\r +\r +`pgsc_calc` is a bioinformatics best-practice analysis pipeline for calculating\r +polygenic [risk] scores on samples with imputed genotypes using existing scoring\r +files from the [Polygenic Score (PGS) Catalog](https://www.pgscatalog.org/)\r +and/or user-defined PGS/PRS.\r +\r +## Pipeline summary\r +\r +

\r + \r +

\r +\r +The workflow performs the following steps:\r +\r +* Downloading scoring files using the PGS Catalog API in a specified genome build (GRCh37 and GRCh38).\r +* Reading custom scoring files (and performing a liftover if genotyping data is in a different build).\r +* Automatically combines and creates scoring files for efficient parallel computation of multiple PGS\r + - Matching variants in the scoring files against variants in the target dataset (in plink bfile/pfile or VCF format)\r +* Calculates PGS for all samples (linear sum of weights and dosages)\r +* Creates a summary report to visualize score distributions and pipeline metadata (variant matching QC)\r +\r +And optionally:\r +\r +- Genetic Ancestry: calculate similarity of target samples to populations in a\r + reference dataset ([1000 Genomes (1000G)](http://www.nature.com/nature/journal/v526/n7571/full/nature15393.html)), using principal components analysis (PCA)\r +- PGS Normalization: Using reference population data and/or PCA projections to report\r + individual-level PGS predictions (e.g. percentiles, z-scores) that account for genetic ancestry\r +\r +See documentation for a list of planned [features under development](https://pgsc-calc.readthedocs.io/en/latest/index.html#Features-under-development).\r +\r +## Quick start\r +\r +1. Install\r +[`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation)\r +(`>=22.10.0`)\r +\r +2. Install [`Docker`](https://docs.docker.com/engine/installation/) or\r +[`Singularity (v3.8.3 minimum)`](https://www.sylabs.io/guides/3.0/user-guide/)\r +(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort)\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile test,\r + ```\r +\r +4. 
Start running your own analysis!\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile --input samplesheet.csv --pgs_id PGS001229\r + ```\r +\r +See [getting\r +started](https://pgsc-calc.readthedocs.io/en/latest/getting-started.html) for more\r +details.\r +\r +## Documentation\r +\r +[Full documentation is available on Read the Docs](https://pgsc-calc.readthedocs.io/)\r +\r +## Credits\r +\r +pgscatalog/pgsc_calc is developed as part of the PGS Catalog project, a\r +collaboration between the University of Cambridge’s Department of Public Health\r +and Primary Care (Michael Inouye, Samuel Lambert) and the European\r +Bioinformatics Institute (Helen Parkinson, Laura Harris).\r +\r +The pipeline seeks to provide a standardized workflow for PGS calculation and\r +ancestry inference implemented in nextflow derived from an existing set of\r +tools/scripts developed by Inouye lab (Rodrigo Canovas, Scott Ritchie, Jingqin\r +Wu) and PGS Catalog teams (Samuel Lambert, Laurent Gil).\r +\r +The adaptation of the codebase, nextflow implementation, and PGS Catalog features\r +are written by Benjamin Wingfield, Samuel Lambert, Laurent Gil with additional input\r +from Aoife McMahon (EBI). Development of new features, testing, and code review\r +is ongoing including Inouye lab members (Rodrigo Canovas, Scott Ritchie) and others. A\r +manuscript describing the tool is *in preparation*. In the meantime if you use the\r +tool we ask you to cite the repo and the paper describing the PGS Catalog\r +resource:\r +\r +- >PGS Catalog Calculator _(in preparation)_. PGS Catalog\r + Team. [https://github.com/PGScatalog/pgsc_calc](https://github.com/PGScatalog/pgsc_calc)\r +- >Lambert _et al._ (2021) The Polygenic Score Catalog as an open database for\r +reproducibility and systematic evaluation. Nature Genetics. 
53:420–425\r +doi:[10.1038/s41588-021-00783-5](https://doi.org/10.1038/s41588-021-00783-5).\r +\r +This pipeline is distributed under an [Apache License](LICENSE) and uses code and \r +infrastructure developed and maintained by the [nf-core](https://nf-co.re) community \r +(Ewels *et al. Nature Biotech* (2020) doi:[10.1038/s41587-020-0439-x](https://doi.org/10.1038/s41587-020-0439-x)), \r +reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +Additional references of open-source tools and data used in this pipeline are described in\r +[`CITATIONS.md`](CITATIONS.md).\r +\r +This work has received funding from EMBL-EBI core funds, the Baker Institute,\r +the University of Cambridge, Health Data Research UK (HDRUK), and the European\r +Union’s Horizon 2020 research and innovation programme under grant agreement No\r +101016775 INTERVENE.\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/556?version=3" ; + schema1:keywords "Nextflow, Workflows, polygenic risk score, polygenic score, prediction, GWAS, genomic ancestry" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "The Polygenic Score Catalog Calculator" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/556?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-23T06:34:49+00:00" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/AustralianBioCommons/Genome-assessment-post-assembly.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Genome-assessment-post-assembly" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Genome-assessment-post-assembly" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/AustralianBioCommons/Genome-assessment-post-assembly.git" ; + schema1:version "main" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/976?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/cutandrun" ; + schema1:sdDatePublished "2024-07-12 13:21:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/976/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15191 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:46Z" ; + schema1:dateModified "2024-06-11T12:54:46Z" ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/976?version=7" ; + schema1:keywords "cutandrun, cutandrun-seq, cutandtag, cutandtag-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/cutandrun" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/976?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-23T13:33:07.952232" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-Trio-phasing-VGP5/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/978?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/demultiplex" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/demultiplex" ; + schema1:sdDatePublished "2024-07-12 13:21:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/978/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9518 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:47Z" ; + schema1:dateModified "2024-06-11T12:54:47Z" ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/978?version=7" ; + schema1:keywords "bases2fastq, bcl2fastq, demultiplexing, elementbiosciences, illumina" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/demultiplex" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/978?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=13" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-07-12 13:19:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=13" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15644 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:15Z" ; + schema1:dateModified "2024-06-11T12:55:15Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=13" ; + schema1:version 13 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-07-12 13:20:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7062 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:58Z" ; + schema1:dateModified "2024-06-11T12:54:58Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.292.2" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Amber Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-07-12 13:35:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/292/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1722 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-24T14:21:22Z" ; + schema1:dateModified "2023-01-16T13:58:35Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/292?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Amber Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/292?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=12" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-07-12 13:19:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=12" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15642 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:15Z" ; + schema1:dateModified "2024-06-11T12:55:15Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=12" ; + schema1:version 12 . + + a schema1:Dataset ; + schema1:datePublished "2021-12-20T17:04:47.611475" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/fragment-based-docking-scoring" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "fragment-based-docking-scoring/main" ; + schema1:sdDatePublished "2021-12-21 03:01:02 +0000" ; + schema1:softwareVersion "v0.1.2" . 
+ + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """## CWL based workflow to assemble haploid/diploid eukaryote genomes of non-model organisms\r +The workflow is designed to use both PacBio long-reads and Illumina short-reads. The workflow first extracts, corrects, trims and decontaminates the long reads. Decontaminated trimmed reads are then used to assemble the genome and raw reads are used to polish it. Next, Illumina reads are cleaned and used to further polish the resultant assembly. Finally, the polished assembly is masked using inferred repeats and haplotypes are eliminated. The workflow uses BioConda and DockerHub to install required software and is therefore fully automated. In addition to final assembly, the workflow produces intermediate assemblies before and after polishing steps. The workflow follows the syntax for CWL v1.0.\r +\r +### Dependencies\r +# Programs\r +The pipeline can be run either using [Cromwell](https://cromwell.readthedocs.io/en/stable) or [cwltool reference](https://github.com/common-workflow-language/cwltool) implementation and docker containers can be run either using [Singularity](https://singularity.lbl.gov) or [udocker](https://singularity.lbl.gov).\r +\r +Cromwell implementation\r +* [cromwell v44](https://github.com/broadinstitute/cromwell/releases/tag/44)\r +* [java-jdk v8.0.112](https://www.java.com/en)\r +\r +Reference implementation\r +* [cwltool v1.0.20181012180214](https://github.com/common-workflow-language/cwltool)\r +* [nodejs v10.4.1 required by cwltool](https://nodejs.org/en)\r +* [Python library galaxy-lib v18.5.7](https://pypi.org/project/galaxy-lib)\r +\r +Singularity software packages have to be installed server-wide by administrator\r +* [Singularity v3.2.1](https://singularity.lbl.gov)\r +* [squashfs-tools 
v4.3.0](https://github.com/plougher/squashfs-tools)\r +\r +Udocker software package can be installed locally\r +* [udocker v1.1.2](https://github.com/indigo-dc/udocker)\r +\r +# Data\r +* [Illumina adapters converted to FASTA format](http://sapac.support.illumina.com/downloads/illumina-adapter-sequences-document-1000000002694.html)\r +* [NCBI nucleotide non-redundant sequences for decontamination with Centrifuge](http://www.ccb.jhu.edu/software/centrifuge)\r +* [RepBase v17.02 file RMRBSeqs.embl](https://www.girinst.org/repbase)\r +\r +### Installation\r +Install miniconda using installation script ```installConda.sh```.\r +To install CWL, use either installation script ```installCromwell.sh``` or ```installCwltool.sh```.\r +To install udocker, use installation script ```installUdocker.sh```.\r +To install singularity, ask your system administrator.\r +\r +```\r +# First confirm that you have the program 'git' installed in your system\r +> cd\r +> git clone -b 'v0.1.3-beta' --single-branch --depth 1 https://github.com/vetscience/Assemblosis\r +> cd Assemblosis\r +> bash installConda.sh\r +> bash installCromwell.sh # or bash installCwltool.sh\r +> bash installUdocker.sh # if singularity cannot be installed or does not run\r +\r +```\r +For data dependencies: download and extract [RepBase database](https://www.girinst.org/repbase), download Centrifuge version of [NCBI nt database](http://www.ccb.jhu.edu/software/centrifuge) and create [Illumina adapter FASTA file](http://sapac.support.illumina.com/downloads/illumina-adapter-sequences-document-1000000002694.html) to your preferred locations. If your reads are clean from adapters, the adapter FASTA file can be empty.\r +Give the location of these data in the configuration (.yml) file (see **Usage**).\r +\r +### Usage\r +You have to create a YAML (.yml) file for each assembly. 
This file defines the required parameters and the location for both PacBio and Illumina raw-reads.\r +```\r +> cd\r +> export PATH=~/miniconda3/bin:$PATH\r +> cd Assemblosis/Run\r +> cp ../Examples/assemblyCele.yml .\r +\r +"Edit assemblyCele.yml to fit your computing environment and to define the location for the read files, databases and Illumina adapters"\r +\r +"Running docker images using Cromwell and singularity:"\r +> java -Dconfig.file=cromwell.singularity.conf -jar cromwell-44.jar run -t CWL -v v1.0 assembly.cwl -i assemblyCele.yml\r +\r +"Running docker images using Cromwell and udocker:"\r +> java -Dconfig.file=cromwell.udocker.conf -jar cromwell-44.jar run -t CWL -v v1.0 assembly.cwl -i assemblyCele.yml\r +\r +"Running docker images using Cwltool and singularity:"\r +> cwltool --tmpdir-prefix /home//Tmp --beta-conda-dependencies --cachedir /home//Cache --singularity --leave-tmpdir assembly.cwl assemblyCele.yml\r +\r +"Running docker images using Cwltool and udocker:"\r +> cwltool --tmpdir-prefix /home//Tmp --beta-conda-dependencies --cachedir /home//Cache --user-space-docker-cmd udocker --leave-tmpdir assembly.cwl assemblyCele.yml\r +```\r +\r +An annotated example of the YAML file for Caenorhabditis elegans assembly.\r +```\r +## Directory, which contains the PacBio raw data\r +# NOTE! 
The software looks for all .h5 file (or bam files if bacBioInBam below is defined true) in given directory\r +pacBioDataDir:\r + class: Directory\r + location: /home//Dna\r +\r +## PacBio files are in bam format as returned from Sequel platform\r +pacBioInBam: true\r +\r +## Prefix for the resultant assembly files\r +prefix: cele\r +\r +## Maximum number of threads used in the pipeline\r +threads: 24\r +\r +## Minimum number of threads per job used in canu assembler\r +minThreads: 4\r +\r +## Number of concurrent jobs in canu assembler (recommended to use threads / minThreads)\r +canuConcurrency: 6\r +\r +### Parameters for the program Canu are described in https://canu.readthedocs.io/en/latest/parameter-reference.html\r +## Expected genome size. This parameter is forwarded to Canu assembler.\r +genomeSize: 100m\r +\r +## Minimum length for the PacBio reads used for the assembly. This parameter is forwarded to Canu assembler.\r +# The maximum resolvable repeat regions becomes 2 x minReadLength\r +minReadLen: 6000\r +\r +## Parameter for Canu assembler to adjust to GC-content. Should be 0.15 for high or low GC content.\r +corMaxEvidenceErate: 0.20\r +\r +### Parameters for the program Trimmomatic are described in http://www.usadellab.org/cms/?page=trimmomatic\r +## Paired-end (PE) reads of Illumina raw data. These files are given to the program Trimmomatic.\r +# NOTE! Data for two paired libraries is given below.\r +readsPe1:\r + - class: File\r + format: edam:format_1930 # fastq\r + path: /home//Dna/SRR2598966_1.fastq.gz\r + - class: File\r + format: edam:format_1930 # fastq\r + path: /home//Dna/SRR2598967_1.fastq.gz\r +readsPe2:\r + - class: File\r + format: edam:format_1930 # fastq\r + path: /home//Dna/SRR2598966_2.fastq.gz\r + - class: File\r + format: edam:format_1930 # fastq\r + path: /home//Dna/SRR2598967_2.fastq.gz\r +\r +## Phred coding of Illumina data. This parameter is forwarded to Trimmomatic.\r +# NOTE! 
Each read-pair needs one phred value.\r +phredsPe: ['33','33']\r +\r +## Sliding window and illuminaClip parameters for Trimmomatic\r +slidingWindow:\r + windowSize: 4\r + requiredQuality: 25\r +illuminaClip:\r + adapters:\r + class: File\r + path: \r + seedMismatches: 2\r + palindromeClipThreshold: 30\r + simpleClipThreshold: 10\r + minAdapterLength: 20\r + keepBothReads: true\r +## Further parameters for Trimmomatic\r +# Required phred-quality for leading 5 nucleotides\r +leading: 25\r +# Required phred-quality for trailing 5 nucleotides\r +trailing: 25\r +# Minimum accepted read-length to keep the read after trimming\r +minlen: 40\r +\r +### Parameters for the program bowtie2 are described in http://bowtie-bio.sourceforge.net/bowtie2/manual.shtml\r +## Illumina PE fragment length. Program bowtie2 parameter -X.\r +# NOTE! Each read-pair needs one phred value.\r +maxFragmentLens: [500, 600]\r +# Orientation of pair-end reads e.g. 'fr', 'rf', 'ff': Program bowtie2 parameters --fr, --rf or --ff\r +orientation: 'fr'\r +\r +### Parameters for the program Pilon are described in https://github.com/broadinstitute/pilon/wiki/Requirements-&-Usage\r +# Prefix for the resultant pilon polished assembly. Pilon parameter --output\r +polishedAssembly: celePilon\r +# This is set 'true' for an organism with diploid genome: Pilon parameter --diploid\r +diploidOrganism: true\r +# Value 'bases' fixes snps and indels: Pilon parameter --fix\r +fix: bases\r +\r +### Parameters for the program centrifuge are described in http://www.ccb.jhu.edu/software/centrifuge/manual.shtml\r +# Path to the directory, that contains NCBI nt database in nt.?.cf files. Centrifuge parameter -x\r +database:\r + class: Directory\r + path: /home//ntDatabase\r +# Length of the identical match in nucleotides required to infer a read as contaminant. Centrifuge parameter --min-hitlen\r +partialMatch: 100\r +# NCBI taxon root identifiers for the species considered contaminants: e.g. 
bacteria (=2), viruses (=10239), fungi (=4751), mammals (=40674), artificial seqs (=81077). Pipeline specific parameter.\r +taxons: [2,10239,4751,40674,81077]\r +\r +## Parameters for the RepeatModeler and RepeatMasker are described in http://www.repeatmasker.org\r +repBaseLibrary:\r + class: File\r + # This is the RepBase file from https://www.girinst.org/repbase. RepeatMasker parameter -lib\r + path: /home//RepBaseLibrary/RMRBSeqs.embl\r +# Constant true and false values for repeat masker\r +trueValue: true\r +falseValue: false\r +\r +```\r +### Runtimes and hardware requirements\r +The workflow was tested in Linux environment (CentOS Linux release 7.2.1511) in a server with 24 physical CPUs (48 hyperthreaded CPUs) and 512 GB RAM.\r +\r +| Assembly | Runtime in CPU hours | RAM usage (GB) |\r +| --- | --- | --- |\r +| *Caenorhabditis elegans* | 1537 | 134.1 |\r +| *Drosophila melanogaster* | 6501 | 134.1 |\r +| *Plasmodium falciparum* | 424 | 134.1 |\r +\r +Maximum memory usage of 134.1 GB was claimed by the program Centrifuge for each assembly.\r +\r +### Software tools used in this pipeline\r +* [Dextractor v1.0](https://github.com/thegenemyers/DEXTRACTOR)\r +* [Trimmomatic v0.36](http://www.usadellab.org/cms/?page=trimmomatic)\r +* [Centrifuge v1.0.3](http://www.ccb.jhu.edu/software/centrifuge)\r +* [Canu v1.8](http://canu.readthedocs.io/en/latest/index.html)\r +* [Arrow in SmrtLink v7.0.1](https://www.pacb.com/support/software-downloads)\r +* [Bowtie 2 v2.2.8](http://bowtie-bio.sourceforge.net/bowtie2/index.shtml)\r +* [SAMtools v1.6](http://samtools.sourceforge.net)\r +* [Pilon v1.22](https://github.com/broadinstitute/pilon)\r +* [RepeatMasker v4.0.6](http://www.repeatmasker.org)\r +* [RepeatModeler v1.0.11](http://www.repeatmasker.org)\r +* [RepBase v17.02](https://www.girinst.org/repbase)\r +* [HaploMerger2 build_20160512](https://github.com/mapleforest/HaploMerger2)\r +\r +### Cite\r +If you use the pipeline, please cite:\r +Korhonen, Pasi K., Ross S. 
Hall, Neil D. Young, and Robin B. Gasser. "Common Workflow Language (CWL)-based software pipeline for de novo genome assembly from long-and short-read data." GigaScience 8, no. 4 (2019): giz014.\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/334?version=1" ; + schema1:isBasedOn "https://github.com/vetscience/Assemblosis.git" ; + schema1:license "BSD-3-Clause" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Assemblosis" ; + schema1:sdDatePublished "2024-07-12 13:35:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/334/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8166 ; + schema1:creator ; + schema1:dateCreated "2022-04-20T23:10:26Z" ; + schema1:dateModified "2023-01-16T13:59:43Z" ; + schema1:description """## CWL based workflow to assemble haploid/diploid eukaryote genomes of non-model organisms\r +The workflow is designed to use both PacBio long-reads and Illumina short-reads. The workflow first extracts, corrects, trims and decontaminates the long reads. Decontaminated trimmed reads are then used to assemble the genome and raw reads are used to polish it. Next, Illumina reads are cleaned and used to further polish the resultant assembly. Finally, the polished assembly is masked using inferred repeats and haplotypes are eliminated. The workflow uses BioConda and DockerHub to install required software and is therefore fully automated. In addition to final assembly, the workflow produces intermediate assemblies before and after polishing steps. 
The workflow follows the syntax for CWL v1.0.\r +\r +### Dependencies\r +# Programs\r +The pipeline can be run either using [Cromwell](https://cromwell.readthedocs.io/en/stable) or [cwltool reference](https://github.com/common-workflow-language/cwltool) implementation and docker containers can be run either using [Singularity](https://singularity.lbl.gov) or [udocker](https://singularity.lbl.gov).\r +\r +Cromwell implementation\r +* [cromwell v44](https://github.com/broadinstitute/cromwell/releases/tag/44)\r +* [java-jdk v8.0.112](https://www.java.com/en)\r +\r +Reference implementation\r +* [cwltool v1.0.20181012180214](https://github.com/common-workflow-language/cwltool)\r +* [nodejs v10.4.1 required by cwltool](https://nodejs.org/en)\r +* [Python library galaxy-lib v18.5.7](https://pypi.org/project/galaxy-lib)\r +\r +Singularity software packages have to be installed server-wide by administrator\r +* [Singularity v3.2.1](https://singularity.lbl.gov)\r +* [squashfs-tools v4.3.0](https://github.com/plougher/squashfs-tools)\r +\r +Udocker software package can be installed locally\r +* [udocker v1.1.2](https://github.com/indigo-dc/udocker)\r +\r +# Data\r +* [Illumina adapters converted to FASTA format](http://sapac.support.illumina.com/downloads/illumina-adapter-sequences-document-1000000002694.html)\r +* [NCBI nucleotide non-redundant sequences for decontamination with Centrifuge](http://www.ccb.jhu.edu/software/centrifuge)\r +* [RepBase v17.02 file RMRBSeqs.embl](https://www.girinst.org/repbase)\r +\r +### Installation\r +Install miniconda using installation script ```installConda.sh```.\r +To install CWL, use either installation script ```installCromwell.sh``` or ```installCwltool.sh```.\r +To install udocker, use installation script ```installUdocker.sh```.\r +To install singularity, ask your system administrator.\r +\r +```\r +# First confirm that you have the program 'git' installed in your system\r +> cd\r +> git clone -b 'v0.1.3-beta' --single-branch 
--depth 1 https://github.com/vetscience/Assemblosis\r +> cd Assemblosis\r +> bash installConda.sh\r +> bash installCromwell.sh # or bash installCwltool.sh\r +> bash installUdocker.sh # if singularity cannot be installed or does not run\r +\r +```\r +For data dependencies: download and extract [RepBase database](https://www.girinst.org/repbase), download Centrifuge version of [NCBI nt database](http://www.ccb.jhu.edu/software/centrifuge) and create [Illumina adapter FASTA file](http://sapac.support.illumina.com/downloads/illumina-adapter-sequences-document-1000000002694.html) to your preferred locations. If your reads are clean from adapters, the adapter FASTA file can be empty.\r +Give the location of these data in the configuration (.yml) file (see **Usage**).\r +\r +### Usage\r +You have to create a YAML (.yml) file for each assembly. This file defines the required parameters and the location for both PacBio and Illumina raw-reads.\r +```\r +> cd\r +> export PATH=~/miniconda3/bin:$PATH\r +> cd Assemblosis/Run\r +> cp ../Examples/assemblyCele.yml .\r +\r +"Edit assemblyCele.yml to fit your computing environment and to define the location for the read files, databases and Illumina adapters"\r +\r +"Running docker images using Cromwell and singularity:"\r +> java -Dconfig.file=cromwell.singularity.conf -jar cromwell-44.jar run -t CWL -v v1.0 assembly.cwl -i assemblyCele.yml\r +\r +"Running docker images using Cromwell and udocker:"\r +> java -Dconfig.file=cromwell.udocker.conf -jar cromwell-44.jar run -t CWL -v v1.0 assembly.cwl -i assemblyCele.yml\r +\r +"Running docker images using Cwltool and singularity:"\r +> cwltool --tmpdir-prefix /home//Tmp --beta-conda-dependencies --cachedir /home//Cache --singularity --leave-tmpdir assembly.cwl assemblyCele.yml\r +\r +"Running docker images using Cwltool and udocker:"\r +> cwltool --tmpdir-prefix /home//Tmp --beta-conda-dependencies --cachedir /home//Cache --user-space-docker-cmd udocker --leave-tmpdir assembly.cwl 
assemblyCele.yml\r +```\r +\r +An annotated example of the YAML file for Caenorhabditis elegans assembly.\r +```\r +## Directory, which contains the PacBio raw data\r +# NOTE! The software looks for all .h5 file (or bam files if bacBioInBam below is defined true) in given directory\r +pacBioDataDir:\r + class: Directory\r + location: /home//Dna\r +\r +## PacBio files are in bam format as returned from Sequel platform\r +pacBioInBam: true\r +\r +## Prefix for the resultant assembly files\r +prefix: cele\r +\r +## Maximum number of threads used in the pipeline\r +threads: 24\r +\r +## Minimum number of threads per job used in canu assembler\r +minThreads: 4\r +\r +## Number of concurrent jobs in canu assembler (recommended to use threads / minThreads)\r +canuConcurrency: 6\r +\r +### Parameters for the program Canu are described in https://canu.readthedocs.io/en/latest/parameter-reference.html\r +## Expected genome size. This parameter is forwarded to Canu assembler.\r +genomeSize: 100m\r +\r +## Minimum length for the PacBio reads used for the assembly. This parameter is forwarded to Canu assembler.\r +# The maximum resolvable repeat regions becomes 2 x minReadLength\r +minReadLen: 6000\r +\r +## Parameter for Canu assembler to adjust to GC-content. Should be 0.15 for high or low GC content.\r +corMaxEvidenceErate: 0.20\r +\r +### Parameters for the program Trimmomatic are described in http://www.usadellab.org/cms/?page=trimmomatic\r +## Paired-end (PE) reads of Illumina raw data. These files are given to the program Trimmomatic.\r +# NOTE! 
Data for two paired libraries is given below.\r +readsPe1:\r + - class: File\r + format: edam:format_1930 # fastq\r + path: /home//Dna/SRR2598966_1.fastq.gz\r + - class: File\r + format: edam:format_1930 # fastq\r + path: /home//Dna/SRR2598967_1.fastq.gz\r +readsPe2:\r + - class: File\r + format: edam:format_1930 # fastq\r + path: /home//Dna/SRR2598966_2.fastq.gz\r + - class: File\r + format: edam:format_1930 # fastq\r + path: /home//Dna/SRR2598967_2.fastq.gz\r +\r +## Phred coding of Illumina data. This parameter is forwarded to Trimmomatic.\r +# NOTE! Each read-pair needs one phred value.\r +phredsPe: ['33','33']\r +\r +## Sliding window and illuminaClip parameters for Trimmomatic\r +slidingWindow:\r + windowSize: 4\r + requiredQuality: 25\r +illuminaClip:\r + adapters:\r + class: File\r + path: \r + seedMismatches: 2\r + palindromeClipThreshold: 30\r + simpleClipThreshold: 10\r + minAdapterLength: 20\r + keepBothReads: true\r +## Further parameters for Trimmomatic\r +# Required phred-quality for leading 5 nucleotides\r +leading: 25\r +# Required phred-quality for trailing 5 nucleotides\r +trailing: 25\r +# Minimum accepted read-length to keep the read after trimming\r +minlen: 40\r +\r +### Parameters for the program bowtie2 are described in http://bowtie-bio.sourceforge.net/bowtie2/manual.shtml\r +## Illumina PE fragment length. Program bowtie2 parameter -X.\r +# NOTE! Each read-pair needs one phred value.\r +maxFragmentLens: [500, 600]\r +# Orientation of pair-end reads e.g. 'fr', 'rf', 'ff': Program bowtie2 parameters --fr, --rf or --ff\r +orientation: 'fr'\r +\r +### Parameters for the program Pilon are described in https://github.com/broadinstitute/pilon/wiki/Requirements-&-Usage\r +# Prefix for the resultant pilon polished assembly. 
Pilon parameter --output\r +polishedAssembly: celePilon\r +# This is set 'true' for an organism with diploid genome: Pilon parameter --diploid\r +diploidOrganism: true\r +# Value 'bases' fixes snps and indels: Pilon parameter --fix\r +fix: bases\r +\r +### Parameters for the program centrifuge are described in http://www.ccb.jhu.edu/software/centrifuge/manual.shtml\r +# Path to the directory, that contains NCBI nt database in nt.?.cf files. Centrifuge parameter -x\r +database:\r + class: Directory\r + path: /home//ntDatabase\r +# Lenght of the identical match in nucleotides required to infer a read as contaminant. Centrifuge parameter --min-hitlen\r +partialMatch: 100\r +# NCBI taxon root identifers for the species considered contaminants: e.g. bacteria (=2), viruses (=10239), fungi (=4751), mammals (=40674), artificial seqs (=81077). Pipeline specific parameter.\r +taxons: [2,10239,4751,40674,81077]\r +\r +## Parameters for the RepeatModeler and RepeatMasker are described in http://www.repeatmasker.org\r +repBaseLibrary:\r + class: File\r + # This is the RepBase file from https://www.girinst.org/repbase. 
RepeatMasker parameter -lib\r + path: /home//RepBaseLibrary/RMRBSeqs.embl\r +# Constant true and false values for repeat masker\r +trueValue: true\r +falseValue: false\r +\r +```\r +### Runtimes and hardware requirements\r +The workflow was tested in Linux environment (CentOS Linux release 7.2.1511) in a server with 24 physical CPUs (48 hyperthreaded CPUs) and 512 GB RAM.\r +\r +| Assembly | Runtime in CPU hours | RAM usage (GB) |\r +| --- | --- | --- |\r +| *Caenorhabditis elegans* | 1537 | 134.1 |\r +| *Drosophila melanogaster* | 6501 | 134.1 |\r +| *Plasmodium falciparum* | 424 | 134.1 |\r +\r +Maximum memory usage of 134.1 GB was claimed by the program Centrifuge for each assembly.\r +\r +### Software tools used in this pipeline\r +* [Dextractor v1.0](https://github.com/thegenemyers/DEXTRACTOR)\r +* [Trimmomatic v0.36](http://www.usadellab.org/cms/?page=trimmomatic)\r +* [Centrifuge v1.0.3](http://www.ccb.jhu.edu/software/centrifuge)\r +* [Canu v1.8](http://canu.readthedocs.io/en/latest/index.html)\r +* [Arrow in SmrtLink v7.0.1](https://www.pacb.com/support/software-downloads)\r +* [Bowtie 2 v2.2.8](http://bowtie-bio.sourceforge.net/bowtie2/index.shtml)\r +* [SAMtools v1.6](http://samtools.sourceforge.net)\r +* [Pilon v1.22](https://github.com/broadinstitute/pilon)\r +* [RepeatMasker v4.0.6](http://www.repeatmasker.org)\r +* [RepeatModeler v1.0.11](http://www.repeatmasker.org)\r +* [RepBase v17.02](https://www.girinst.org/repbase)\r +* [HaploMerger2 build_20160512](https://github.com/mapleforest/HaploMerger2)\r +\r +### Cite\r +If you use the pipeline, please cite:\r +Korhonen, Pasi K., Ross S. Hall, Neil D. Young, and Robin B. Gasser. "Common Workflow Language (CWL)-based software pipeline for de novo genome assembly from long-and short-read data." GigaScience 8, no. 
4 (2019): giz014.\r +\r +""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/BSD-3-Clause" ; + schema1:name "Assemblosis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/334?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 98508 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Alignment, assembly and annotation of RNQSEQ reads using TOPHAT (without filtering out host reads)." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/37?version=1" ; + schema1:isBasedOn "https://usegalaxy.eu/u/ambarishk/w/covid-19-assembly-using-tophat2-and-annotation-alternate" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Assembly using Tophat2 and annotation (alternate)" ; + schema1:sdDatePublished "2024-07-12 13:37:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/37/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 35059 ; + schema1:dateCreated "2020-06-18T23:03:09Z" ; + schema1:dateModified "2023-02-13T14:06:46Z" ; + schema1:description "Alignment, assembly and annotation of RNQSEQ reads using TOPHAT (without filtering out host reads)." 
; + schema1:image ; + schema1:keywords "Galaxy, Tophat2, Assembly, Alignment, RNASEQ, covid-19" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Assembly using Tophat2 and annotation (alternate)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/37?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 207755 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """This Galaxy workflow takes a list of tumor/normal sample pair variants in VCF format and\r +1. annotates them using the ENSEMBL Variant Effect Predictor and custom annotation data\r +2. turns the annotated VCF into a MAF file for import into cBioPortal\r +3. generates human-readable variant- and gene-centric reports\r +\r +The input VCF is expected to encode somatic status, somatic p-value and germline p-value of each variant in varscan somatic format, i.e., via SS, SPV and GPV INFO keys, respectively.""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.607.1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Cancer variant annotation (hg38 VEP-based)" ; + schema1:sdDatePublished "2024-07-12 13:27:15 +0100" ; + schema1:url "https://workflowhub.eu/workflows/607/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 164505 ; + schema1:creator ; + schema1:dateCreated "2023-10-10T16:08:59Z" ; + schema1:dateModified "2023-10-27T13:42:31Z" ; + schema1:description """This Galaxy workflow takes a list of tumor/normal sample pair variants in VCF format and\r +1. annotates them using the ENSEMBL Variant Effect Predictor and custom annotation data\r +2. turns the annotated VCF into a MAF file for import into cBioPortal\r +3. generates human-readable variant- and gene-centric reports\r +\r +The input VCF is expected to encode somatic status, somatic p-value and germline p-value of each variant in varscan somatic format, i.e., via SS, SPV and GPV INFO keys, respectively.""" ; + schema1:keywords "EOSC4Cancer" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Cancer variant annotation (hg38 VEP-based)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://usegalaxy.eu/api/workflows/2424282f793f0f1b/download?format=json-download" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """Parabricks-Genomics-nf is a GPU-enabled pipeline for alignment and germline short variant calling for short read sequencing data. The pipeline utilises [NVIDIA's Clara Parabricks](https://docs.nvidia.com/clara/parabricks/4.2.0/index.html) toolkit to dramatically speed up the execution of best practice bioinformatics tools. Currently, this pipeline is **configured specifically for [NCI's Gadi HPC](https://nci.org.au/our-systems/hpc-systems)**. \r +\r +NVIDIA's Clara Parabricks can deliver a significant speed improvement over traditional CPU-based methods, and is designed to be used only with NVIDIA GPUs. 
This pipeline is suitable for population screening projects as it executes Parabrick's implementations of BWA mem for short read alignment and Google's DeepVariant for short variant calling. Additionally, it uses standard CPU implementations of data quality evaluation tools [FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/) and [MultiQC](https://multiqc.info/) and [DNAnexus' GLnexus](https://academic.oup.com/bioinformatics/article/36/24/5582/6064144) for scalable gVCF merging and joint variant calling. Optionally, [Variant Effect Predictor (VEP)](https://genomebiology.biomedcentral.com/articles/10.1186/s13059-016-0974-4) can be run for variant annotation. \r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.836.1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/Parabricks-Genomics-nf.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Parabricks-Genomics-nf" ; + schema1:sdDatePublished "2024-07-12 13:23:04 +0100" ; + schema1:url "https://workflowhub.eu/workflows/836/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6413 ; + schema1:creator ; + schema1:dateCreated "2024-04-25T23:19:53Z" ; + schema1:dateModified "2024-04-25T23:20:08Z" ; + schema1:description """Parabricks-Genomics-nf is a GPU-enabled pipeline for alignment and germline short variant calling for short read sequencing data. The pipeline utilises [NVIDIA's Clara Parabricks](https://docs.nvidia.com/clara/parabricks/4.2.0/index.html) toolkit to dramatically speed up the execution of best practice bioinformatics tools. Currently, this pipeline is **configured specifically for [NCI's Gadi HPC](https://nci.org.au/our-systems/hpc-systems)**. 
\r +\r +NVIDIA's Clara Parabricks can deliver a significant speed improvement over traditional CPU-based methods, and is designed to be used only with NVIDIA GPUs. This pipeline is suitable for population screening projects as it executes Parabrick's implementations of BWA mem for short read alignment and Google's DeepVariant for short variant calling. Additionally, it uses standard CPU implementations of data quality evaluation tools [FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/) and [MultiQC](https://multiqc.info/) and [DNAnexus' GLnexus](https://academic.oup.com/bioinformatics/article/36/24/5582/6064144) for scalable gVCF merging and joint variant calling. Optionally, [Variant Effect Predictor (VEP)](https://genomebiology.biomedcentral.com/articles/10.1186/s13059-016-0974-4) can be run for variant annotation. \r +""" ; + schema1:keywords "Bioinformatics, INDELs, SNPs, variant calling, Genomics, whole genome sequencing, gpu, Annotation, mapping" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Parabricks-Genomics-nf" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/836?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1027?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/viralrecon" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/viralrecon" ; + schema1:sdDatePublished "2024-07-12 13:18:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1027/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10538 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:20Z" ; + schema1:dateModified "2024-06-11T12:55:20Z" ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1027?version=10" ; + schema1:keywords "Amplicon, ARTIC, Assembly, covid-19, covid19, illumina, long-read-sequencing, Metagenomics, nanopore, ONT, oxford-nanopore, SARS-CoV-2, variant-calling, viral, Virus" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/viralrecon" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1027?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. 
We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.55.5" ; + 
schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_pmx_tutorial" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)" ; + schema1:sdDatePublished "2024-07-12 13:24:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/55/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 49135 ; + schema1:creator , + ; + schema1:dateCreated "2023-07-26T09:23:40Z" ; + schema1:dateModified "2023-07-26T09:26:49Z" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. 
Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/55?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/biobb_wf_pmx_tutorial.ipynb" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A mapping-based pipeline for creating a phylogeny from bacterial whole genome sequences" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/967?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/bactmap" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bactmap" ; + schema1:sdDatePublished "2024-07-12 13:22:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/967/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5953 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:43Z" ; + schema1:dateModified "2024-06-11T12:54:43Z" ; + schema1:description "A mapping-based pipeline for creating a phylogeny from bacterial whole genome sequences" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/967?version=2" ; + schema1:keywords "bacteria, bacterial, bacterial-genome-analysis, Genomics, mapping, phylogeny, tree" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bactmap" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/967?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for the analysis of CRISPR data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/975?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/crisprseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/crisprseq" ; + schema1:sdDatePublished "2024-07-12 13:18:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/975/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12235 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-21T03:02:41Z" ; + schema1:dateModified "2024-06-21T03:02:41Z" ; + schema1:description "Pipeline for the analysis of CRISPR data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/975?version=4" ; + schema1:keywords "crispr, crispr-analysis, crispr-cas, NGS" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/crisprseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/975?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """The image is referenced in the paper "NesSys: a novel method for accurate nuclear segmentation in 3D" published August 2019 in PLOS Biology: https://doi.org/10.1371/journal.pbio.3000388 and can be viewed online in the [Image Data Resource](https://idr.openmicroscopy.org/webclient/?show=image-6001247).\r +\r +This original image was converted into the Zarr format. 
The analysis results produced by the authors of the paper were converted into labels and linked to the Zarr file which was placed into a public S3 repository.\r +\r +In this notebook, the Zarr file is then loaded together with the labels from the S3 storage and analyzed using [StarDist](https://github.com/stardist/stardist). The StarDist analysis produces a segmentation, which is then viewed side-by-side with the original segmentations produced by the authors of the paper obtained via the loaded labels.\r +\r +## Launch\r +This notebook uses the [environment_stardist.yml](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_5/environment_stardist.yml) file.\r +\r +See [Setup](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_5/setup.md).""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.496.1" ; + schema1:license "BSD-2-Clause" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Load ome.zarr Image with labels from a public S3 repository, analyze using StarDist and compare results" ; + schema1:sdDatePublished "2024-07-12 13:33:09 +0100" ; + schema1:url "https://workflowhub.eu/workflows/496/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 689789 ; + schema1:url "https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_5/includes/StarDistNgff.png" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 27375 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-01T10:49:40Z" ; + schema1:dateModified "2023-06-01T10:50:42Z" ; + schema1:description """The image is referenced in the paper "NesSys: a novel method for accurate nuclear segmentation in 3D" published August 2019 in PLOS Biology: https://doi.org/10.1371/journal.pbio.3000388 and can be viewed online in the [Image Data Resource](https://idr.openmicroscopy.org/webclient/?show=image-6001247).\r +\r +This original image was converted into the Zarr format. The analysis results produced by the authors of the paper were converted into labels and linked to the Zarr file which was placed into a public S3 repository.\r +\r +In this notebook, the Zarr file is then loaded together with the labels from the S3 storage and analyzed using [StarDist](https://github.com/stardist/stardist). The StarDist analysis produces a segmentation, which is then viewed side-by-side with the original segmentations produced by the authors of the paper obtained via the loaded labels.\r +\r +## Launch\r +This notebook uses the [environment_stardist.yml](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_5/environment_stardist.yml) file.\r +\r +See [Setup](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_5/setup.md).""" ; + schema1:image ; + schema1:keywords "OME-Zarr, Python, Machine Learning, imaging, S3" ; + schema1:license "https://spdx.org/licenses/BSD-2-Clause" ; + schema1:name "Load ome.zarr Image with labels from a public S3 repository, analyze using StarDist and compare results" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_5/stardist.ipynb" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, related to the Galaxy training tutorial "[Metabarcoding/eDNA through Obitools](https://training.galaxyproject.org/training-material/topics/ecology/tutorials/Obitools-metabarcoding/tutorial.html)" .\r +\r +This workflow allows to analyze DNA metabarcoding / eDNA data produced on Illumina sequencers using the OBITools.""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/655?version=1" ; + schema1:isBasedOn "https://ecology.usegalaxy.eu/u/ylebras/w/obitools-edna-metabarcoding" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Obitools eDNA metabarcoding" ; + schema1:sdDatePublished "2024-07-12 13:26:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/655/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 38160 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-09T12:41:17Z" ; + schema1:dateModified "2023-11-09T21:02:58Z" ; + schema1:description """Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, related to the Galaxy training tutorial "[Metabarcoding/eDNA through Obitools](https://training.galaxyproject.org/training-material/topics/ecology/tutorials/Obitools-metabarcoding/tutorial.html)" .\r +\r +This workflow allows to analyze DNA metabarcoding / eDNA data produced on Illumina sequencers using the OBITools.""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Obitools eDNA metabarcoding" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/655?version=1" ; + schema1:version 1 ; + 
ns1:input . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description "Purges any retained haplotypes using minimap2 to make the input assembly to itself and the reads to the assembly followed by purge-dups to identify and remove any error, repeat or retained haplotype contigs." ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/506?version=1" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/snakemake/3.Purging/purge-dups" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Purge retained haplotypes using Purge-Dups" ; + schema1:sdDatePublished "2024-07-12 13:23:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/506/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1706 ; + schema1:creator ; + schema1:dateCreated "2023-06-16T13:56:33Z" ; + schema1:dateModified "2024-03-16T07:44:46Z" ; + schema1:description "Purges any retained haplotypes using minimap2 to make the input assembly to itself and the reads to the assembly followed by purge-dups to identify and remove any error, repeat or retained haplotype contigs." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/506?version=1" ; + schema1:isPartOf ; + schema1:keywords "Assembly, Genomics, Snakemake, Bioinformatics, Genome assembly" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Purge retained haplotypes using Purge-Dups" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/snakemake/3.Purging/purge-dups/Snakefile" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Take a scRNAseq counts matrix from a single sample, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. Produces a processed AnnData \r +object.""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/514?version=1" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "other-closed" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq Single Sample Processing (counts matrix)" ; + schema1:sdDatePublished "2024-07-12 13:22:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/514/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 105638 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-06-22T06:50:32Z" ; + schema1:dateModified "2023-06-23T06:47:26Z" ; + schema1:description """Take a scRNAseq counts matrix from a single sample, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. Produces a processed AnnData \r +object.""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/514?version=2" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "" ; + schema1:name "scRNAseq Single Sample Processing (counts matrix)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/514?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for analysis of Molecular Pixelation assays" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1010?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/pixelator" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/pixelator" ; + schema1:sdDatePublished "2024-07-12 13:20:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1010/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10413 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:07Z" ; + schema1:dateModified "2024-06-11T12:55:07Z" ; + schema1:description "Pipeline for analysis of Molecular Pixelation assays" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1010?version=5" ; + schema1:keywords "molecular-pixelation, pixelator, pixelgen-technologies, proteins, single-cell, single-cell-omics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/pixelator" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1010?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.11068736.svg)](https://doi.org/10.5281/zenodo.11068736)\r +\r +# JAX NGS Operations Nextflow DSL2 Pipelines\r +\r +This repository contains production bioinformatic analysis pipelines for a variety of bulk 'omics data analysis. 
Please see the [Wiki documentation](https://github.com/TheJacksonLaboratory/cs-nf-pipelines/wiki) associated with this repository for all documentation and available analysis workflows.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.874.1" ; + schema1:isBasedOn "https://github.com/TheJacksonLaboratory/cs-nf-pipelines.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for JAX NGS Operations Nextflow DSL2 Pipelines" ; + schema1:sdDatePublished "2024-07-12 13:23:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/874/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3246 ; + schema1:creator , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-05-03T12:55:48Z" ; + schema1:dateModified "2024-05-03T13:42:53Z" ; + schema1:description """[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.11068736.svg)](https://doi.org/10.5281/zenodo.11068736)\r +\r +# JAX NGS Operations Nextflow DSL2 Pipelines\r +\r +This repository contains production bioinformatic analysis pipelines for a variety of bulk 'omics data analysis. Please see the [Wiki documentation](https://github.com/TheJacksonLaboratory/cs-nf-pipelines/wiki) associated with this repository for all documentation and available analysis workflows.\r +""" ; + schema1:keywords "Bioinformatics, Nextflow, Workflows" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "JAX NGS Operations Nextflow DSL2 Pipelines" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/874?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-protein-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.277.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:39 +0100" ; + schema1:url 
"https://workflowhub.eu/workflows/277/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 59732 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-22T09:46:27Z" ; + schema1:dateModified "2023-01-16T13:57:43Z" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-protein-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/277?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Protein MD Setup 
tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_md_setup/galaxy/biobb_wf_md_setup.ga" ; + schema1:version 2 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1021?version=14" ; + schema1:isBasedOn "https://github.com/nf-core/scrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/scrnaseq" ; + schema1:sdDatePublished "2024-07-12 13:19:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1021/ro_crate?version=14" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12056 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-26T03:02:46Z" ; + schema1:dateModified "2024-06-26T03:02:46Z" ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:keywords "10x-genomics, 10xgenomics, alevin, bustools, Cellranger, kallisto, rna-seq, single-cell, star-solo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/scrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1021?version=14" ; + schema1:version 14 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-07-12 13:22:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4811 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:11Z" ; + schema1:dateModified "2024-06-11T12:55:11Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.128.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (PDBe REST API)" ; + schema1:sdDatePublished "2024-07-12 
13:33:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/128/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 36987 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-04-14T08:15:11Z" ; + schema1:dateModified "2023-04-14T08:17:24Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in 
Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/128?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (PDBe REST API)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_virtual-screening/blob/master/biobb_wf_virtual-screening/notebooks/ebi_api/wf_vs_ebi_api.ipynb" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:datePublished "2024-03-01T16:01:02+00:00" ; + schema1:description "PyCOMPSs implementation of Probabilistic Tsunami Forecast (PTF). PTF explicitly treats data- and forecast-uncertainties, enabling alert level definitions according to any predefined level of conservatism, which is connected to the average balance of missed-vs-false-alarms. Run of the Kos-Bodrum 2017 event test-case with 1000 scenarios, 8h tsunami simulation for each and forecast calculations for partial and full ensembles with focal mechanism and tsunami data updates." 
; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "https://creativecommons.org/licenses/by-nc-nd/4.0/" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "PyCOMPSs Probabilistic Tsunami Forecast (PTF) - Kos-Bodrum 2017 earthquake and tsunami test-case" ; + schema1:publisher , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 1685175 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . 
+ + a schema1:MediaObject ; + schema1:contentSize 243467 ; + schema1:dateModified "2023-10-19T13:01:10" ; + schema1:name "POIs.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 42945012 ; + schema1:dateModified "2023-10-19T13:01:10" ; + schema1:name "regional_domain.grd" ; + schema1:sdDatePublished "2024-03-01T16:01:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 37218 ; + schema1:dateModified "2023-10-19T13:01:10" ; + schema1:name "regional_domain_POIs_depth.dat" ; + schema1:sdDatePublished "2024-03-01T16:01:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 5895 ; + schema1:dateModified "2023-10-19T13:13:04" ; + schema1:name "Step1_config_template_mod.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2126 ; + schema1:dateModified "2023-10-19T12:59:46" ; + schema1:name "Step2_parfile_tmp.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 233 ; + schema1:dateModified "2024-02-29T09:18:35" ; + schema1:name "parfile_mod.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:02+00:00" . 
+ + a schema1:Dataset ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:name "Run" ; + schema1:sdDatePublished "2024-03-01T16:01:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name ".gitkeep" ; + schema1:sdDatePublished "2024-03-01T16:01:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103564 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "Step1_scenario_list_BS.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2061724 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "ptf_out.hdf5" ; + schema1:sdDatePublished "2024-03-01T16:01:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "Step2_BS_failed.log" ; + schema1:sdDatePublished "2024-03-01T16:01:14+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2086816 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "ptf_out.hdf5" ; + schema1:sdDatePublished "2024-03-01T16:01:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 6539 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "ptf_main.config" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 5142 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "sim_files.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 40 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulBS_lb_01level_01proc.bin" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS1.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS10.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS100.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS101.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS102.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS103.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS104.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS105.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS106.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS107.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS108.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS109.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS11.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS110.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS111.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS112.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS113.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS114.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS115.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS116.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS117.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS118.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS119.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS12.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS120.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS121.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS122.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS123.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS124.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS125.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS126.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS127.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS128.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS129.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS13.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS130.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS131.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS132.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS133.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS134.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS135.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS136.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS137.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS138.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS139.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS14.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS140.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS141.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS142.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS143.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS144.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS145.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS146.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS147.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS148.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS149.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS15.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS150.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS151.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS152.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS153.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS154.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS155.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS156.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS157.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS158.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS159.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS16.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS160.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS161.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS162.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS163.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS164.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS165.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS166.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS167.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS168.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS169.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS17.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS170.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS171.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS172.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS173.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS174.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS175.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS176.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS177.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS178.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS179.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS18.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS180.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS181.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS182.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS183.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS184.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS185.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS186.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS187.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS188.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS189.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS19.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS190.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS191.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS192.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS193.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS194.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS195.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS196.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS197.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS198.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS199.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS2.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS20.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS200.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS201.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS202.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS203.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS204.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS205.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS206.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS207.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS208.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS209.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS21.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS210.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS211.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS212.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS213.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS214.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS215.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS216.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS217.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS218.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS219.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS22.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS220.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS221.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS222.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS223.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS224.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS225.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS226.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS227.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS228.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS229.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS23.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS230.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS231.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS232.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS233.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS234.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS235.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS236.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS237.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS238.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS239.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS24.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS240.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS241.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS242.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS243.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS244.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS245.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS246.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS247.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS248.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS249.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS25.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS250.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS26.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS27.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS28.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS29.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS3.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS30.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS31.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS32.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS33.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS34.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS35.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS36.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS37.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS38.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS39.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS4.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS40.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS41.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS42.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS43.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS44.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS45.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS46.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS47.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS48.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS49.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS5.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS50.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS51.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS52.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS53.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS54.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS55.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS56.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS57.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS58.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS59.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS6.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS60.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS61.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS62.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS63.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS64.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS65.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS66.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS67.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS68.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS69.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS7.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS70.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS71.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS72.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS73.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS74.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS75.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS76.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS77.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS78.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS79.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS8.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS80.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS81.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS82.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS83.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS84.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS85.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS86.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS87.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS88.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS89.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS9.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS90.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS91.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS92.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS93.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS94.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS95.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS96.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS97.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS98.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS99.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-07-12 13:18:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3877 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:01Z" ; + schema1:dateModified "2024-06-11T12:55:01Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/245?version=1" ; + schema1:isBasedOn "https://sdr.nhm.ac.uk/" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for De novo digitisation" ; + schema1:sdDatePublished "2024-07-12 13:36:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/245/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 18313 ; + schema1:dateCreated "2021-11-26T14:46:05Z" ; + schema1:dateModified "2023-01-16T13:55:15Z" ; + schema1:description "" ; + schema1:keywords "Segmentation" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "De novo digitisation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/245?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 129436 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . 
+ + a schema1:Dataset ; + schema1:dateModified "2023-12-04T08:32:38" ; + schema1:hasPart , + ; + schema1:name "data-set" ; + schema1:sdDatePublished "2023-12-04T14:19:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777398 ; + schema1:dateModified "2020-09-21T10:34:27" ; + schema1:name "file_long.txt" ; + schema1:sdDatePublished "2023-12-04T14:19:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4872 ; + schema1:dateModified "2020-09-21T10:34:27" ; + schema1:name "file_small.txt" ; + schema1:sdDatePublished "2023-12-04T14:19:45+00:00" . + + a schema1:Dataset ; + schema1:datePublished "2023-10-17T15:35:20.967645" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.12" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/991?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/hlatyping" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hlatyping" ; + schema1:sdDatePublished "2024-07-12 13:18:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/991/ro_crate?version=10" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8220 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:57Z" ; + schema1:dateModified "2024-06-11T12:54:57Z" ; + schema1:description "Precision HLA typing from next-generation sequencing data." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/991?version=9" ; + schema1:keywords "DNA, hla, hla-typing, immunology, optitype, personalized-medicine, rna" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hlatyping" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/991?version=10" ; + schema1:version 10 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/972?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/circdna" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/circdna" ; + schema1:sdDatePublished "2024-07-12 13:22:04 +0100" ; + schema1:url "https://workflowhub.eu/workflows/972/ro_crate?version=8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11216 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:45Z" ; + schema1:dateModified "2024-06-11T12:54:45Z" ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/972?version=7" ; + schema1:keywords "ampliconarchitect, ampliconsuite, circle-seq, circular, DNA, eccdna, ecdna, extrachromosomal-circular-dna, Genomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/circdna" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/972?version=8" ; + schema1:version 8 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + 
schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.127.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (Cluster90)" ; + schema1:sdDatePublished "2024-07-12 13:34:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/127/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 39576 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-09-15T11:15:47Z" ; + schema1:dateModified "2023-01-16T13:50:18Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) 
for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/127?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (Cluster90)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_virtual-screening/master/biobb_wf_virtual-screening/notebooks/clusterBindingSite/wf_vs_clusterBindingSite.ipynb" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/989?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/hic" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hic" ; + schema1:sdDatePublished "2024-07-12 13:21:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/989/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7211 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:54Z" ; + schema1:dateModified "2024-06-11T12:54:54Z" ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/989?version=7" ; + schema1:keywords "chromosome-conformation-capture, Hi-C" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hic" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/989?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "BioTranslator performs sequentially pathway analysis and gene prioritization: A specific operator is executed for each task to translate the input gene set into semantic terms and pinpoint the pivotal-role genes on the derived semantic network. The output consists of the set of statistically significant semantic terms and the associated hub genes (the gene signature), prioritized according to their involvement in the underlying semantic topology." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/189?version=1" ; + schema1:isBasedOn "http://www.biotranslator.gr:8080/u/thodk/w/biotranslator-workflow" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for BioTranslator Workflow" ; + schema1:sdDatePublished "2024-07-12 13:36:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/189/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3955 ; + schema1:dateCreated "2021-09-15T17:27:55Z" ; + schema1:dateModified "2021-09-15T17:28:33Z" ; + schema1:description "BioTranslator performs sequentially pathway analysis and gene prioritization: A specific operator is executed for each task to translate the input gene set into semantic terms and pinpoint the pivotal-role genes on the derived semantic network. The output consists of the set of statistically significant semantic terms and the associated hub genes (the gene signature), prioritized according to their involvement in the underlying semantic topology." ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/189?version=2" ; + schema1:keywords "Semantic Network Analysis, Gene Prioritization, Pathway Analysis, Biomedical Ontologies, Semantic Interpretation" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "BioTranslator Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/189?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 44291 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description "Refining Genome Annotations with Apollo" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/749?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Refining Genome Annotations with Apollo (prokaryotes)" ; + schema1:sdDatePublished "2024-07-12 13:24:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/749/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 18150 ; + schema1:creator ; + schema1:dateCreated "2024-02-14T15:59:09Z" ; + schema1:dateModified "2024-02-15T13:44:41Z" ; + schema1:description "Refining Genome Annotations with Apollo" ; + schema1:isPartOf ; + schema1:keywords "genome-annotation" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Refining Genome Annotations with Apollo (prokaryotes)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/749?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + ; + ns1:output , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Workflow converts one or multiple bam/cram files to fastq format" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/968?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/bamtofastq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bamtofastq" ; + schema1:sdDatePublished "2024-07-12 13:22:09 +0100" ; + schema1:url "https://workflowhub.eu/workflows/968/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10293 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:44Z" ; + schema1:dateModified "2024-06-11T12:54:44Z" ; + schema1:description "Workflow converts one or multiple bam/cram files to fastq format" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/968?version=5" ; + schema1:keywords "bamtofastq, Conversion, cramtofastq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bamtofastq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/968?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """## Description\r +\r +The workflow takes an input file with Cancer Driver Genes predictions (i.e. the results provided by a participant), computes a set of metrics, and compares them against the data currently stored in OpenEBench within the TCGA community. Two assessment metrics are provided for that predicitions. Also, some plots (which are optional) that allow to visualize the performance of the tool are generated. The workflow consists in three standard steps, defined by OpenEBench. The tools needed to run these steps must be in one or more Docker images, generated from [Docker containers](https://github.com/inab/TCGA_benchmarking_dockers ). Separated instances are spawned from these images for each step:\r +1. **Validation**: the input file format is checked and, if required, the content of the file is validated (e.g check whether the submitted gene IDs exist)\r +2. 
**Metrics Computation**: the predictions are compared with the 'Gold Standards' provided by the community, which results in two performance metrics - precision (Positive Predictive Value) and recall(True Positive Rate).\r +3. **Results Consolidation**: the benchmark itself is performed by merging the tool metrics with the rest of TCGA data. The results are provided in JSON format and SVG format (scatter plot).\r +\r +![alt text](https://raw.githubusercontent.com/inab/TCGA_benchmarking_workflow/1.0.6/workflow.jpg)\r +\r +\r +## Data\r +\r +* [TCGA_sample_data](./TCGA_sample_data) folder contains all the reference data required by the steps. It is derived from the manuscript:\r +[Comprehensive Characterization of Cancer Driver Genes and Mutations](https://www.cell.com/cell/fulltext/S0092-8674%2818%2930237-X?code=cell-site), Bailey et al, 2018, Cell [![doi:10.1016/j.cell.2018.02.060](https://img.shields.io/badge/doi-10.1016%2Fj.cell.2018.02.060-green.svg)](https://doi.org/10.1016/j.cell.2018.02.060) \r +* [TCGA_sample_out](./TCGA_sample_out) folder contains an example output for a worklow run, with two cancer types / challenges selected (ACC, BRCA). Results obtained from the default execution should be similar to those ones available in this directory. Results found in [TCGA_sample_out/results](./TCGA_sample_out/results) can be visualized in the browser using [`benchmarking_workflows_results_visualizer` javascript library](https://github.com/inab/benchmarking_workflows_results_visualizer).\r +\r +\r +## Usage\r +In order to use the workflow you need to:\r +* Install [Nextflow](https://www.nextflow.io/), which depends on Java virtual machine (>=8 , <15 ). 
You can automate their installation for local testing using [run_local_nextflow.bash](run_local_nextflow.bash).\r +* Clone [TCGA benchmarking Docker definitions repository](https://github.com/inab/TCGA_benchmarking_dockers) from tag 1.0.3 in a separate directory, and build locally the three Docker images found in it, running the `build.sh 1.0.3` script within that repo.\r +* Run it just using either *`nextflow run main.nf -profile docker`* or *`./run_local_nextflow.bash run main.nf -profile docker`*. Arguments specifications:\r +\r +```\r +Usage:\r +Run the pipeline with default parameters:\r +nextflow run main.nf -profile docker\r +\r +Run with user parameters:\r +nextflow run main.nf -profile docker --predictionsFile {driver.genes.file} --public_ref_dir {validation.reference.file} --participant_name {tool.name} --metrics_ref_dir {gold.standards.dir} --cancer_types {analyzed.cancer.types} --assess_dir {benchmark.data.dir} --results_dir {output.dir}\r +\r +Mandatory arguments:\r + --input List of cancer genes prediction\r + --community_id Name or OEB permanent ID for the benchmarking community\r + --public_ref_dir Directory with list of cancer genes used to validate the predictions\r + --participant_id Name of the tool used for prediction\r + --goldstandard_dir Dir that contains metrics reference datasets for all cancer types\r + --event_id List of types of cancer selected by the user, separated by spaces\r + --assess_dir Dir where the data for the benchmark are stored\r +\r +Other options:\r + --validation_result The output file where the results from validation step will be saved\r + --assessment_results The output file where the results from the computed metrics step will be saved\r + --aggregation_results The output file where the consolidation of the benchmark will be saved\r + --statistics_results The output directory with nextflow statistics\r + --outdir The output directory where the consolidation of the benchmark will be saved\r + --statsdir The output 
directory with nextflow statistics\r + --data_model_export_dir The output dir where json file with benchmarking data model contents will be saved\r + --otherdir The output directory where custom results will be saved (no directory inside)\r +Flags:\r + --help Display this message\r +```\r +\r +Default input parameters and Docker images to use for each step can be specified in the [config](./nextflow.config) file\r +**NOTE: In order to make your workflow compatible with the [OpenEBench VRE Nextflow Executor](https://github.com/inab/vre-process_nextflow-executor), please make sure to use the same parameter names in your workflow.**""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/244?version=2" ; + schema1:isBasedOn "https://github.com/inab/TCGA_benchmarking_workflow/tree/1.0.8" ; + schema1:license "LGPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for OpenEBench assessment workflow" ; + schema1:sdDatePublished "2024-07-12 13:36:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/244/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5597 ; + schema1:creator , + , + ; + schema1:dateCreated "2021-11-24T14:03:22Z" ; + schema1:dateModified "2021-11-24T21:57:06Z" ; + schema1:description """## Description\r +\r +The workflow takes an input file with Cancer Driver Genes predictions (i.e. the results provided by a participant), computes a set of metrics, and compares them against the data currently stored in OpenEBench within the TCGA community. Two assessment metrics are provided for that predicitions. Also, some plots (which are optional) that allow to visualize the performance of the tool are generated. The workflow consists in three standard steps, defined by OpenEBench. 
The tools needed to run these steps must be in one or more Docker images, generated from [Docker containers](https://github.com/inab/TCGA_benchmarking_dockers ). Separated instances are spawned from these images for each step:\r +1. **Validation**: the input file format is checked and, if required, the content of the file is validated (e.g check whether the submitted gene IDs exist)\r +2. **Metrics Computation**: the predictions are compared with the 'Gold Standards' provided by the community, which results in two performance metrics - precision (Positive Predictive Value) and recall(True Positive Rate).\r +3. **Results Consolidation**: the benchmark itself is performed by merging the tool metrics with the rest of TCGA data. The results are provided in JSON format and SVG format (scatter plot).\r +\r +![alt text](https://raw.githubusercontent.com/inab/TCGA_benchmarking_workflow/1.0.6/workflow.jpg)\r +\r +\r +## Data\r +\r +* [TCGA_sample_data](./TCGA_sample_data) folder contains all the reference data required by the steps. It is derived from the manuscript:\r +[Comprehensive Characterization of Cancer Driver Genes and Mutations](https://www.cell.com/cell/fulltext/S0092-8674%2818%2930237-X?code=cell-site), Bailey et al, 2018, Cell [![doi:10.1016/j.cell.2018.02.060](https://img.shields.io/badge/doi-10.1016%2Fj.cell.2018.02.060-green.svg)](https://doi.org/10.1016/j.cell.2018.02.060) \r +* [TCGA_sample_out](./TCGA_sample_out) folder contains an example output for a worklow run, with two cancer types / challenges selected (ACC, BRCA). Results obtained from the default execution should be similar to those ones available in this directory. 
Results found in [TCGA_sample_out/results](./TCGA_sample_out/results) can be visualized in the browser using [`benchmarking_workflows_results_visualizer` javascript library](https://github.com/inab/benchmarking_workflows_results_visualizer).\r +\r +\r +## Usage\r +In order to use the workflow you need to:\r +* Install [Nextflow](https://www.nextflow.io/), which depends on Java virtual machine (>=8 , <15 ). You can automate their installation for local testing using [run_local_nextflow.bash](run_local_nextflow.bash).\r +* Clone [TCGA benchmarking Docker definitions repository](https://github.com/inab/TCGA_benchmarking_dockers) from tag 1.0.3 in a separate directory, and build locally the three Docker images found in it, running the `build.sh 1.0.3` script within that repo.\r +* Run it just using either *`nextflow run main.nf -profile docker`* or *`./run_local_nextflow.bash run main.nf -profile docker`*. Arguments specifications:\r +\r +```\r +Usage:\r +Run the pipeline with default parameters:\r +nextflow run main.nf -profile docker\r +\r +Run with user parameters:\r +nextflow run main.nf -profile docker --predictionsFile {driver.genes.file} --public_ref_dir {validation.reference.file} --participant_name {tool.name} --metrics_ref_dir {gold.standards.dir} --cancer_types {analyzed.cancer.types} --assess_dir {benchmark.data.dir} --results_dir {output.dir}\r +\r +Mandatory arguments:\r + --input List of cancer genes prediction\r + --community_id Name or OEB permanent ID for the benchmarking community\r + --public_ref_dir Directory with list of cancer genes used to validate the predictions\r + --participant_id Name of the tool used for prediction\r + --goldstandard_dir Dir that contains metrics reference datasets for all cancer types\r + --event_id List of types of cancer selected by the user, separated by spaces\r + --assess_dir Dir where the data for the benchmark are stored\r +\r +Other options:\r + --validation_result The output file where the results from validation 
step will be saved\r + --assessment_results The output file where the results from the computed metrics step will be saved\r + --aggregation_results The output file where the consolidation of the benchmark will be saved\r + --statistics_results The output directory with nextflow statistics\r + --outdir The output directory where the consolidation of the benchmark will be saved\r + --statsdir The output directory with nextflow statistics\r + --data_model_export_dir The output dir where json file with benchmarking data model contents will be saved\r + --otherdir The output directory where custom results will be saved (no directory inside)\r +Flags:\r + --help Display this message\r +```\r +\r +Default input parameters and Docker images to use for each step can be specified in the [config](./nextflow.config) file\r +**NOTE: In order to make your workflow compatible with the [OpenEBench VRE Nextflow Executor](https://github.com/inab/vre-process_nextflow-executor), please make sure to use the same parameter names in your workflow.**""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/244?version=3" ; + schema1:keywords "tcga, openebench, benchmarking" ; + schema1:license "https://spdx.org/licenses/LGPL-3.0" ; + schema1:name "OpenEBench assessment workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/244?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1027?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/viralrecon" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/viralrecon" ; + schema1:sdDatePublished "2024-07-12 13:18:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1027/ro_crate?version=9" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10179 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:21Z" ; + schema1:dateModified "2024-06-11T12:55:21Z" ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1027?version=10" ; + schema1:keywords "Amplicon, ARTIC, Assembly, covid-19, covid19, illumina, long-read-sequencing, Metagenomics, nanopore, ONT, oxford-nanopore, SARS-CoV-2, variant-calling, viral, Virus" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/viralrecon" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1027?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-23T13:33:07.961947" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Scaffolding-HiC-VGP8/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-07-12 13:18:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9644 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:10Z" ; + schema1:dateModified "2024-06-11T12:55:10Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=15" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-07-12 13:20:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=15" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8964 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:04Z" ; + schema1:dateModified "2024-06-11T12:55:04Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=15" ; + schema1:version 15 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """This is a genomics pipeline to do a single germline sample variant-calling, adapted from GATK Best Practice Workflow.\r +\r +This workflow is a reference pipeline for using the Janis Python framework (pipelines assistant).\r +- Alignment: bwa-mem\r +- Variant-Calling: GATK HaplotypeCaller\r +- Outputs the final variants in the VCF format.\r +\r +**Resources**\r +\r +This pipeline has been tested using the HG38 reference set, available on Google Cloud Storage through:\r +\r +- https://console.cloud.google.com/storage/browser/genomics-public-data/references/hg38/v0/\r +\r +This pipeline expects the assembly references to be as they appear in that storage (".fai", ".amb", ".ann", ".bwt", ".pac", ".sa", "^.dict").\r +The known sites (snps_dbsnp, snps_1000gp, known_indels, mills_indels) should be gzipped and tabix indexed.\r +\r +\r +Infrastructure_deployment_metadata: Spartan (Unimelb)""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/236?version=1" ; + schema1:isBasedOn 
"https://github.com/PMCC-BioinformaticsCore/janis-pipelines" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Janis Germline Variant-Calling Workflow (GATK)" ; + schema1:sdDatePublished "2024-07-12 13:36:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/236/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9869 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7806 ; + schema1:creator , + ; + schema1:dateCreated "2021-11-12T02:30:06Z" ; + schema1:dateModified "2023-01-16T13:54:51Z" ; + schema1:description """This is a genomics pipeline to do a single germline sample variant-calling, adapted from GATK Best Practice Workflow.\r +\r +This workflow is a reference pipeline for using the Janis Python framework (pipelines assistant).\r +- Alignment: bwa-mem\r +- Variant-Calling: GATK HaplotypeCaller\r +- Outputs the final variants in the VCF format.\r +\r +**Resources**\r +\r +This pipeline has been tested using the HG38 reference set, available on Google Cloud Storage through:\r +\r +- https://console.cloud.google.com/storage/browser/genomics-public-data/references/hg38/v0/\r +\r +This pipeline expects the assembly references to be as they appear in that storage (".fai", ".amb", ".ann", ".bwt", ".pac", ".sa", "^.dict").\r +The known sites (snps_dbsnp, snps_1000gp, known_indels, mills_indels) should be gzipped and tabix indexed.\r +\r +\r +Infrastructure_deployment_metadata: Spartan (Unimelb)""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Janis Germline Variant-Calling Workflow (GATK)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url 
"https://workflowhub.eu/workflows/236?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 37814 . + + a schema1:Dataset ; + schema1:datePublished "2023-09-15T15:02:21.110980" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "rnaseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.11" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:description "This workflow uses Illumina and Oxford Nanopore reads that were pre-processed to remove human-derived sequences. Two assembly tools are used: spades and unicycler. In addition to assemblies (actual sequences) the two tools produce assembly graphs that can be used for visualization of assembly with bandage. More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/5?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genomics - Assembly of the genome sequence" ; + schema1:sdDatePublished "2024-07-12 13:37:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/5/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 4086 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15846 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T12:32:21Z" ; + schema1:dateModified "2023-01-16T13:39:53Z" ; + schema1:description "This workflow uses Illumina and Oxford Nanopore reads that were pre-processed to remove human-derived sequences. Two assembly tools are used: spades and unicycler. In addition to assemblies (actual sequences) the two tools produce assembly graphs that can be used for visualization of assembly with bandage. More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Genomics - Assembly of the genome sequence" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/5?version=1" ; + schema1:version 1 ; + ns1:input , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 7739 . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-11-03T22:43:56+00:00" ; + schema1:description "Autosubmit mHM test domains" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:publisher . + + a ; + schema1:additionalType "Text" ; + schema1:name "CONFIG.AUTOSUBMIT_VERSION" ; + schema1:valueRequired "True" . 
+ + a ; + schema1:additionalType "Integer" ; + schema1:name "CONFIG.MAXWAITINGJOBS" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Integer" ; + schema1:name "CONFIG.TOTALJOBS" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Text" ; + schema1:name "DEFAULT.CUSTOM_CONFIG" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Text" ; + schema1:name "DEFAULT.EXPID" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Text" ; + schema1:name "DEFAULT.HPCARCH" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Text" ; + schema1:name "EXPERIMENT.CALENDAR" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Integer" ; + schema1:name "EXPERIMENT.CHUNKSIZE" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Text" ; + schema1:name "EXPERIMENT.CHUNKSIZEUNIT" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Integer" ; + schema1:name "EXPERIMENT.DATELIST" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Text" ; + schema1:name "EXPERIMENT.MEMBERS" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Integer" ; + schema1:name "EXPERIMENT.NUMCHUNKS" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Boolean" ; + schema1:name "GIT.FETCH_SINGLE_BRANCH" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Text" ; + schema1:name "GIT.PROJECT_BRANCH" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Text" ; + schema1:name "GIT.PROJECT_COMMIT" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Text" ; + schema1:name "GIT.PROJECT_ORIGIN" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Text" ; + schema1:name "GIT.PROJECT_SUBMODULES" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Text" ; + schema1:name "MHM.BRANCH_NAME" ; + schema1:valueRequired "True" . 
+ + a ; + schema1:additionalType "Integer" ; + schema1:name "MHM.DOMAIN" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Integer" ; + schema1:name "MHM.EVAL_PERIOD_DURATION_YEARS" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Text" ; + schema1:name "MHM.SINGULARITY_CONTAINER" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Text" ; + schema1:name "PROJECT.PROJECT_DESTINATION" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Text" ; + schema1:name "PROJECT.PROJECT_TYPE" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "File" ; + schema1:name "plot.gif" ; + schema1:valueRequired "True" . + + a schema1:MediaObject ; + schema1:contentSize 1579151 ; + schema1:dateModified "2023-11-03T22:41:31" ; + schema1:encodingFormat "image/gif" ; + schema1:exampleOfWork ; + schema1:name "plot.gif" ; + schema1:sdDatePublished "2023-11-03T22:43:57+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Basic processing of a QC-filtered Anndata Object. UMAP, clustering e.t.c " ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/468?version=1" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq_QCtoBasicProcessing" ; + schema1:sdDatePublished "2024-07-12 13:22:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/468/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 33517 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-05-05T06:50:24Z" ; + schema1:dateModified "2023-06-22T06:20:10Z" ; + schema1:description "Basic processing of a QC-filtered Anndata Object. 
UMAP, clustering e.t.c " ; + schema1:isBasedOn "https://workflowhub.eu/workflows/468?version=2" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "scRNAseq_QCtoBasicProcessing" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/468?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=24" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-07-12 13:21:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=24" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13434 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:51Z" ; + schema1:dateModified "2024-06-11T12:54:51Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=24" ; + schema1:version 24 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-07-12 13:19:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9465 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:15Z" ; + schema1:dateModified "2024-06-11T12:55:15Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Takes fastqs and reference data, to produce a single cell counts matrix into and save in annData format - adding a column called sample with the sample name. 
" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/513?version=1" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "other-closed" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq: Count and Load with starSOLO" ; + schema1:sdDatePublished "2024-07-12 13:22:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/513/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 27880 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-06-22T06:47:36Z" ; + schema1:dateModified "2023-11-09T03:49:56Z" ; + schema1:description "Takes fastqs and reference data, to produce a single cell counts matrix into and save in annData format - adding a column called sample with the sample name. " ; + schema1:isBasedOn "https://workflowhub.eu/workflows/513?version=2" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "" ; + schema1:name "scRNAseq: Count and Load with starSOLO" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/513?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling" ; + schema1:mainEntity ; + schema1:name "COVID-19-PE-ARTIC-ILLUMINA (v0.2)" ; + schema1:sdDatePublished "2021-04-09 03:00:39 +0100" ; + schema1:softwareVersion "v0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 59379 ; + schema1:name "COVID-19-PE-ARTIC-ILLUMINA" ; + schema1:programmingLanguage . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-07-09T15:26:27.773435" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-only-VGP3/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/986?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/fetchngs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fetchngs" ; + schema1:sdDatePublished "2024-07-12 13:21:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/986/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6338 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:52Z" ; + schema1:dateModified "2024-06-11T12:54:52Z" ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/986?version=13" ; + schema1:keywords "ddbj, download, ena, FASTQ, geo, sra, synapse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fetchngs" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/986?version=3" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. 
Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. 
\r +- At the **biological level**, it is important to link observed **conformational ensembles**, to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDKe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.822.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_flexdyn/docker" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Docker Protein conformational ensembles generation" ; + schema1:sdDatePublished "2024-07-12 13:23:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/822/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 746 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-22T10:40:29Z" ; + schema1:dateModified "2024-05-22T13:46:38Z" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. 
Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. 
\r +- At the **biological level**, it is important to link observed **conformational ensembles** to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDBe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic considerations** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster, more approximate methods such as **Elastic Network Models** and **Normal Mode Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** has been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Docker Protein conformational ensembles generation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_flexdyn/docker/Dockerfile" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """The notebook shows how to load an IDR image with labels.\r +\r +The image is referenced in the paper "NesSys: a novel method for accurate nuclear segmentation in 3D" published August 2019 in PLOS Biology: https://doi.org/10.1371/journal.pbio.3000388 and can be viewed online in the Image Data Resource.\r +\r +In this notebook, the image is loaded together with the labels and analyzed using StarDist. 
The StarDist analysis produces a segmentation, which is then viewed side-by-side with the original segmentations produced by the authors of the paper obtained via the loaded labels.\r +\r +## Launch\r +This notebook uses the [environment_stardist.yml](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/environment_stardist.yml) file.\r +\r +See [Setup](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/setup.md).""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.493.1" ; + schema1:isBasedOn "https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/stardist.ipynb" ; + schema1:license "BSD-2-Clause" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Validate a tool against IDR data: Load Image with labels from IDR, re-analyze using StarDist" ; + schema1:sdDatePublished "2024-07-12 13:33:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/493/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 683190 ; + schema1:url "https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/includes/StarDistIDR.png" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 30978 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-01T10:05:54Z" ; + schema1:dateModified "2023-06-01T10:07:09Z" ; + schema1:description """The notebook shows how to load an IDR image with labels.\r +\r +The image is referenced in the paper "NesSys: a novel method for accurate nuclear segmentation in 3D" published August 2019 in PLOS Biology: https://doi.org/10.1371/journal.pbio.3000388 and can be viewed online in the Image Data Resource.\r +\r +In this notebook, the image is loaded together with the labels and analyzed using StarDist. 
The StarDist analysis produces a segmentation, which is then viewed side-by-side with the original segmentations produced by the authors of the paper obtained via the loaded labels.\r +\r +## Launch\r +This notebook uses the [environment_stardist.yml](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/environment_stardist.yml) file.\r +\r +See [Setup](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/setup.md).""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/BSD-2-Clause" ; + schema1:name "Validate a tool against IDR data: Load Image with labels from IDR, re-analyze using StarDist" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/stardist.ipynb" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """The workflows in this collection are from the '16S Microbial Analysis with mothur' tutorial for analysis of 16S data (Saskia Hiltemann, Bérénice Batut, Dave Clements), adapted for pipeline use on galaxy australia (Ahmed Mehdi). The workflows developed in galaxy use mothur software package developed by Schloss et al https://pubmed.ncbi.nlm.nih.gov/19801464/. 
\r +\r +Please also refer to the 16S tutorials available at Galaxy https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html and [https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop/tutorial.html\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/652?version=1" ; + schema1:isBasedOn "https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Workflow 6: Alpha Diversity [16S Microbial Analysis With Mothur]" ; + schema1:sdDatePublished "2024-07-12 13:26:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/652/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8494 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2023-11-09T05:24:50Z" ; + schema1:dateModified "2023-11-09T05:24:50Z" ; + schema1:description """The workflows in this collection are from the '16S Microbial Analysis with mothur' tutorial for analysis of 16S data (Saskia Hiltemann, Bérénice Batut, Dave Clements), adapted for pipeline use on galaxy australia (Ahmed Mehdi). The workflows developed in galaxy use mothur software package developed by Schloss et al https://pubmed.ncbi.nlm.nih.gov/19801464/. 
\r +\r +Please also refer to the 16S tutorials available at Galaxy https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html and [https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop/tutorial.html\r +""" ; + schema1:isPartOf ; + schema1:keywords "Metagenomics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Workflow 6: Alpha Diversity [16S Microbial Analysis With Mothur]" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/652?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Summary\r +\r +This tutorial aims to illustrate the process of setting up a simulation system containing a protein in complex with a ligand, step by step, using the BioExcel Building Blocks library (biobb). The particular example used is the T4 lysozyme L99A/M102Q protein (PDB code 3HTB), in complex with the 2-propylphenol small molecule (3-letter Code JZ4).\r +\r +Workflow engine is a jupyter notebook. It can be run in binder, following the link given, or locally. Auxiliar libraries used are: nb\\_conda_kernels, nglview, ipywidgets, os, plotly, and simpletraj. Environment can be setup using the included environment.yml file.\r +\r +\r +# Parameters\r +\r +## Inputs\r +\r +Parameters needed to configure the workflow:\r +\r +* **pdbCode**: PDB code of the protein-ligand complex structure (e.g. 3HTB)\r +\r +* **ligandCode**: Small molecule 3-letter code for the ligand structure (e.g. 
JZ4)\r +\r +* **mol\\_charge**: Charge of the small molecule, needed to add hydrogen atoms.\r +\r +## Outputs\r +\r +Output files generated (named according to the input parameters given above):\r +\r +* **output\\_md\\_gro**: final structure of the MD setup protocol\r +\r +* **output\\_md\\_trr**: final trajectory of the MD setup protocol\r +\r +* **output\\_md\\_cpt**: final checkpoint file\r +\r +* **output\\_gppmd\\_tpr**: final tpr file\r +\r +* **output\\_genion\\_top\\_zip**: final topology of the MD system""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.56.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_protein-complex_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb) (jupyter notebook)" ; + schema1:sdDatePublished "2024-07-12 13:34:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/56/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 272452 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 85281 ; + schema1:creator , + ; + schema1:dateCreated "2020-09-22T15:30:25Z" ; + schema1:dateModified "2021-05-07T13:28:09Z" ; + schema1:description """# Summary\r +\r +This tutorial aims to illustrate the process of setting up a simulation system containing a protein in complex with a ligand, step by step, using the BioExcel Building Blocks library (biobb). The particular example used is the T4 lysozyme L99A/M102Q protein (PDB code 3HTB), in complex with the 2-propylphenol small molecule (3-letter Code JZ4).\r +\r +Workflow engine is a jupyter notebook. It can be run in binder, following the link given, or locally. 
Auxiliar libraries used are: nb\\_conda_kernels, nglview, ipywidgets, os, plotly, and simpletraj. Environment can be setup using the included environment.yml file.\r +\r +\r +# Parameters\r +\r +## Inputs\r +\r +Parameters needed to configure the workflow:\r +\r +* **pdbCode**: PDB code of the protein-ligand complex structure (e.g. 3HTB)\r +\r +* **ligandCode**: Small molecule 3-letter code for the ligand structure (e.g. JZ4)\r +\r +* **mol\\_charge**: Charge of the small molecule, needed to add hydrogen atoms.\r +\r +## Outputs\r +\r +Output files generated (named according to the input parameters given above):\r +\r +* **output\\_md\\_gro**: final structure of the MD setup protocol\r +\r +* **output\\_md\\_trr**: final trajectory of the MD setup protocol\r +\r +* **output\\_md\\_cpt**: final checkpoint file\r +\r +* **output\\_gppmd\\_tpr**: final tpr file\r +\r +* **output\\_genion\\_top\\_zip**: final topology of the MD system""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/56?version=6" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb) (jupyter notebook)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/56?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2022-06-13T14:36:29.894406" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/VGP-meryldb-creation-trio" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "VGP-meryldb-creation-trio/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.11" . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-05-28T11:44:29.989985" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/dada2" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "dada2/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Common Workflow Language example that illustrate the process of setting up a simulation system containing a protein, step by step, using the [BioExcel Building Blocks](/projects/11) library (biobb). The particular example used is the Lysozyme protein (PDB code 1AKI). This workflow returns a resulting protein structure and simulated 3D trajectories." ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.29.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb-wf-md-setup-protein-cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Protein MD Setup tutorial using BioExcel Building Blocks (biobb) in CWL" ; + schema1:sdDatePublished "2024-07-12 13:36:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/29/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11271 ; + schema1:creator ; + schema1:dateCreated "2021-05-10T09:00:56Z" ; + schema1:dateModified "2023-01-16T13:42:00Z" ; + schema1:description "Common Workflow Language example that illustrate the process of setting up a simulation system containing a protein, step by step, using the [BioExcel Building Blocks](/projects/11) library (biobb). The particular example used is the Lysozyme protein (PDB code 1AKI). 
This workflow returns a resulting protein structure and simulated 3D trajectories." ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/29?version=2" ; + schema1:isPartOf , + ; + schema1:keywords "molecular dynamics, trajectories, protein" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Protein MD Setup tutorial using BioExcel Building Blocks (biobb) in CWL" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/29?version=3" ; + schema1:version 3 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 60267 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# MGnify genomes catalogue pipeline\r +\r +[MGnify](https://www.ebi.ac.uk/metagenomics/) A pipeline to perform taxonomic and functional annotation and to generate a catalogue from a set of isolate and/or metagenome-assembled genomes (MAGs) using the workflow described in the following publication:\r +\r +Gurbich TA, Almeida A, Beracochea M, Burdett T, Burgin J, Cochrane G, Raj S, Richardson L, Rogers AB, Sakharova E, Salazar GA and Finn RD. (2023) [MGnify Genomes: A Resource for Biome-specific Microbial Genome Catalogues.](https://www.sciencedirect.com/science/article/pii/S0022283623000724) J Mol Biol. 
doi: https://doi.org/10.1016/j.jmb.2023.168016\r +\r +Detailed information about existing MGnify catalogues: https://docs.mgnify.org/src/docs/genome-viewer.html\r +\r +### Tools used in the pipeline\r +| Tool/Database | Version | Purpose |\r +|---------------------------------------------------------|-------------------|----------- |\r +| CheckM2 | 1.0.1 | Determining genome quality |\r +| dRep | 3.2.2 | Genome clustering |\r +| Mash | 2.3 | Sketch for the catalogue; placement of genomes into clusters (update only); strain tree |\r +| GUNC | 1.0.3 | Quality control |\r +| GUNC DB | 2.0.4 | Database for GUNC |\r +| GTDB-Tk | 2.3.0 | Assigning taxonomy; generating alignments |\r +| GTDB | r214 | Database for GTDB-Tk |\r +| Prokka | 1.14.6 | Protein annotation |\r +| IQ-TREE 2 | 2.2.0.3 | Generating a phylogenetic tree |\r +| Kraken 2 | 2.1.2 | Generating a kraken database |\r +| Bracken | 2.6.2 | Generating a bracken database |\r +| MMseqs2 | 13.45111 | Generating a protein catalogue |\r +| eggNOG-mapper | 2.1.11 | Protein annotation (eggNOG, KEGG, COG, CAZy) |\r +| eggNOG DB | 5.0.2 | Database for eggNOG-mapper |\r +| Diamond | 2.0.11 | Protein annotation (eggNOG) |\r +| InterProScan | 5.62-94.0 | Protein annotation (InterPro, Pfam) |\r +| CRISPRCasFinder | 4.3.2 | Annotation of CRISPR arrays |\r +| AMRFinderPlus | 3.11.4 | Antimicrobial resistance gene annotation; virulence factors, biocide, heat, acid, and metal resistance gene annotation |\r +| AMRFinderPlus DB | 3.11 2023-02-23.1 | Database for AMRFinderPlus |\r +| SanntiS | 0.9.3.2 | Biosynthetic gene cluster annotation |\r +| Infernal | 1.1.4 | RNA predictions |\r +| tRNAscan-SE | 2.0.9 | tRNA predictions |\r +| Rfam | 14.9 | Identification of SSU/LSU rRNA and other ncRNAs |\r +| Panaroo | 1.3.2 | Pan-genome computation |\r +| Seqtk | 1.3 | Generating a gene catalogue |\r +| VIRify | 2.0.1 | Viral sequence annotation |\r +| [Mobilome annotation 
pipeline](https://github.com/EBI-Metagenomics/mobilome-annotation-pipeline) | 2.0.1 | Mobilome annotation |\r +| samtools | 1.15 | FASTA indexing |\r +\r +## Setup\r +\r +### Environment\r +\r +The pipeline is implemented in [Nextflow](https://www.nextflow.io/).\r +\r +Requirements:\r +- [singulairty](https://sylabs.io/docs/) or [docker](https://www.docker.com/)\r +\r +#### Reference databases\r +\r +The pipeline needs the following reference databases and configuration files (roughtly ~150G):\r +\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/gunc_db_2.0.4.dmnd.gz\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/eggnog_db_5.0.2.tgz\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/rfam_14.9/\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/kegg_classes.tsv\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/continent_countries.csv\r +- https://data.ace.uq.edu.au/public/gtdb/data/releases/release214/214.0/auxillary_files/gtdbtk_r214_data.tar.gz\r +- ftp://ftp.ncbi.nlm.nih.gov/pathogen/Antimicrobial_resistance/AMRFinderPlus/database/3.11/2023-02-23.1\r +- https://zenodo.org/records/4626519/files/uniref100.KO.v1.dmnd.gz\r +\r +### Containers\r +\r +This pipeline requires [singularity](https://sylabs.io/docs/) or [docker](https://www.docker.com/) as the container engine to run pipeline.\r +\r +The containers are hosted in [biocontainers](https://biocontainers.pro/) and [quay.io/microbiome-informatics](https://quay.io/organization/microbiome-informatics) repository.\r +\r +It's possible to build the containers from scratch using the following script:\r +\r +```bash\r +cd containers && bash build.sh\r +```\r +\r +## Running the pipeline\r +\r +## Data preparation\r +\r +1. You need to pre-download your data to directories and make sure that genomes are uncompressed. 
Scripts to fetch genomes from ENA ([fetch_ena.py](https://github.com/EBI-Metagenomics/genomes-pipeline/blob/master/bin/fetch_ena.py)) and NCBI ([fetch_ncbi.py](https://github.com/EBI-Metagenomics/genomes-pipeline/blob/master/bin/fetch_ncbi.py)) are provided and need to be executed separately from the pipeline. If you have downloaded genomes from both ENA and NCBI, put them into separate folders.\r +\r +2. When genomes are fetched from ENA using the `fetch_ena.py` script, a CSV file with contamination and completeness statistics is also created in the same directory where genomes are saved to. If you are downloading genomes using a different approach, a CSV file needs to be created manually (each line should be genome accession, % completeness, % contamination). The ENA fetching script also pre-filters genomes to satisfy the QS50 cut-off (QS = % completeness - 5 * % contamination).\r +\r +3. You will need the following information to run the pipeline:\r + - catalogue name (for example, zebrafish-faecal)\r + - catalogue version (for example, 1.0)\r + - catalogue biome (for example, root:Host-associated:Human:Digestive system:Large intestine:Fecal)\r + - min and max accession number to be assigned to the genomes (only MGnify specific). 
Max - Min = #total number of genomes (NCBI+ENA)\r +\r +### Execution\r +\r +The pipeline is built in [Nextflow](https://www.nextflow.io), and utilized containers to run the software (we don't support conda ATM).\r +In order to run the pipeline it's required that the user creates a profile that suits their needs, there is an `ebi` profile in `nexflow.config` that can be used as template.\r +\r +After downloading the databases and adjusting the config file:\r +\r +```bash\r +nextflow run EBI-Metagenomics/genomes-pipeline -c -profile \\\r +--genome-prefix=MGYG \\\r +--biome="root:Host-associated:Fish:Digestive system" \\\r +--ena_genomes= \\\r +--ena_genomes_checkm= \\\r +--mgyg_start=0 \\\r +--mgyg_end=10 \\\r +--preassigned_accessions=\r +--catalogue_name=zebrafish-faecal \\\r +--catalogue_version="1.0" \\\r +--ftp_name="zebrafish-faecal" \\\r +--ftp_version="v1.0" \\\r +--outdir=""\r +```\r +\r +### Development\r +\r +Install development tools (including pre-commit hooks to run Black code formatting).\r +\r +```bash\r +pip install -r requirements-dev.txt\r +pre-commit install\r +```\r +\r +#### Code style\r +\r +Use Black, this tool is configured if you install the pre-commit tools as above.\r +\r +To manually run them: black .\r +\r +### Testing\r +\r +This repo has 2 set of tests, python unit tests for some of the most critical python scripts and [nf-test](https://github.com/askimed/nf-test) scripts for the nextflow code.\r +\r +To run the python tests\r +\r +```bash\r +pip install -r requirements-test.txt\r +pytest\r +```\r +\r +To run the nextflow ones the databases have to downloaded manually, we are working to improve this.\r +\r +```bash\r +nf-test test tests/*\r +```\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/462?version=2" ; + schema1:isBasedOn "https://github.com/EBI-Metagenomics/genomes-pipeline.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for MGnify genomes 
catalogue pipeline" ; + schema1:sdDatePublished "2024-07-12 13:22:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/462/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 129 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-05-23T11:21:45Z" ; + schema1:dateModified "2024-05-23T11:23:44Z" ; + schema1:description """# MGnify genomes catalogue pipeline\r +\r +[MGnify](https://www.ebi.ac.uk/metagenomics/) A pipeline to perform taxonomic and functional annotation and to generate a catalogue from a set of isolate and/or metagenome-assembled genomes (MAGs) using the workflow described in the following publication:\r +\r +Gurbich TA, Almeida A, Beracochea M, Burdett T, Burgin J, Cochrane G, Raj S, Richardson L, Rogers AB, Sakharova E, Salazar GA and Finn RD. (2023) [MGnify Genomes: A Resource for Biome-specific Microbial Genome Catalogues.](https://www.sciencedirect.com/science/article/pii/S0022283623000724) J Mol Biol. 
doi: https://doi.org/10.1016/j.jmb.2023.168016\r +\r +Detailed information about existing MGnify catalogues: https://docs.mgnify.org/src/docs/genome-viewer.html\r +\r +### Tools used in the pipeline\r +| Tool/Database | Version | Purpose |\r +|---------------------------------------------------------|-------------------|----------- |\r +| CheckM2 | 1.0.1 | Determining genome quality |\r +| dRep | 3.2.2 | Genome clustering |\r +| Mash | 2.3 | Sketch for the catalogue; placement of genomes into clusters (update only); strain tree |\r +| GUNC | 1.0.3 | Quality control |\r +| GUNC DB | 2.0.4 | Database for GUNC |\r +| GTDB-Tk | 2.3.0 | Assigning taxonomy; generating alignments |\r +| GTDB | r214 | Database for GTDB-Tk |\r +| Prokka | 1.14.6 | Protein annotation |\r +| IQ-TREE 2 | 2.2.0.3 | Generating a phylogenetic tree |\r +| Kraken 2 | 2.1.2 | Generating a kraken database |\r +| Bracken | 2.6.2 | Generating a bracken database |\r +| MMseqs2 | 13.45111 | Generating a protein catalogue |\r +| eggNOG-mapper | 2.1.11 | Protein annotation (eggNOG, KEGG, COG, CAZy) |\r +| eggNOG DB | 5.0.2 | Database for eggNOG-mapper |\r +| Diamond | 2.0.11 | Protein annotation (eggNOG) |\r +| InterProScan | 5.62-94.0 | Protein annotation (InterPro, Pfam) |\r +| CRISPRCasFinder | 4.3.2 | Annotation of CRISPR arrays |\r +| AMRFinderPlus | 3.11.4 | Antimicrobial resistance gene annotation; virulence factors, biocide, heat, acid, and metal resistance gene annotation |\r +| AMRFinderPlus DB | 3.11 2023-02-23.1 | Database for AMRFinderPlus |\r +| SanntiS | 0.9.3.2 | Biosynthetic gene cluster annotation |\r +| Infernal | 1.1.4 | RNA predictions |\r +| tRNAscan-SE | 2.0.9 | tRNA predictions |\r +| Rfam | 14.9 | Identification of SSU/LSU rRNA and other ncRNAs |\r +| Panaroo | 1.3.2 | Pan-genome computation |\r +| Seqtk | 1.3 | Generating a gene catalogue |\r +| VIRify | 2.0.1 | Viral sequence annotation |\r +| [Mobilome annotation 
pipeline](https://github.com/EBI-Metagenomics/mobilome-annotation-pipeline) | 2.0.1 | Mobilome annotation |\r +| samtools | 1.15 | FASTA indexing |\r +\r +## Setup\r +\r +### Environment\r +\r +The pipeline is implemented in [Nextflow](https://www.nextflow.io/).\r +\r +Requirements:\r +- [singulairty](https://sylabs.io/docs/) or [docker](https://www.docker.com/)\r +\r +#### Reference databases\r +\r +The pipeline needs the following reference databases and configuration files (roughtly ~150G):\r +\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/gunc_db_2.0.4.dmnd.gz\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/eggnog_db_5.0.2.tgz\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/rfam_14.9/\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/kegg_classes.tsv\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/continent_countries.csv\r +- https://data.ace.uq.edu.au/public/gtdb/data/releases/release214/214.0/auxillary_files/gtdbtk_r214_data.tar.gz\r +- ftp://ftp.ncbi.nlm.nih.gov/pathogen/Antimicrobial_resistance/AMRFinderPlus/database/3.11/2023-02-23.1\r +- https://zenodo.org/records/4626519/files/uniref100.KO.v1.dmnd.gz\r +\r +### Containers\r +\r +This pipeline requires [singularity](https://sylabs.io/docs/) or [docker](https://www.docker.com/) as the container engine to run pipeline.\r +\r +The containers are hosted in [biocontainers](https://biocontainers.pro/) and [quay.io/microbiome-informatics](https://quay.io/organization/microbiome-informatics) repository.\r +\r +It's possible to build the containers from scratch using the following script:\r +\r +```bash\r +cd containers && bash build.sh\r +```\r +\r +## Running the pipeline\r +\r +## Data preparation\r +\r +1. You need to pre-download your data to directories and make sure that genomes are uncompressed. 
Scripts to fetch genomes from ENA ([fetch_ena.py](https://github.com/EBI-Metagenomics/genomes-pipeline/blob/master/bin/fetch_ena.py)) and NCBI ([fetch_ncbi.py](https://github.com/EBI-Metagenomics/genomes-pipeline/blob/master/bin/fetch_ncbi.py)) are provided and need to be executed separately from the pipeline. If you have downloaded genomes from both ENA and NCBI, put them into separate folders.\r +\r +2. When genomes are fetched from ENA using the `fetch_ena.py` script, a CSV file with contamination and completeness statistics is also created in the same directory where genomes are saved to. If you are downloading genomes using a different approach, a CSV file needs to be created manually (each line should be genome accession, % completeness, % contamination). The ENA fetching script also pre-filters genomes to satisfy the QS50 cut-off (QS = % completeness - 5 * % contamination).\r +\r +3. You will need the following information to run the pipeline:\r + - catalogue name (for example, zebrafish-faecal)\r + - catalogue version (for example, 1.0)\r + - catalogue biome (for example, root:Host-associated:Human:Digestive system:Large intestine:Fecal)\r + - min and max accession number to be assigned to the genomes (only MGnify specific). 
Max - Min = #total number of genomes (NCBI+ENA)\r +\r +### Execution\r +\r +The pipeline is built in [Nextflow](https://www.nextflow.io), and utilized containers to run the software (we don't support conda ATM).\r +In order to run the pipeline it's required that the user creates a profile that suits their needs, there is an `ebi` profile in `nexflow.config` that can be used as template.\r +\r +After downloading the databases and adjusting the config file:\r +\r +```bash\r +nextflow run EBI-Metagenomics/genomes-pipeline -c -profile \\\r +--genome-prefix=MGYG \\\r +--biome="root:Host-associated:Fish:Digestive system" \\\r +--ena_genomes= \\\r +--ena_genomes_checkm= \\\r +--mgyg_start=0 \\\r +--mgyg_end=10 \\\r +--preassigned_accessions=\r +--catalogue_name=zebrafish-faecal \\\r +--catalogue_version="1.0" \\\r +--ftp_name="zebrafish-faecal" \\\r +--ftp_version="v1.0" \\\r +--outdir=""\r +```\r +\r +### Development\r +\r +Install development tools (including pre-commit hooks to run Black code formatting).\r +\r +```bash\r +pip install -r requirements-dev.txt\r +pre-commit install\r +```\r +\r +#### Code style\r +\r +Use Black, this tool is configured if you install the pre-commit tools as above.\r +\r +To manually run them: black .\r +\r +### Testing\r +\r +This repo has 2 set of tests, python unit tests for some of the most critical python scripts and [nf-test](https://github.com/askimed/nf-test) scripts for the nextflow code.\r +\r +To run the python tests\r +\r +```bash\r +pip install -r requirements-test.txt\r +pytest\r +```\r +\r +To run the nextflow ones the databases have to downloaded manually, we are working to improve this.\r +\r +```bash\r +nf-test test tests/*\r +```\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/462?version=1" ; + schema1:keywords "Metagenomics, Nextflow, Bioinformatics" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "MGnify genomes catalogue pipeline" ; + schema1:producer , + ; + 
schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/462?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """## Summary\r +PredPrIn is a scientific workflow to predict Protein-Protein Interactions (PPIs) using machine learning to combine multiple PPI detection methods of proteins according to three categories: structural, based on primary aminoacid sequence and functional annotations.
\r +\r +PredPrIn contains three main steps: (i) acquirement and treatment of protein information, (ii) feature generation, and (iii) classification and analysis.\r +\r +(i) The first step builds a knowledge base with the available annotations of proteins and reuses this base for other prediction experiments, saving time and becoming more efficient. \r +\r +(ii) The feature generation step involves several evidence from different classes, such as: Gene Ontology (GO) information, domain interaction, metabolic pathway participation and sequence-based interaction. For the GO branches, we made a study to evaluate the best method to calculate semantic similarity to enhance the workflow performance. This step can be easily modified by adding new metrics, making PredPrIn flexible for future improvements. \r +\r +Finally, (iii) in the third step, the adaboost classifier is responsible for predicting the final scores from the numerical features dataset, exporting results of performance evaluation metrics.\r +\r +## Requirements:\r +* Python packages needed:\r + - pip3 install luigi\r + - pip3 install sqlalchemy\r + - pip3 install rdflib\r + - pip3 install sklearn\r + - pip3 install matplotlib\r + - pip3 install numpy\r +\r +* Other instalation:\r + - sqlite (to be able to see the documentation generated by luigi about the tasks after execution)\r +\r +## Usage Instructions\r +The steps below consider the creation of a sqlite database file with all he tasks events which can be used after to retrieve the execution time taken by the tasks. It is possible run locally too (see luigi's documentation to change the running command).

\r +* Preparation:\r + 1. ````git clone https://github.com/YasCoMa/predprin.git````\r + 2. ````cd PredPrIn````\r + 3. `pip3 install -r requirements.txt`\r + 4. Download annotation_data.zip (https://drive.google.com/file/d/1bWPSyULaooj7GTrDf6QBY3ZyeyH5MRpm/view?usp=share_link)\r + 5. Download rdf_data.zip (https://drive.google.com/file/d/1Cp511ioXiw2PiOHdkxa4XsZnxOeM3Pan/view?usp=share_link)\r + 6. Download sequence_data.zip (https://drive.google.com/file/d/1uEKh5EF9X_6fgZ9cTTp0jW3XaL48stxA/view?usp=share_link)\r + 7. Unzip annotation_data.zip\r + 8. Unzip rdf_data.zip\r + 9. Unzip sequence_data.zip\r + 10. Download SPRINT pre-computed similarities in https://www.csd.uwo.ca/~ilie/SPRINT/precomputed_similarities.zip and unzip it inside core/sprint/HSP/\r + 11. Certify that there is a file named client.cfg (to configure the history log and feed the sqlite database). It must have the following data:\r + ````\r + [core]\r + default-scheduler-host=localhost\r + default-scheduler-port=8082\r + rpc-connect-timeout=60.0 \r + rpc-retry-attempts=10 \r + rpc-retry-wait=60 \r +\r + [scheduler]\r + record_task_history = True\r +\r + [task_history]\r + db_connection = sqlite:///luigi-task-hist.db\r + ````\r +* Parameters:\r + 1. parameters-file -> json file with all the information to process the prediction experiment (example: params.json)\r + 2. mode -> it can have two values: train (executes cross validation and save the model as a .joblib file) or test (uses a model obtained in train mode to test in some dataset listed in the parameters file)\r + 3. model -> it is the model file full path saved in train mode as .joblib\r + \r +* Running:\r + 1. ````mkdir luigi_log```` (or other name for the log folder of your choice)\r + 2. ````luigid --background --logdir luigi_log```` (start luigi server)\r + 3. ````nohup python3.5 -m luigi --module main RunPPIExperiment --parameters-file params.json --mode 'train' --model none.joblib --workers 3 &````
\r + ````nohup python3.5 -m luigi --module main RunPPIExperiment --parameters-file params.json --mode 'test' --model model.jolib --workers 3 &````
\r + - Replace python3.5 by the command python of your environment
\r + - Replace the data given as example in params.json using your own data
\r + - Adapt the number of workers to use as you need and the capacity of your computational resource available\r +\r + You can monitor the prediction experiment execution in localhost:8082\r +\r +## Reference\r +Martins YC, Ziviani A, Nicolás MF, de Vasconcelos AT. Large-Scale Protein Interactions Prediction by Multiple Evidence Analysis Associated With an In-Silico Curation Strategy. Frontiers in Bioinformatics. 2021:38.\r +https://www.frontiersin.org/articles/10.3389/fbinf.2021.731345/full\r +\r +## Bug Report\r +Please, use the [Issues](https://github.com/YasCoMa/PredPrIn/issues) tab to report any bug.""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/616?version=1" ; + schema1:isBasedOn "https://github.com/YasCoMa/predprin" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PredPrIn - Scientific workflow to predict protein-protein interactions based in a combined analysis of multiple protein characteristics." ; + schema1:sdDatePublished "2024-07-12 13:27:12 +0100" ; + schema1:url "https://workflowhub.eu/workflows/616/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4098 ; + schema1:creator ; + schema1:dateCreated "2023-10-21T23:35:16Z" ; + schema1:dateModified "2023-10-21T23:37:59Z" ; + schema1:description """## Summary\r +PredPrIn is a scientific workflow to predict Protein-Protein Interactions (PPIs) using machine learning to combine multiple PPI detection methods of proteins according to three categories: structural, based on primary aminoacid sequence and functional annotations.
\r +\r +PredPrIn contains three main steps: (i) acquirement and treatment of protein information, (ii) feature generation, and (iii) classification and analysis.\r +\r +(i) The first step builds a knowledge base with the available annotations of proteins and reuses this base for other prediction experiments, saving time and becoming more efficient. \r +\r +(ii) The feature generation step involves several evidence from different classes, such as: Gene Ontology (GO) information, domain interaction, metabolic pathway participation and sequence-based interaction. For the GO branches, we made a study to evaluate the best method to calculate semantic similarity to enhance the workflow performance. This step can be easily modified by adding new metrics, making PredPrIn flexible for future improvements. \r +\r +Finally, (iii) in the third step, the adaboost classifier is responsible for predicting the final scores from the numerical features dataset, exporting results of performance evaluation metrics.\r +\r +## Requirements:\r +* Python packages needed:\r + - pip3 install luigi\r + - pip3 install sqlalchemy\r + - pip3 install rdflib\r + - pip3 install sklearn\r + - pip3 install matplotlib\r + - pip3 install numpy\r +\r +* Other instalation:\r + - sqlite (to be able to see the documentation generated by luigi about the tasks after execution)\r +\r +## Usage Instructions\r +The steps below consider the creation of a sqlite database file with all he tasks events which can be used after to retrieve the execution time taken by the tasks. It is possible run locally too (see luigi's documentation to change the running command).

\r +* Preparation:\r + 1. ````git clone https://github.com/YasCoMa/predprin.git````\r + 2. ````cd PredPrIn````\r + 3. `pip3 install -r requirements.txt`\r + 4. Download annotation_data.zip (https://drive.google.com/file/d/1bWPSyULaooj7GTrDf6QBY3ZyeyH5MRpm/view?usp=share_link)\r + 5. Download rdf_data.zip (https://drive.google.com/file/d/1Cp511ioXiw2PiOHdkxa4XsZnxOeM3Pan/view?usp=share_link)\r + 6. Download sequence_data.zip (https://drive.google.com/file/d/1uEKh5EF9X_6fgZ9cTTp0jW3XaL48stxA/view?usp=share_link)\r + 7. Unzip annotation_data.zip\r + 8. Unzip rdf_data.zip\r + 9. Unzip sequence_data.zip\r + 10. Download SPRINT pre-computed similarities in https://www.csd.uwo.ca/~ilie/SPRINT/precomputed_similarities.zip and unzip it inside core/sprint/HSP/\r + 11. Certify that there is a file named client.cfg (to configure the history log and feed the sqlite database). It must have the following data:\r + ````\r + [core]\r + default-scheduler-host=localhost\r + default-scheduler-port=8082\r + rpc-connect-timeout=60.0 \r + rpc-retry-attempts=10 \r + rpc-retry-wait=60 \r +\r + [scheduler]\r + record_task_history = True\r +\r + [task_history]\r + db_connection = sqlite:///luigi-task-hist.db\r + ````\r +* Parameters:\r + 1. parameters-file -> json file with all the information to process the prediction experiment (example: params.json)\r + 2. mode -> it can have two values: train (executes cross validation and save the model as a .joblib file) or test (uses a model obtained in train mode to test in some dataset listed in the parameters file)\r + 3. model -> it is the model file full path saved in train mode as .joblib\r + \r +* Running:\r + 1. ````mkdir luigi_log```` (or other name for the log folder of your choice)\r + 2. ````luigid --background --logdir luigi_log```` (start luigi server)\r + 3. ````nohup python3.5 -m luigi --module main RunPPIExperiment --parameters-file params.json --mode 'train' --model none.joblib --workers 3 &````
\r + ````nohup python3.5 -m luigi --module main RunPPIExperiment --parameters-file params.json --mode 'test' --model model.jolib --workers 3 &````
\r + - Replace python3.5 by the command python of your environment
\r + - Replace the data given as example in params.json using your own data
\r + - Adapt the number of workers to use as you need and the capacity of your computational resource available\r +\r + You can monitor the prediction experiment execution in localhost:8082\r +\r +## Reference\r +Martins YC, Ziviani A, Nicolás MF, de Vasconcelos AT. Large-Scale Protein Interactions Prediction by Multiple Evidence Analysis Associated With an In-Silico Curation Strategy. Frontiers in Bioinformatics. 2021:38.\r +https://www.frontiersin.org/articles/10.3389/fbinf.2021.731345/full\r +\r +## Bug Report\r +Please, use the [Issues](https://github.com/YasCoMa/PredPrIn/issues) tab to report any bug.""" ; + schema1:image ; + schema1:keywords "Bioinformatics, Luigi & Rufus workflow, Pathway co-occurrence, Gene ontology term sets similarity, Domain-Domain interaction" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "PredPrIn - Scientific workflow to predict protein-protein interactions based in a combined analysis of multiple protein characteristics." ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/616?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 98305 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.54.6" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_ligand_parameterization" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook GMX Notebook Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-07-12 13:24:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/54/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14394 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-03-04T14:20:06Z" ; + schema1:dateModified "2024-03-05T09:40:24Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/54?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook GMX Notebook Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_ligand_parameterization/blob/main/biobb_wf_ligand_parameterization/notebooks/biobb_ligand_CNS_parameterization_tutorial.ipynb" ; + schema1:version 6 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """The second-level complexity workflow is one among a collection of workflows designed to address tasks up to CTF estimation. In addition to the functionalities provided by the layer 0 workflow, this workflow aims to enhance the quality of acquisition images using quality protocols.\r +\r +**Quality control protocols**\r +\r +* **Movie max shift**: automatic reject those movies whose frames move more than a given threshold. \r +\r +* **Tilt analysis**: quality score based in the Power Spectrum Density (astigmatism and tilt) \r +\r +* **CTF consensus**: acts as a filter discarding micrographs based on their CTF (limit resolution, defocus, astigmatism, etc.).\r +\r +**Advantages:** \r +\r +* More control of the acquisition quality\r +\r +* Reduce unnecessary processing time and storage""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/599?version=2" ; + schema1:license "CC-BY-4.0" ; + schema1:name "Research Object Crate for CEITEC layer 1 workflow" ; + schema1:sdDatePublished "2024-07-12 13:17:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/599/ro_crate?version=2" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "GATK4 RNA variant calling pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1019?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/rnavar" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnavar" ; + schema1:sdDatePublished "2024-07-12 13:18:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1019/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10407 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:14Z" ; + schema1:dateModified "2024-06-11T12:55:14Z" ; + schema1:description "GATK4 RNA variant calling pipeline" ; + schema1:keywords "GATK4, rna, RNASEQ, variant-calling, worflow" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnavar" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1019?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A mapping-based pipeline for creating a phylogeny from bacterial whole genome sequences" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/967?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/bactmap" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bactmap" ; + schema1:sdDatePublished "2024-07-12 13:22:16 +0100" ; + schema1:url "https://workflowhub.eu/workflows/967/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7268 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:43Z" ; + schema1:dateModified "2024-06-11T12:54:43Z" ; + schema1:description "A mapping-based pipeline for creating a phylogeny from bacterial whole genome sequences" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/967?version=2" ; + schema1:keywords "bacteria, bacterial, bacterial-genome-analysis, Genomics, mapping, phylogeny, tree" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bactmap" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/967?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.195.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_dna_helparms" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Structural DNA helical parameters tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/195/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 72437 ; + schema1:creator , + ; + schema1:dateCreated "2022-09-15T12:36:54Z" ; + schema1:dateModified "2023-01-16T13:53:15Z" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/195?version=4" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Structural DNA helical parameters tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_dna_helparms/master/biobb_wf_dna_helparms/notebooks/biobb_dna_helparms_tutorial.ipynb" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An analysis pipeline for Molecular Cartography data from Resolve Biosciences." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1001?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/molkart" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/molkart" ; + schema1:sdDatePublished "2024-07-12 13:18:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1001/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10586 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:05Z" ; + schema1:dateModified "2024-06-11T12:55:05Z" ; + schema1:description "An analysis pipeline for Molecular Cartography data from Resolve Biosciences." ; + schema1:keywords "fish, image-processing, imaging, molecularcartography, Segmentation, single-cell, spatial, Transcriptomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/molkart" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1001?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and modern ancient DNA pipeline in Nextflow and with cloud support." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-07-12 13:21:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3707 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:49Z" ; + schema1:dateModified "2024-06-11T12:54:49Z" ; + schema1:description "A fully reproducible and modern ancient DNA pipeline in Nextflow and with cloud support." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for analysis of Molecular Pixelation assays" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1010?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/pixelator" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/pixelator" ; + schema1:sdDatePublished "2024-07-12 13:20:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1010/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10309 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:07Z" ; + schema1:dateModified "2024-06-11T12:55:07Z" ; + schema1:description "Pipeline for analysis of Molecular Pixelation assays" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1010?version=5" ; + schema1:keywords "molecular-pixelation, pixelator, pixelgen-technologies, proteins, single-cell, single-cell-omics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/pixelator" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1010?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + schema1:datePublished "2024-06-25T09:57:05.224898" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/allele-based-pathogen-identification" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "allele-based-pathogen-identification/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=15" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-07-12 13:22:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=15" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9371 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:12Z" ; + schema1:dateModified "2024-06-11T12:55:12Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=15" ; + schema1:version 15 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A pipeline to identify (and remove) certain sequences from raw genomic data. Default taxa to identify (and remove) are Homo and Homo sapiens. Removal is optional." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/979?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/detaxizer" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/detaxizer" ; + schema1:sdDatePublished "2024-07-12 13:21:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/979/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11353 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "A pipeline to identify (and remove) certain sequences from raw genomic data. Default taxa to identify (and remove) are Homo and Homo sapiens. Removal is optional." 
; + schema1:keywords "de-identification, decontamination, eDNA, FASTQ, filter, long-reads, Metabarcoding, Metagenomics, microbiome, nanopore, short-reads, shotgun, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/detaxizer" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/979?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-03-06T17:17:46.226835" ; + schema1:hasPart , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/baredsc" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + ; + schema1:name "baredsc/baredSC-1d-logNorm" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "baredsc/baredSC-2d-logNorm" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/baredsc" ; + schema1:version "0.3" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# Shotgun Metagenomics Analysis\r +Analysis of metagenomic shotgun sequences including assembly, speciation, ARG discovery and more\r +\r +## Description\r +The input for this analysis is paired end next generation sequencing data from metagenomic samples. The workflow is designed to be modular, so that individual modules can be run depending on the nature of the metagenomics project at hand. 
More modules will be added as we develop them - this repo is a work in progress!\r +\r +These scripts have been written specifically for NCI Gadi HPC, wich runs PBS Pro, however feel free to use and modify for anothre system if you are not a Gadi user. \r +\r +### Part 1. Setup and QC\r +Download the repo. You will see directories for `Fastq`, `Inputs`, `Reference` and `Logs`. You will need to copy or symlink your fastq to `Fastq`, sample configuration file (see below) to `Inputs` and the reference genome sequence of your host species (if applicable) to `Reference` for host contamination removal.\r + \r +\r +#### Fastq inputs\r +The scripts assume all fastq files are paired, gzipped, and all in the one directory named 'Fastq'. If your fastq are within a convoluted directory structure (eg per-sample directories) or you would simply like to link them from an alternate location, please use the script `setup_fastq.sh`.\r +\r +To use this script, parse the path name of your fastq as first argument on the command line, and run the script from the base working directory (/Shotgun-Metagenomics-Analysis) which will from here on be referred to as `workdir`. Note that this script looks for `f*q.gz` files (ie fastq.gz or fq.gz) - if yours differ in suffix, please adjust the script accordingly.\r +\r +```\r +bash ./Scripts/setup_fastq.sh \r +```\r +\r +#### Configuration/sample info\r +The only required input configuration file should be named .config, where is the name of the current batch of samples you are processing, or some other meaningful name to your project; it will be used to name output files. The config file should be placed inside the $workdir/Inputs directory, and include the following columns, in this order:\r +\r +```\r +1. Sample ID - used to identify the sample, eg if you have 3 lanes of sequencing per sample, erach of those 6 fastq files should contain this ID that si in column 1\r +2. 
Lab Sample ID - can be the same as column 1, or different if you have reason to change the IDs eg if the seq centre applies an in-house ID. Please make sure IDs are unique within column 1 and unique within column 2\r +3. Group - eg different time points or treatment groups. If no specific group structure is relevant, please set this to 1 (do not leave blank!) \r +3. Platform - should be Illumina; other sequencing platforms are not tested on this workflow\r +4. Sequencing centre name\r +5. Library - eg if you have 2 sequencing libraries for the same sample. Can be left blank, or assigned to 1. Blank will be assigned libray ID of 1 during processing.\r +```\r +\r +Please do not have spaces in any of the values for the config file. \r +\r +\r +#### General setup\r +\r +All scripts will need to be edited to reflect your NCI project code at the `-P ` and `-l directive. Please run the script create_project.sh and follow the prompts to complete some of the setup for you. \r +\r +Note that you will need to manually edit the PDS resource requests for each PBS script; guidelines/example resources will be given at each step to help you do this. As the 'sed' commands within this script operate on .sh and .pbs files, this setup script has been intentionally named .bash (easiest solution).\r +\r +Remember to submit all scripts from your `workdir`. \r +\r +`bash ./Scripts/create_project.sh`\r +\r +For jobs that execute in parallel, there are 3 scripts: one to make the 'inputs' file listing hte details of each parallel task, one job execution shell script that is run over each task in parallel, and one PBS launcher script. The process is to submit the make input script, check it to make sure your job details are correct, edit the resources directives depending on the number and size of your parallel tasks, then submit the PBS launcher script with `qsub`. \r +\r +#### QC\r +\r +Run fastQC over each fastq file in parallel. Adjust the resources as per your project. 
To run all files in parallel, set the number of NCPUS requested equal to the number of fastq files (remember that Gadi can only request <1 node or multiples of whole nodes). The make input script sorts the fastq files largest to smallest, so if you have a discrpeancy in file size, optimal efficiency can be achieved by requested less nodes than the total required to run all your fastq in parallel.\r +\r +FastQC does not multithread on a single file, so CPUs per parallel task is set to 1. Example walltimes on Gadi 'normal' queue: one 1.8 GB fastq = 4 minutes; one 52 GB fastq file = 69.5 minutes.\r +\r +Make the fastqc parallel inputs file by running (from `workdir`):\r +`bash ./Scripts/fastqc_make_inputs.sh`\r +\r +Edit the resource requests in `fastqc_run_parallel.pbs` according to your number of fastq files and their size, then submit:\r +`qsub fastqc_run_parallel.pbs`\r +\r +To ease manual inspection of the fastQC output, running `multiqc` is recommended. This will collate the individual fastQC reports into one report. This can be done on the login node for small sample numbers, or using the below script for larger cohorts. Edit the PBS directives, then run:\r +\r +`qsub multiqc.pbs`\r +\r +Save a copy of ./MultiQC/multiqc_report.html to your local disk then open in a web browser to inspect the results. \r +\r +#### Quality filtering and trimming\r +\r +Will be added at a later date. This is highly dependent on the quality of your data and your individual project needs so will be a guide only. \r +\r +### Part 2. Removal of host contamination. \r +\r +If you have metagenomic data extracted from a host, you will need a copy of the host reference genome sequence in order to remove any DNA sequences belonging to the host. Even if your wetlab protocol included a host removal step, it is still important to run bioinformatic host removal.\r +\r +\r +#### Prepare the reference\r +Ensure you have a copy of the reference genome (or symlink) in ./Fasta. 
This workflow requires BBtools(tested with version 37.98). As of writing, BBtools is not available as a global app on Gadi. Please install locally and make "module loadable", or else edit the scripts to point directly to your local BBtools installation.\r +\r +BBtools repeat masking will use all available threads on machine and 85% of available mem by default. For a mammalian genome, 2 hours on one Gadi 'normal' node is sufficient for repeat masking. \r +\r +Update the name of your reference fastq in the `bbmap_prep.pbs` script (and BBtools, see note above), then run:\r +`qsub ./Scripts/bbmap_prep.pbs`\r +\r +#### Host contamination removal\r +\r +TBC 1/4/22... \r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.327.1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/Shotgun-Metagenomics-Analysis.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Shotgun-Metagenomics-Analysis" ; + schema1:sdDatePublished "2024-07-12 13:35:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/327/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6783 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2022-04-07T00:45:10Z" ; + schema1:dateModified "2023-01-16T13:59:41Z" ; + schema1:description """# Shotgun Metagenomics Analysis\r +Analysis of metagenomic shotgun sequences including assembly, speciation, ARG discovery and more\r +\r +## Description\r +The input for this analysis is paired end next generation sequencing data from metagenomic samples. The workflow is designed to be modular, so that individual modules can be run depending on the nature of the metagenomics project at hand. 
More modules will be added as we develop them - this repo is a work in progress!\r +\r +These scripts have been written specifically for NCI Gadi HPC, wich runs PBS Pro, however feel free to use and modify for anothre system if you are not a Gadi user. \r +\r +### Part 1. Setup and QC\r +Download the repo. You will see directories for `Fastq`, `Inputs`, `Reference` and `Logs`. You will need to copy or symlink your fastq to `Fastq`, sample configuration file (see below) to `Inputs` and the reference genome sequence of your host species (if applicable) to `Reference` for host contamination removal.\r + \r +\r +#### Fastq inputs\r +The scripts assume all fastq files are paired, gzipped, and all in the one directory named 'Fastq'. If your fastq are within a convoluted directory structure (eg per-sample directories) or you would simply like to link them from an alternate location, please use the script `setup_fastq.sh`.\r +\r +To use this script, parse the path name of your fastq as first argument on the command line, and run the script from the base working directory (/Shotgun-Metagenomics-Analysis) which will from here on be referred to as `workdir`. Note that this script looks for `f*q.gz` files (ie fastq.gz or fq.gz) - if yours differ in suffix, please adjust the script accordingly.\r +\r +```\r +bash ./Scripts/setup_fastq.sh \r +```\r +\r +#### Configuration/sample info\r +The only required input configuration file should be named .config, where is the name of the current batch of samples you are processing, or some other meaningful name to your project; it will be used to name output files. The config file should be placed inside the $workdir/Inputs directory, and include the following columns, in this order:\r +\r +```\r +1. Sample ID - used to identify the sample, eg if you have 3 lanes of sequencing per sample, erach of those 6 fastq files should contain this ID that si in column 1\r +2. 
Lab Sample ID - can be the same as column 1, or different if you have reason to change the IDs eg if the seq centre applies an in-house ID. Please make sure IDs are unique within column 1 and unique within column 2\r +3. Group - eg different time points or treatment groups. If no specific group structure is relevant, please set this to 1 (do not leave blank!) \r +3. Platform - should be Illumina; other sequencing platforms are not tested on this workflow\r +4. Sequencing centre name\r +5. Library - eg if you have 2 sequencing libraries for the same sample. Can be left blank, or assigned to 1. Blank will be assigned libray ID of 1 during processing.\r +```\r +\r +Please do not have spaces in any of the values for the config file. \r +\r +\r +#### General setup\r +\r +All scripts will need to be edited to reflect your NCI project code at the `-P ` and `-l directive. Please run the script create_project.sh and follow the prompts to complete some of the setup for you. \r +\r +Note that you will need to manually edit the PDS resource requests for each PBS script; guidelines/example resources will be given at each step to help you do this. As the 'sed' commands within this script operate on .sh and .pbs files, this setup script has been intentionally named .bash (easiest solution).\r +\r +Remember to submit all scripts from your `workdir`. \r +\r +`bash ./Scripts/create_project.sh`\r +\r +For jobs that execute in parallel, there are 3 scripts: one to make the 'inputs' file listing hte details of each parallel task, one job execution shell script that is run over each task in parallel, and one PBS launcher script. The process is to submit the make input script, check it to make sure your job details are correct, edit the resources directives depending on the number and size of your parallel tasks, then submit the PBS launcher script with `qsub`. \r +\r +#### QC\r +\r +Run fastQC over each fastq file in parallel. Adjust the resources as per your project. 
To run all files in parallel, set the number of NCPUS requested equal to the number of fastq files (remember that Gadi can only request <1 node or multiples of whole nodes). The make input script sorts the fastq files largest to smallest, so if you have a discrpeancy in file size, optimal efficiency can be achieved by requested less nodes than the total required to run all your fastq in parallel.\r +\r +FastQC does not multithread on a single file, so CPUs per parallel task is set to 1. Example walltimes on Gadi 'normal' queue: one 1.8 GB fastq = 4 minutes; one 52 GB fastq file = 69.5 minutes.\r +\r +Make the fastqc parallel inputs file by running (from `workdir`):\r +`bash ./Scripts/fastqc_make_inputs.sh`\r +\r +Edit the resource requests in `fastqc_run_parallel.pbs` according to your number of fastq files and their size, then submit:\r +`qsub fastqc_run_parallel.pbs`\r +\r +To ease manual inspection of the fastQC output, running `multiqc` is recommended. This will collate the individual fastQC reports into one report. This can be done on the login node for small sample numbers, or using the below script for larger cohorts. Edit the PBS directives, then run:\r +\r +`qsub multiqc.pbs`\r +\r +Save a copy of ./MultiQC/multiqc_report.html to your local disk then open in a web browser to inspect the results. \r +\r +#### Quality filtering and trimming\r +\r +Will be added at a later date. This is highly dependent on the quality of your data and your individual project needs so will be a guide only. \r +\r +### Part 2. Removal of host contamination. \r +\r +If you have metagenomic data extracted from a host, you will need a copy of the host reference genome sequence in order to remove any DNA sequences belonging to the host. Even if your wetlab protocol included a host removal step, it is still important to run bioinformatic host removal.\r +\r +\r +#### Prepare the reference\r +Ensure you have a copy of the reference genome (or symlink) in ./Fasta. 
This workflow requires BBtools(tested with version 37.98). As of writing, BBtools is not available as a global app on Gadi. Please install locally and make "module loadable", or else edit the scripts to point directly to your local BBtools installation.\r +\r +BBtools repeat masking will use all available threads on machine and 85% of available mem by default. For a mammalian genome, 2 hours on one Gadi 'normal' node is sufficient for repeat masking. \r +\r +Update the name of your reference fastq in the `bbmap_prep.pbs` script (and BBtools, see note above), then run:\r +`qsub ./Scripts/bbmap_prep.pbs`\r +\r +#### Host contamination removal\r +\r +TBC 1/4/22... \r +""" ; + schema1:isPartOf ; + schema1:keywords "Metagenomics, shotgun, antimicrobial resistance, humann2, bbmap, whole genome sequencing, Assembly, prokka, abricate, DIAMOND, kraken, braken" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Shotgun-Metagenomics-Analysis" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/327?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-20T11:44:03.098812" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-decontamination-VGP9" ; + schema1:license "BSD-3-Clause" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-decontamination-VGP9/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Molecular Structure Checking using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **checking** a **molecular structure** before using it as an input for a **Molecular Dynamics** simulation. 
The workflow uses the **BioExcel Building Blocks library (biobb)**. The particular structure used is the crystal structure of **human Adenylate Kinase 1A (AK1A)**, in complex with the **AP5A inhibitor** (PDB code [1Z83](https://www.rcsb.org/structure/1z83)). \r +\r +**Structure checking** is a key step before setting up a protein system for **simulations**. A number of **common issues** found in structures at **Protein Data Bank** may compromise the success of the **simulation**, or may suggest that longer **equilibration** procedures are necessary.\r +\r +The **workflow** shows how to:\r +\r +- Run **basic manipulations on structures** (selection of models, chains, alternative locations\r +- Detect and fix **amide assignments** and **wrong chiralities**\r +- Detect and fix **protein backbone** issues (missing fragments, and atoms, capping)\r +- Detect and fix **missing side-chain atoms**\r +- **Add hydrogen atoms** according to several criteria\r +- Detect and classify **atomic clashes**\r +- Detect possible **disulfide bonds (SS)**\r +\r +An implementation of this workflow in a **web-based Graphical User Interface (GUI)** can be found in the [https://mmb.irbbarcelona.org/biobb-wfs/](https://mmb.irbbarcelona.org/biobb-wfs/) server (see [https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check](https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check)).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r 
+[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.778.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_structure_checking/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Molecular Structure Checking" ; + schema1:sdDatePublished "2024-07-12 13:24:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/778/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 35053 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-05T08:58:01Z" ; + schema1:dateModified "2024-03-05T08:59:51Z" ; + schema1:description """# Molecular Structure Checking using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **checking** a **molecular structure** before using it as an input for a **Molecular Dynamics** simulation. The workflow uses the **BioExcel Building Blocks library (biobb)**. The particular structure used is the crystal structure of **human Adenylate Kinase 1A (AK1A)**, in complex with the **AP5A inhibitor** (PDB code [1Z83](https://www.rcsb.org/structure/1z83)). \r +\r +**Structure checking** is a key step before setting up a protein system for **simulations**. 
A number of **common issues** found in structures at **Protein Data Bank** may compromise the success of the **simulation**, or may suggest that longer **equilibration** procedures are necessary.\r +\r +The **workflow** shows how to:\r +\r +- Run **basic manipulations on structures** (selection of models, chains, alternative locations\r +- Detect and fix **amide assignments** and **wrong chiralities**\r +- Detect and fix **protein backbone** issues (missing fragments, and atoms, capping)\r +- Detect and fix **missing side-chain atoms**\r +- **Add hydrogen atoms** according to several criteria\r +- Detect and classify **atomic clashes**\r +- Detect possible **disulfide bonds (SS)**\r +\r +An implementation of this workflow in a **web-based Graphical User Interface (GUI)** can be found in the [https://mmb.irbbarcelona.org/biobb-wfs/](https://mmb.irbbarcelona.org/biobb-wfs/) server (see [https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check](https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check)).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Molecular Structure Checking" ; + 
schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_structure_checking/galaxy/biobb_wf_structure_checking.ga" ; + schema1:version 1 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Workflow to build different indices for different tools from a genome and transcriptome. \r +\r +This workflow expects an (annotated) genome in GBOL ttl format.\r +\r +Steps:\r + - SAPP: rdf2gtf (genome fasta)\r + - SAPP: rdf2fasta (transcripts fasta)\r + - STAR index (Optional for Eukaryotic origin)\r + - bowtie2 index\r + - kallisto index\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/75?version=1" ; + schema1:isBasedOn "https://gitlab.com/m-unlock/cwl/" ; + schema1:license "CC0-1.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Indices builder from GBOL RDF (TTL)" ; + schema1:sdDatePublished "2024-07-12 13:37:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/75/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 12388 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4445 ; + schema1:creator ; + schema1:dateCreated "2020-11-23T16:22:43Z" ; + schema1:dateModified "2023-01-16T13:46:15Z" ; + schema1:description """Workflow to build different indices for different tools from a genome and transcriptome. 
\r +\r +This workflow expects an (annotated) genome in GBOL ttl format.\r +\r +Steps:\r + - SAPP: rdf2gtf (genome fasta)\r + - SAPP: rdf2fasta (transcripts fasta)\r + - STAR index (Optional for Eukaryotic origin)\r + - bowtie2 index\r + - kallisto index\r +""" ; + schema1:image ; + schema1:keywords "Alignment" ; + schema1:license "https://spdx.org/licenses/CC0-1.0" ; + schema1:name "Indices builder from GBOL RDF (TTL)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/75?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + ; + ns1:output , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# workflow-partial-bwa-mem\r +\r +These workflows are part of a set designed to work for RAD-seq data on the Galaxy platform, using the tools from the Stacks program. \r +\r +Galaxy Australia: https://usegalaxy.org.au/\r +\r +Stacks: http://catchenlab.life.illinois.edu/stacks/\r +\r +This workflow is part of the reference-guided stacks workflow, https://workflowhub.eu/workflows/347\r +\r +Inputs\r +* demultiplexed reads in fastq format, may be output from the QC workflow. Files are in a collection. \r +* reference genome in fasta format\r +\r +Outputs\r +* A set of filtered bam files, ready for the next part of the stacks workflow (e.g. gstacks). \r +* Statistics on the bam files. \r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/351?version=1" ; + schema1:isBasedOn "https://github.com/AnnaSyme/workflow-partial-bwa-mem.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Partial ref-guided workflow - bwa mem only" ; + schema1:sdDatePublished "2024-07-12 13:35:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/351/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 20262 ; + schema1:creator ; + schema1:dateCreated "2022-05-31T08:05:01Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """# workflow-partial-bwa-mem\r +\r +These workflows are part of a set designed to work for RAD-seq data on the Galaxy platform, using the tools from the Stacks program. \r +\r +Galaxy Australia: https://usegalaxy.org.au/\r +\r +Stacks: http://catchenlab.life.illinois.edu/stacks/\r +\r +This workflow is part of the reference-guided stacks workflow, https://workflowhub.eu/workflows/347\r +\r +Inputs\r +* demultiplexed reads in fastq format, may be output from the QC workflow. Files are in a collection. \r +* reference genome in fasta format\r +\r +Outputs\r +* A set of filtered bam files, ready for the next part of the stacks workflow (e.g. gstacks). \r +* Statistics on the bam files. \r +\r +""" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Partial ref-guided workflow - bwa mem only" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/351?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# Local Cromwell implementation of GATK4 germline variant calling pipeline\r +See the [GATK](https://gatk.broadinstitute.org/hc/en-us) website for more information on this toolset \r +## Assumptions\r +- Using hg38 human reference genome build\r +- Running 'locally' i.e. not using HPC/SLURM scheduling, or containers. This repo was specifically tested on Pawsey Nimbus 16 CPU, 64GB RAM virtual machine, primarily running in the `/data` volume storage partition. 
\r +- Starting from short-read Illumina paired-end fastq files as input\r +\r +### Dependencies\r +The following versions have been tested and work, but GATK and Cromwell are regularly updated and so one must consider whether they would like to use newer versions of these tools. \r +- BWA/0.7.15\r +- GATK v4.0.6.0\r +- SAMtools/1.5\r +- picard/2.9\r +- Python/2.7\r +- Cromwell v61\r +\r +## Quick start guide\r +### Installing and preparing environment for GATK4 with Cromwell\r +\r +1. Clone repository\r +```\r +git clone https://github.com/SarahBeecroft/cromwellGATK4.git\r +cd cromwellGATK4\r +chmod 777 *.sh\r +```\r +\r +2. Install [Miniconda](https://docs.conda.io/en/latest/miniconda.html) if you haven’t already. Create Conda environment using the supplied conda environment file\r +\r +```\r +conda env create --file gatk4_pipeline.yml\r +```\r +\r +3. Download the necessary .jar files\r + - The Cromwell workfow orchestration engine can be downloaded from https://github.com/broadinstitute/cromwell/releases/ \r + - GATK can be downloaded from https://github.com/broadinstitute/gatk/releases. Unzip the file with `unzip` \r + - Picard can be downloaded from https://github.com/broadinstitute/picard/releases/\r +\r +\r +4. Upload the resource bundle file from IRDS using rclone or filezilla and unpack it with `tar xzvf resource.tar.gz`. Note that the `hg38_wgs_scattered_calling_intervals.txt` will need to be to generated using the following:\r +\r +```\r +cd \r +find `pwd` -name "scattered.interval_list" -print | sort > hg38_wgs_scattered_calling_intervals.txt\r +```\r +\r +5. Set up the config files. Files that you need to edit with the correct paths to your data/jar files or other specific configurations are:\r + - `Multisample_Fastq_to_Gvcf_GATK4_inputs_hg38.json`\r + - `Multisample_jointgt_GATK4_inputs_hg38.json`\r + - both json files will need the correct paths to your reference file locations, and the file specifying your inputs i.e. 
`samples.txt` or `gvcfs.txt`\r + - `samples.txt`\r + - `gvcfs.txt`\r + - These are the sample input files (tab seperated)\r + - The format for samples.txt is sampleID, sampleID_readgroup, path_to_fastq_R1_file, path_to_fastq_R2_file,\r + - The format for gvcfs.txt is sample ID, gvcf, gvcf .tbi index file\r + - Examples are included in this repo\r + - NOTE: Having tabs, not spaces, is vital for parsing the file. Visual studio code tends to introduce spaces, so if you are having issues, check the file with another text editor such as sublime. \r + - `launch_cromwell.sh`\r + - `launch_jointgt.sh`\r + - These are the scripts which launch the pipeline. \r + - `launch_cromwell.sh` launches the fastq to gvcf stage\r + - `launch_jointgt.sh` launched the gvcf joint genotyping to cohort vcf step. This is perfomed when you have run all samples through the fastq to gvcf stage.\r + - Check the paths and parameters make sense for your machine\r + - `local.conf`\r + - the main tuneable parameters here are:\r + - `concurrent-job-limit = 5` this is the max number of concurrent jobs that can be spawned by cromwell. This depends on the computational resources available to you. 5 was determined to work reasonably well on a 16 CPU, 64GB RAM Nimbus VM (Pawsey). \r + - `call-caching enabled = true`. Setting this parameter to `false` will disable call caching (i.e. being able to resume if the job fails before completion). By default, call caching is enabled. \r + - `cromwell.options`\r + - `cromwell.options` requires editing to provide the directory where you would like the final workflow outputs to be written\r + - `Multisample_Fastq_to_Gvcf_GATK4.wdl`\r + - `ruddle_fastq_to_gvcf_single_sample_gatk4.wdl`\r + - The paths to your jar files will need to be updated\r + - The path to your conda `activate` binary will need to be updated (e.g. `/data/miniconda/bin/activate`)\r +\r +6. Launch the job within a `screen` or `tmux` session, using `./launch_cromwell.sh`. 
When that has completed successfully, you can launch the second stage of the pipeline (joint calling) with `./launch_jointgt.sh`. Ensure you pipe the stdout and stderr to a log file using (for example) `./launch_cromwell.sh &> cromwell.log`\r +\r +### Overview of the steps in `Multisample_Fastq_to_Gvcf_GATK4.wdl`\r +This part of the pipeline takes short-read, Illumina paired-end fastq files as the input. The outputs generated are sorted, duplicate marked bam files and their indices, duplicate metric information, and a GVCF file for each sample. The GVCF files are used as input for the second part of the pipeline (joint genotyping).\r +\r +```\r +FastqToUbam\r +GetBwaVersion\r +SamToFastqAndBwaMem\r +MergeBamAlignment\r +SortAndFixTags\r +MarkDuplicates\r +CreateSequenceGroupingTSV\r +BaseRecalibrator\r +GatherBqsrReports\r +ApplyBQSR\r +GatherBamFiles\r +HaplotypeCaller\r +MergeGVCFs\r +```\r +\r +### Overview of the steps in `Multisample_jointgt_GATK4.wdl`\r +This part of the pipeline takes GVCF files (one per sample), and performs joint genotyping across all of the provided samples. This means that old previously generated GVCFs can be joint-called with new GVCFs whenever you need to add new samples. The key output from this is a joint-genotyped, cohort-wide VCF file. This file can be used for a GEMINI database after normalisation with VT and annotation with a tool such as VEP or SNPEFF. \r +\r +The file `hg38.custom_100Mb.intervals` is required for this step of the pipeline to run. This is included in the git repo for convenience, but should be moved to your resource directory with all the other resource files. 
\r +\r +```\r +GetNumberOfSamples\r +ImportGVCFs\r +GenotypeGVCFs\r +HardFilterAndMakeSitesOnlyVcf\r +IndelsVariantRecalibrator\r +SNPsVariantRecalibratorCreateModel\r +SNPsVariantRecalibrator\r +GatherTranches\r +ApplyRecalibration\r +GatherVcfs\r +CollectVariantCallingMetrics\r +GatherMetrics\r +DynamicallyCombineIntervals\r +```\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/147?version=1" ; + schema1:isBasedOn "https://github.com/SarahBeecroft/cromwellGATK4" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for GATK4 Fastq to joint-called cohort VCF with Cromwell on local cluster (no job scheduler)" ; + schema1:sdDatePublished "2024-07-12 13:36:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/147/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6212 ; + schema1:dateCreated "2021-08-17T04:47:53Z" ; + schema1:dateModified "2023-01-16T13:51:45Z" ; + schema1:description """# Local Cromwell implementation of GATK4 germline variant calling pipeline\r +See the [GATK](https://gatk.broadinstitute.org/hc/en-us) website for more information on this toolset \r +## Assumptions\r +- Using hg38 human reference genome build\r +- Running 'locally' i.e. not using HPC/SLURM scheduling, or containers. This repo was specifically tested on Pawsey Nimbus 16 CPU, 64GB RAM virtual machine, primarily running in the `/data` volume storage partition. \r +- Starting from short-read Illumina paired-end fastq files as input\r +\r +### Dependencies\r +The following versions have been tested and work, but GATK and Cromwell are regularly updated and so one must consider whether they would like to use newer versions of these tools. 
\r +- BWA/0.7.15\r +- GATK v4.0.6.0\r +- SAMtools/1.5\r +- picard/2.9\r +- Python/2.7\r +- Cromwell v61\r +\r +## Quick start guide\r +### Installing and preparing environment for GATK4 with Cromwell\r +\r +1. Clone repository\r +```\r +git clone https://github.com/SarahBeecroft/cromwellGATK4.git\r +cd cromwellGATK4\r +chmod 777 *.sh\r +```\r +\r +2. Install [Miniconda](https://docs.conda.io/en/latest/miniconda.html) if you haven’t already. Create Conda environment using the supplied conda environment file\r +\r +```\r +conda env create --file gatk4_pipeline.yml\r +```\r +\r +3. Download the necessary .jar files\r + - The Cromwell workfow orchestration engine can be downloaded from https://github.com/broadinstitute/cromwell/releases/ \r + - GATK can be downloaded from https://github.com/broadinstitute/gatk/releases. Unzip the file with `unzip` \r + - Picard can be downloaded from https://github.com/broadinstitute/picard/releases/\r +\r +\r +4. Upload the resource bundle file from IRDS using rclone or filezilla and unpack it with `tar xzvf resource.tar.gz`. Note that the `hg38_wgs_scattered_calling_intervals.txt` will need to be to generated using the following:\r +\r +```\r +cd \r +find `pwd` -name "scattered.interval_list" -print | sort > hg38_wgs_scattered_calling_intervals.txt\r +```\r +\r +5. Set up the config files. Files that you need to edit with the correct paths to your data/jar files or other specific configurations are:\r + - `Multisample_Fastq_to_Gvcf_GATK4_inputs_hg38.json`\r + - `Multisample_jointgt_GATK4_inputs_hg38.json`\r + - both json files will need the correct paths to your reference file locations, and the file specifying your inputs i.e. 
`samples.txt` or `gvcfs.txt`\r + - `samples.txt`\r + - `gvcfs.txt`\r + - These are the sample input files (tab seperated)\r + - The format for samples.txt is sampleID, sampleID_readgroup, path_to_fastq_R1_file, path_to_fastq_R2_file,\r + - The format for gvcfs.txt is sample ID, gvcf, gvcf .tbi index file\r + - Examples are included in this repo\r + - NOTE: Having tabs, not spaces, is vital for parsing the file. Visual studio code tends to introduce spaces, so if you are having issues, check the file with another text editor such as sublime. \r + - `launch_cromwell.sh`\r + - `launch_jointgt.sh`\r + - These are the scripts which launch the pipeline. \r + - `launch_cromwell.sh` launches the fastq to gvcf stage\r + - `launch_jointgt.sh` launched the gvcf joint genotyping to cohort vcf step. This is perfomed when you have run all samples through the fastq to gvcf stage.\r + - Check the paths and parameters make sense for your machine\r + - `local.conf`\r + - the main tuneable parameters here are:\r + - `concurrent-job-limit = 5` this is the max number of concurrent jobs that can be spawned by cromwell. This depends on the computational resources available to you. 5 was determined to work reasonably well on a 16 CPU, 64GB RAM Nimbus VM (Pawsey). \r + - `call-caching enabled = true`. Setting this parameter to `false` will disable call caching (i.e. being able to resume if the job fails before completion). By default, call caching is enabled. \r + - `cromwell.options`\r + - `cromwell.options` requires editing to provide the directory where you would like the final workflow outputs to be written\r + - `Multisample_Fastq_to_Gvcf_GATK4.wdl`\r + - `ruddle_fastq_to_gvcf_single_sample_gatk4.wdl`\r + - The paths to your jar files will need to be updated\r + - The path to your conda `activate` binary will need to be updated (e.g. `/data/miniconda/bin/activate`)\r +\r +6. Launch the job within a `screen` or `tmux` session, using `./launch_cromwell.sh`. 
When that has completed successfully, you can launch the second stage of the pipeline (joint calling) with `./launch_jointgt.sh`. Ensure you pipe the stdout and stderr to a log file using (for example) `./launch_cromwell.sh &> cromwell.log`\r +\r +### Overview of the steps in `Multisample_Fastq_to_Gvcf_GATK4.wdl`\r +This part of the pipeline takes short-read, Illumina paired-end fastq files as the input. The outputs generated are sorted, duplicate marked bam files and their indices, duplicate metric information, and a GVCF file for each sample. The GVCF files are used as input for the second part of the pipeline (joint genotyping).\r +\r +```\r +FastqToUbam\r +GetBwaVersion\r +SamToFastqAndBwaMem\r +MergeBamAlignment\r +SortAndFixTags\r +MarkDuplicates\r +CreateSequenceGroupingTSV\r +BaseRecalibrator\r +GatherBqsrReports\r +ApplyBQSR\r +GatherBamFiles\r +HaplotypeCaller\r +MergeGVCFs\r +```\r +\r +### Overview of the steps in `Multisample_jointgt_GATK4.wdl`\r +This part of the pipeline takes GVCF files (one per sample), and performs joint genotyping across all of the provided samples. This means that old previously generated GVCFs can be joint-called with new GVCFs whenever you need to add new samples. The key output from this is a joint-genotyped, cohort-wide VCF file. This file can be used for a GEMINI database after normalisation with VT and annotation with a tool such as VEP or SNPEFF. \r +\r +The file `hg38.custom_100Mb.intervals` is required for this step of the pipeline to run. This is included in the git repo for convenience, but should be moved to your resource directory with all the other resource files. 
\r +\r +```\r +GetNumberOfSamples\r +ImportGVCFs\r +GenotypeGVCFs\r +HardFilterAndMakeSitesOnlyVcf\r +IndelsVariantRecalibrator\r +SNPsVariantRecalibratorCreateModel\r +SNPsVariantRecalibrator\r +GatherTranches\r +ApplyRecalibration\r +GatherVcfs\r +CollectVariantCallingMetrics\r +GatherMetrics\r +DynamicallyCombineIntervals\r +```\r +""" ; + schema1:isPartOf ; + schema1:keywords "Alignment, GATK4, Genomics, variant_calling, SNPs, INDELs, workflow" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "GATK4 Fastq to joint-called cohort VCF with Cromwell on local cluster (no job scheduler)" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/147?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 
2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.131.4" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/131/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 77840 ; + schema1:creator , + ; + schema1:dateCreated "2023-07-26T09:37:34Z" ; + schema1:dateModified "2023-07-26T09:38:01Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed 
under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/131?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_amber_md_setup/master/biobb_wf_amber_md_setup/notebooks/mdsetup_lig/biobb_amber_complex_setup_notebook.ipynb" ; + schema1:version 4 . + + a schema1:Dataset ; + schema1:datePublished "2021-12-06T17:16:32.864578" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/protein-ligand-complex-parameterization" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "protein-ligand-complex-parameterization/main" ; + schema1:sdDatePublished "2021-12-07 03:00:57 +0000" ; + schema1:softwareVersion "v0.1.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + schema1:datePublished "2024-04-09T08:44:23.480130" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + , + ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-atac-cutandrun" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.6" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-pe" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.6" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-07-12 13:22:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=9" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7151 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:12Z" ; + schema1:dateModified "2024-06-11T12:55:12Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """The ultimate-level complexity workflow is one among a collection of workflows designed to address tasks up to CTF estimation. In addition to the functionalities provided by layer 0 and 1 workflows, this workflow aims to enhance the quality of both **acquisition images** and **processing**.\r +\r +**Quality control protocols**\r +\r +…\r +\r +**Combination of methods**\r +* **CTF consensus**\r + * New methods to compare ctf estimations\r + * CTF xmipp criteria (richer parameters i.e. ice detection)\r +\r +**Advantages**: \r +* Control of the acquisition quality\r +* Robust estimations to continue with the processing""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/600?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CEITEC layer 2 workflow" ; + schema1:sdDatePublished "2024-07-12 13:27:18 +0100" ; + schema1:url "https://workflowhub.eu/workflows/600/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 79882 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11315 ; + schema1:dateCreated "2023-10-04T13:11:55Z" ; + schema1:dateModified "2024-07-10T14:00:07Z" ; + schema1:description """The ultimate-level complexity workflow is one among a collection of workflows designed to address tasks up to CTF estimation. 
In addition to the functionalities provided by layer 0 and 1 workflows, this workflow aims to enhance the quality of both **acquisition images** and **processing**.\r +\r +**Quality control protocols**\r +\r +…\r +\r +**Combination of methods**\r +* **CTF consensus**\r + * New methods to compare ctf estimations\r + * CTF xmipp criteria (richer parameters i.e. ice detection)\r +\r +**Advantages**: \r +* Control of the acquisition quality\r +* Robust estimations to continue with the processing""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "image processing, cryoem, spa, scipion" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "CEITEC layer 2 workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/600?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:description """Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, to analyze crowdsourcing results of the SPIPOLL hoverflies GAPARS European project activity on MMOS server.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/660?version=1" ; + schema1:isBasedOn "https://ecology.usegalaxy.eu/u/ylebras/w/workflow-constructed-from-history-spipoll-mmos-gapars-results-2020---30-05-2023" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for SPIPOLL MMOS GAPARS crowdsourcing results" ; + schema1:sdDatePublished "2024-07-12 13:26:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/660/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 20142 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-10T08:41:20Z" ; + schema1:dateModified "2023-11-10T08:41:20Z" ; + schema1:description """Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, to analyze crowdsourcing results of the SPIPOLL hoverflies GAPARS European project activity on MMOS server.\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "SPIPOLL MMOS GAPARS crowdsourcing results" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/660?version=1" ; + schema1:version 1 ; + ns1:input , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1021?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/scrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/scrnaseq" ; + schema1:sdDatePublished "2024-07-12 13:19:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1021/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8791 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:17Z" ; + schema1:dateModified "2024-06-11T12:55:17Z" ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:keywords "10x-genomics, 10xgenomics, alevin, bustools, Cellranger, kallisto, rna-seq, single-cell, star-solo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/scrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1021?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Bootstrapping-for-BQSR @ NCI-Gadi is a pipeline for bootstrapping a variant resource to enable GATK base quality score recalibration (BQSR) for non-model organisms that lack a publicly available variant resource. This implementation is optimised for the National Compute Infrastucture's Gadi HPC. Multiple rounds of bootstrapping can be performed. Users can use [Fastq-to-bam @ NCI-Gadi](https://workflowhub.eu/workflows/146) and [Germline-ShortV @ NCI-Gadi](https://workflowhub.eu/workflows/143) to produce required input files for Bootstrapping-for-BQSR @ NCI-Gadi. 
\r +\r +Infrastructure\\_deployment\\_metadata: Gadi (NCI)\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.153.1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/Bootstrapping-for-BQSR" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Bootstrapping-for-BQSR @ NCI-Gadi" ; + schema1:sdDatePublished "2024-07-12 13:36:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/153/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 31062 ; + schema1:creator , + , + ; + schema1:dateCreated "2021-08-18T23:26:00Z" ; + schema1:dateModified "2023-01-16T13:51:46Z" ; + schema1:description """Bootstrapping-for-BQSR @ NCI-Gadi is a pipeline for bootstrapping a variant resource to enable GATK base quality score recalibration (BQSR) for non-model organisms that lack a publicly available variant resource. This implementation is optimised for the National Compute Infrastucture's Gadi HPC. Multiple rounds of bootstrapping can be performed. Users can use [Fastq-to-bam @ NCI-Gadi](https://workflowhub.eu/workflows/146) and [Germline-ShortV @ NCI-Gadi](https://workflowhub.eu/workflows/143) to produce required input files for Bootstrapping-for-BQSR @ NCI-Gadi. \r +\r +Infrastructure\\_deployment\\_metadata: Gadi (NCI)\r +\r +""" ; + schema1:isPartOf ; + schema1:keywords "GATK4, Bootstrapping, BQSR, SNPs, INDELs, illumina, WGS, Genomics, Alignment, variant calling, model, non-model, scalable, DNA, NCI, NCI-Gadi, PBS" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Bootstrapping-for-BQSR @ NCI-Gadi" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/153?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=22" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-07-12 13:21:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=22" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13435 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:51Z" ; + schema1:dateModified "2024-06-11T12:54:51Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=22" ; + schema1:version 22 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Differential abundance analysis" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/981?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/differentialabundance" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/differentialabundance" ; + schema1:sdDatePublished "2024-07-12 13:21:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/981/ro_crate?version=9" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16959 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:49Z" ; + schema1:dateModified "2024-06-11T12:54:49Z" ; + schema1:description "Differential abundance analysis" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/981?version=8" ; + schema1:keywords "ATAC-seq, ChIP-seq, deseq2, differential-abundance, differential-expression, gsea, limma, microarray, rna-seq, shiny" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/differentialabundance" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/981?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """This notebook is about pre-processing the Auditory Brainstem Response (ABR) raw data files provided by [Ingham et. al](https://journals.plos.org/plosbiology/article?id=10.1371/journal.pbio.3000194) to create a data set for Deep Learning models.\r +\r +The unprocessed ABR data files are available at [Dryad](https://datadryad.org/stash/dataset/doi:10.5061/dryad.cv803rv).\r +\r +Since the ABR raw data are available as zip-archives, these have to be unzipped and the extracted raw data files parsed so that the time series corresponding to the ABR audiograms can be saved in a single csv file.\r +\r +The final data set contains the ABR time series, an individual mouse identifier, stimulus frequency, stimulus sound pressure level (SPL) and a manually determined hearing threshold. For each mouse there are different time series corresponding to six different sound stimuli: broadband click, 6, 12, 18, 24, and 30 kHz, each of which was measured for a range of sound pressure levels. The exact range of sound levels can vary between the different mice and stimuli. 
\r +\r +The following is done: \r +\r +* The zip archives are unpacked.\r +* The extracted ABR raw data files are parsed and collected in one csv file per archive.\r +* The csv files are merged into a data set of time series. Each time series corresponds to an ABR audiogram measured for a mouse at a specific frequency and sound level.\r +* The mouse phenotyping data are available in Excel format. The individual data sheets are combined into one mouse phenotyping data set, maintaining the mouse pipeline and the cohort type mapping. In addition, the hearing thresholds are added to the ABR audiogram data set.\r +* The data sets are curated: \r +\r + * there is a single curve per mouse, stimulus frequency and sound level,\r + * each sound level is included in the list of potential sound pressure levels,\r + * for each mouse for which an ABR audiogram has been measured, mouse phenotyping data are also provided.""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/216?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Preparing a data set for Deep Learning from zipped ABR raw data files" ; + schema1:sdDatePublished "2024-07-12 13:36:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/216/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4736972 ; + schema1:creator ; + schema1:dateCreated "2021-10-19T10:47:54Z" ; + schema1:dateModified "2023-01-16T13:53:24Z" ; + schema1:description """This notebook is about pre-processing the Auditory Brainstem Response (ABR) raw data files provided by [Ingham et. 
al](https://journals.plos.org/plosbiology/article?id=10.1371/journal.pbio.3000194) to create a data set for Deep Learning models.\r +\r +The unprocessed ABR data files are available at [Dryad](https://datadryad.org/stash/dataset/doi:10.5061/dryad.cv803rv).\r +\r +Since the ABR raw data are available as zip-archives, these have to be unzipped and the extracted raw data files parsed so that the time series corresponding to the ABR audiograms can be saved in a single csv file.\r +\r +The final data set contains the ABR time series, an individual mouse identifier, stimulus frequency, stimulus sound pressure level (SPL) and a manually determined hearing threshold. For each mouse there are different time series corresponding to six different sound stimuli: broadband click, 6, 12, 18, 24, and 30 kHz, each of which was measured for a range of sound pressure levels. The exact range of sound levels can vary between the different mice and stimuli. \r +\r +The following is done: \r +\r +* The zip archives are unpacked.\r +* The extracted ABR raw data files are parsed and collected in one csv file per archive.\r +* The csv files are merged into a data set of time series. Each time series corresponds to an ABR audiogram measured for a mouse at a specific frequency and sound level.\r +* The mouse phenotyping data are available in Excel format. The individual data sheets are combined into one mouse phenotyping data set, maintaining the mouse pipeline and the cohort type mapping. 
In addition, the hearing thresholds are added to the ABR audiogram data set.\r +* The data sets are curated: \r +\r + * there is a single curve per mouse, stimulus frequency and sound level,\r + * each sound level is included in the list of potential sound pressure levels,\r + * for each mouse for which an ABR audiogram has been measured, mouse phenotyping data are also provided.""" ; + schema1:keywords "ABR, DL" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Preparing a data set for Deep Learning from zipped ABR raw data files" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/216?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/991?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/hlatyping" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hlatyping" ; + schema1:sdDatePublished "2024-07-12 13:18:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/991/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3311 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:55Z" ; + schema1:dateModified "2024-06-11T12:54:55Z" ; + schema1:description "Precision HLA typing from next-generation sequencing data." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/991?version=9" ; + schema1:keywords "DNA, hla, hla-typing, immunology, optitype, personalized-medicine, rna" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hlatyping" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/991?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-variation-reporting" ; + schema1:mainEntity ; + schema1:name "COVID-19-VARIATION-REPORTING (v0.1)" ; + schema1:sdDatePublished "2021-03-12 13:41:20 +0000" ; + schema1:softwareVersion "v0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 77834 ; + schema1:name "COVID-19-VARIATION-REPORTING" ; + schema1:programmingLanguage . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 386785 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "application/pdf" ; + schema1:name "complete_graph.pdf" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# MMV Im2Im Transformation\r +\r +[![Build Status](https://github.com/MMV-Lab/mmv_im2im/workflows/Build%20Main/badge.svg)](https://github.com/MMV-Lab/mmv_im2im/actions)\r +\r +A generic python package for deep learning based image-to-image transformation in biomedical applications\r +\r +The main branch will be further developed in order to be able to use the latest state of the art techniques and methods in the future. To reproduce the results of our manuscript, we refer to the branch [paper_version](https://github.com/MMV-Lab/mmv_im2im/tree/paper_version).\r +(We are actively working on the documentation and tutorials. 
Submit a feature request if there is anything you need.)\r +\r +---\r +\r +## Overview\r +\r +The overall package is designed with a generic image-to-image transformation framework, which could be directly used for semantic segmentation, instance segmentation, image restoration, image generation, labelfree prediction, staining transformation, etc.. The implementation takes advantage of the state-of-the-art ML engineering techniques for users to focus on researches without worrying about the engineering details. In our pre-print [arxiv link](https://arxiv.org/abs/2209.02498), we demonstrated the effectiveness of *MMV_Im2Im* in more than ten different biomedical problems/datasets. \r +\r +* For computational biomedical researchers (e.g., AI algorithm development or bioimage analysis workflow development), we hope this package could serve as the starting point for their specific problems, since the image-to-image "boilerplates" can be easily extended further development or adapted for users' specific problems.\r +* For experimental biomedical researchers, we hope this work provides a comprehensive view of the image-to-image transformation concept through diversified examples and use cases, so that deep learning based image-to-image transformation could be integrated into the assay development process and permit new biomedical studies that can hardly be done only with traditional experimental methods\r +\r +\r +## Installation\r +\r +Before starting, we recommend to [create a new conda environment](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#creating-an-environment-with-commands) or [a virtual environment](https://docs.python.org/3/library/venv.html) with Python 3.9+.\r +\r +Please note that the proper setup of hardware is beyond the scope of this pacakge. This package was tested with GPU/CPU on Linux/Windows and CPU on MacOS. 
[Special note for MacOS users: Directly pip install in MacOS may need [additional setup of xcode](https://developer.apple.com/forums/thread/673827).]\r +\r +### Install MONAI\r +\r +To reproduce our results, we need to install MONAI's code version of a specific commit. To do this:\r +```\r +git clone https://github.com/Project-MONAI/MONAI.git\r +cd ./MONAI\r +git checkout 37b58fcec48f3ec1f84d7cabe9c7ad08a93882c0\r +pip install .\r +```\r +\r +We will remove this step for the main branch in the future to ensure a simplified installation of our tool.\r +\r +### Install MMV_Im2Im for basic usage:\r +\r +(For users only using this package, not planning to change any code or make any extension):\r +\r +**Option 1: core functionality only** `pip install mmv_im2im`
\r +**Option 2: advanced functionality (core + logger)** `pip install mmv_im2im[advance]`
\r +**Option 3: to reproduce paper:** `pip install mmv_im2im[paper]`
\r +**Option 4: install everything:** `pip install mmv_im2im[all]`
\r +\r +For MacOS users, additional ' ' marks are need when using installation tags in zsh. For example, `pip install mmv_im2im[paper]` should be `pip install mmv_im2im'[paper]'` in MacOS.\r +\r +### Install MMV_Im2Im for customization or extension:\r +\r +\r +```\r +git clone https://github.com/MMV-Lab/mmv_im2im.git\r +cd mmv_im2im\r +pip install -e .[all]\r +```\r +\r +Note: The `-e` option is the so-called "editable" mode. This will allow code changes taking effect immediately. The installation tags, `advance`, `paper`, `all`, are be selected based on your needs.\r +\r +### (Optional) Install using Docker\r +\r +It is also possible to use our package through [docker](https://www.docker.com/). The installation tutorial is [here](docker/tutorial.md).\r +\r +### (Optional) Use MMV_Im2Im with Google Colab\r +\r +We provide a web-based demo, if cloud computing is preferred. you can [![Open a 2D labelfree DEMO in Google Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/MMV-Lab/mmv_im2im/blob/main/tutorials/colab/labelfree_2d.ipynb). The same demo can de adapted for different applications.\r +\r +## Quick start\r +\r +You can try out on a simple example following [the quick start guide](tutorials/quick_start.md)\r +\r +Basically, you can specify your training configuration in a yaml file and run training with `run_im2im --config /path/to/train_config.yaml`. Then, you can specify the inference configuration in another yaml file and run inference with `run_im2im --config /path/to/inference_config.yaml`. You can also run the inference as a function with the provided API. This will be useful if you want to run the inference within another python script or workflow. 
Here is an example:\r +\r +```\r +from pathlib import Path\r +from aicsimageio import AICSImage\r +from aicsimageio.writers import OmeTiffWriter\r +from mmv_im2im.configs.config_base import ProgramConfig, parse_adaptor, configuration_validation\r +from mmv_im2im import ProjectTester\r +\r +# load the inference configuration\r +cfg = parse_adaptor(config_class=ProgramConfig, config="./paper_configs/semantic_seg_2d_inference.yaml")\r +cfg = configuration_validation(cfg)\r +\r +# define the executor for inference\r +executor = ProjectTester(cfg)\r +executor.setup_model()\r +executor.setup_data_processing()\r +\r +# get the data, run inference, and save the result\r +fn = Path("./data/img_00_IM.tiff")\r +img = AICSImage(fn).get_image_data("YX", Z=0, C=0, T=0)\r +# or using delayed loading if the data is large\r +# img = AICSImage(fn).get_image_dask_data("YX", Z=0, C=0, T=0)\r +seg = executor.process_one_image(img)\r +OmeTiffWriter.save(seg, "output.tiff", dim_orders="YX")\r +```\r +\r +\r +## Tutorials, examples, demonstrations and documentations\r +\r +The overall package aims to achieve both simplicty and flexibilty with the modularized image-to-image boilerplates. 
To help different users to best use this package, we provide documentations from four different aspects:\r +\r +* [Examples (i.e., scripts and config files)](tutorials/example_by_use_case.md) for reproducing all the experiments in our [pre-print](https://arxiv.org/abs/2209.02498)\r +* A bottom-up tutorials on [how to understand the modularized image-to-image boilerplates](tutorials/how_to_understand_boilerplates.md) (for extending or adapting the package) and [how to understand the configuration system in details](tutorials/how_to_understand_config.md) (for advance usage to make specific customization).\r +* A top-down tutorials as [FAQ](tutorials/FAQ.md), which will continuously grow as we receive more questions.\r +* All the models used in the manuscript and sample data can be found here: [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.10034416.svg)](https://doi.org/10.5281/zenodo.10034416)\r +\r +\r +### Contribute models to [BioImage Model Zoo](https://bioimage.io/#/)\r +\r +We highly appreciate the BioImage Model Zoo's initiative to provide a comprehensive collection of pre-trained models for a wide range of applications. 
To make MMV_Im2Im trained models available as well, the first step involves extracting the state_dict from the PyTorch Lightning checkpoint.\r +This can be done via:\r +\r +```python\r +import torch\r +\r +ckpt_path = "./lightning_logs/version_0/checkpoints/last.ckpt"\r +checkpoint = torch.load(ckpt_path, map_location=torch.device('cpu'))\r +state_dict = checkpoint['state_dict']\r +torch.save(state_dict, "./state_dict.pt")\r +```\r +\r +All further steps to provide models can be found in the [official documentation](https://bioimage.io/docs/#/contribute_models/README).\r +\r +## Development\r +\r +See [CONTRIBUTING.md](CONTRIBUTING.md) for information related to developing the code.\r +\r +\r +**MIT license**\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.626.1" ; + schema1:isBasedOn "https://github.com/MMV-Lab/mmv_im2im.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for MMV_Im2Im" ; + schema1:sdDatePublished "2024-07-12 13:26:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/626/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 129932 ; + schema1:creator ; + schema1:dateCreated "2023-10-27T08:14:19Z" ; + schema1:dateModified "2023-10-27T12:05:57Z" ; + schema1:description """# MMV Im2Im Transformation\r +\r +[![Build Status](https://github.com/MMV-Lab/mmv_im2im/workflows/Build%20Main/badge.svg)](https://github.com/MMV-Lab/mmv_im2im/actions)\r +\r +A generic python package for deep learning based image-to-image transformation in biomedical applications\r +\r +The main branch will be further developed in order to be able to use the latest state of the art techniques and methods in the future. 
To reproduce the results of our manuscript, we refer to the branch [paper_version](https://github.com/MMV-Lab/mmv_im2im/tree/paper_version).\r +(We are actively working on the documentation and tutorials. Submit a feature request if there is anything you need.)\r +\r +---\r +\r +## Overview\r +\r +The overall package is designed with a generic image-to-image transformation framework, which could be directly used for semantic segmentation, instance segmentation, image restoration, image generation, labelfree prediction, staining transformation, etc.. The implementation takes advantage of the state-of-the-art ML engineering techniques for users to focus on researches without worrying about the engineering details. In our pre-print [arxiv link](https://arxiv.org/abs/2209.02498), we demonstrated the effectiveness of *MMV_Im2Im* in more than ten different biomedical problems/datasets. \r +\r +* For computational biomedical researchers (e.g., AI algorithm development or bioimage analysis workflow development), we hope this package could serve as the starting point for their specific problems, since the image-to-image "boilerplates" can be easily extended further development or adapted for users' specific problems.\r +* For experimental biomedical researchers, we hope this work provides a comprehensive view of the image-to-image transformation concept through diversified examples and use cases, so that deep learning based image-to-image transformation could be integrated into the assay development process and permit new biomedical studies that can hardly be done only with traditional experimental methods\r +\r +\r +## Installation\r +\r +Before starting, we recommend to [create a new conda environment](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#creating-an-environment-with-commands) or [a virtual environment](https://docs.python.org/3/library/venv.html) with Python 3.9+.\r +\r +Please note that the proper setup of hardware is 
beyond the scope of this pacakge. This package was tested with GPU/CPU on Linux/Windows and CPU on MacOS. [Special note for MacOS users: Directly pip install in MacOS may need [additional setup of xcode](https://developer.apple.com/forums/thread/673827).]\r +\r +### Install MONAI\r +\r +To reproduce our results, we need to install MONAI's code version of a specific commit. To do this:\r +```\r +git clone https://github.com/Project-MONAI/MONAI.git\r +cd ./MONAI\r +git checkout 37b58fcec48f3ec1f84d7cabe9c7ad08a93882c0\r +pip install .\r +```\r +\r +We will remove this step for the main branch in the future to ensure a simplified installation of our tool.\r +\r +### Install MMV_Im2Im for basic usage:\r +\r +(For users only using this package, not planning to change any code or make any extension):\r +\r +**Option 1: core functionality only** `pip install mmv_im2im`
\r +**Option 2: advanced functionality (core + logger)** `pip install mmv_im2im[advance]`
\r +**Option 3: to reproduce paper:** `pip install mmv_im2im[paper]`
\r +**Option 4: install everything:** `pip install mmv_im2im[all]`
\r +\r +For MacOS users, additional ' ' marks are need when using installation tags in zsh. For example, `pip install mmv_im2im[paper]` should be `pip install mmv_im2im'[paper]'` in MacOS.\r +\r +### Install MMV_Im2Im for customization or extension:\r +\r +\r +```\r +git clone https://github.com/MMV-Lab/mmv_im2im.git\r +cd mmv_im2im\r +pip install -e .[all]\r +```\r +\r +Note: The `-e` option is the so-called "editable" mode. This will allow code changes taking effect immediately. The installation tags, `advance`, `paper`, `all`, are be selected based on your needs.\r +\r +### (Optional) Install using Docker\r +\r +It is also possible to use our package through [docker](https://www.docker.com/). The installation tutorial is [here](docker/tutorial.md).\r +\r +### (Optional) Use MMV_Im2Im with Google Colab\r +\r +We provide a web-based demo, if cloud computing is preferred. you can [![Open a 2D labelfree DEMO in Google Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/MMV-Lab/mmv_im2im/blob/main/tutorials/colab/labelfree_2d.ipynb). The same demo can de adapted for different applications.\r +\r +## Quick start\r +\r +You can try out on a simple example following [the quick start guide](tutorials/quick_start.md)\r +\r +Basically, you can specify your training configuration in a yaml file and run training with `run_im2im --config /path/to/train_config.yaml`. Then, you can specify the inference configuration in another yaml file and run inference with `run_im2im --config /path/to/inference_config.yaml`. You can also run the inference as a function with the provided API. This will be useful if you want to run the inference within another python script or workflow. 
Here is an example:\r +\r +```\r +from pathlib import Path\r +from aicsimageio import AICSImage\r +from aicsimageio.writers import OmeTiffWriter\r +from mmv_im2im.configs.config_base import ProgramConfig, parse_adaptor, configuration_validation\r +from mmv_im2im import ProjectTester\r +\r +# load the inference configuration\r +cfg = parse_adaptor(config_class=ProgramConfig, config="./paper_configs/semantic_seg_2d_inference.yaml")\r +cfg = configuration_validation(cfg)\r +\r +# define the executor for inference\r +executor = ProjectTester(cfg)\r +executor.setup_model()\r +executor.setup_data_processing()\r +\r +# get the data, run inference, and save the result\r +fn = Path("./data/img_00_IM.tiff")\r +img = AICSImage(fn).get_image_data("YX", Z=0, C=0, T=0)\r +# or using delayed loading if the data is large\r +# img = AICSImage(fn).get_image_dask_data("YX", Z=0, C=0, T=0)\r +seg = executor.process_one_image(img)\r +OmeTiffWriter.save(seg, "output.tiff", dim_orders="YX")\r +```\r +\r +\r +## Tutorials, examples, demonstrations and documentations\r +\r +The overall package aims to achieve both simplicty and flexibilty with the modularized image-to-image boilerplates. 
To help different users to best use this package, we provide documentations from four different aspects:\r +\r +* [Examples (i.e., scripts and config files)](tutorials/example_by_use_case.md) for reproducing all the experiments in our [pre-print](https://arxiv.org/abs/2209.02498)\r +* A bottom-up tutorials on [how to understand the modularized image-to-image boilerplates](tutorials/how_to_understand_boilerplates.md) (for extending or adapting the package) and [how to understand the configuration system in details](tutorials/how_to_understand_config.md) (for advance usage to make specific customization).\r +* A top-down tutorials as [FAQ](tutorials/FAQ.md), which will continuously grow as we receive more questions.\r +* All the models used in the manuscript and sample data can be found here: [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.10034416.svg)](https://doi.org/10.5281/zenodo.10034416)\r +\r +\r +### Contribute models to [BioImage Model Zoo](https://bioimage.io/#/)\r +\r +We highly appreciate the BioImage Model Zoo's initiative to provide a comprehensive collection of pre-trained models for a wide range of applications. 
To make MMV_Im2Im trained models available as well, the first step involves extracting the state_dict from the PyTorch Lightning checkpoint.\r +This can be done via:\r +\r +```python\r +import torch\r +\r +ckpt_path = "./lightning_logs/version_0/checkpoints/last.ckpt"\r +checkpoint = torch.load(ckpt_path, map_location=torch.device('cpu'))\r +state_dict = checkpoint['state_dict']\r +torch.save(state_dict, "./state_dict.pt")\r +```\r +\r +All further steps to provide models can be found in the [official documentation](https://bioimage.io/docs/#/contribute_models/README).\r +\r +## Development\r +\r +See [CONTRIBUTING.md](CONTRIBUTING.md) for information related to developing the code.\r +\r +\r +**MIT license**\r +""" ; + schema1:keywords "Machine Learning, Python, image processing, Electron microscopy, imaging, jupyter" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "MMV_Im2Im" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/626?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/976?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/cutandrun" ; + schema1:sdDatePublished "2024-07-12 13:21:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/976/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11778 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:46Z" ; + schema1:dateModified "2024-06-11T12:54:46Z" ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/976?version=7" ; + schema1:keywords "cutandrun, cutandrun-seq, cutandtag, cutandtag-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/cutandrun" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/976?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Molecular Structure Checking using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **checking** a **molecular structure** before using it as an input for a **Molecular Dynamics** simulation. The workflow uses the **BioExcel Building Blocks library (biobb)**. The particular structure used is the crystal structure of **human Adenylate Kinase 1A (AK1A)**, in complex with the **AP5A inhibitor** (PDB code [1Z83](https://www.rcsb.org/structure/1z83)). \r +\r +**Structure checking** is a key step before setting up a protein system for **simulations**. 
A number of **common issues** found in structures at **Protein Data Bank** may compromise the success of the **simulation**, or may suggest that longer **equilibration** procedures are necessary.\r +\r +The **workflow** shows how to:\r +\r +- Run **basic manipulations on structures** (selection of models, chains, alternative locations\r +- Detect and fix **amide assignments** and **wrong chiralities**\r +- Detect and fix **protein backbone** issues (missing fragments, and atoms, capping)\r +- Detect and fix **missing side-chain atoms**\r +- **Add hydrogen atoms** according to several criteria\r +- Detect and classify **atomic clashes**\r +- Detect possible **disulfide bonds (SS)**\r +\r +An implementation of this workflow in a **web-based Graphical User Interface (GUI)** can be found in the [https://mmb.irbbarcelona.org/biobb-wfs/](https://mmb.irbbarcelona.org/biobb-wfs/) server (see [https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check](https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check)).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.829.1" ; + schema1:isBasedOn 
"https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_structure_checking/docker" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Docker Molecular Structure Checking" ; + schema1:sdDatePublished "2024-07-12 13:23:08 +0100" ; + schema1:url "https://workflowhub.eu/workflows/829/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 770 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-22T11:15:34Z" ; + schema1:dateModified "2024-05-22T13:51:08Z" ; + schema1:description """# Molecular Structure Checking using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **checking** a **molecular structure** before using it as an input for a **Molecular Dynamics** simulation. The workflow uses the **BioExcel Building Blocks library (biobb)**. The particular structure used is the crystal structure of **human Adenylate Kinase 1A (AK1A)**, in complex with the **AP5A inhibitor** (PDB code [1Z83](https://www.rcsb.org/structure/1z83)). \r +\r +**Structure checking** is a key step before setting up a protein system for **simulations**. 
A number of **common issues** found in structures at **Protein Data Bank** may compromise the success of the **simulation**, or may suggest that longer **equilibration** procedures are necessary.\r +\r +The **workflow** shows how to:\r +\r +- Run **basic manipulations on structures** (selection of models, chains, alternative locations\r +- Detect and fix **amide assignments** and **wrong chiralities**\r +- Detect and fix **protein backbone** issues (missing fragments, and atoms, capping)\r +- Detect and fix **missing side-chain atoms**\r +- **Add hydrogen atoms** according to several criteria\r +- Detect and classify **atomic clashes**\r +- Detect possible **disulfide bonds (SS)**\r +\r +An implementation of this workflow in a **web-based Graphical User Interface (GUI)** can be found in the [https://mmb.irbbarcelona.org/biobb-wfs/](https://mmb.irbbarcelona.org/biobb-wfs/) server (see [https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check](https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check)).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Docker Molecular Structure Checking" ; + 
schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_structure_checking/docker/Dockerfile" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """## ARA (Automated Record Analysis) : An automatic pipeline for exploration of SRA datasets with sequences as a query\r +\r +### Requirements\r +\r +- **Docker**\r +\r + - Please checkout the [Docker installation](https://docs.docker.com/get-docker/) guide.\r +\r + _or_\r +\r +- **Mamba package manager**\r +\r + - Please checkout the [mamba or micromamba](https://mamba.readthedocs.io/en/latest/installation.html) official installation guide.\r +\r + - We prefer `mamba` over [`conda`](https://docs.conda.io/en/latest/) since it is faster and uses `libsolv` to effectively resolve the dependencies.\r +\r + - `conda` can still be used to install the pipeline using the same commands as described in the installation section.\r +\r + > Note: **It is important to include the 'bioconda' channel in addition to the other channels as indicated in the [official manual](https://bioconda.github.io/#usage "Bioconda - Usage")**. Use the following commands in the given order to configure the channels (one-time setup).\r + >\r + > ```bash\r + > conda config --add channels defaults\r + > conda config --add channels bioconda\r + > conda config --add channels conda-forge\r + > conda config --set channel_priority strict\r + > ```\r +\r +---\r +\r +### Installation\r +\r +The user can install the pipeline by using either Docker or Mamba using the steps mentioned below.\r +\r +First, click the green "Code" button, then select "Download Zip" to begin downloading the contents of this repository. Once the download is complete, extract the zip file by into the desired location before starting the setup. 
Please use the commands shown below to begin installing the pipeline.\r +\r +Alternatively, the github repo can also be cloned through the options shown after clicking the "Code" button. Navigate inside the folder after by using the `cd ARA/` command before starting the setup.\r +\r +> _Warning: Before starting any analysis with the pipeline, please make sure that the system has enough disk space available for the data you wish to retrieve and process from the SRA repository._\r +\r +- **Using Docker**\r +\r + ```bash\r + cd ARA-main/\r + docker build -t ara_img .\r + ```\r +\r +_or_\r +\r +- **Using Mamba**\r +\r + ```bash\r + cd ARA-main/\r + mamba env create --file requirements.yaml\r + mamba activate ara_env\r + perl setup.pl\r + ```\r +\r + > _Note: After installation, the virtual environment consumes approximately 1.5 GB of disk space. The installation was tested on "Ubuntu 20.04.4 LTS", "Ubuntu 22.04.1 LTS" and "Fedora 37" using the procedure mentioned above._\r +\r +Please be patient because downloading and configuring the tools/modules may take several minutes. The warning messages that appear during the installation of certain Perl modules can be ignored by users.\r +\r +Optional: The user can also add the current directory to PATH for ease of use. Use the `chmod +x ara.pl` followed by `export PATH="$(pwd):$PATH"` command. 
Alternatively, the user is free to create symbolic, copy the executable to `/bin/`, or use any other method depending on their operating system.\r +\r +Refer the 'Troubleshooting' section in case of any installation related issues.\r +\r +---\r +\r +### Example usage\r +\r +- **Docker**\r +\r + `docker run -it ara_img /home/ARA-main/ara.pl --input /home/ARA-main/example/SraRunInfo.csv --sequences /home/ARA-main/example/Arabidopsis_thaliana.TAIR10.ncrna.fa`\r +\r +- **Mamba environment**\r +\r + `perl ara.pl --input example/SraRunInfo.csv --sequences example/Arabidopsis_thaliana.TAIR10.ncrna.fa`\r +\r +To get full usage info: `perl ara.pl --help`\r +\r +> _Note_: The user can delete the contents of `results/` directory after testing the tool using the example mentioned above.\r +\r +### Configuration file\r +\r +The configuration file `conf.txt` is automatically generated during the installation by setup script. It contains certain default parameters as well as the location to the executable binaries of the tools incorporated in the pipeline.\r +\r +The user can modify the default parameters in `conf.txt` and pass it to the pipeline as an input. For example, the `data_perc` option in the configuration refers to the default value of 5% of the dataset selected for analysis. However, the user has the flexibility to provide any integer value between 1 and 100 to specify the desired percentage of the dataset to be used.\r +\r +Similarly, the user can choose between _blastn_ or _bowtie2_ by changing the 'execute flag' to either 0 or 1 in the configuration file while leaving the rest of the parameters to default values. By default, both the tools are enabled _ie_. 
`execute = 1`.\r +\r +The `read_drop_perc_cutoff` in `conf.txt` config file denotes the cutoff to discard a sample if the total reads left after executing the trimmomatic are higher than the threshold (by default, if the more than 70% of reads are dropped as per the trimmomatic log, then the sample will fail the quality criteria and will not be processed downstream). Please refer the documentation of [Trimmomatic ](https://github.com/usadellab/Trimmomatic) for more details about the parameters present in the config file.\r +\r +Similarly, the criteria to check the minimal alignment rate are indicated by the `alignment perc cutoff` parameter under blastn and bowtie2 in the `conf.txt` configuration file (if the total alignment percentage is less than the threshold then the pipeline will report that the sample failed the quality criteria). More details about the parameters used in the `conf.txt` file can be found in the respective documentations of [Blastn](https://www.ncbi.nlm.nih.gov/books/NBK279690/) and [Bowtie2](https://bowtie-bio.sourceforge.net/bowtie2/manual.shtml).\r +\r +By default, the pipeline uses a pre-built Kraken2 viral genomic database ([release: 9/8/2022](https://genome-idx.s3.amazonaws.com/kraken/k2_viral_20220908.tar.gz)) from . Users can provide their own database by changing the `kraken2_db_path` parameter in the `conf.txt` file.\r +\r +> _Note:_ If the user wishes to use a different installation than Bioconda, the user can manually install the required tools and specify the absolute path of the executable binaries in the configuration.\r +\r +---\r +\r +### Pipeline parameters\r +\r +- **`--input`** (mandatory) The user can provide input in either of the following ways:\r +\r + - A single SRA run accession. eg: **`perl ara.pl --input SRR12548227 --sequences example/Arabidopsis_thaliana.TAIR10.ncrna.fa`**\r +\r + - A list of run accessions in a text file (1 run accession per line). 
eg: **`perl ara.pl --input example/list.txt --sequences example/Arabidopsis_thaliana.TAIR10.ncrna.fa`**\r +\r + - The SRA runInfo exported directly from the NCBI-SRA web portal. Goto the [SRA homepage](https://www.ncbi.nlm.nih.gov/sra "Home - NCBI - SRA") and search for the desired keyword. Export the `SraRunInfo.csv` by clicking 'Send to' =\\> File =\\> RunInfo). eg: **`perl ara.pl --input example/SraRunInfo.csv --sequences example/Arabidopsis_thaliana.TAIR10.ncrna.fa`**\r +\r +- **`--sequences`** (mandatory) The user should provide a fasta file containing the query sequences.\r +\r +- **`--output`** (optional) The output directory to store the results. By default, the output will be stored into the **`results/`** directory of the package. eg: **`perl ara.pl --input example/SraRunInfo.csv --sequences example/Arabidopsis_thaliana.TAIR10.ncrna.fa --output /src/main/test/`**\r +\r +- **`--mode`** (optional) Choose one of the three modes to run the pipeline.\r +\r + - The **`screen`** is the default mode which will only download a fraction of the data-set per SRA-run accession and analyse the file as per the given configuration.\r +\r + - The **`full`** mode will execute the pipeline by downloading the complete fastq file per SRA-run accession.\r +\r + - The **`both`** option searches for samples using a fraction of the data that meet the minimum alignment cutoff from either 'bowtie2' or 'blastn', and then automatically performs alignment by downloading the entire fastq file. eg: **`perl ara.pl --input example/SraRunInfo.csv --sequences example/Arabidopsis_thaliana.TAIR10.ncrna.fa --output /src/main/test/ --mode screen`**\r +\r + > _Note:_ There is a supporting **`summary`** mode, that will generate a unified alignment summary by examining the output files created by either screen-mode or full-mode. The summary mode should only be used when the user needs to recreate the summary stats from the pre-existing results. 
The user must enter **`–mode summary`** along with the previously used command parameters to re-generate the summary.\r +\r + - **`--config`** (optional) Pipeline configuration. By default it will use the **`conf.txt`** generated by the setup script. eg: **`perl ara.pl --input example/SraRunInfo.csv --sequences example/Arabidopsis_thaliana.TAIR10.ncrna.fa --output /src/main/test/ --mode screen --config conf.txt`**\r +\r +---\r +\r +### Output structure\r +\r +The pipeline will create folders per SRA run accession and generate results using the run accession as the prefix. The analysis related to the screening a fraction of data will be stored in `screening_results` directory whereas the analysis conducted on the whole dataset will be stored in `full_analyis_results` directory.\r +\r +An outline of directory structure containing the results is shown below-\r +\r + results/\r + `-- test/ (name derived from the input fasta sequence file)\r + |-- test.screening.analysis.stats.sorted.by.alignment.txt (combined metadata and analysis report generated after processing all the SRA run accessions, sorted in decreasing order of total alignment percentage)\r + |-- metadata/\r + | |-- test.metadata.txt (Combined metadata downloaded from SRA)\r + | |-- test.metadata.screened.txt (List of SRA accessions which qualify the filter criteria specified in the config.)\r + | |-- SRA_RUN.run.metadata.txt (unprocessed metadata on a single SRA accession as retrieved from NCBI)\r + |-- reference/\r + | |-- blastn_db/ (folder containing the blast database created from the input fasta sequence)\r + | |-- bowtie2_index/ (folder containing the bowtie index created from the input fasta sequence)\r + | |-- bowtie2_index.stdout.txt (stdout captured from bowtie2 index creation)\r + | `-- makeblastdb.stdout.txt (stdout captured from blastn database creation)\r + `-- screening_results/ (similar structure for screeing or full mode)\r + |-- SRA_RUN/ (each SRA run accession will be processed into a 
seperate folder)\r + | |-- blastn/\r + | | |-- SRA_RUN.blast.results.txt (output from NCBI Blastn)\r + | | `-- blast.stats.txt (blastn overall alignment stats)\r + | |-- bowtie2/\r + | | |-- SRA_RUN.bam (output from bowtie2)\r + | | |-- alignment.stats.txt (bowtie2 stdout)\r + | | `-- alignment.txt (bowtie2 overall alignment summary)\r + | |-- fastQC/\r + | | |-- \r + | | |-- \r + | |-- kraken2/\r + | | |-- SRA_RUN.kraken (kraken2 standard classification table)\r + | | |-- SRA_RUN.report (kraken2 classification report)\r + | | `-- SRA_RUN.stdout.txt (kraken2 stdout)\r + | |-- raw_fastq/\r + | | |-- \r + | | |-- fastq_dump.stdout.txt\r + | | |-- sra/\r + | | `-- wget.full.sra.stdout.txt\r + | `-- trimmed_data/\r + | |-- \r + | `-- SRA_RUN_trim_stdout_log.txt (trimmomatic stdout)\r + `-- runlog.SRA_RUN.txt (Complete run log of the pipeline per SRA run accession)\r +\r +For a thorough understanding of the results of the third-party tools, take a look at the following documentations:\r +\r +- [Blastn](https://www.ncbi.nlm.nih.gov/books/NBK279690/)\r +- [Bowtie2](https://bowtie-bio.sourceforge.net/bowtie2/manual.shtml)\r +- [FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)\r +- [Kraken2](https://github.com/DerrickWood/kraken2/blob/master/docs/MANUAL.markdown)\r +- [Trimmomatic](https://github.com/usadellab/Trimmomatic)\r +\r +---\r +\r +### Disk usage using the input from the example\r +\r +The table below provides a summary of the disk usage for different analyses conducted on varying dataset sizes. 
It demonstrates how disk usage can increase depending on the choice of the fraction of the dataset the user wishes to analyze.\r +\r +| RUN ACCESSION | 100% of dataset | 5% of dataset | 10% of dataset |\r +| ------------- | --------------- | ------------- | -------------- |\r +| SRR8392720 | 1.3G | 85M | 156M |\r +| SRR7289585 | 1.4G | 150M | 288M |\r +| SRR12548227 | 15M | 9.0M | 9.1M |\r +\r +This summary highlights how the disk usage (in megabytes or gigabytes) can vary depending on the chosen fraction of the dataset for analysis.\r +\r +---\r +\r +### Troubleshooting\r +\r +- Errors related to mamba/conda environment:\r +\r + Since `mamba` is a drop-in replacement and uses the same commands and configuration options as **conda**, it's possible to swap almost all commands between **conda** & **mamba**.\r +\r + Use **`conda list`** command to verify whether the packages mentioned in the `requirements.yaml` are successfully installed into your environment.\r +\r + > _Note:_ The `requirements.yaml` provided in this package was exported from `mamba 0.25.0` installation running on `Ubuntu 20.04.4 LTS`.\r +\r + In case of any missing tool/ conflicting dependencies in the environment, the user can try using **`conda search `** or `mamba repoquery search ` command to find the supported version of the tool and then manually install it by typing **`conda install `** or `mamba install ` inside the environment. Please refer the official [troubleshooting guide](https://conda.io/projects/conda/en/latest/user-guide/troubleshooting.html "User guide » Troubleshooting") for further help.\r +\r + > _Note:_ On macOS and Linux, the supported tools and their dependencies aren't always the same. Even when all of the requirements are completely aligned, the set of available versions isn't necessarily the same. 
User may try setting up the environment using any of the supplementary `requirements-*.txt` provided in the `src/main/resources/` directory.\r +\r +- Error installing Perl modules:\r +\r + Users must ensure that they have write permission to the `/Users/\\*/.cpan/` or similar directory, and the CPAN is properly configured.\r +\r + You might need to define the PERLLIB/PERL5LIB environment variable if you see an error similar to the following:\r +\r + ```bash\r + Cant locate My/Module.pm in @INC (@INC contains:\r + ...\r + ...\r + .).\r + BEGIN failed--compilation aborted.\r + ```\r +\r + > _Note about MAKE_: 'make' is an essential tool for building Perl modules. Please make sure that you have 'make' installed in your system. The setup script provided in this package utilizes 'cpan' to build the required Perl modules automatically.\r +\r + If the automatic setup provided in the package fails to install the required dependencies, you may need to install them manually by using the command `cpan install ` or searching the package on [Metacpan](https://metacpan.org/).\r +\r + Additionally, some Perl modules can also be installed through `mamba` (eg. 
the compatible version of Perl module `Config::Simple` can be searched on mamba by `mamba repoquery search perl-config-simple`)\r +\r +---\r +\r +### List of Perl modules and tools incorporated in the pipeline\r +\r +- Perl modules:\r +\r + - Config::Simple\r + - Parallel::ForkManager\r + - Log::Log4perl\r + - Getopt::Long\r + - Text::CSV\r + - Text::Unidecode\r +\r +- Tools:\r +\r + - [NCBI EDirect utilities \\>=16.2](https://www.ncbi.nlm.nih.gov/books/NBK179288/)\r + - [NCBI SRA Toolkit \\>=2.10.7](https://www.ncbi.nlm.nih.gov/home/tools/)\r + - [FastQC \\>=0.11.9](https://www.bioinformatics.babraham.ac.uk/projects/download.html#fastqc)\r + - [Trimmomatic \\>=0.39](http://www.usadellab.org/cms/?page=trimmomatic)\r + - [FASTX-Toolkit \\>=0.0.14](http://hannonlab.cshl.edu/fastx_toolkit/)\r + - [NCBI Blast \\>=2.10.1](https://blast.ncbi.nlm.nih.gov/Blast.cgi?PAGE_TYPE=BlastDocs&DOC_TYPE=Download)\r + - [Bowtie2 \\>=2.4.5](http://bowtie-bio.sourceforge.net/bowtie2/index.shtml)\r + - [Samtools \\>=1.15.1](http://www.htslib.org/download/)\r + - [Kraken2 \\>=2.1.2](https://ccb.jhu.edu/software/kraken2/)\r +\r +---\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.546.1" ; + schema1:isBasedOn "https://github.com/maurya-anand/ARA.git" ; + schema1:license "LGPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ARA (Automated Record Analysis)" ; + schema1:sdDatePublished "2024-07-12 13:32:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/546/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 23784 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-07-31T12:44:27Z" ; + schema1:dateModified "2023-07-31T12:49:45Z" ; + schema1:description """## ARA (Automated Record Analysis) : An automatic pipeline for exploration of SRA datasets with sequences as a query\r +\r +### Requirements\r +\r +- **Docker**\r +\r + - Please checkout the [Docker installation](https://docs.docker.com/get-docker/) guide.\r +\r + _or_\r +\r +- **Mamba package manager**\r +\r + - Please checkout the [mamba or micromamba](https://mamba.readthedocs.io/en/latest/installation.html) official installation guide.\r +\r + - We prefer `mamba` over [`conda`](https://docs.conda.io/en/latest/) since it is faster and uses `libsolv` to effectively resolve the dependencies.\r +\r + - `conda` can still be used to install the pipeline using the same commands as described in the installation section.\r +\r + > Note: **It is important to include the 'bioconda' channel in addition to the other channels as indicated in the [official manual](https://bioconda.github.io/#usage "Bioconda - Usage")**. Use the following commands in the given order to configure the channels (one-time setup).\r + >\r + > ```bash\r + > conda config --add channels defaults\r + > conda config --add channels bioconda\r + > conda config --add channels conda-forge\r + > conda config --set channel_priority strict\r + > ```\r +\r +---\r +\r +### Installation\r +\r +The user can install the pipeline by using either Docker or Mamba using the steps mentioned below.\r +\r +First, click the green "Code" button, then select "Download Zip" to begin downloading the contents of this repository. Once the download is complete, extract the zip file by into the desired location before starting the setup. 
Please use the commands shown below to begin installing the pipeline.\r +\r +Alternatively, the github repo can also be cloned through the options shown after clicking the "Code" button. Navigate inside the folder after by using the `cd ARA/` command before starting the setup.\r +\r +> _Warning: Before starting any analysis with the pipeline, please make sure that the system has enough disk space available for the data you wish to retrieve and process from the SRA repository._\r +\r +- **Using Docker**\r +\r + ```bash\r + cd ARA-main/\r + docker build -t ara_img .\r + ```\r +\r +_or_\r +\r +- **Using Mamba**\r +\r + ```bash\r + cd ARA-main/\r + mamba env create --file requirements.yaml\r + mamba activate ara_env\r + perl setup.pl\r + ```\r +\r + > _Note: After installation, the virtual environment consumes approximately 1.5 GB of disk space. The installation was tested on "Ubuntu 20.04.4 LTS", "Ubuntu 22.04.1 LTS" and "Fedora 37" using the procedure mentioned above._\r +\r +Please be patient because downloading and configuring the tools/modules may take several minutes. The warning messages that appear during the installation of certain Perl modules can be ignored by users.\r +\r +Optional: The user can also add the current directory to PATH for ease of use. Use the `chmod +x ara.pl` followed by `export PATH="$(pwd):$PATH"` command. 
Alternatively, the user is free to create symbolic, copy the executable to `/bin/`, or use any other method depending on their operating system.\r +\r +Refer the 'Troubleshooting' section in case of any installation related issues.\r +\r +---\r +\r +### Example usage\r +\r +- **Docker**\r +\r + `docker run -it ara_img /home/ARA-main/ara.pl --input /home/ARA-main/example/SraRunInfo.csv --sequences /home/ARA-main/example/Arabidopsis_thaliana.TAIR10.ncrna.fa`\r +\r +- **Mamba environment**\r +\r + `perl ara.pl --input example/SraRunInfo.csv --sequences example/Arabidopsis_thaliana.TAIR10.ncrna.fa`\r +\r +To get full usage info: `perl ara.pl --help`\r +\r +> _Note_: The user can delete the contents of `results/` directory after testing the tool using the example mentioned above.\r +\r +### Configuration file\r +\r +The configuration file `conf.txt` is automatically generated during the installation by setup script. It contains certain default parameters as well as the location to the executable binaries of the tools incorporated in the pipeline.\r +\r +The user can modify the default parameters in `conf.txt` and pass it to the pipeline as an input. For example, the `data_perc` option in the configuration refers to the default value of 5% of the dataset selected for analysis. However, the user has the flexibility to provide any integer value between 1 and 100 to specify the desired percentage of the dataset to be used.\r +\r +Similarly, the user can choose between _blastn_ or _bowtie2_ by changing the 'execute flag' to either 0 or 1 in the configuration file while leaving the rest of the parameters to default values. By default, both the tools are enabled _ie_. 
`execute = 1`.\r +\r +The `read_drop_perc_cutoff` in `conf.txt` config file denotes the cutoff to discard a sample if the total reads left after executing the trimmomatic are higher than the threshold (by default, if the more than 70% of reads are dropped as per the trimmomatic log, then the sample will fail the quality criteria and will not be processed downstream). Please refer the documentation of [Trimmomatic ](https://github.com/usadellab/Trimmomatic) for more details about the parameters present in the config file.\r +\r +Similarly, the criteria to check the minimal alignment rate are indicated by the `alignment perc cutoff` parameter under blastn and bowtie2 in the `conf.txt` configuration file (if the total alignment percentage is less than the threshold then the pipeline will report that the sample failed the quality criteria). More details about the parameters used in the `conf.txt` file can be found in the respective documentations of [Blastn](https://www.ncbi.nlm.nih.gov/books/NBK279690/) and [Bowtie2](https://bowtie-bio.sourceforge.net/bowtie2/manual.shtml).\r +\r +By default, the pipeline uses a pre-built Kraken2 viral genomic database ([release: 9/8/2022](https://genome-idx.s3.amazonaws.com/kraken/k2_viral_20220908.tar.gz)) from . Users can provide their own database by changing the `kraken2_db_path` parameter in the `conf.txt` file.\r +\r +> _Note:_ If the user wishes to use a different installation than Bioconda, the user can manually install the required tools and specify the absolute path of the executable binaries in the configuration.\r +\r +---\r +\r +### Pipeline parameters\r +\r +- **`--input`** (mandatory) The user can provide input in either of the following ways:\r +\r + - A single SRA run accession. eg: **`perl ara.pl --input SRR12548227 --sequences example/Arabidopsis_thaliana.TAIR10.ncrna.fa`**\r +\r + - A list of run accessions in a text file (1 run accession per line). 
eg: **`perl ara.pl --input example/list.txt --sequences example/Arabidopsis_thaliana.TAIR10.ncrna.fa`**\r +\r + - The SRA runInfo exported directly from the NCBI-SRA web portal. Goto the [SRA homepage](https://www.ncbi.nlm.nih.gov/sra "Home - NCBI - SRA") and search for the desired keyword. Export the `SraRunInfo.csv` by clicking 'Send to' =\\> File =\\> RunInfo). eg: **`perl ara.pl --input example/SraRunInfo.csv --sequences example/Arabidopsis_thaliana.TAIR10.ncrna.fa`**\r +\r +- **`--sequences`** (mandatory) The user should provide a fasta file containing the query sequences.\r +\r +- **`--output`** (optional) The output directory to store the results. By default, the output will be stored into the **`results/`** directory of the package. eg: **`perl ara.pl --input example/SraRunInfo.csv --sequences example/Arabidopsis_thaliana.TAIR10.ncrna.fa --output /src/main/test/`**\r +\r +- **`--mode`** (optional) Choose one of the three modes to run the pipeline.\r +\r + - The **`screen`** is the default mode which will only download a fraction of the data-set per SRA-run accession and analyse the file as per the given configuration.\r +\r + - The **`full`** mode will execute the pipeline by downloading the complete fastq file per SRA-run accession.\r +\r + - The **`both`** option searches for samples using a fraction of the data that meet the minimum alignment cutoff from either 'bowtie2' or 'blastn', and then automatically performs alignment by downloading the entire fastq file. eg: **`perl ara.pl --input example/SraRunInfo.csv --sequences example/Arabidopsis_thaliana.TAIR10.ncrna.fa --output /src/main/test/ --mode screen`**\r +\r + > _Note:_ There is a supporting **`summary`** mode, that will generate a unified alignment summary by examining the output files created by either screen-mode or full-mode. The summary mode should only be used when the user needs to recreate the summary stats from the pre-existing results. 
The user must enter **`–mode summary`** along with the previously used command parameters to re-generate the summary.\r +\r + - **`--config`** (optional) Pipeline configuration. By default it will use the **`conf.txt`** generated by the setup script. eg: **`perl ara.pl --input example/SraRunInfo.csv --sequences example/Arabidopsis_thaliana.TAIR10.ncrna.fa --output /src/main/test/ --mode screen --config conf.txt`**\r +\r +---\r +\r +### Output structure\r +\r +The pipeline will create folders per SRA run accession and generate results using the run accession as the prefix. The analysis related to the screening a fraction of data will be stored in `screening_results` directory whereas the analysis conducted on the whole dataset will be stored in `full_analyis_results` directory.\r +\r +An outline of directory structure containing the results is shown below-\r +\r + results/\r + `-- test/ (name derived from the input fasta sequence file)\r + |-- test.screening.analysis.stats.sorted.by.alignment.txt (combined metadata and analysis report generated after processing all the SRA run accessions, sorted in decreasing order of total alignment percentage)\r + |-- metadata/\r + | |-- test.metadata.txt (Combined metadata downloaded from SRA)\r + | |-- test.metadata.screened.txt (List of SRA accessions which qualify the filter criteria specified in the config.)\r + | |-- SRA_RUN.run.metadata.txt (unprocessed metadata on a single SRA accession as retrieved from NCBI)\r + |-- reference/\r + | |-- blastn_db/ (folder containing the blast database created from the input fasta sequence)\r + | |-- bowtie2_index/ (folder containing the bowtie index created from the input fasta sequence)\r + | |-- bowtie2_index.stdout.txt (stdout captured from bowtie2 index creation)\r + | `-- makeblastdb.stdout.txt (stdout captured from blastn database creation)\r + `-- screening_results/ (similar structure for screeing or full mode)\r + |-- SRA_RUN/ (each SRA run accession will be processed into a 
seperate folder)\r + | |-- blastn/\r + | | |-- SRA_RUN.blast.results.txt (output from NCBI Blastn)\r + | | `-- blast.stats.txt (blastn overall alignment stats)\r + | |-- bowtie2/\r + | | |-- SRA_RUN.bam (output from bowtie2)\r + | | |-- alignment.stats.txt (bowtie2 stdout)\r + | | `-- alignment.txt (bowtie2 overall alignment summary)\r + | |-- fastQC/\r + | | |-- \r + | | |-- \r + | |-- kraken2/\r + | | |-- SRA_RUN.kraken (kraken2 standard classification table)\r + | | |-- SRA_RUN.report (kraken2 classification report)\r + | | `-- SRA_RUN.stdout.txt (kraken2 stdout)\r + | |-- raw_fastq/\r + | | |-- \r + | | |-- fastq_dump.stdout.txt\r + | | |-- sra/\r + | | `-- wget.full.sra.stdout.txt\r + | `-- trimmed_data/\r + | |-- \r + | `-- SRA_RUN_trim_stdout_log.txt (trimmomatic stdout)\r + `-- runlog.SRA_RUN.txt (Complete run log of the pipeline per SRA run accession)\r +\r +For a thorough understanding of the results of the third-party tools, take a look at the following documentations:\r +\r +- [Blastn](https://www.ncbi.nlm.nih.gov/books/NBK279690/)\r +- [Bowtie2](https://bowtie-bio.sourceforge.net/bowtie2/manual.shtml)\r +- [FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)\r +- [Kraken2](https://github.com/DerrickWood/kraken2/blob/master/docs/MANUAL.markdown)\r +- [Trimmomatic](https://github.com/usadellab/Trimmomatic)\r +\r +---\r +\r +### Disk usage using the input from the example\r +\r +The table below provides a summary of the disk usage for different analyses conducted on varying dataset sizes. 
It demonstrates how disk usage can increase depending on the choice of the fraction of the dataset the user wishes to analyze.\r +\r +| RUN ACCESSION | 100% of dataset | 5% of dataset | 10% of dataset |\r +| ------------- | --------------- | ------------- | -------------- |\r +| SRR8392720 | 1.3G | 85M | 156M |\r +| SRR7289585 | 1.4G | 150M | 288M |\r +| SRR12548227 | 15M | 9.0M | 9.1M |\r +\r +This summary highlights how the disk usage (in megabytes or gigabytes) can vary depending on the chosen fraction of the dataset for analysis.\r +\r +---\r +\r +### Troubleshooting\r +\r +- Errors related to mamba/conda environment:\r +\r + Since `mamba` is a drop-in replacement and uses the same commands and configuration options as **conda**, it's possible to swap almost all commands between **conda** & **mamba**.\r +\r + Use **`conda list`** command to verify whether the packages mentioned in the `requirements.yaml` are successfully installed into your environment.\r +\r + > _Note:_ The `requirements.yaml` provided in this package was exported from `mamba 0.25.0` installation running on `Ubuntu 20.04.4 LTS`.\r +\r + In case of any missing tool/ conflicting dependencies in the environment, the user can try using **`conda search `** or `mamba repoquery search ` command to find the supported version of the tool and then manually install it by typing **`conda install `** or `mamba install ` inside the environment. Please refer the official [troubleshooting guide](https://conda.io/projects/conda/en/latest/user-guide/troubleshooting.html "User guide » Troubleshooting") for further help.\r +\r + > _Note:_ On macOS and Linux, the supported tools and their dependencies aren't always the same. Even when all of the requirements are completely aligned, the set of available versions isn't necessarily the same. 
User may try setting up the environment using any of the supplementary `requirements-*.txt` provided in the `src/main/resources/` directory.\r +\r +- Error installing Perl modules:\r +\r + Users must ensure that they have write permission to the `/Users/\\*/.cpan/` or similar directory, and the CPAN is properly configured.\r +\r + You might need to define the PERLLIB/PERL5LIB environment variable if you see an error similar to the following:\r +\r + ```bash\r + Cant locate My/Module.pm in @INC (@INC contains:\r + ...\r + ...\r + .).\r + BEGIN failed--compilation aborted.\r + ```\r +\r + > _Note about MAKE_: 'make' is an essential tool for building Perl modules. Please make sure that you have 'make' installed in your system. The setup script provided in this package utilizes 'cpan' to build the required Perl modules automatically.\r +\r + If the automatic setup provided in the package fails to install the required dependencies, you may need to install them manually by using the command `cpan install ` or searching the package on [Metacpan](https://metacpan.org/).\r +\r + Additionally, some Perl modules can also be installed through `mamba` (eg. 
the compatible version of Perl module `Config::Simple` can be searched on mamba by `mamba repoquery search perl-config-simple`)\r +\r +---\r +\r +### List of Perl modules and tools incorporated in the pipeline\r +\r +- Perl modules:\r +\r + - Config::Simple\r + - Parallel::ForkManager\r + - Log::Log4perl\r + - Getopt::Long\r + - Text::CSV\r + - Text::Unidecode\r +\r +- Tools:\r +\r + - [NCBI EDirect utilities \\>=16.2](https://www.ncbi.nlm.nih.gov/books/NBK179288/)\r + - [NCBI SRA Toolkit \\>=2.10.7](https://www.ncbi.nlm.nih.gov/home/tools/)\r + - [FastQC \\>=0.11.9](https://www.bioinformatics.babraham.ac.uk/projects/download.html#fastqc)\r + - [Trimmomatic \\>=0.39](http://www.usadellab.org/cms/?page=trimmomatic)\r + - [FASTX-Toolkit \\>=0.0.14](http://hannonlab.cshl.edu/fastx_toolkit/)\r + - [NCBI Blast \\>=2.10.1](https://blast.ncbi.nlm.nih.gov/Blast.cgi?PAGE_TYPE=BlastDocs&DOC_TYPE=Download)\r + - [Bowtie2 \\>=2.4.5](http://bowtie-bio.sourceforge.net/bowtie2/index.shtml)\r + - [Samtools \\>=1.15.1](http://www.htslib.org/download/)\r + - [Kraken2 \\>=2.1.2](https://ccb.jhu.edu/software/kraken2/)\r +\r +---\r +""" ; + schema1:keywords "Genomics, Pipeline, Perl, ncbi sra, sequence annotation, sequence search" ; + schema1:license "https://spdx.org/licenses/LGPL-3.0" ; + schema1:name "ARA (Automated Record Analysis)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/546?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """\r +

+nf-core/viralrecon +

\r +

GitHub Actions CI Status GitHub Actions Linting Status Nextflow install with bioconda

\r +

Docker DOI

\r +

nfcore/viralrecon is a bioinformatics analysis pipeline used to perform assembly and intrahost/low-frequency variant calling for viral samples. The pipeline currently supports metagenomics and amplicon sequencing data derived from the Illumina sequencing platform.

\r +

This pipeline is a re-implementation of the SARS_Cov2_consensus-nf and SARS_Cov2_assembly-nf pipelines initially developed by Sarai Varona and Sara Monzon from BU-ISCIII. Porting both of these pipelines to nf-core was an international collaboration between numerous contributors and developers, led by Harshil Patel from the The Bioinformatics & Biostatistics Group at The Francis Crick Institute, London. We appreciated the need to have a portable, reproducible and scalable pipeline for the analysis of COVID-19 sequencing samples and so the Avengers Assembled! Please come and join us and add yourself to the contributor list :)

\r +

We have integrated a number of options in the pipeline to allow you to run specific aspects of the workflow if you so wish. For example, you can skip all of the assembly steps with the --skip_assembly parameter. See usage docs for all of the available options when running the pipeline.

\r +

Please click here to see an example MultiQC report generated using the parameters defined in this configuration file to run the pipeline on samples which were prepared from the ncov-2019 ARTIC Network V1 amplicon set and sequenced on the Illumina MiSeq platform in 301bp paired-end format.

\r +

The pipeline is built using Nextflow, a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with docker containers making installation trivial and results highly reproducible. Furthermore, automated continuous integration tests to run the pipeline on a full-sized dataset are passing on AWS cloud.

\r +

+Pipeline summary

\r +
    \r +
  1. Download samples via SRA, ENA or GEO ids (ENA FTP, parallel-fastq-dump; if required)
  2. \r +
  3. Merge re-sequenced FastQ files (cat; if required)
  4. \r +
  5. Read QC (FastQC)
  6. \r +
  7. Adapter trimming (fastp)
  8. \r +
  9. Variant calling
    \r +i. Read alignment (Bowtie 2)
    \r +ii. Sort and index alignments (SAMtools)
    \r +iii. Primer sequence removal (iVar; amplicon data only)
    \r +iv. Duplicate read marking (picard; removal optional)
    \r +v. Alignment-level QC (picard, SAMtools)
    \r +vi. Choice of multiple variant calling and consensus sequence generation routes (VarScan 2, BCFTools, BEDTools || iVar variants and consensus || BCFTools, BEDTools)
    \r +- Variant annotation (SnpEff, SnpSift)
    \r +- Consensus assessment report (QUAST)
  10. \r +
  11. +De novo assembly
    \r +i. Primer trimming (Cutadapt; amplicon data only)
    \r +ii. Removal of host reads (Kraken 2)
    \r +iii. Choice of multiple assembly tools (SPAdes || metaSPAdes || Unicycler || minia)
    \r +- Blast to reference genome (blastn)
    \r +- Contiguate assembly (ABACAS)
    \r +- Assembly report (PlasmidID)
    \r +- Assembly assessment report (QUAST)
    \r +- Call variants relative to reference (Minimap2, seqwish, vg, Bandage)
    \r +- Variant annotation (SnpEff, SnpSift)
  12. \r +
  13. Present QC and visualisation for raw read, alignment, assembly and variant calling results (MultiQC)
  14. \r +
\r +

+Quick Start

\r +

i. Install nextflow

\r +

ii. Install either Docker or Singularity for full pipeline reproducibility (please only use Conda as a last resort; see docs)

\r +

iii. Download the pipeline and test it on a minimal dataset with a single command

\r +
nextflow run nf-core/viralrecon -profile test,<docker/singularity/conda/institute>\r
+
\r +
\r +

Please check nf-core/configs to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use -profile <institute> in your command. This will enable either docker or singularity and set the appropriate execution settings for your local compute environment.

\r +
\r +

iv. Start running your own analysis!

\r +
nextflow run nf-core/viralrecon -profile <docker/singularity/conda/institute> --input samplesheet.csv --genome 'NC_045512.2' -profile docker\r
+
\r +

See usage docs for all of the available options when running the pipeline.

\r +

+Documentation

\r +

The nf-core/viralrecon pipeline comes with documentation about the pipeline, found in the docs/ directory:

\r +
    \r +
  1. Installation
  2. \r +
  3. Pipeline configuration\r +\r +
  4. \r +
  5. Running the pipeline
  6. \r +
  7. Output and how to interpret the results
  8. \r +
  9. Troubleshooting
  10. \r +
\r +

+Credits

\r +

These scripts were originally written by Sarai Varona, Miguel Juliá and Sara Monzon from BU-ISCIII and co-ordinated by Isabel Cuesta for the Institute of Health Carlos III, Spain. Through collaboration with the nf-core community the pipeline has now been updated substantially to include additional processing steps, to standardise inputs/outputs and to improve pipeline reporting; implemented primarily by Harshil Patel from The Bioinformatics & Biostatistics Group at The Francis Crick Institute, London.

\r +

Many thanks to others who have helped out and contributed along the way too, including (but not limited to):

\r +\r +\r +\r +Name\r +Affiliation\r +\r +\r +\r +\r +Alexander Peltzer\r +Boehringer Ingelheim, Germany\r +\r +\r +Alison Meynert\r +University of Edinburgh, Scotland\r +\r +\r +Edgar Garriga Nogales\r +Centre for Genomic Regulation, Spain\r +\r +\r +Erik Garrison\r +UCSC, USA\r +\r +\r +Gisela Gabernet\r +QBiC, University of Tübingen, Germany\r +\r +\r +Joao Curado\r +Flomics Biotech, Spain\r +\r +\r +Jose Espinosa-Carrasco\r +Centre for Genomic Regulation, Spain\r +\r +\r +Katrin Sameith\r +DRESDEN-concept Genome Center, Germany\r +\r +\r +Lluc Cabus\r +Flomics Biotech, Spain\r +\r +\r +Marta Pozuelo\r +Flomics Biotech, Spain\r +\r +\r +Maxime Garcia\r +SciLifeLab, Sweden\r +\r +\r +Michael Heuer\r +UC Berkeley, USA\r +\r +\r +Phil Ewels\r +SciLifeLab, Sweden\r +\r +\r +Simon Heumos\r +QBiC, University of Tübingen, Germany\r +\r +\r +Stephen Kelly\r +Memorial Sloan Kettering Cancer Center, USA\r +\r +\r +Thanh Le Viet\r +Quadram Institute, UK\r +\r +\r +\r +
\r +

Listed in alphabetical order

\r +
\r +

+Contributions and Support

\r +

If you would like to contribute to this pipeline, please see the contributing guidelines.

\r +

For further information or help, don’t hesitate to get in touch on Slack (you can join with this invite).

\r +

+Citation

\r +

If you use nf-core/viralrecon for your analysis, please cite it using the following doi: 10.5281/zenodo.3872730

\r +

An extensive list of references for the tools used by the pipeline can be found in the CITATIONS.md file.

\r +

You can cite the nf-core publication as follows:

\r +
\r +

The nf-core framework for community-curated bioinformatics pipelines.

\r +

Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.

\r +

Nat Biotechnol. 2020 Feb 13. doi: 10.1038/s41587-020-0439-x.
\r +ReadCube: Full Access Link

\r +
\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/19?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/viralrecon/blob/master/main.nf" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/viralrecon" ; + schema1:sdDatePublished "2024-07-12 13:37:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/19/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 122308 ; + schema1:creator ; + schema1:dateCreated "2020-05-14T14:10:58Z" ; + schema1:dateModified "2023-01-16T13:41:25Z" ; + schema1:description """\r +

+nf-core/viralrecon +

\r +

GitHub Actions CI Status GitHub Actions Linting Status Nextflow install with bioconda

\r +

Docker DOI

\r +

nfcore/viralrecon is a bioinformatics analysis pipeline used to perform assembly and intrahost/low-frequency variant calling for viral samples. The pipeline currently supports metagenomics and amplicon sequencing data derived from the Illumina sequencing platform.

\r +

This pipeline is a re-implementation of the SARS_Cov2_consensus-nf and SARS_Cov2_assembly-nf pipelines initially developed by Sarai Varona and Sara Monzon from BU-ISCIII. Porting both of these pipelines to nf-core was an international collaboration between numerous contributors and developers, led by Harshil Patel from the The Bioinformatics & Biostatistics Group at The Francis Crick Institute, London. We appreciated the need to have a portable, reproducible and scalable pipeline for the analysis of COVID-19 sequencing samples and so the Avengers Assembled! Please come and join us and add yourself to the contributor list :)

\r +

We have integrated a number of options in the pipeline to allow you to run specific aspects of the workflow if you so wish. For example, you can skip all of the assembly steps with the --skip_assembly parameter. See usage docs for all of the available options when running the pipeline.

\r +

Please click here to see an example MultiQC report generated using the parameters defined in this configuration file to run the pipeline on samples which were prepared from the ncov-2019 ARTIC Network V1 amplicon set and sequenced on the Illumina MiSeq platform in 301bp paired-end format.

\r +

The pipeline is built using Nextflow, a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with docker containers making installation trivial and results highly reproducible. Furthermore, automated continuous integration tests to run the pipeline on a full-sized dataset are passing on AWS cloud.

\r +

+Pipeline summary

\r +
    \r +
  1. Download samples via SRA, ENA or GEO ids (ENA FTP, parallel-fastq-dump; if required)
  2. \r +
  3. Merge re-sequenced FastQ files (cat; if required)
  4. \r +
  5. Read QC (FastQC)
  6. \r +
  7. Adapter trimming (fastp)
  8. \r +
  9. Variant calling
    \r +i. Read alignment (Bowtie 2)
    \r +ii. Sort and index alignments (SAMtools)
    \r +iii. Primer sequence removal (iVar; amplicon data only)
    \r +iv. Duplicate read marking (picard; removal optional)
    \r +v. Alignment-level QC (picard, SAMtools)
    \r +vi. Choice of multiple variant calling and consensus sequence generation routes (VarScan 2, BCFTools, BEDTools || iVar variants and consensus || BCFTools, BEDTools)
    \r +- Variant annotation (SnpEff, SnpSift)
    \r +- Consensus assessment report (QUAST)
  10. \r +
  11. +De novo assembly
    \r +i. Primer trimming (Cutadapt; amplicon data only)
    \r +ii. Removal of host reads (Kraken 2)
    \r +iii. Choice of multiple assembly tools (SPAdes || metaSPAdes || Unicycler || minia)
    \r +- Blast to reference genome (blastn)
    \r +- Contiguate assembly (ABACAS)
    \r +- Assembly report (PlasmidID)
    \r +- Assembly assessment report (QUAST)
    \r +- Call variants relative to reference (Minimap2, seqwish, vg, Bandage)
    \r +- Variant annotation (SnpEff, SnpSift)
  12. \r +
  13. Present QC and visualisation for raw read, alignment, assembly and variant calling results (MultiQC)
  14. \r +
\r +

+Quick Start

\r +

i. Install nextflow

\r +

ii. Install either Docker or Singularity for full pipeline reproducibility (please only use Conda as a last resort; see docs)

\r +

iii. Download the pipeline and test it on a minimal dataset with a single command

\r +
nextflow run nf-core/viralrecon -profile test,<docker/singularity/conda/institute>\r
+
\r +
\r +

Please check nf-core/configs to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use -profile <institute> in your command. This will enable either docker or singularity and set the appropriate execution settings for your local compute environment.

\r +
\r +

iv. Start running your own analysis!

\r +
nextflow run nf-core/viralrecon -profile <docker/singularity/conda/institute> --input samplesheet.csv --genome 'NC_045512.2' -profile docker\r
+
\r +

See usage docs for all of the available options when running the pipeline.

\r +

+Documentation

\r +

The nf-core/viralrecon pipeline comes with documentation about the pipeline, found in the docs/ directory:

\r +
    \r +
  1. Installation
  2. \r +
  3. Pipeline configuration\r +\r +
  4. \r +
  5. Running the pipeline
  6. \r +
  7. Output and how to interpret the results
  8. \r +
  9. Troubleshooting
  10. \r +
\r +

+Credits

\r +

These scripts were originally written by Sarai Varona, Miguel Juliá and Sara Monzon from BU-ISCIII and co-ordinated by Isabel Cuesta for the Institute of Health Carlos III, Spain. Through collaboration with the nf-core community the pipeline has now been updated substantially to include additional processing steps, to standardise inputs/outputs and to improve pipeline reporting; implemented primarily by Harshil Patel from The Bioinformatics & Biostatistics Group at The Francis Crick Institute, London.

\r +

Many thanks to others who have helped out and contributed along the way too, including (but not limited to):

\r +\r +\r +\r +Name\r +Affiliation\r +\r +\r +\r +\r +Alexander Peltzer\r +Boehringer Ingelheim, Germany\r +\r +\r +Alison Meynert\r +University of Edinburgh, Scotland\r +\r +\r +Edgar Garriga Nogales\r +Centre for Genomic Regulation, Spain\r +\r +\r +Erik Garrison\r +UCSC, USA\r +\r +\r +Gisela Gabernet\r +QBiC, University of Tübingen, Germany\r +\r +\r +Joao Curado\r +Flomics Biotech, Spain\r +\r +\r +Jose Espinosa-Carrasco\r +Centre for Genomic Regulation, Spain\r +\r +\r +Katrin Sameith\r +DRESDEN-concept Genome Center, Germany\r +\r +\r +Lluc Cabus\r +Flomics Biotech, Spain\r +\r +\r +Marta Pozuelo\r +Flomics Biotech, Spain\r +\r +\r +Maxime Garcia\r +SciLifeLab, Sweden\r +\r +\r +Michael Heuer\r +UC Berkeley, USA\r +\r +\r +Phil Ewels\r +SciLifeLab, Sweden\r +\r +\r +Simon Heumos\r +QBiC, University of Tübingen, Germany\r +\r +\r +Stephen Kelly\r +Memorial Sloan Kettering Cancer Center, USA\r +\r +\r +Thanh Le Viet\r +Quadram Institute, UK\r +\r +\r +\r +
\r +

Listed in alphabetical order

\r +
\r +

+Contributions and Support

\r +

If you would like to contribute to this pipeline, please see the contributing guidelines.

\r +

For further information or help, don’t hesitate to get in touch on Slack (you can join with this invite).

\r +

+Citation

\r +

If you use nf-core/viralrecon for your analysis, please cite it using the following doi: 10.5281/zenodo.3872730

\r +

An extensive list of references for the tools used by the pipeline can be found in the CITATIONS.md file.

\r +

You can cite the nf-core publication as follows:

\r +
\r +

The nf-core framework for community-curated bioinformatics pipelines.

\r +

Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.

\r +

Nat Biotechnol. 2020 Feb 13. doi: 10.1038/s41587-020-0439-x.
\r +ReadCube: Full Access Link

\r +
\r +""" ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/viralrecon" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/19?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Taxonomic classification and profiling of shotgun metagenomic data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1025?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/taxprofiler" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/taxprofiler" ; + schema1:sdDatePublished "2024-07-12 13:18:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1025/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12656 ; + schema1:creator , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:19Z" ; + schema1:dateModified "2024-06-11T12:55:19Z" ; + schema1:description "Taxonomic classification and profiling of shotgun metagenomic data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1025?version=10" ; + schema1:keywords "Classification, illumina, long-reads, Metagenomics, microbiome, nanopore, pathogen, Profiling, shotgun, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/taxprofiler" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1025?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + ; + schema1:description " A version of V-pipe (analysis of next generation sequencing (NGS) data from viral pathogens) specifically adapted to analyze high-throughput sequencing data of SARS-CoV-2. " ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/11?version=1" ; + schema1:isBasedOn "https://github.com/cbg-ethz/V-pipe.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for (old) SARS-COV2 version of the V-Pipe workflow" ; + schema1:sdDatePublished "2024-07-12 13:35:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/11/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 99939 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 726 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2020-04-10T13:46:55Z" ; + schema1:dateModified "2023-01-16T13:40:31Z" ; + schema1:description " A version of V-pipe (analysis of next generation sequencing (NGS) data from viral pathogens) specifically adapted to analyze high-throughput sequencing data of SARS-CoV-2. " ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "(old) SARS-COV2 version of the V-Pipe workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/11?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2022-02-04T11:28:57.510371" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-consensus-from-variation" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-consensus-from-variation/COVID-19-CONSENSUS-CONSTRUCTION" . 
+ + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Snakemake workflow: dna-seq-varlociraptor\r +\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥6.3.0-brightgreen.svg)](https://snakemake.github.io)\r +[![GitHub actions status](https://github.com/snakemake-workflows/dna-seq-varlociraptor/workflows/Tests/badge.svg?branch=master)](https://github.com/snakemake-workflows/dna-seq-varlociraptor/actions?query=branch%3Amaster+workflow%3ATests)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.4675661.svg)](https://doi.org/10.5281/zenodo.4675661)\r +\r +\r +A Snakemake workflow for calling small and structural variants under any kind of scenario (tumor/normal, tumor/normal/relapse, germline, pedigree, populations) via the unified statistical model of [Varlociraptor](https://varlociraptor.github.io).\r +\r +\r +## Usage\r +\r +The usage of this workflow is described in the [Snakemake Workflow Catalog](https://snakemake.github.io/snakemake-workflow-catalog/?usage=snakemake-workflows%2Fdna-seq-varlociraptor).\r +\r +If you use this workflow in a paper, don't forget to give credits to the authors by citing the URL of this (original) repository and its DOI (see above).\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/686?version=2" ; + schema1:isBasedOn "https://github.com/snakemake-workflows/dna-seq-varlociraptor" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for dna-seq-varlociraptor" ; + schema1:sdDatePublished "2024-07-12 13:25:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/686/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1287 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-12-14T08:17:10Z" ; + schema1:dateModified "2023-12-14T08:17:10Z" ; + schema1:description """# Snakemake workflow: dna-seq-varlociraptor\r +\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥6.3.0-brightgreen.svg)](https://snakemake.github.io)\r +[![GitHub actions status](https://github.com/snakemake-workflows/dna-seq-varlociraptor/workflows/Tests/badge.svg?branch=master)](https://github.com/snakemake-workflows/dna-seq-varlociraptor/actions?query=branch%3Amaster+workflow%3ATests)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.4675661.svg)](https://doi.org/10.5281/zenodo.4675661)\r +\r +\r +A Snakemake workflow for calling small and structural variants under any kind of scenario (tumor/normal, tumor/normal/relapse, germline, pedigree, populations) via the unified statistical model of [Varlociraptor](https://varlociraptor.github.io).\r +\r +\r +## Usage\r +\r +The usage of this workflow is described in the [Snakemake Workflow Catalog](https://snakemake.github.io/snakemake-workflow-catalog/?usage=snakemake-workflows%2Fdna-seq-varlociraptor).\r +\r +If you use this workflow in a paper, don't forget to give credits to the authors by citing the URL of this (original) repository and its DOI (see above).\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/686?version=1" ; + schema1:keywords "Bioinformatics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "dna-seq-varlociraptor" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/686?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-01-16T18:24:17.024790" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/rnaseq-sr" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "rnaseq-sr/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.3" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Summary\r +\r +Common Workflow Language example that illustrate the process of setting up a simulation system containing a protein, step by step, using the BioExcel Building Blocks library (biobb). The particular example used is the Lysozyme protein (PDB code 1AKI).\r +\r +# Additional Links\r +\r +* [View Tutorial](http://mmb.irbbarcelona.org/biobb/workflows/tutorials/md_setup)\r +\r +* [View in Binder](https://mybinder.org/v2/gh/bioexcel/biobb_wf_md_setup/master?filepath=biobb_wf_md_setup%2Fnotebooks%2Fbiobb_MDsetup_tutorial.ipynb)\r +\r +* [GitHub Repository]( https://github.com/bioexcel/biobb-wf-md-setup-protein-cwl)\r +\r +* [Documentation](https://biobb-wf-md-setup.readthedocs.io/en/latest/index.html)""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/29?version=1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb-wf-md-setup-protein-cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Protein MD Setup tutorial using BioExcel Building Blocks (biobb)" ; + schema1:sdDatePublished "2024-07-12 13:36:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/29/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10361 ; + schema1:creator ; + schema1:dateCreated "2020-06-16T08:50:11Z" ; + schema1:dateModified "2021-05-07T15:18:57Z" ; + schema1:description """# Summary\r +\r +Common Workflow Language example that illustrate the process of setting up a simulation system containing a protein, step by step, using the BioExcel Building Blocks library (biobb). The particular example used is the Lysozyme protein (PDB code 1AKI).\r +\r +# Additional Links\r +\r +* [View Tutorial](http://mmb.irbbarcelona.org/biobb/workflows/tutorials/md_setup)\r +\r +* [View in Binder](https://mybinder.org/v2/gh/bioexcel/biobb_wf_md_setup/master?filepath=biobb_wf_md_setup%2Fnotebooks%2Fbiobb_MDsetup_tutorial.ipynb)\r +\r +* [GitHub Repository]( https://github.com/bioexcel/biobb-wf-md-setup-protein-cwl)\r +\r +* [Documentation](https://biobb-wf-md-setup.readthedocs.io/en/latest/index.html)""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/29?version=2" ; + schema1:isPartOf , + ; + schema1:keywords "molecular dynamics, trajectories, protein" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Protein MD Setup tutorial using BioExcel Building Blocks (biobb)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/29?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 60267 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-07-12 13:18:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=9" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9921 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:02Z" ; + schema1:dateModified "2024-06-11T12:55:02Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-07-12 13:18:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6500 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:01Z" ; + schema1:dateModified "2024-06-11T12:55:01Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + schema1:datePublished "2024-05-27T12:41:08.869616" ; + schema1:hasPart , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/qiime2-II-denoising" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + ; + schema1:name "qiime2-II-denoising/IIa-denoising-se" . + + a schema1:Person ; + schema1:name "Debjyoti Ghosh" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "qiime2-II-denoising/IIb-denoising-pe" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/qiime2-II-denoising" ; + schema1:version "0.2" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """Trim and filter reads; can run alone or as part of a combined workflow for large genome assembly. \r +\r +* What it does: Trims and filters raw sequence reads according to specified settings. \r +* Inputs: Long reads (format fastq); Short reads R1 and R2 (format fastq) \r +* Outputs: Trimmed and filtered reads: fastp_filtered_long_reads.fastq.gz (But note: no trimming or filtering is on by default), fastp_filtered_R1.fastq.gz, fastp_filtered_R2.fastq.gz\r +* Reports: fastp report on long reads, html; fastp report on short reads, html\r +* Tools used: fastp (Note. The latest version (0.20.1) of fastp has an issue displaying plot results. Using version 0.19.5 here instead until this is rectified). \r +* Input parameters: None required, but recommend removing the long reads from the workflow if not using any trimming/filtering settings. \r +\r +Workflow steps:\r +\r +Long reads: fastp settings: \r +* These settings have been changed from the defaults (so that all filtering and trimming settings are now disabled). \r +* Adapter trimming options: Disable adapter trimming: yes\r +* Filter options: Quality filtering options: Disable quality filtering: yes\r +* Filter options: Length filtering options: Disable length filtering: yes\r +* Read modification options: PolyG tail trimming: Disable\r +* Output options: output JSON report: yes\r +\r +Short reads: fastp settings:\r +* adapter trimming (default setting: adapters are auto-detected)\r +* quality filtering (default: phred quality 15), unqualified bases limit (default = 40%), number of Ns allowed in a read (default = 5)\r +* length filtering (default length = min 15)\r +* polyG tail trimming (default = on for NextSeq/NovaSeq data which is auto detected)\r +* Output options: output JSON report: yes\r +\r +Options:\r +* Change any settings in fastp for any of the input reads. 
\r +* Adapter trimming: input the actual adapter sequences. (Alternative tool for long read adapter trimming: Porechop.) \r +* Trimming n bases from ends of reads if quality less than value x (Alternative tool for trimming long reads: NanoFilt.)\r +* Discard post-trimmed reads if length is < x (e.g. for long reads, 1000 bp)\r +* Example filtering/trimming that you might do on long reads: remove adapters (can also be done with Porechop), trim bases from ends of the reads with low quality (can also be done with NanoFilt), after this can keep only reads of length x (e.g. 1000 bp) \r +\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.224.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Trim and filter reads - fastp" ; + schema1:sdDatePublished "2024-07-12 13:36:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/224/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14680 ; + schema1:creator ; + schema1:dateCreated "2021-11-08T04:56:09Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """Trim and filter reads; can run alone or as part of a combined workflow for large genome assembly. \r +\r +* What it does: Trims and filters raw sequence reads according to specified settings. \r +* Inputs: Long reads (format fastq); Short reads R1 and R2 (format fastq) \r +* Outputs: Trimmed and filtered reads: fastp_filtered_long_reads.fastq.gz (But note: no trimming or filtering is on by default), fastp_filtered_R1.fastq.gz, fastp_filtered_R2.fastq.gz\r +* Reports: fastp report on long reads, html; fastp report on short reads, html\r +* Tools used: fastp (Note. The latest version (0.20.1) of fastp has an issue displaying plot results. 
Using version 0.19.5 here instead until this is rectified). \r +* Input parameters: None required, but recommend removing the long reads from the workflow if not using any trimming/filtering settings. \r +\r +Workflow steps:\r +\r +Long reads: fastp settings: \r +* These settings have been changed from the defaults (so that all filtering and trimming settings are now disabled). \r +* Adapter trimming options: Disable adapter trimming: yes\r +* Filter options: Quality filtering options: Disable quality filtering: yes\r +* Filter options: Length filtering options: Disable length filtering: yes\r +* Read modification options: PolyG tail trimming: Disable\r +* Output options: output JSON report: yes\r +\r +Short reads: fastp settings:\r +* adapter trimming (default setting: adapters are auto-detected)\r +* quality filtering (default: phred quality 15), unqualified bases limit (default = 40%), number of Ns allowed in a read (default = 5)\r +* length filtering (default length = min 15)\r +* polyG tail trimming (default = on for NextSeq/NovaSeq data which is auto detected)\r +* Output options: output JSON report: yes\r +\r +Options:\r +* Change any settings in fastp for any of the input reads. \r +* Adapter trimming: input the actual adapter sequences. (Alternative tool for long read adapter trimming: Porechop.) \r +* Trimming n bases from ends of reads if quality less than value x (Alternative tool for trimming long reads: NanoFilt.)\r +* Discard post-trimmed reads if length is < x (e.g. for long reads, 1000 bp)\r +* Example filtering/trimming that you might do on long reads: remove adapters (can also be done with Porechop), trim bases from ends of the reads with low quality (can also be done with NanoFilt), after this can keep only reads of length x (e.g. 
1000 bp) \r +\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "Large-genome-assembly" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Trim and filter reads - fastp" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/224?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 282014 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A pipeline to demultiplex, QC and map Nanopore data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1002?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/nanoseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nanoseq" ; + schema1:sdDatePublished "2024-07-12 13:20:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1002/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8657 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:05Z" ; + schema1:dateModified "2024-06-11T12:55:05Z" ; + schema1:description "A pipeline to demultiplex, QC and map Nanopore data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1002?version=5" ; + schema1:keywords "Alignment, demultiplexing, nanopore, QC" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nanoseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1002?version=3" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:description """### Workflow Kallisto RNAseq \r +**(pseudoalignment on transcripts)**\r + - Workflow Illumina Quality: https://workflowhub.eu/workflows/336?version=1 \r + - kallisto\r +\r +**All tool CWL files and other workflows can be found here:**
\r + Tools: https://git.wur.nl/unlock/cwl/-/tree/master/cwl
\r + Workflows: https://git.wur.nl/unlock/cwl/-/tree/master/cwl/workflows\r +\r +**How to setup and use an UNLOCK workflow:**
\r +https://m-unlock.gitlab.io/docs/setup/setup.html\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/344?version=1" ; + schema1:isBasedOn "https://gitlab.com/m-unlock/cwl/-/blob/master/cwl/workflows/workflow_RNAseq_kallisto.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Kallisto RNAseq Workflow" ; + schema1:sdDatePublished "2024-07-12 13:34:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/344/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 16611 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4610 ; + schema1:creator , + ; + schema1:dateCreated "2022-05-20T10:10:26Z" ; + schema1:dateModified "2023-01-16T13:59:53Z" ; + schema1:description """### Workflow Kallisto RNAseq \r +**(pseudoalignment on transcripts)**\r + - Workflow Illumina Quality: https://workflowhub.eu/workflows/336?version=1 \r + - kallisto\r +\r +**All tool CWL files and other workflows can be found here:**
\r + Tools: https://git.wur.nl/unlock/cwl/-/tree/master/cwl
\r + Workflows: https://git.wur.nl/unlock/cwl/-/tree/master/cwl/workflows\r +\r +**How to setup and use an UNLOCK workflow:**
\r +https://m-unlock.gitlab.io/docs/setup/setup.html\r +\r +""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Kallisto RNAseq Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/344?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + ; + ns1:output , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "MetaDEGalaxy: Galaxy workflow for differential abundance analysis of 16s metagenomic data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/232?version=1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for 16S_biodiversity_for_overlap_paired_end" ; + schema1:sdDatePublished "2024-07-12 13:36:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/232/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 62961 ; + schema1:dateCreated "2021-11-10T00:15:14Z" ; + schema1:dateModified "2024-04-17T04:19:28Z" ; + schema1:description "MetaDEGalaxy: Galaxy workflow for differential abundance analysis of 16s metagenomic data" ; + schema1:isPartOf ; + schema1:keywords "MetaDEGalaxy" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "16S_biodiversity_for_overlap_paired_end" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/232?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description "This is a Galaxy workflow for performing molecular dynamics simulations and analysis with coronavirus helicases in the Apo or unbound state. The associated input files can be found at: https://zenodo.org/records/7492987. The associated output files can be found at: https://zenodo.org/records/7851000." ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.764.1" ; + schema1:isBasedOn "https://zenodo.org/records/7492987" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for coronavirushelicase_apo" ; + schema1:sdDatePublished "2024-07-12 13:24:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/764/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 75856 ; + schema1:creator ; + schema1:dateCreated "2024-02-16T19:18:45Z" ; + schema1:dateModified "2024-03-02T16:49:26Z" ; + schema1:description "This is a Galaxy workflow for performing molecular dynamics simulations and analysis with coronavirus helicases in the Apo or unbound state. The associated input files can be found at: https://zenodo.org/records/7492987. The associated output files can be found at: https://zenodo.org/records/7851000." 
; + schema1:keywords "covid-19, SARS-CoV-2, covid19.galaxyproject.org, NSP13, helicase, coronavirus, rna virus, MERS, molecular dynamics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "coronavirushelicase_apo" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/764?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-07-12 13:22:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=10" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9386 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:12Z" ; + schema1:dateModified "2024-06-11T12:55:12Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=10" ; + schema1:version 10 . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-04-29T08:35:24.526954" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-sr/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/963?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/airrflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/airrflow" ; + schema1:sdDatePublished "2024-07-12 13:22:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/963/ro_crate?version=8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9868 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:37Z" ; + schema1:dateModified "2024-06-11T12:54:37Z" ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/963?version=13" ; + schema1:keywords "airr, b-cell, immcantation, immunorepertoire, repseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/airrflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/963?version=8" ; + schema1:version 8 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=12" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-07-12 13:21:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=12" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10982 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:50Z" ; + schema1:dateModified "2024-06-11T12:54:50Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=12" ; + schema1:version 12 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. 
\r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +****\r +\r +About this workflow:\r +\r +* Inputs: multiple transcriptome.gtfs from different tissues, genome.fasta, coding_seqs.fasta, non_coding_seqs.fasta \r +* Runs StringTie merge to combine transcriptomes, with default settings except for -m = 30 and -F = 0.1, to produce a merged_transcriptomes.gtf. \r +* Runs Convert GTF to BED12 with default settings, to produce a merged_transcriptomes.bed. \r +* Runs bedtools getfasta with default settings except for -name = yes, -s = yes, -split - yes, to produce a merged_transcriptomes.fasta\r +* Runs CPAT to generate seqs with high coding probability. \r +* Filters out non-coding seqs from the merged_transcriptomes.fasta\r +* Output: filtered_merged_transcriptomes.fasta""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.878.1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Combine transcripts - TSI" ; + schema1:sdDatePublished "2024-07-12 13:22:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/878/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 22498 ; + schema1:creator , + ; + schema1:dateCreated "2024-05-08T07:07:37Z" ; + schema1:dateModified "2024-05-09T04:06:49Z" ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. 
\r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +****\r +\r +About this workflow:\r +\r +* Inputs: multiple transcriptome.gtfs from different tissues, genome.fasta, coding_seqs.fasta, non_coding_seqs.fasta \r +* Runs StringTie merge to combine transcriptomes, with default settings except for -m = 30 and -F = 0.1, to produce a merged_transcriptomes.gtf. \r +* Runs Convert GTF to BED12 with default settings, to produce a merged_transcriptomes.bed. \r +* Runs bedtools getfasta with default settings except for -name = yes, -s = yes, -split - yes, to produce a merged_transcriptomes.fasta\r +* Runs CPAT to generate seqs with high coding probability. \r +* Filters out non-coding seqs from the merged_transcriptomes.fasta\r +* Output: filtered_merged_transcriptomes.fasta""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "TSI-annotation" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Combine transcripts - TSI" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/878?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 641824 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "The workflow takes a trimmed HiFi reads collection, Hap1/Hap2 contigs, and the values for transition parameter and max coverage depth (calculated from WF1) to run Purge_Dups. It produces purged Hap1 and Hap2 contigs assemblies, and runs all the QC analysis (gfastats, BUSCO, and Merqury)." 
; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.606.2" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/galaxy" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ERGA HiFi Hap1Hap2 Purge+QC v2309 (WF3)" ; + schema1:sdDatePublished "2024-07-12 13:25:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/606/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 73154 ; + schema1:creator , + ; + schema1:dateCreated "2024-01-02T13:04:04Z" ; + schema1:dateModified "2024-03-13T09:53:44Z" ; + schema1:description "The workflow takes a trimmed HiFi reads collection, Hap1/Hap2 contigs, and the values for transition parameter and max coverage depth (calculated from WF1) to run Purge_Dups. It produces purged Hap1 and Hap2 contigs assemblies, and runs all the QC analysis (gfastats, BUSCO, and Merqury)." ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/606?version=1" ; + schema1:isPartOf ; + schema1:keywords "ERGA, Assembly+QC, HiFi" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ERGA HiFi Hap1Hap2 Purge+QC v2309 (WF3)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/3.Purging/Galaxy-Workflow-ERGA_HiFi_Hap1Hap2_Purge_QC_v2310_(WF3).ga" ; + schema1:version 2 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 235117 ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/3.Purging/pics/Purge_hifi_2309.png" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Predict variants and drug resistance from M. 
tuberculosis sequence samples (Illumina)" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1035?version=1" ; + schema1:license "AGPL-3.0-or-later" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for TB Variant Analysis v1.0" ; + schema1:sdDatePublished "2024-07-12 13:18:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1035/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 46378 ; + schema1:creator ; + schema1:dateCreated "2024-06-07T19:07:04Z" ; + schema1:dateModified "2024-06-12T13:38:29Z" ; + schema1:description "Predict variants and drug resistance from M. tuberculosis sequence samples (Illumina)" ; + schema1:keywords "pathogen, tuberculosis" ; + schema1:license "https://spdx.org/licenses/AGPL-3.0-or-later" ; + schema1:name "TB Variant Analysis v1.0" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1035?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1027?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/viralrecon" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/viralrecon" ; + schema1:sdDatePublished "2024-07-12 13:18:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1027/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9965 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:20Z" ; + schema1:dateModified "2024-06-11T12:55:20Z" ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1027?version=10" ; + schema1:keywords "Amplicon, ARTIC, Assembly, covid-19, covid19, illumina, long-read-sequencing, Metagenomics, nanopore, ONT, oxford-nanopore, SARS-CoV-2, variant-calling, viral, Virus" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/viralrecon" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1027?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-07-12 13:20:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7318 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:58Z" ; + schema1:dateModified "2024-06-11T12:54:58Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=22" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-07-12 13:21:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=22" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 18600 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-07-05T03:02:52Z" ; + schema1:dateModified "2024-07-05T03:02:52Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=22" ; + schema1:version 22 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + , + , + , , , , @@ -877,648 +76857,155957 @@ ; schema1:identifier "https://workflowhub.eu/workflows/8?version=1" ; schema1:license "MIT" ; - schema1:mainEntity ; - schema1:name "Research Object Crate for Genomics - SE Variation" ; - schema1:sdDatePublished "2024-06-17 10:59:51 +0100" ; - schema1:url "https://workflowhub.eu/workflows/8/ro_crate?version=1" . + schema1:mainEntity ; + schema1:name "Research Object Crate for Genomics - SE Variation" ; + schema1:sdDatePublished "2024-07-12 13:37:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/8/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 6391 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 33953 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T12:54:10Z" ; + schema1:dateModified "2023-01-16T13:40:12Z" ; + schema1:description "Analysis of variation within individual COVID-19 samples using Illumina Single End data. More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Genomics - SE Variation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/8?version=1" ; + schema1:version 1 ; + ns1:input , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 11562 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=15" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-07-12 13:18:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=15" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10491 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:39Z" ; + schema1:dateModified "2024-06-11T12:54:39Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=15" ; + schema1:version 15 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:description """Workflow for sequencing with ONT nanopore, from basecalling to assembly quality.\r +Steps:\r + - Kraken2 (taxonomic classification of FASTQ reads)\r + - Krona (classification visualization)\r + - Flye (de novo assembly)\r + - Medaka (assembly polishing)\r + - QUAST (assembly quality reports)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/254?version=2" ; + schema1:isBasedOn "https://git.wur.nl/unlock/cwl/-/blob/master/cwl/workflows/workflow_nanopore_assembly.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nanopore workflow with direct FASTQ reads inputs (without Guppy)" ; + schema1:sdDatePublished "2024-07-12 13:34:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/254/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 26748 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7733 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-01-07T09:32:14Z" ; + schema1:dateModified "2022-04-20T09:12:00Z" ; + schema1:description """Workflow for sequencing with ONT nanopore, from basecalling to assembly quality.\r +Steps:\r + - Kraken2 (taxonomic classification of FASTQ reads)\r + - Krona (classification visualization)\r + - Flye (de novo assembly)\r + - Medaka (assembly polishing)\r + - QUAST (assembly quality reports)\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/254?version=2" ; + schema1:keywords "nanopore, Genomics, Metagenomics" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "nanopore workflow with direct FASTQ reads inputs (without Guppy)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/254?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + ; + ns1:output , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=14" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-07-12 13:21:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=14" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11179 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:50Z" ; + schema1:dateModified "2024-06-11T12:54:50Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=14" ; + schema1:version 14 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Generates Dose-response curve fits on cell-based toxicity data. Outliers of replicate data-sets can be removed by setting a threshold for standard deviation (here set to 25). Curve fits for compounds showing low response can be removed by setting a threshold for minimum activity (here set to 75% confluence).\r +This workflow needs R-Server to run in the back-end. Start R and run the following command: library(Rserve); Rserve(args = "--vanilla")""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/387?version=1" ; + schema1:isBasedOn "https://hub.knime.com/fraunhoferitmp/spaces/Public/latest/Dose_Response_Cell-based-Assay/DRC_template_toxicity~pmQpY43FY6lczWF8" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for DRC_template_toxicity" ; + schema1:sdDatePublished "2024-07-12 13:35:09 +0100" ; + schema1:url "https://workflowhub.eu/workflows/387/ro_crate?version=1" . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 48768 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 681435 ; + schema1:creator ; + schema1:dateCreated "2022-09-26T09:15:13Z" ; + schema1:dateModified "2023-01-16T14:02:31Z" ; + schema1:description """Generates Dose-response curve fits on cell-based toxicity data. Outliers of replicate data-sets can be removed by setting a threshold for standard deviation (here set to 25). Curve fits for compounds showing low response can be removed by setting a threshold for minimum activity (here set to 75% confluence).\r +This workflow needs R-Server to run in the back-end. Start R and run the following command: library(Rserve); Rserve(args = "--vanilla")""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "DRC_template_toxicity" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/387?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.127.6" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (Cluster90)" ; + schema1:sdDatePublished "2024-07-12 13:34:01 
+0100" ; + schema1:url "https://workflowhub.eu/workflows/127/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 39259 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-03-04T14:30:29Z" ; + schema1:dateModified "2024-05-14T10:11:15Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in 
Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/127?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (Cluster90)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_virtual-screening/blob/main/biobb_wf_virtual-screening/notebooks/clusterBindingSite/wf_vs_clusterBindingSite.ipynb" ; + schema1:version 6 . + + a schema1:Dataset ; + schema1:datePublished "2024-04-09T08:37:08.173460" ; + schema1:hasPart , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/fastq-to-matrix-10x" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + ; + schema1:name "fastq-to-matrix-10x/scrna-seq-fastq-to-matrix-10x-cellplex" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + , + ; + schema1:name "fastq-to-matrix-10x/scrna-seq-fastq-to-matrix-10x-v3" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/fastq-to-matrix-10x" ; + schema1:version "0.4" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Generic variant calling\r +\r +\r +A generic workflow for identification of variants in a haploid genome such as genomes of bacteria or viruses. It can be readily used on MonkeyPox. The workflow accepts two inputs:\r +\r +- A genbank file with the reference genomes\r +- A collection of paired fastqsanger files\r +\r +The workflow outputs a collection of VCF files for each sample (each fastq pair). These VCF files serve as input to the [Reporting workflow](https://workflowhub.eu/workflows/354). \r +\r +Workflow can be accessed directly on [usegalaxy.org](https://usegalaxy.org/u/aun1/w/generic-variation-analysis-on-wgs-pe-data)\r +\r +The general idea of the workflow is:\r +\r +![](https://i.imgur.com/rk40Y4t.png)""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/353?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Generic variation analysis on WGS PE data" ; + schema1:sdDatePublished "2024-07-12 13:35:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/353/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 38255 ; + schema1:creator ; + schema1:dateCreated "2022-06-01T12:46:37Z" ; + schema1:dateModified "2023-02-13T14:06:44Z" ; + schema1:description """# Generic variant calling\r +\r +\r +A generic workflow for identification of variants in a haploid genome such as genomes of bacteria or viruses. It can be readily used on MonkeyPox. The workflow accepts two inputs:\r +\r +- A genbank file with the reference genomes\r +- A collection of paired fastqsanger files\r +\r +The workflow outputs a collection of VCF files for each sample (each fastq pair). 
These VCF files serve as input to the [Reporting workflow](https://workflowhub.eu/workflows/354). \r +\r +Workflow can be accessed directly on [usegalaxy.org](https://usegalaxy.org/u/aun1/w/generic-variation-analysis-on-wgs-pe-data)\r +\r +The general idea of the workflow is:\r +\r +![](https://i.imgur.com/rk40Y4t.png)""" ; + schema1:keywords "mpxv, generic" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Generic variation analysis on WGS PE data" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/353?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-07-12 13:18:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3868 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:01Z" ; + schema1:dateModified "2024-06-11T12:55:01Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=13" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-07-12 13:18:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=13" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11292 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:02Z" ; + schema1:dateModified "2024-06-11T12:55:02Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=13" ; + schema1:version 13 . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 337112 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.0.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.0.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.0.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.0.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.0.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.0.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.0.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.0.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.1.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.1.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.1.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.1.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.1.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.1.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.1.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.1.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.2.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.2.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.2.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.2.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.2.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.2.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.2.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.2.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.3.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.3.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.3.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.3.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.3.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.3.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.3.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.3.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.4.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.4.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.4.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.4.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.4.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.4.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.4.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.4.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.5.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.5.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.5.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.5.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.5.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.5.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.5.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.5.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.6.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.6.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.6.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.6.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.6.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.6.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.6.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.6.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.7.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.7.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.7.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.7.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.7.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.7.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.7.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.7.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.0.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.0.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.0.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.0.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.0.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.0.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.0.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.0.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.1.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.1.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.1.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.1.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.1.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.1.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.1.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.1.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.2.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.2.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.2.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.2.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.2.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.2.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.2.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.2.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.3.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.3.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.3.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.3.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.3.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.3.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.3.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.3.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.4.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.4.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.4.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.4.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.4.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.4.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.4.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.4.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.5.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.5.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.5.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.5.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.5.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.5.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.5.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.5.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.6.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.6.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.6.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.6.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.6.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.6.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.6.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.6.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.7.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.7.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.7.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.7.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.7.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.7.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.7.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.7.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Post-genome assembly quality control workflow using Quast, BUSCO, Meryl, Merqury and Fasta Statistics" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.403.2" ; + schema1:isBasedOn "https://github.com/AustralianBioCommons/Genome-assessment-post-assembly.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genome assessment post assembly v1.1" ; + schema1:sdDatePublished "2024-07-12 13:23:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/403/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15117 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2023-05-09T00:59:04Z" ; + schema1:dateModified "2023-05-09T01:34:26Z" ; + schema1:description "Post-genome assembly quality control workflow using Quast, BUSCO, Meryl, Merqury and Fasta Statistics" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/403?version=3" ; + schema1:isPartOf , + ; + schema1:keywords "HiFi, hifiasm, QC, Quast, Meryl, Merqury, BUSCO" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Genome assessment post assembly v1.1" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/403?version=2" ; + schema1:version 2 ; + ns1:input , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# PacBio-HiFi-genome-assembly-using-hifiasm\r +\r +## General recommendations for using `PacBio-HiFi-genome-assembly-using-hifiasm`\r +Please see the [`Genome assembly with hifiasm on Galaxy Australia`](https://australianbiocommons.github.io/how-to-guides/genome_assembly/hifi_assembly) guide.\r +\r +## Acknowledgements\r +\r +The workflow & the [doc_guidelines template used](https://github.com/AustralianBioCommons/doc_guidelines) are \r +supported by the Australian BioCommons via Bioplatforms Australia funding, the Australian Research Data Commons \r +(https://doi.org/10.47486/PL105) and the Queensland Government RICF programme. Bioplatforms Australia and the \r +Australian Research Data Commons are enabled by the National Collaborative Research Infrastructure Strategy (NCRIS).\r +\r +\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/221?version=2" ; + schema1:isBasedOn "https://github.com/AustralianBioCommons/PacBio-HiFi-genome-assembly-using-hifiasm" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PacBio HiFi genome assembly using hifiasm v2.0" ; + schema1:sdDatePublished "2024-07-12 13:34:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/221/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 30627 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-09-20T00:59:58Z" ; + schema1:dateModified "2022-09-20T01:01:01Z" ; + schema1:description """# PacBio-HiFi-genome-assembly-using-hifiasm\r +\r +## General recommendations for using `PacBio-HiFi-genome-assembly-using-hifiasm`\r +Please see the [`Genome assembly with hifiasm on Galaxy Australia`](https://australianbiocommons.github.io/how-to-guides/genome_assembly/hifi_assembly) guide.\r +\r +## Acknowledgements\r +\r +The workflow & the [doc_guidelines template used](https://github.com/AustralianBioCommons/doc_guidelines) are \r +supported by the Australian BioCommons via Bioplatforms Australia funding, the Australian Research Data Commons \r +(https://doi.org/10.47486/PL105) and the Queensland Government RICF programme. Bioplatforms Australia and the \r +Australian Research Data Commons are enabled by the National Collaborative Research Infrastructure Strategy (NCRIS).\r +\r +\r +\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/221?version=2" ; + schema1:isPartOf , + ; + schema1:keywords "FASTQ, hifiasm, HiFi, genome_assembly" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "PacBio HiFi genome assembly using hifiasm v2.0" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/221?version=2" ; + schema1:version 2 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Just the cleaning then assembly of all reads. 
TO explore further follow one of the paths described in \"Global view\" (WF 0) " ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/103?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for 3: Plant virus exploration" ; + schema1:sdDatePublished "2024-07-12 13:34:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/103/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8538 ; + schema1:dateCreated "2021-02-04T09:11:37Z" ; + schema1:dateModified "2023-02-13T14:06:45Z" ; + schema1:description "Just the cleaning then assembly of all reads. TO explore further follow one of the paths described in \"Global view\" (WF 0) " ; + schema1:keywords "Virology, exploration, DE_NOVO" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "3: Plant virus exploration" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/103?version=1" ; + schema1:version 1 ; + ns1:output , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=28" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-07-12 13:21:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=28" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13588 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:51Z" ; + schema1:dateModified "2024-06-11T12:54:51Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=28" ; + schema1:version 28 . + + a schema1:Dataset ; + schema1:datePublished "2024-03-26T19:05:29.978485" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/assembly-with-flye" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "assembly-with-flye/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Abstract CWL Automatically generated from the Galaxy workflow file: Copy of Genome-wide alternative splicing analysis" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/483?version=1" ; + schema1:license "CC-BY-NC-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for StringTie workflow" ; + schema1:sdDatePublished "2024-07-12 13:33:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/483/ro_crate?version=1" . 
+ + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 5852 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 23236 ; + schema1:creator ; + schema1:dateCreated "2023-05-25T23:15:06Z" ; + schema1:dateModified "2023-05-25T23:15:06Z" ; + schema1:description "Abstract CWL Automatically generated from the Galaxy workflow file: Copy of Genome-wide alternative splicing analysis" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-NC-4.0" ; + schema1:name "StringTie workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/483?version=1" ; + schema1:version 1 ; + ns1:input , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 15124 . + + a schema1:Dataset ; + schema1:datePublished "2024-04-29T08:20:28.253806" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 320859 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 769 ; + schema1:dateModified "2024-03-21T11:54:38+00:00" ; + schema1:name "matmul_case1.csv" ; + schema1:sdDatePublished "2024-03-22T11:45:33+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 765 ; + schema1:dateModified "2023-11-28T00:25:59+00:00" ; + schema1:name "matmul_case2.csv" ; + schema1:sdDatePublished "2024-03-22T11:45:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 717 ; + schema1:dateModified "2023-11-28T19:49:11+00:00" ; + schema1:name "matmul_case3.csv" ; + schema1:sdDatePublished "2024-03-22T11:45:33+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 
[Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.128.5" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (PDBe REST API)" ; + schema1:sdDatePublished "2024-07-12 13:33:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/128/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 36817 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-03-04T14:32:45Z" ; + schema1:dateModified "2024-05-14T10:12:11Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/128?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (PDBe REST API)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_wf_virtual-screening/blob/main/biobb_wf_virtual-screening/notebooks/ebi_api/wf_vs_ebi_api.ipynb" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/965?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/atacseq" ; + schema1:sdDatePublished "2024-07-12 13:22:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/965/ro_crate?version=7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11439 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:42Z" ; + schema1:dateModified "2024-06-11T12:54:42Z" ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/965?version=8" ; + schema1:keywords "ATAC-seq, chromatin-accessibiity" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/atacseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/965?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + schema1:datePublished "2023-09-21T11:29:52.138601" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/repeatmasking" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "repeatmasking/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.12" . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-11-23T13:33:07.956730" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-decontamination-VGP9" ; + schema1:license "BSD-3-Clause" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-decontamination-VGP9/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# scRNA-Seq pipelines\r +\r +Here we forge the tools to analyze single cell RNA-Seq experiments. The analysis workflow is based on the Bioconductor packages [*scater*](https://bioconductor.org/packages/devel/bioc/vignettes/scater/inst/doc/overview.html) and [*scran*](https://bioconductor.org/packages/devel/bioc/vignettes/scran/inst/doc/scran.html) as well as the Bioconductor workflows by Lun ATL, McCarthy DJ, & Marioni JC [*A step-by-step workflow for low-level analysis of single-cell RNA-seq data.*](http://doi.org/10.12688/f1000research.9501.1) F1000Res. 2016 Aug 31 [revised 2016 Oct 31];5:2122 and Amezquita RA, Lun ATL et al. [*Orchestrating Single-Cell Analysis with Bioconductor*](https://osca.bioconductor.org/index.html) Nat Methods. 2020 Feb;17(2):137-145.\r +\r +## Implemented protocols\r + - MARS-Seq (massively parallel single-cell RNA-sequencing): The protocol is based on the publications of Jaitin DA, et al. (2014). *Massively parallel single-cell RNA-seq for marker-free decomposition of tissues into cell types.* Science (New York, N.Y.), 343(6172), 776–779. https://doi.org/10.1126/science.1247651 and Keren-Shaul H., et al. (2019). *MARS-seq2.0: an experimental and analytical pipeline for indexed sorting combined with single-cell RNA sequencing.* Nature Protocols. https://doi.org/10.1038/s41596-019-0164-4. 
The MARS-Seq library preparation protocol is given [here](https://github.com/imbforge/NGSpipe2go/blob/master/resources/MARS-Seq_protocol_Step-by-Step_MML.pdf). The sequencing reads are demultiplexed according to the respective pool barcodes before they are used as input for the analysis pipeline. \r +- Smart-seq2: Libraries are generated using the [Smart-seq2 kit](http://www.nature.com/nmeth/journal/v10/n11/full/nmeth.2639.html). \r +\r +## Pipeline Workflow\r +All analysis steps are illustrated in the pipeline [flowchart](https://www.draw.io/?lightbox=1&highlight=0000ff&edit=_blank&layers=1&nav=1&title=scRNA-Seq#R7R3ZcpvK8mtUlTxIxSIh6dF2oiyVODm2Uzk5LykEI4kYAWGxrXz9nZ6FdUBIQoDjm%2BTeIwYYZqZ7eu%2BegXq1fXrn697ms2sie6BI5tNAfTNQFFmdTPB%2FoGVHW6bKjDasfctkDyUNt9YfxBol1hpZJgoyD4aua4eWl200XMdBRphp033ffcw%2BtnLt7Fc9fY0KDbeGbhdbv1tmuGGtsjZPbrxH1nrDPj1TpvTGUjfu174bOex7jusgemer827YHIONbrqPqSb17UC98l03pL%2B2T1fIhmXlK0bfW5TcjYfsIyes88J76eNqcz0L7YfNzfL6%2Fe3i55%2F3Qw6AB92O2FoMFM3GHV6a1gOsrm2tHXJD%2Bx3BUC99sgzxJf61Zv8lry39fAseEumLt5LFCHd87Tfh1sa%2FZHzP1pfIvoyX9Mq1XZ88pC7IH%2FxIEPrufQwkvIiXK9cJGUbJGoxbDzbIZD2SfuKrlWXbqU4R%2BRN3yu8QGKqXhru1DHwp4d9rWw8C9juGo0T6D%2FXQcmGFhnMpnl4aKgxQD8gP0VOqiUHpHXK3KPR3%2BBF2dzbR6CtsMw1nDAsfU6g5ZW2bFFaOJZVtCbYd1nHfCV7gHww1xGjyYFz%2Fc%2Fs9%2Bhi9%2BfPf3Ye7xY%2Fg4b%2Bh%2FKLx5K0Gf8vwZO3rpoWhfQz6NIAtykzKYEtMU1PYMpYE2KLxB0%2FBlj%2Ffwzd30r%2F%2FfVzcvPW3snn3bvdhOJELwEMmprfs0vXDjbt2Hd1%2Bm7SmIAALkzzzyXU9BpZfKAx3DIR6FLq4KYUU6MkK%2F4XXRxN29SN1580T65lc7PiFg%2Bebegkuf6TvJa%2BRq%2BQ98wLYDr40ALYAaWhcWDYfThEBMwhXCvnAjXwDVexFtsdD3V%2BjsOo59iAsfSUi%2BcjGNOwhywpFSEFexfPWd6kHPNdywiDV81doSPBzPJtk8VMZZ9lU7vnJbFb1PP5BR5CgZzyVWhj75e7L%2Fa%2FN7Ofvf%2B9CZfHz%2BrusLYez54KwRaw6PyKNZ00j0km8SN3DiRIgctYDlH8YEDBc4AdkzXsq50vwcK1exsJerpTBxeVWtxwAiuUh28LcADdeptkc%2FcY%2Bflh7JJXzCTzdybfpwAp8tMr0uglDkLYvADrKYm2FmMuOfNsbORg7lIW1Xa5cjCn45%2FW7W5iasnbxxRD%2Fb2m7S%2BDe6AH0ggWfN6YKi8C4ub4I0G%2F4udX9EP8c8fsjzL7dh1167JLBWGXSiFF%2BGPqRY%2Bgh3m2FeVb0Gq%2FrIlkAKfUvhocuEEVE61YCurzg0gU2VvbSDbwxYILGwV
3e6UnQbh6sYvLQCJEZEmJshfoSv50iM1j%2FRivLsYhCokivlnDndaXA%2Fezmbbu6GRBeE3jIINwGa%2BEwUdC5XTOyAQ2f%2F%2FwQxnIntHRgh7pjxpMbwrStFRY4CdPXMU9F%2FlETbgAHn5ARhbDeKRwsGUm5MncGHUgTqEBc20mrQPK4ARVILDMJhJTcEmBF0IOfWFH0sMoIk6cCI5fuZrwhtjyNhctVKSXVXkO5sGSKYMV422E6Q0HIHypZpWCa7cBdrQJgQjkgHCboiw0Zky4lff77R0bq36eaDtKKaaKnlqimTWoIslJX12xJQ%2FgWIP%2FL8heYgDGpBCtQPQ1g6T4BRbOcNaVpS9c3kT%2FEzSAaEEmBIT6heBLG8kl8J23jvSCihIrG8Dd%2BwtNNM%2B5b6Z8kRuEXjMKnjEHucOkr29FJEpeAI3GBpgi%2FcuDkmRXaZmeIUQdL8gaBTqpDqfBUDMPUU0o5IMOla%2B4KjQURKzRjPUXfejYaVAljycPF13mLxRsMSu14t5KDRYFUX1at%2FqGxOOaKWSx133DN5qcR9yu98tHvyPIB%2BzBwcf%2FS54ub2%2BEtIHL57GoO5OD5eq5rNz5Z2qn04U3ldF93Ml9MnxuELuuOTdWJtkvkdzIvsNU2PivSqfTKxry%2Fo2kZTWInnxbuNNo6HQOMEO7m5iZhfWkhk%2F%2BDn9IrA%2FMNn2zDR%2FhSJ3MEocJrbo6sOwn8EWtni5ywi0mNRqPmpqQ7IDW64QZ0XMnUQ%2F1cU4JGEUsn0hMRS9KabLWOoaSEX6aUpPQNUMksQ7cvmMc0BCXikvtPbbSCz7j4qZVNHEwr4lNKqxOPGytEt1iqgR4ffd2Lx3WaFq1kdDJ5pglUQpFOKM8PFuTxZUqWP0CBUzp11Tw3BY6D9NkocJ5Q%2FN%2Fi4VsOlfAlL9HZaPuQ7CC4p6XuYaQPh2xXXRAZ36GWsrJ4BN7wNTGnigxsy1pUx8u3bQpPHTVVSh7g5kysZl6mNK9ulM3Ycjl60P2gEaN%2FWZcnKaAFEJ0HIHkhy3cfLKLopC28IkSrZG%2BXsUH4Gr%2BYWRZmoyj9Ln0N5iq0JEuvXC8cHS4PpZZzL4dUqjnkqbyQKfyfyOuNxdlkueNYntbjjvIRQVnHc0eRc7xVuzP%2FYG8Nz%2BMsGM9geBZGmMjqy5Nbmgp4qh3xpPVEiollhIUehP9cEZJcR4D4HWHSF%2B6IDs7VU3cFA9YfybCJbxAGi%2Fnu%2FYB50nL8fBMtR3hvV%2FByxse3eHBA8Rex2xQeA88dvvHbGNHmmNvupeoUTqVUXUR3cpQ%2BS7hVrYS%2Bk0hO1rGURfgGCP1EygWgjYuUXtYEJEo73JlYh9ALQyy5ylVCTpC9dB9boCRKNSk5H0kY1yQJE6lpknBkEGTWQT3WpBxWnBbUKMSReDVfNo40DPpiwKqcg62a82ZTJGVvJUA%2FFYfU6bRRHBKKLT1yl%2B%2FBozKxRe1MbKlLo8Z9CdTO0ygljV%2F78XE8OT8%2Bys8mtaDRSO3aqNSWBFyAlqb52q%2FVxtve6xvVfHKt%2B2t1qPWHehzKhFoAljrvxb6fqrl9n%2BUr%2B59vOEHj9uHi8Z186%2F78cjn%2F8k19%2BrrZ3g97pD3%2FvXxo2gt8PJUPTWYn8aHauv3t3cVNbc3%2BBqvuxGjseRBkdE4dPsm3wBA%2FXIkfpyBzTiW%2B4QjguZxFmo6VdvHaMmk6ZZ0lfhMa3CR9TfwpOVoHvqzUQjNXVtYQAguZdy1vLdMkpDAHk6Tlhq2EOijJbiUJzsQBTskU2714WJNL%2FA%2Bv3hUw1wke5hW%2BlpNr%2FA8e98Mr18Fd6xYBLsII%2B4iCcFBMyG0ADdQZfDxrvSnggdBMr44PxoN6MtALlVi1ukyncQPNacmFWmGL1ncPkkQJ5iSUjnBFf4lC
LwK2A%2FUrYINfFJgOvvnbqGI53fmeY7%2BiG0%2FDIhyJxTUm467jPzw9YCZNXnj%2B%2F0LfWjbg%2B3tkPyDoddBGZM2Uc6MKk7KQKh3BnWri%2BbRneC4VUN1HQWSHQT%2BR3cAEOo3i4DS3gELbOwJWDFVFirYWVLGh6WpE%2FjPR0Iw8G%2BMdPF2dmtdqfMKL35Iaz3XZuyUPD3aruSVnvd%2BStrvu6X7EIxvyvWi4to0l81RIPdIN2I%2BcQ%2FULwc%2BAzBO1Rf4ilnqVLqXerkx%2FXJjdK%2FXOO7PTVo67N6SnKPX6CFTJnlKfVsMumUixiFdkEWwsZ%2FczMHxHD9Dvn%2FQGVu5%2FhiTRaBEYI9o2utmap0Vi5rsqDbDMByv2eXHII%2FjGaINsD%2FnB6ObkRRJ2WViYfsWqfsXjxUQv3mywJX0wQqYrTBAmO0qNOI4O%2Feya1gpoFsEM8mAiChtREEJXmr4FRuosAyrHJZGnwLlpoYMQ40ewwcQgoLROkRyETJRGN1F2Yy%2BYeVvS6nxWj8HLSgMcXlzk7fREjF%2FR1uPPM3NjazGOcf029uGkeNtZnTFn8vUXnXS84h93zk%2FqBaU06B%2BxnBXy0ROmfhbJRavrKjGBGmwptQG7MaEJPFadRkeSmEjbWvo6Q%2BSz%2B1JykznYrTJuKzbyvLGQqtKtW%2BX%2B92P4aG23MlI%2Baab0dbwcXg6nz0XVODayOkN7yvSVHEURAL5xIqNpWa5UiHyj5LBAZIrUapyteKtMzxNCN82hMx9w6bjyocCnhdzVJp1BtASDoYJl1nDnoeAo2klMlKQ2E0nrwaQMZKudh9qhmPk5HE4yJ38FyRxLPQwf597O%2Fjslj63Q26hZp7Yzs%2FGQ45OMcMqzib%2FrQ650bSirPYmT4gSDG31Z2fjSOKnc89q4VWb2k7rMDgyYIi%2BRiCmw3mMNIGXVT9xrZ%2BRn6fq1dCKHMzLtWTKyfEjVRCnWg2iTkYnjwV9gOYimTBHTmvRuJveC3k3lrEljMt8jvOeeH8%2FboXdLfXsFxkRy5ExNamdgwkJqLy2t9Xdr3UaSJx6mwYZ5OEWbPkuKlhfNJ1q3ormQolUndv5fMhceTZCmYcJVbUswr00lLkzz2%2BcPl7Ts4R3esBLkiv%2FOBl6IiEa6Lr1uEh8J7mjAnSdJIUXmu7kh6wMfIGndBLN1k5gEiBn1Rq6MAcqWwW9H0MLTwrIdm0noknEfTqNm7dOo3LlBqwn8HRSPCML3luTvIB91je9o5A%2FsY%2Fh%2BCNtm0lQNk5xIpwhKfHVuzv3rJbr%2BmnOztVEm8tHmXDWLZi2Zc%2FmA%2B2bOXSMHXbrmDouFSm2R0OcKMBMlMbl%2BIFQbeoMPImcNrvU2bLnwSSiOiAejHEyIJ9KgdUJ8BmFxPOtWWBRb%2BP5631ej0mLtzNx%2BWvg0tZ8WPiysHWbcw9hHKtXS0DAeS8%2FKGxUE2cCzrTA8d9pkSv7k8zlc5uwgeTInJVaJmmXiabkIegaZU5O6lTkrTVy9CYAtxt6Hvm7c9zT%2BlVqtYMpskMxC%2F2AFUNEsoEfk9j7VpAFszwfqjQUmprNF4gstHz2qnNB%2FCWFWW0JoWkA4KX6fD%2Fso8nX2gso3CGLnHAN4%2BTvkuJlzSiqkhO7pGpVPSKEHDAG8JZ6I%2B3GdmUQ%2B0n2F9DDyc6fxkW%2Bsw1XbMd2FKO2kDjAL0LZdmowYkCkCTlcVWx4cWQ64weq%2BTcgkSoZIT%2BW60dRSA1RavIOLpSEKC3fewr0xDXm5hXvFKnaPHDIxzzzIISO3yEDngiAacYJgvzLg5uV1H3p%2FplwSaRMnZxFMqlYVXtr5a3RJssUpBuT8LbxT2flb1GOWy3HjKUz3mJXDWUj8GvN2Urs%2FrlNA%2FWk6QTZRJWdq19DxW7vACsg6
Iy84%2FbScc53k8%2F%2BBnX5Q0HM492fOSxrE4lCxZpI8E7D22fR0aix0AT7%2F7PH97rs6x6%2Fy02rrqaNd8c54mJ1Yzz44dbLH9cfc4WE9sp7xuIy4lswfy%2FOQ%2BbpqZK3oio6%2BpaG3mIc9gE5IjpWnpj1PZ4dWIibC1M3NbVPXdV1bHt3IIxpBsv7T51EqR4zyWZtKc0q4ILq4ZR2c1%2BPtsRMA6odmEuF7RMaW%2BjYhYi%2FC1J9NWlTnNUtNnq%2FoTqdZXU3Y%2BqdtGvu5Y7KDGpWNhANMp4eFA0xnsxyKnSnhB2qf3LACLHVDAhi0QNag1mkiZWDKc2%2B6j84gqUkCE2oh14fVb4E5HBwGME%2Bjx%2FOJeMp76qfjolm8TU%2F9e%2BnjanM9C%2B2Hzc3y%2Bv3t4uef98NyR30BuYAPCPlc3mY1FrGzT2gNom2CvLS7AjtblrIzVrU5BSRhheV0VAZrKhR6Li3sLMKKLEVvhNNlCYlAVBvLFab%2FUxidEAmKpxwuYX8PUodbxgU5oa8VFUxsolTV8KoctxubWOlZdgvynZXegWPBSvPakY2v9CmOZS0vEqcsvnyFk0ay1iJtKVMxKlWJKgFY56LvkfLuIba%2FMyCXzGsE7cMudrBP89jVptOzEr1frs9TuCzP0BnGbO8L6tap2I4vze9VuRgv1ovT24F16F5qgMXMtWzmz0SQM8EjJDMcRjkTh%2BmyzL1FPRMLN2fa62Wg2wsw2OWRUyz%2FiLBTOxN2HlrxHSTfIR8ZQVHKkyp8DHV6EXfyIcSANNIR54k3grwWBSQfjaF5sUhrAZfLhIfCgDTxiN7QxEli%2F0E0XtGEoxSAd4sCLkh8RTXrL4yxEt9P1%2BjZnhBvnP3K%2FMb1rT94fnr8%2FVjGUJvZJAUlYVzcJMpsXtwkZ7Nq%2F%2FX5wEWbdrow7rratpC2cv%2BY2x%2FvrA9vl7e3T%2B7O3X388eXbsK6Rm5sQuq7ykjtdcpo9VXbv87ymx7mN3OyQCWKQ%2BATHYtQu9eJ6OwKP4B4%2BQU7UoPUZYjmBNXJx4YwW760VgA6iO8iN4JrNiljTYru3IrR8lyPbszN851N9px2XbBSegNrxMXIHEUJ88ZVUSibW64aPRT2eFAqSe6pIZvuhyULAF21izxPwchbwyplcvXUhX%2BkSbh%2F0wjjIjqu0ngv0x0L%2B0KDKuqggXPu2QiqrBlnLMB759u4S8m5hOvu4ZYId9Cqkqbnqm%2BH8wD1Xn72Op2ruUNaxoO6Qohb5qzptwPMgrhspdbu1zn4qa1UVtP26QGd1jIXD7lMd4x6f%2Fn4ayM9zMMnBMU6T3LklMyWNPnufV7STDn%2BvZ5GY9Acdz3K2XV1EElNWpReIlLcLqPuqxeafnyinIFJtO8JVFOqm7oW17Qe8JV2Y0Udb9wGsoKQrYpVluWyspk7ccRflHA02xao4unKUe3bWhHwYndrHKrN8l%2FY41j3EysS2r8Hu8eCklxf2nq1woyoyl%2B67C3wXBazkVjm1CoQeX8ZBJ6nCVwvyR8g508ut5eWzrFqVq7P1VoO%2FgxJnUb5gF2vGVM0yGBtfswMoCUvf6CbBAimrvzUU6jmf5tigKAJxKnD%2FqHzJGwcuZ8yl0UgHxBKStyI732LHns1%2F8m5EcoCS7dL8bohDxzwtQo5BE%2BNYkU6bnj5asfvtQiZX0nIHtIT0d0Xoj%2BfRzmm%2BePbIeILT%2BFcQZl2zVf0X5pSfkkGhwGbBvvvPFSVrmIKT4Rw5uWvX3%2BLP%2F9Ez2YPx2X0SCaGq19UHE3IQV8lKsIR681cUhOSIQZqCXzhjFE97BRuULuuD7lsQSUNSLS3HsEfJEhg7g0SCeRs9QK9rjusNySyV0BNeVJ%2BPjRUJ%2BHp1QcEaDm%2Bv38Zj%2Fvb54msChVpfubKjgMOC%2Bo
ssuh67ZPhBtBx6rhfZvKLPIeu6o8zvPi4ym9Rx46tDh1Cz2wvHAfKE9g7vYJx6Y61IRSkoUkQX3kdBwHEiLq9AyjFAkMKQFGIi3JqNBAi%2Fd%2BgAcGORdOTDCnJc56%2FJAZhP8vpRkTEImb7aRBaA0ELbIzX8SKtQI2e%2FdFApfDzOWl20ec5OW79SeNYw3FalcD7gvlUK%2FxzZoYU5b%2B2suWiLSTZhoTHDRu0diFUzSW6vf6VV5b5A82rtmLJqtjOBfNz1CQpyp7ECXdb8PNRDKFw%2BRtt6U3SlapD9rPj5NZ2l5aysdeQXJNIeGXNo3U7%2BBZDlNz5aZT5QIKGYio982xs5kIFTSkaHCSk1qXa1oAsC7fDYiF4mVTZTySFM608aMdYPQz9yIOTVLE5D3CFfW73Oep9%2FdbMG6HZXHKsJ%2BhY0v0aWW9Bb1VqLJp4vHdv2gpDjMxtZjFxPPUC6OAK%2BpIHqhMC5Xf%2FedokdZ4u1xzXRf1PjFemB5d0e0YCVV8KSggO%2B%2B6q5AeKF0LHYCQyHjYCD3I78TFYbCJgklxor9oMk4F8K9n8EUBQr6sjAYoOftdW5BmjxKQDWmtfr2JYiuptUOY4TwuPtGr%2BIBcGAFT6mDelSyLSWY2JMoAiOf3x2A2poChHYG%2B4RAvxNFjFA5KyKpK5jyUxa3QZnTEw2fde746ISDK48Nf40yVvJCN4TVZSKK6oXNW0ge0csZosM07mVPCl9ec96lcuFpYv43NOX8SVmMWH6cYxgm8%2BuCSr12%2F8B). Specify desired analysis details for your data in the respective *essential.vars.groovy* file (see below) and run the selected pipeline *marsseq.pipeline.groovy* or *smartsseq.pipeline.groovy* as described [here](https://gitlab.rlp.net/imbforge/NGSpipe2go/-/blob/master/README.md). The analysis allows further parameter fine-tuning subsequent the initial analysis e.g. for plotting and QC thresholding. Therefore, a customisable *sc.report.Rmd* file will be generated in the output reports folder after running the pipeline. Go through the steps and modify the default settings where appropriate. Subsequently, the *sc.report.Rmd* file can be converted to a final html report using the *knitr* R-package.\r +\r +### The pipelines includes:\r +- FastQC, MultiQC and other tools for rawdata quality control\r +- Adapter trimming with Cutadapt\r +- Mapping to the genome using STAR\r +- generation of bigWig tracks for visualisation of alignment\r +- Quantification with featureCounts (Subread) and UMI-tools (if UMIs are used for deduplication)\r +- Downstream analysis in R using a pre-designed markdown report file (*sc.report.Rmd*). 
Modify this file to fit your custom parameter and thresholds and render it to your final html report. The Rmd file uses, among others, the following tools and methods:\r + - QC: the [scater](http://bioconductor.org/packages/release/bioc/html/scater.html) package.\r + - Normalization: the [scran](http://bioconductor.org/packages/release/bioc/html/scran.html) package.\r + - Differential expression analysis: the [scde](http://bioconductor.org/packages/release/bioc/html/scde.html) package.\r + - Trajectory analysis (pseudotime): the [monocle](https://bioconductor.org/packages/release/bioc/html/monocle.html) package.\r +\r +### Pipeline parameter settings\r +- essential.vars.groovy: essential parameter describing the experiment \r + - project folder name\r + - reference genome\r + - experiment design\r + - adapter sequence, etc.\r +- additional (more specialized) parameter can be given in the var.groovy-files of the individual pipeline modules \r +- targets.txt: comma-separated txt-file giving information about the analysed samples. The following columns are required \r + - sample: sample identifier. Must be a unique substring of the input sample file name (e.g. common prefixes and suffixes may be removed). These names are grepped against the count file names to merge targets.txt to the count data.\r + - plate: plate ID (number) \r + - row: plate row (letter)\r + - col: plate column (number)\r + - cells: 0c/1c/10c (control wells)\r + - group: default variable for cell grouping (e.g. by condition)\r + \r + for pool-based libraries like MARSseq required additionally:\r + - pool: the pool ID comprises all cells from 1 library pool (i.e. a set of unique cell barcodes; the cell barcodes are re-used in other pools). Must be a unique substring of the input sample file name. For pool-based design, the pool ID is grepped against the respective count data filename instead of the sample name as stated above.\r + - barcode: cell barcodes used as cell identifier in the count files. 
After merging the count data with targets.txt, the barcodes are replaced with sample IDs given in the sample column (i.e. here, sample names need not be a substring of input sample file name).\r +\r +### Programs required\r +- FastQC\r +- STAR\r +- Samtools\r +- Bedtools\r +- Subread\r +- Picard\r +- UCSC utilities\r +- RSeQC\r +- UMI-tools\r +- R\r +\r +## Resources\r +- QC: the [scater](http://bioconductor.org/packages/release/bioc/html/scater.html) package.\r +- Normalization: the [scran](http://bioconductor.org/packages/release/bioc/html/scran.html) package.\r +- Trajectory analysis (pseudotime): the [monocle](https://bioconductor.org/packages/release/bioc/html/monocle.html) package.\r +- A [tutorial](https://scrnaseq-course.cog.sanger.ac.uk/website/index.html) from Hemberg lab\r +- Luecken and Theis 2019 [Current best practices in single‐cell RNA‐seq analysis: a tutorial](https://www.embopress.org/doi/10.15252/msb.20188746)\r +\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/62?version=1" ; + schema1:isBasedOn "https://gitlab.rlp.net/imbforge/NGSpipe2go/-/tree/master/pipelines/scRNAseq" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNA-seq Smart-seq 2" ; + schema1:sdDatePublished "2024-07-12 13:36:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/62/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2160 ; + schema1:creator ; + schema1:dateCreated "2020-10-07T07:47:46Z" ; + schema1:dateModified "2023-01-16T13:44:52Z" ; + schema1:description """# scRNA-Seq pipelines\r +\r +Here we forge the tools to analyze single cell RNA-Seq experiments. 
The analysis workflow is based on the Bioconductor packages [*scater*](https://bioconductor.org/packages/devel/bioc/vignettes/scater/inst/doc/overview.html) and [*scran*](https://bioconductor.org/packages/devel/bioc/vignettes/scran/inst/doc/scran.html) as well as the Bioconductor workflows by Lun ATL, McCarthy DJ, & Marioni JC [*A step-by-step workflow for low-level analysis of single-cell RNA-seq data.*](http://doi.org/10.12688/f1000research.9501.1) F1000Res. 2016 Aug 31 [revised 2016 Oct 31];5:2122 and Amezquita RA, Lun ATL et al. [*Orchestrating Single-Cell Analysis with Bioconductor*](https://osca.bioconductor.org/index.html) Nat Methods. 2020 Feb;17(2):137-145.\r +\r +## Implemented protocols\r + - MARS-Seq (massively parallel single-cell RNA-sequencing): The protocol is based on the publications of Jaitin DA, et al. (2014). *Massively parallel single-cell RNA-seq for marker-free decomposition of tissues into cell types.* Science (New York, N.Y.), 343(6172), 776–779. https://doi.org/10.1126/science.1247651 and Keren-Shaul H., et al. (2019). *MARS-seq2.0: an experimental and analytical pipeline for indexed sorting combined with single-cell RNA sequencing.* Nature Protocols. https://doi.org/10.1038/s41596-019-0164-4. The MARS-Seq library preparation protocol is given [here](https://github.com/imbforge/NGSpipe2go/blob/master/resources/MARS-Seq_protocol_Step-by-Step_MML.pdf). The sequencing reads are demultiplexed according to the respective pool barcodes before they are used as input for the analysis pipeline. \r +- Smart-seq2: Libraries are generated using the [Smart-seq2 kit](http://www.nature.com/nmeth/journal/v10/n11/full/nmeth.2639.html). 
\r +\r +## Pipeline Workflow\r +All analysis steps are illustrated in the pipeline [flowchart](https://www.draw.io/?lightbox=1&highlight=0000ff&edit=_blank&layers=1&nav=1&title=scRNA-Seq#R7R3ZcpvK8mtUlTxIxSIh6dF2oiyVODm2Uzk5LykEI4kYAWGxrXz9nZ6FdUBIQoDjm%2BTeIwYYZqZ7eu%2BegXq1fXrn697ms2sie6BI5tNAfTNQFFmdTPB%2FoGVHW6bKjDasfctkDyUNt9YfxBol1hpZJgoyD4aua4eWl200XMdBRphp033ffcw%2BtnLt7Fc9fY0KDbeGbhdbv1tmuGGtsjZPbrxH1nrDPj1TpvTGUjfu174bOex7jusgemer827YHIONbrqPqSb17UC98l03pL%2B2T1fIhmXlK0bfW5TcjYfsIyes88J76eNqcz0L7YfNzfL6%2Fe3i55%2F3Qw6AB92O2FoMFM3GHV6a1gOsrm2tHXJD%2Bx3BUC99sgzxJf61Zv8lry39fAseEumLt5LFCHd87Tfh1sa%2FZHzP1pfIvoyX9Mq1XZ88pC7IH%2FxIEPrufQwkvIiXK9cJGUbJGoxbDzbIZD2SfuKrlWXbqU4R%2BRN3yu8QGKqXhru1DHwp4d9rWw8C9juGo0T6D%2FXQcmGFhnMpnl4aKgxQD8gP0VOqiUHpHXK3KPR3%2BBF2dzbR6CtsMw1nDAsfU6g5ZW2bFFaOJZVtCbYd1nHfCV7gHww1xGjyYFz%2Fc%2Fs9%2Bhi9%2BfPf3Ye7xY%2Fg4b%2Bh%2FKLx5K0Gf8vwZO3rpoWhfQz6NIAtykzKYEtMU1PYMpYE2KLxB0%2FBlj%2Ffwzd30r%2F%2FfVzcvPW3snn3bvdhOJELwEMmprfs0vXDjbt2Hd1%2Bm7SmIAALkzzzyXU9BpZfKAx3DIR6FLq4KYUU6MkK%2F4XXRxN29SN1580T65lc7PiFg%2Bebegkuf6TvJa%2BRq%2BQ98wLYDr40ALYAaWhcWDYfThEBMwhXCvnAjXwDVexFtsdD3V%2BjsOo59iAsfSUi%2BcjGNOwhywpFSEFexfPWd6kHPNdywiDV81doSPBzPJtk8VMZZ9lU7vnJbFb1PP5BR5CgZzyVWhj75e7L%2Fa%2FN7Ofvf%2B9CZfHz%2BrusLYez54KwRaw6PyKNZ00j0km8SN3DiRIgctYDlH8YEDBc4AdkzXsq50vwcK1exsJerpTBxeVWtxwAiuUh28LcADdeptkc%2FcY%2Bflh7JJXzCTzdybfpwAp8tMr0uglDkLYvADrKYm2FmMuOfNsbORg7lIW1Xa5cjCn45%2FW7W5iasnbxxRD%2Fb2m7S%2BDe6AH0ggWfN6YKi8C4ub4I0G%2F4udX9EP8c8fsjzL7dh1167JLBWGXSiFF%2BGPqRY%2Bgh3m2FeVb0Gq%2FrIlkAKfUvhocuEEVE61YCurzg0gU2VvbSDbwxYILGwV3e6UnQbh6sYvLQCJEZEmJshfoSv50iM1j%2FRivLsYhCokivlnDndaXA%2Fezmbbu6GRBeE3jIINwGa%2BEwUdC5XTOyAQ2f%2F%2FwQxnIntHRgh7pjxpMbwrStFRY4CdPXMU9F%2FlETbgAHn5ARhbDeKRwsGUm5MncGHUgTqEBc20mrQPK4ARVILDMJhJTcEmBF0IOfWFH0sMoIk6cCI5fuZrwhtjyNhctVKSXVXkO5sGSKYMV422E6Q0HIHypZpWCa7cBdrQJgQjkgHCboiw0Zky4lff77R0bq36eaDtKKaaKnlqimTWoIslJX12xJQ%2FgWIP%2FL8heYgDGpBCtQPQ1g6T4BRbOcNaVpS9c3kT%2FEzSAaEEmBIT6heBLG8kl8J23jvSCihIrG8Dd%2BwtNNM%2B5b6Z8kRuEXjMKnjE
HucOkr29FJEpeAI3GBpgi%2FcuDkmRXaZmeIUQdL8gaBTqpDqfBUDMPUU0o5IMOla%2B4KjQURKzRjPUXfejYaVAljycPF13mLxRsMSu14t5KDRYFUX1at%2FqGxOOaKWSx133DN5qcR9yu98tHvyPIB%2BzBwcf%2FS54ub2%2BEtIHL57GoO5OD5eq5rNz5Z2qn04U3ldF93Ml9MnxuELuuOTdWJtkvkdzIvsNU2PivSqfTKxry%2Fo2kZTWInnxbuNNo6HQOMEO7m5iZhfWkhk%2F%2BDn9IrA%2FMNn2zDR%2FhSJ3MEocJrbo6sOwn8EWtni5ywi0mNRqPmpqQ7IDW64QZ0XMnUQ%2F1cU4JGEUsn0hMRS9KabLWOoaSEX6aUpPQNUMksQ7cvmMc0BCXikvtPbbSCz7j4qZVNHEwr4lNKqxOPGytEt1iqgR4ffd2Lx3WaFq1kdDJ5pglUQpFOKM8PFuTxZUqWP0CBUzp11Tw3BY6D9NkocJ5Q%2FN%2Fi4VsOlfAlL9HZaPuQ7CC4p6XuYaQPh2xXXRAZ36GWsrJ4BN7wNTGnigxsy1pUx8u3bQpPHTVVSh7g5kysZl6mNK9ulM3Ycjl60P2gEaN%2FWZcnKaAFEJ0HIHkhy3cfLKLopC28IkSrZG%2BXsUH4Gr%2BYWRZmoyj9Ln0N5iq0JEuvXC8cHS4PpZZzL4dUqjnkqbyQKfyfyOuNxdlkueNYntbjjvIRQVnHc0eRc7xVuzP%2FYG8Nz%2BMsGM9geBZGmMjqy5Nbmgp4qh3xpPVEiollhIUehP9cEZJcR4D4HWHSF%2B6IDs7VU3cFA9YfybCJbxAGi%2Fnu%2FYB50nL8fBMtR3hvV%2FByxse3eHBA8Rex2xQeA88dvvHbGNHmmNvupeoUTqVUXUR3cpQ%2BS7hVrYS%2Bk0hO1rGURfgGCP1EygWgjYuUXtYEJEo73JlYh9ALQyy5ylVCTpC9dB9boCRKNSk5H0kY1yQJE6lpknBkEGTWQT3WpBxWnBbUKMSReDVfNo40DPpiwKqcg62a82ZTJGVvJUA%2FFYfU6bRRHBKKLT1yl%2B%2FBozKxRe1MbKlLo8Z9CdTO0ygljV%2F78XE8OT8%2Bys8mtaDRSO3aqNSWBFyAlqb52q%2FVxtve6xvVfHKt%2B2t1qPWHehzKhFoAljrvxb6fqrl9n%2BUr%2B59vOEHj9uHi8Z186%2F78cjn%2F8k19%2BrrZ3g97pD3%2FvXxo2gt8PJUPTWYn8aHauv3t3cVNbc3%2BBqvuxGjseRBkdE4dPsm3wBA%2FXIkfpyBzTiW%2B4QjguZxFmo6VdvHaMmk6ZZ0lfhMa3CR9TfwpOVoHvqzUQjNXVtYQAguZdy1vLdMkpDAHk6Tlhq2EOijJbiUJzsQBTskU2714WJNL%2FA%2Bv3hUw1wke5hW%2BlpNr%2FA8e98Mr18Fd6xYBLsII%2B4iCcFBMyG0ADdQZfDxrvSnggdBMr44PxoN6MtALlVi1ukyncQPNacmFWmGL1ncPkkQJ5iSUjnBFf4lCLwK2A%2FUrYINfFJgOvvnbqGI53fmeY7%2BiG0%2FDIhyJxTUm467jPzw9YCZNXnj%2B%2F0LfWjbg%2B3tkPyDoddBGZM2Uc6MKk7KQKh3BnWri%2BbRneC4VUN1HQWSHQT%2BR3cAEOo3i4DS3gELbOwJWDFVFirYWVLGh6WpE%2FjPR0Iw8G%2BMdPF2dmtdqfMKL35Iaz3XZuyUPD3aruSVnvd%2BStrvu6X7EIxvyvWi4to0l81RIPdIN2I%2BcQ%2FULwc%2BAzBO1Rf4ilnqVLqXerkx%2FXJjdK%2FXOO7PTVo67N6SnKPX6CFTJnlKfVsMumUixiFdkEWwsZ%2FczMHxHD9Dvn%2FQGVu5%2FhiTRaBEYI9o2utmap0Vi5rsqDbDMByv2eXHII%2FjGaINsD%2FnB6ObkRRJ2WViY
fsWqfsXjxUQv3mywJX0wQqYrTBAmO0qNOI4O%2Feya1gpoFsEM8mAiChtREEJXmr4FRuosAyrHJZGnwLlpoYMQ40ewwcQgoLROkRyETJRGN1F2Yy%2BYeVvS6nxWj8HLSgMcXlzk7fREjF%2FR1uPPM3NjazGOcf029uGkeNtZnTFn8vUXnXS84h93zk%2FqBaU06B%2BxnBXy0ROmfhbJRavrKjGBGmwptQG7MaEJPFadRkeSmEjbWvo6Q%2BSz%2B1JykznYrTJuKzbyvLGQqtKtW%2BX%2B92P4aG23MlI%2Baab0dbwcXg6nz0XVODayOkN7yvSVHEURAL5xIqNpWa5UiHyj5LBAZIrUapyteKtMzxNCN82hMx9w6bjyocCnhdzVJp1BtASDoYJl1nDnoeAo2klMlKQ2E0nrwaQMZKudh9qhmPk5HE4yJ38FyRxLPQwf597O%2Fjslj63Q26hZp7Yzs%2FGQ45OMcMqzib%2FrQ650bSirPYmT4gSDG31Z2fjSOKnc89q4VWb2k7rMDgyYIi%2BRiCmw3mMNIGXVT9xrZ%2BRn6fq1dCKHMzLtWTKyfEjVRCnWg2iTkYnjwV9gOYimTBHTmvRuJveC3k3lrEljMt8jvOeeH8%2FboXdLfXsFxkRy5ExNamdgwkJqLy2t9Xdr3UaSJx6mwYZ5OEWbPkuKlhfNJ1q3ormQolUndv5fMhceTZCmYcJVbUswr00lLkzz2%2BcPl7Ts4R3esBLkiv%2FOBl6IiEa6Lr1uEh8J7mjAnSdJIUXmu7kh6wMfIGndBLN1k5gEiBn1Rq6MAcqWwW9H0MLTwrIdm0noknEfTqNm7dOo3LlBqwn8HRSPCML3luTvIB91je9o5A%2FsY%2Fh%2BCNtm0lQNk5xIpwhKfHVuzv3rJbr%2BmnOztVEm8tHmXDWLZi2Zc%2FmA%2B2bOXSMHXbrmDouFSm2R0OcKMBMlMbl%2BIFQbeoMPImcNrvU2bLnwSSiOiAejHEyIJ9KgdUJ8BmFxPOtWWBRb%2BP5631ej0mLtzNx%2BWvg0tZ8WPiysHWbcw9hHKtXS0DAeS8%2FKGxUE2cCzrTA8d9pkSv7k8zlc5uwgeTInJVaJmmXiabkIegaZU5O6lTkrTVy9CYAtxt6Hvm7c9zT%2BlVqtYMpskMxC%2F2AFUNEsoEfk9j7VpAFszwfqjQUmprNF4gstHz2qnNB%2FCWFWW0JoWkA4KX6fD%2Fso8nX2gso3CGLnHAN4%2BTvkuJlzSiqkhO7pGpVPSKEHDAG8JZ6I%2B3GdmUQ%2B0n2F9DDyc6fxkW%2Bsw1XbMd2FKO2kDjAL0LZdmowYkCkCTlcVWx4cWQ64weq%2BTcgkSoZIT%2BW60dRSA1RavIOLpSEKC3fewr0xDXm5hXvFKnaPHDIxzzzIISO3yEDngiAacYJgvzLg5uV1H3p%2FplwSaRMnZxFMqlYVXtr5a3RJssUpBuT8LbxT2flb1GOWy3HjKUz3mJXDWUj8GvN2Urs%2FrlNA%2FWk6QTZRJWdq19DxW7vACsg6Iy84%2FbScc53k8%2F%2BBnX5Q0HM492fOSxrE4lCxZpI8E7D22fR0aix0AT7%2F7PH97rs6x6%2Fy02rrqaNd8c54mJ1Yzz44dbLH9cfc4WE9sp7xuIy4lswfy%2FOQ%2BbpqZK3oio6%2BpaG3mIc9gE5IjpWnpj1PZ4dWIibC1M3NbVPXdV1bHt3IIxpBsv7T51EqR4zyWZtKc0q4ILq4ZR2c1%2BPtsRMA6odmEuF7RMaW%2BjYhYi%2FC1J9NWlTnNUtNnq%2FoTqdZXU3Y%2BqdtGvu5Y7KDGpWNhANMp4eFA0xnsxyKnSnhB2qf3LACLHVDAhi0QNag1mkiZWDKc2%2B6j84gqUkCE2oh14fVb4E5HBwGME%2Bjx%2FOJeMp76qfjolm8TU%2F9e%2BnjanM9C%2B2Hzc3y%2Bv3t4u
ef98NyR30BuYAPCPlc3mY1FrGzT2gNom2CvLS7AjtblrIzVrU5BSRhheV0VAZrKhR6Li3sLMKKLEVvhNNlCYlAVBvLFab%2FUxidEAmKpxwuYX8PUodbxgU5oa8VFUxsolTV8KoctxubWOlZdgvynZXegWPBSvPakY2v9CmOZS0vEqcsvnyFk0ay1iJtKVMxKlWJKgFY56LvkfLuIba%2FMyCXzGsE7cMudrBP89jVptOzEr1frs9TuCzP0BnGbO8L6tap2I4vze9VuRgv1ovT24F16F5qgMXMtWzmz0SQM8EjJDMcRjkTh%2BmyzL1FPRMLN2fa62Wg2wsw2OWRUyz%2FiLBTOxN2HlrxHSTfIR8ZQVHKkyp8DHV6EXfyIcSANNIR54k3grwWBSQfjaF5sUhrAZfLhIfCgDTxiN7QxEli%2F0E0XtGEoxSAd4sCLkh8RTXrL4yxEt9P1%2BjZnhBvnP3K%2FMb1rT94fnr8%2FVjGUJvZJAUlYVzcJMpsXtwkZ7Nq%2F%2FX5wEWbdrow7rratpC2cv%2BY2x%2FvrA9vl7e3T%2B7O3X388eXbsK6Rm5sQuq7ykjtdcpo9VXbv87ymx7mN3OyQCWKQ%2BATHYtQu9eJ6OwKP4B4%2BQU7UoPUZYjmBNXJx4YwW760VgA6iO8iN4JrNiljTYru3IrR8lyPbszN851N9px2XbBSegNrxMXIHEUJ88ZVUSibW64aPRT2eFAqSe6pIZvuhyULAF21izxPwchbwyplcvXUhX%2BkSbh%2F0wjjIjqu0ngv0x0L%2B0KDKuqggXPu2QiqrBlnLMB759u4S8m5hOvu4ZYId9Cqkqbnqm%2BH8wD1Xn72Op2ruUNaxoO6Qohb5qzptwPMgrhspdbu1zn4qa1UVtP26QGd1jIXD7lMd4x6f%2Fn4ayM9zMMnBMU6T3LklMyWNPnufV7STDn%2BvZ5GY9Acdz3K2XV1EElNWpReIlLcLqPuqxeafnyinIFJtO8JVFOqm7oW17Qe8JV2Y0Udb9wGsoKQrYpVluWyspk7ccRflHA02xao4unKUe3bWhHwYndrHKrN8l%2FY41j3EysS2r8Hu8eCklxf2nq1woyoyl%2B67C3wXBazkVjm1CoQeX8ZBJ6nCVwvyR8g508ut5eWzrFqVq7P1VoO%2FgxJnUb5gF2vGVM0yGBtfswMoCUvf6CbBAimrvzUU6jmf5tigKAJxKnD%2FqHzJGwcuZ8yl0UgHxBKStyI732LHns1%2F8m5EcoCS7dL8bohDxzwtQo5BE%2BNYkU6bnj5asfvtQiZX0nIHtIT0d0Xoj%2BfRzmm%2BePbIeILT%2BFcQZl2zVf0X5pSfkkGhwGbBvvvPFSVrmIKT4Rw5uWvX3%2BLP%2F9Ez2YPx2X0SCaGq19UHE3IQV8lKsIR681cUhOSIQZqCXzhjFE97BRuULuuD7lsQSUNSLS3HsEfJEhg7g0SCeRs9QK9rjusNySyV0BNeVJ%2BPjRUJ%2BHp1QcEaDm%2Bv38Zj%2Fvb54msChVpfubKjgMOC%2Bossuh67ZPhBtBx6rhfZvKLPIeu6o8zvPi4ym9Rx46tDh1Cz2wvHAfKE9g7vYJx6Y61IRSkoUkQX3kdBwHEiLq9AyjFAkMKQFGIi3JqNBAi%2Fd%2BgAcGORdOTDCnJc56%2FJAZhP8vpRkTEImb7aRBaA0ELbIzX8SKtQI2e%2FdFApfDzOWl20ec5OW79SeNYw3FalcD7gvlUK%2FxzZoYU5b%2B2suWiLSTZhoTHDRu0diFUzSW6vf6VV5b5A82rtmLJqtjOBfNz1CQpyp7ECXdb8PNRDKFw%2BRtt6U3SlapD9rPj5NZ2l5aysdeQXJNIeGXNo3U7%2BBZDlNz5aZT5QIKGYio982xs5kIFTSkaHCSk1qXa1oAsC7fDYiF4mVTZTySFM608aMdYPQz9yIOTVLE5D3CFfW73Oep9%2FdbMG6H
ZXHKsJ%2BhY0v0aWW9Bb1VqLJp4vHdv2gpDjMxtZjFxPPUC6OAK%2BpIHqhMC5Xf%2FedokdZ4u1xzXRf1PjFemB5d0e0YCVV8KSggO%2B%2B6q5AeKF0LHYCQyHjYCD3I78TFYbCJgklxor9oMk4F8K9n8EUBQr6sjAYoOftdW5BmjxKQDWmtfr2JYiuptUOY4TwuPtGr%2BIBcGAFT6mDelSyLSWY2JMoAiOf3x2A2poChHYG%2B4RAvxNFjFA5KyKpK5jyUxa3QZnTEw2fde746ISDK48Nf40yVvJCN4TVZSKK6oXNW0ge0csZosM07mVPCl9ec96lcuFpYv43NOX8SVmMWH6cYxgm8%2BuCSr12%2F8B). Specify desired analysis details for your data in the respective *essential.vars.groovy* file (see below) and run the selected pipeline *marsseq.pipeline.groovy* or *smartsseq.pipeline.groovy* as described [here](https://gitlab.rlp.net/imbforge/NGSpipe2go/-/blob/master/README.md). The analysis allows further parameter fine-tuning subsequent the initial analysis e.g. for plotting and QC thresholding. Therefore, a customisable *sc.report.Rmd* file will be generated in the output reports folder after running the pipeline. Go through the steps and modify the default settings where appropriate. Subsequently, the *sc.report.Rmd* file can be converted to a final html report using the *knitr* R-package.\r +\r +### The pipelines includes:\r +- FastQC, MultiQC and other tools for rawdata quality control\r +- Adapter trimming with Cutadapt\r +- Mapping to the genome using STAR\r +- generation of bigWig tracks for visualisation of alignment\r +- Quantification with featureCounts (Subread) and UMI-tools (if UMIs are used for deduplication)\r +- Downstream analysis in R using a pre-designed markdown report file (*sc.report.Rmd*). Modify this file to fit your custom parameter and thresholds and render it to your final html report. 
The Rmd file uses, among others, the following tools and methods:\r + - QC: the [scater](http://bioconductor.org/packages/release/bioc/html/scater.html) package.\r + - Normalization: the [scran](http://bioconductor.org/packages/release/bioc/html/scran.html) package.\r + - Differential expression analysis: the [scde](http://bioconductor.org/packages/release/bioc/html/scde.html) package.\r + - Trajectory analysis (pseudotime): the [monocle](https://bioconductor.org/packages/release/bioc/html/monocle.html) package.\r +\r +### Pipeline parameter settings\r +- essential.vars.groovy: essential parameter describing the experiment \r + - project folder name\r + - reference genome\r + - experiment design\r + - adapter sequence, etc.\r +- additional (more specialized) parameter can be given in the var.groovy-files of the individual pipeline modules \r +- targets.txt: comma-separated txt-file giving information about the analysed samples. The following columns are required \r + - sample: sample identifier. Must be a unique substring of the input sample file name (e.g. common prefixes and suffixes may be removed). These names are grepped against the count file names to merge targets.txt to the count data.\r + - plate: plate ID (number) \r + - row: plate row (letter)\r + - col: plate column (number)\r + - cells: 0c/1c/10c (control wells)\r + - group: default variable for cell grouping (e.g. by condition)\r + \r + for pool-based libraries like MARSseq required additionally:\r + - pool: the pool ID comprises all cells from 1 library pool (i.e. a set of unique cell barcodes; the cell barcodes are re-used in other pools). Must be a unique substring of the input sample file name. For pool-based design, the pool ID is grepped against the respective count data filename instead of the sample name as stated above.\r + - barcode: cell barcodes used as cell identifier in the count files. 
After merging the count data with targets.txt, the barcodes are replaced with sample IDs given in the sample column (i.e. here, sample names need not be a substring of input sample file name).\r +\r +### Programs required\r +- FastQC\r +- STAR\r +- Samtools\r +- Bedtools\r +- Subread\r +- Picard\r +- UCSC utilities\r +- RSeQC\r +- UMI-tools\r +- R\r +\r +## Resources\r +- QC: the [scater](http://bioconductor.org/packages/release/bioc/html/scater.html) package.\r +- Normalization: the [scran](http://bioconductor.org/packages/release/bioc/html/scran.html) package.\r +- Trajectory analysis (pseudotime): the [monocle](https://bioconductor.org/packages/release/bioc/html/monocle.html) package.\r +- A [tutorial](https://scrnaseq-course.cog.sanger.ac.uk/website/index.html) from Hemberg lab\r +- Luecken and Theis 2019 [Current best practices in single‐cell RNA‐seq analysis: a tutorial](https://www.embopress.org/doi/10.15252/msb.20188746)\r +\r +\r +""" ; + schema1:keywords "scRNA-seq, smart-seq 2, bpipe, groovy" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "scRNA-seq Smart-seq 2" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/62?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-03-26T20:04:38.057044" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-only-VGP3/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Rare disease researchers workflow is that they submit their raw data (fastq), run the mapping and variant calling RD-Connect pipeline and obtain unannotated gvcf files to further submit to the RD-Connect GPAP or analyse on their own.\r +\r +This demonstrator focuses on the variant calling pipeline. The raw genomic data is processed using the RD-Connect pipeline ([Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/27604516)) running on the standards (GA4GH) compliant, interoperable container orchestration platform.\r +\r +This demonstrator will be aligned with the current implementation study on [Development of Architecture for Software Containers at ELIXIR and its use by EXCELERATE use-case communities](docs/Appendix%201%20-%20Project%20Plan%202018-biocontainers%2020171117.pdf) \r +\r +For this implementation, different steps are required:\r +\r +1. Adapt the pipeline to CWL and dockerise elements \r +2. Align with IS efforts on software containers to package the different components (Nextflow) \r +3. Submit trio of Illumina NA12878 Platinum Genome or Exome to the GA4GH platform cloud (by Aspera or ftp server)\r +4. Run the RD-Connect pipeline on the container platform\r +5. Return corresponding gvcf files\r +6. OPTIONAL: annotate and update to RD-Connect playground instance\r +\r +N.B: The demonstrator might have some manual steps, which will not be in production. \r +\r +## RD-Connect pipeline\r +\r +Detailed information about the RD-Connect pipeline can be found in [Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/?term=27604516)\r +\r +![alt text](https://raw.githubusercontent.com/inab/Wetlab2Variations/eosc-life/docs/RD-Connect_pipeline.jpg)\r +\r +## The applications\r +\r +**1\\. 
Name of the application: Adaptor removal**\r +Function: remove sequencing adaptors \r +Container (readiness status, location, version): [cutadapt (v.1.18)](https://hub.docker.com/r/cnag/cutadapt) \r +Required resources in cores and RAM: current container size 169MB \r +Input data (amount, format, directory..): raw fastq \r +Output data: paired fastq without adaptors \r +\r +**2\\. Name of the application: Mapping and bam sorting**\r +Function: align data to reference genome \r +Container : [bwa-mem (v.0.7.17)](https://hub.docker.com/r/cnag/bwa) / [Sambamba (v. 0.6.8 )](https://hub.docker.com/r/cnag/sambamba)(or samtools) \r +Resources :current container size 111MB / 32MB \r +Input data: paired fastq without adaptors \r +Output data: sorted bam \r +\r +**3\\. Name of the application: MarkDuplicates** \r +Function: Mark (and remove) duplicates \r +Container: [Picard (v.2.18.25)](https://hub.docker.com/r/cnag/picard)\r +Resources: current container size 261MB \r +Input data:sorted bam \r +Output data: Sorted bam with marked (or removed) duplicates \r +\r +**4\\. Name of the application: Base quality recalibration (BQSR)** \r +Function: Base quality recalibration \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data: Sorted bam with marked (or removed) duplicates \r +Output data: Sorted bam with marked duplicates & base quality recalculated \r +\r +**5\\. Name of the application: Variant calling** \r +Function: variant calling \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data:Sorted bam with marked duplicates & base quality recalculated \r +Output data: unannotated gvcf per sample \r +\r +**6\\. 
(OPTIONAL)Name of the application: Quality of the fastq** \r +Function: report on the sequencing quality \r +Container: [fastqc 0.11.8](https://hub.docker.com/r/cnag/fastqc)\r +Resources: current container size 173MB \r +Input data: raw fastq \r +Output data: QC report \r +\r +## Licensing\r +\r +GATK declares that archived packages are made available for free to academic researchers under a limited license for non-commercial use. If you need to use one of these packages for commercial use. https://software.broadinstitute.org/gatk/download/archive """ ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.106.3" ; + schema1:isBasedOn "https://github.com/inab/Wetlab2Variations/tree/eosc-life/nextflow" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for VariantCaller_GATK3.6" ; + schema1:sdDatePublished "2024-07-12 13:37:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/106/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 22429 ; + schema1:creator ; + schema1:dateCreated "2021-05-21T08:45:49Z" ; + schema1:dateModified "2023-01-16T13:47:03Z" ; + schema1:description """Rare disease researchers workflow is that they submit their raw data (fastq), run the mapping and variant calling RD-Connect pipeline and obtain unannotated gvcf files to further submit to the RD-Connect GPAP or analyse on their own.\r +\r +This demonstrator focuses on the variant calling pipeline. 
The raw genomic data is processed using the RD-Connect pipeline ([Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/27604516)) running on the standards (GA4GH) compliant, interoperable container orchestration platform.\r +\r +This demonstrator will be aligned with the current implementation study on [Development of Architecture for Software Containers at ELIXIR and its use by EXCELERATE use-case communities](docs/Appendix%201%20-%20Project%20Plan%202018-biocontainers%2020171117.pdf) \r +\r +For this implementation, different steps are required:\r +\r +1. Adapt the pipeline to CWL and dockerise elements \r +2. Align with IS efforts on software containers to package the different components (Nextflow) \r +3. Submit trio of Illumina NA12878 Platinum Genome or Exome to the GA4GH platform cloud (by Aspera or ftp server)\r +4. Run the RD-Connect pipeline on the container platform\r +5. Return corresponding gvcf files\r +6. OPTIONAL: annotate and update to RD-Connect playground instance\r +\r +N.B: The demonstrator might have some manual steps, which will not be in production. \r +\r +## RD-Connect pipeline\r +\r +Detailed information about the RD-Connect pipeline can be found in [Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/?term=27604516)\r +\r +![alt text](https://raw.githubusercontent.com/inab/Wetlab2Variations/eosc-life/docs/RD-Connect_pipeline.jpg)\r +\r +## The applications\r +\r +**1\\. Name of the application: Adaptor removal**\r +Function: remove sequencing adaptors \r +Container (readiness status, location, version): [cutadapt (v.1.18)](https://hub.docker.com/r/cnag/cutadapt) \r +Required resources in cores and RAM: current container size 169MB \r +Input data (amount, format, directory..): raw fastq \r +Output data: paired fastq without adaptors \r +\r +**2\\. Name of the application: Mapping and bam sorting**\r +Function: align data to reference genome \r +Container : [bwa-mem (v.0.7.17)](https://hub.docker.com/r/cnag/bwa) / [Sambamba (v. 
0.6.8 )](https://hub.docker.com/r/cnag/sambamba)(or samtools) \r +Resources :current container size 111MB / 32MB \r +Input data: paired fastq without adaptors \r +Output data: sorted bam \r +\r +**3\\. Name of the application: MarkDuplicates** \r +Function: Mark (and remove) duplicates \r +Container: [Picard (v.2.18.25)](https://hub.docker.com/r/cnag/picard)\r +Resources: current container size 261MB \r +Input data:sorted bam \r +Output data: Sorted bam with marked (or removed) duplicates \r +\r +**4\\. Name of the application: Base quality recalibration (BQSR)** \r +Function: Base quality recalibration \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data: Sorted bam with marked (or removed) duplicates \r +Output data: Sorted bam with marked duplicates & base quality recalculated \r +\r +**5\\. Name of the application: Variant calling** \r +Function: variant calling \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data:Sorted bam with marked duplicates & base quality recalculated \r +Output data: unannotated gvcf per sample \r +\r +**6\\. (OPTIONAL)Name of the application: Quality of the fastq** \r +Function: report on the sequencing quality \r +Container: [fastqc 0.11.8](https://hub.docker.com/r/cnag/fastqc)\r +Resources: current container size 173MB \r +Input data: raw fastq \r +Output data: QC report \r +\r +## Licensing\r +\r +GATK declares that archived packages are made available for free to academic researchers under a limited license for non-commercial use. If you need to use one of these packages for commercial use. 
https://software.broadinstitute.org/gatk/download/archive """ ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/106?version=2" ; + schema1:keywords "Nextflow, variant_calling" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "VariantCaller_GATK3.6" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/106?version=3" ; + schema1:version 3 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 83411 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission 
(EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.127.4" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (Cluster90)" ; + schema1:sdDatePublished "2024-07-12 13:34:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/127/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 39368 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-04-14T08:09:43Z" ; + schema1:dateModified "2023-04-14T08:11:55Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/127?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (Cluster90)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_wf_virtual-screening/blob/master/biobb_wf_virtual-screening/notebooks/clusterBindingSite/wf_vs_clusterBindingSite.ipynb" ; + schema1:version 4 . + + a schema1:Dataset ; + schema1:datePublished "2024-03-11T14:51:55.650444" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/dada2" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "dada2/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:Dataset ; + schema1:datePublished "2024-02-05T15:22:27.802677" ; + schema1:hasPart , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/fastq-to-matrix-10x" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + ; + schema1:name "fastq-to-matrix-10x/scrna-seq-fastq-to-matrix-10x-cellplex" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.20" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.20" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + , + ; + schema1:name "fastq-to-matrix-10x/scrna-seq-fastq-to-matrix-10x-v3" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/fastq-to-matrix-10x" ; + schema1:version "0.2" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Workflow for the GTN training \"Antibiotic resistance detection\"" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/406?version=1" ; + schema1:isBasedOn "https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/plasmid-metagenomics-nanopore/workflows/Workflow-plasmid-metagenomics-nanopore.ga" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for GTN Training - Antibiotic Resistance Detection" ; + schema1:sdDatePublished "2024-07-12 13:34:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/406/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 25395 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-24T13:28:47Z" ; + schema1:dateModified "2023-02-13T14:06:44Z" ; + schema1:description "Workflow for the GTN training \"Antibiotic resistance detection\"" ; + schema1:keywords "Metagenomics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "GTN Training - Antibiotic Resistance Detection" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/406?version=1" ; + schema1:version 1 ; + ns1:input . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "fgbio Best Practices FASTQ to Consensus Pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/985?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/fastquorum" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fastquorum" ; + schema1:sdDatePublished "2024-07-12 13:21:30 +0100" ; + schema1:url "https://workflowhub.eu/workflows/985/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10890 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:52Z" ; + schema1:dateModified "2024-06-11T12:54:52Z" ; + schema1:description "fgbio Best Practices FASTQ to Consensus Pipeline" ; + schema1:keywords "Consensus, umi, umis, unique-molecular-identifier" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fastquorum" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/985?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 1287940 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 1423529 ; + schema1:dateModified "2024-07-05T08:47:41+00:00" ; + schema1:name "housing.csv" ; + schema1:sdDatePublished "2024-07-05T11:38:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1266413 ; + schema1:dateModified "2024-07-05T11:38:26+00:00" ; + schema1:name "housing_one_hot_encoded.csv" ; + schema1:sdDatePublished "2024-07-05T11:38:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 141018 ; + schema1:dateModified "2024-07-05T11:38:46+00:00" ; + schema1:name "lat_lon_plot.png" ; + schema1:sdDatePublished "2024-07-05T11:38:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 22381 ; + schema1:dateModified "2024-07-05T11:38:46+00:00" ; + schema1:name "median_income_plot.png" ; + schema1:sdDatePublished "2024-07-05T11:38:51+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """Assembly polishing; can run alone or as part of a combined workflow for large genome assembly. \r +\r +* What it does: Polishes (corrects) an assembly, using long reads (with the tools Racon and Medaka) and short reads (with the tool Racon). (Note: medaka is only for nanopore reads, not PacBio reads). \r +* Inputs: assembly to be polished: assembly.fasta; long reads - the same set used in the assembly (e.g. may be raw or filtered) fastq.gz format; short reads, R1 only, in fastq.gz format\r +* Outputs: Racon+Medaka+Racon polished_assembly. fasta; Fasta statistics after each polishing tool\r +* Tools used: Minimap2, Racon, Fasta statistics, Medaka\r +* Input parameters: None required, but recommended to set the Medaka model correctly (default = r941_min_high_g360). See drop down list for options. \r +\r +Workflow steps:\r +\r +-1- Polish with long reads: using Racon\r +* Long reads and assembly contigs => Racon polishing (subworkflow): \r +* minimap2 : long reads are mapped to assembly => overlaps.paf. 
\r +* overaps, long reads, assembly => Racon => polished assembly 1\r +* using polished assembly 1 as input; repeat minimap2 + racon => polished assembly 2\r +* using polished assembly 2 as input, repeat minimap2 + racon => polished assembly 3\r +* using polished assembly 3 as input, repeat minimap2 + racon => polished assembly 4\r +* Racon long-read polished assembly => Fasta statistics\r +* Note: The Racon tool panel can be a bit confusing and is under review for improvement. Presently it requires sequences (= long reads), overlaps (= the paf file created by minimap2), and target sequences (= the contigs to be polished) as per "usage" described here https://github.com/isovic/racon/blob/master/README.md\r +* Note: Racon: the default setting for "output unpolished target sequences?" is No. This has been changed to Yes for all Racon steps in these polishing workflows. This means that even if no polishes are made in some contigs, they will be part of the output fasta file. \r +* Note: the contigs output by Racon have new tags in their headers. For more on this see https://github.com/isovic/racon/issues/85.\r +\r +-2- Polish with long reads: using Medaka\r +* Racon polished assembly + long reads => medaka polishing X1 => medaka polished assembly\r +* Medaka polished assembly => Fasta statistics\r +\r +-3- Polish with short reads: using Racon\r +* Short reads and Medaka polished assembly =>Racon polish (subworkflow):\r +* minimap2: short reads (R1 only) are mapped to the assembly => overlaps.paf. 
Minimap2 setting is for short reads.\r +* overlaps + short reads + assembly => Racon => polished assembly 1\r +* using polished assembly 1 as input; repeat minimap2 + racon => polished assembly 2\r +* Racon short-read polished assembly => Fasta statistics\r +\r +Options\r +* Change settings for Racon long read polishing if using PacBio reads: The default profile setting for Racon long read polishing: minimap2 read mapping is "Oxford Nanopore read to reference mapping", which is specified as an input parameter to the whole Assembly polishing workflow, as text: map-ont. If you are not using nanopore reads and/or need a different setting, change this input. To see the other available settings, open the minimap2 tool, find "Select a profile of preset options", and click on the drop down menu. For each described option, there is a short text in brackets at the end (e.g. map-pb). This is the text to enter into the assembly polishing workflow at runtime instead of the default (map-ont).\r +* Other options: change the number of polishes (in Racon and/or Medaka). There are ways to assess how much improvement in assembly quality has occurred per polishing round (for example, the number of corrections made; the change in Busco score - see section Genome quality assessment for more on Busco).\r +* Option: change polishing settings for any of these tools. Note: for Racon - these will have to be changed within those subworkflows first. Then, in the main workflow, update the subworkflows, and re-save. \r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.226.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Assembly polishing" ; + schema1:sdDatePublished "2024-07-12 13:36:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/226/ro_crate?version=1" . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 233028 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 68854 ; + schema1:creator ; + schema1:dateCreated "2021-11-08T05:32:22Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """Assembly polishing; can run alone or as part of a combined workflow for large genome assembly. \r +\r +* What it does: Polishes (corrects) an assembly, using long reads (with the tools Racon and Medaka) and short reads (with the tool Racon). (Note: medaka is only for nanopore reads, not PacBio reads). \r +* Inputs: assembly to be polished: assembly.fasta; long reads - the same set used in the assembly (e.g. may be raw or filtered) fastq.gz format; short reads, R1 only, in fastq.gz format\r +* Outputs: Racon+Medaka+Racon polished_assembly. fasta; Fasta statistics after each polishing tool\r +* Tools used: Minimap2, Racon, Fasta statistics, Medaka\r +* Input parameters: None required, but recommended to set the Medaka model correctly (default = r941_min_high_g360). See drop down list for options. \r +\r +Workflow steps:\r +\r +-1- Polish with long reads: using Racon\r +* Long reads and assembly contigs => Racon polishing (subworkflow): \r +* minimap2 : long reads are mapped to assembly => overlaps.paf. \r +* overaps, long reads, assembly => Racon => polished assembly 1\r +* using polished assembly 1 as input; repeat minimap2 + racon => polished assembly 2\r +* using polished assembly 2 as input, repeat minimap2 + racon => polished assembly 3\r +* using polished assembly 3 as input, repeat minimap2 + racon => polished assembly 4\r +* Racon long-read polished assembly => Fasta statistics\r +* Note: The Racon tool panel can be a bit confusing and is under review for improvement. 
Presently it requires sequences (= long reads), overlaps (= the paf file created by minimap2), and target sequences (= the contigs to be polished) as per "usage" described here https://github.com/isovic/racon/blob/master/README.md\r +* Note: Racon: the default setting for "output unpolished target sequences?" is No. This has been changed to Yes for all Racon steps in these polishing workflows. This means that even if no polishes are made in some contigs, they will be part of the output fasta file. \r +* Note: the contigs output by Racon have new tags in their headers. For more on this see https://github.com/isovic/racon/issues/85.\r +\r +-2- Polish with long reads: using Medaka\r +* Racon polished assembly + long reads => medaka polishing X1 => medaka polished assembly\r +* Medaka polished assembly => Fasta statistics\r +\r +-3- Polish with short reads: using Racon\r +* Short reads and Medaka polished assembly =>Racon polish (subworkflow):\r +* minimap2: short reads (R1 only) are mapped to the assembly => overlaps.paf. Minimap2 setting is for short reads.\r +* overlaps + short reads + assembly => Racon => polished assembly 1\r +* using polished assembly 1 as input; repeat minimap2 + racon => polished assembly 2\r +* Racon short-read polished assembly => Fasta statistics\r +\r +Options\r +* Change settings for Racon long read polishing if using PacBio reads: The default profile setting for Racon long read polishing: minimap2 read mapping is "Oxford Nanopore read to reference mapping", which is specified as an input parameter to the whole Assembly polishing workflow, as text: map-ont. If you are not using nanopore reads and/or need a different setting, change this input. To see the other available settings, open the minimap2 tool, find "Select a profile of preset options", and click on the drop down menu. For each described option, there is a short text in brackets at the end (e.g. map-pb). 
This is the text to enter into the assembly polishing workflow at runtime instead of the default (map-ont).\r +* Other options: change the number of polishes (in Racon and/or Medaka). There are ways to assess how much improvement in assembly quality has occurred per polishing round (for example, the number of corrections made; the change in Busco score - see section Genome quality assessment for more on Busco).\r +* Option: change polishing settings for any of these tools. Note: for Racon - these will have to be changed within those subworkflows first. Then, in the main workflow, update the subworkflows, and re-save. \r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "Large-genome-assembly" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Assembly polishing" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/226?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-23T09:05:16.346809" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/lcms-preprocessing" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "lcms-preprocessing/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.132.5" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Amber Constant pH MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/132/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 53518 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-04T15:33:00Z" ; + schema1:dateModified "2024-05-14T10:17:00Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/132?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Amber Constant pH MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_wf_amber_md_setup/blob/main/biobb_wf_amber_md_setup/notebooks/mdsetup_ph/biobb_amber_CpHMD_notebook.ipynb" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=20" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-07-12 13:18:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=20" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13459 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:40Z" ; + schema1:dateModified "2024-06-11T12:54:40Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=20" ; + schema1:version 20 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.255.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_ligand_parameterization/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL GMX Automatic Ligand Parameterization 
tutorial" ; + schema1:sdDatePublished "2024-07-12 13:35:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/255/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 12313 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2695 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-06T12:24:16Z" ; + schema1:dateModified "2023-06-06T12:33:16Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/255?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL GMX Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_ligand_parameterization/cwl/workflow.cwl" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + ; + ns1:output , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/976?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/cutandrun" ; + schema1:sdDatePublished "2024-07-12 13:21:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/976/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12785 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:46Z" ; + schema1:dateModified "2024-06-11T12:54:46Z" ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/976?version=7" ; + schema1:keywords "cutandrun, cutandrun-seq, cutandtag, cutandtag-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/cutandrun" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/976?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/278?version=6" ; + schema1:isBasedOn "https://gitlab.bsc.es/lrodrig1/structuralvariants_poc/-/tree/1.1.3/structuralvariants/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNV_pipeline" ; + schema1:sdDatePublished "2024-07-12 13:35:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/278/ro_crate?version=6" . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9858 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9607 ; + schema1:creator , + ; + schema1:dateCreated "2022-05-11T14:27:15Z" ; + schema1:dateModified "2022-05-11T14:27:15Z" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/278?version=10" ; + schema1:keywords "cancer, CODEX2, ExomeDepth, manta, TransBioNet, variant calling, GRIDSS, structural variants" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CNV_pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/278?version=6" ; + schema1:version 6 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 74039 . + + a schema1:Dataset ; + schema1:datePublished "2022-10-20T14:11:41.337603" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-sr/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.11" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.280.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_ligand_parameterization/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python GMX Automatic Ligand Parameterization 
tutorial" ; + schema1:sdDatePublished "2024-07-12 13:34:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/280/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1728 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-17T09:41:27Z" ; + schema1:dateModified "2022-03-23T10:02:13Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/280?version=3" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python GMX Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_ligand_parameterization/python/workflow.py" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 655617 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + schema1:dateModified "2023-10-20T11:03:48" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + ; + schema1:name "data" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 372 ; + schema1:dateModified "2023-10-20T11:03:48" ; + schema1:name "ConvectionDiffusionMaterials_center.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 365 ; + schema1:dateModified "2023-10-20T11:03:48" ; + schema1:name "ConvectionDiffusionMaterials_outside.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106382 ; + schema1:dateModified "2023-10-20T11:03:48" ; + schema1:name "GUI_test_center.mdpa" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 221975 ; + schema1:dateModified "2023-10-20T11:03:48" ; + schema1:name "GUI_test_outside.mdpa" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4504 ; + schema1:dateModified "2023-10-20T11:03:48" ; + schema1:name "ProjectParameters_CoSimulation.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4642 ; + schema1:dateModified "2023-10-20T11:03:48" ; + schema1:name "ProjectParameters_CoSimulation_workflow.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4608 ; + schema1:dateModified "2023-10-20T11:03:48" ; + schema1:name "ProjectParameters_CoSimulation_workflow_ROM.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4784 ; + schema1:dateModified "2023-10-20T11:03:48" ; + schema1:name "ProjectParameters_center.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 5551 ; + schema1:dateModified "2023-10-20T11:03:48" ; + schema1:name "ProjectParameters_center_workflow.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 6440 ; + schema1:dateModified "2023-10-20T11:03:48" ; + schema1:name "ProjectParameters_outside.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 7605 ; + schema1:dateModified "2023-10-20T11:03:48" ; + schema1:name "ProjectParameters_outside_workflow.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 179 ; + schema1:dateModified "2023-10-13T07:13:07" ; + schema1:name "load_parameters.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 228 ; + schema1:dateModified "2023-10-13T07:13:07" ; + schema1:name "run_cosim.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:Dataset ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:name "rom_data" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 128 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "HROM_ConditionIds.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 128 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "HROM_ConditionWeights.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1952 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "HROM_ElementIds.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1952 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "HROM_ElementWeights.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 4680 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "NodeIds.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 63856 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "RightBasisMatrix.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1125 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "RomParameters.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 937 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "attrs.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1952 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "aux_w.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1952 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "aux_z.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 456 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "HROM_ConditionIds.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 456 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "HROM_ConditionWeights.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1608 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "HROM_ElementIds.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1608 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "HROM_ElementWeights.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 9600 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "NodeIds.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123264 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "RightBasisMatrix.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1127 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "RomParameters.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 947 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "attrs.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1936 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "aux_w.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1936 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "aux_z.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:description "This workflow employs a recombination detection algorithm (GARD) developed by Kosakovsky Pond et al. and implemented in the hyphy package. More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/10?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genomics - Recombination and selection analysis" ; + schema1:sdDatePublished "2024-07-12 13:37:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/10/ro_crate?version=1" . 
+ + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 2947 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14263 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T13:30:37Z" ; + schema1:dateModified "2023-01-16T13:40:25Z" ; + schema1:description "This workflow employs a recombination detection algorithm (GARD) developed by Kosakovsky Pond et al. and implemented in the hyphy package. More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Genomics - Recombination and selection analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/10?version=1" ; + schema1:version 1 ; + ns1:input . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 6047 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """The workflows in this collection are from the '16S Microbial Analysis with mothur' tutorial for analysis of 16S data (Saskia Hiltemann, Bérénice Batut, Dave Clements), adapted for piepline use on galaxy australia (Ahmed Mehdi). The workflows developed in galaxy use mothur software package developed by Schloss et al https://pubmed.ncbi.nlm.nih.gov/19801464/. 
\r +\r +Please also refer to the 16S tutorials available at Galaxy https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html and [https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop/tutorial.html\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/649?version=1" ; + schema1:isBasedOn "https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Workflow 2: Data Cleaning And Chimera Removal [16S Microbial Analysis With Mothur]" ; + schema1:sdDatePublished "2024-07-12 13:26:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/649/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 27542 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2023-11-09T05:11:57Z" ; + schema1:dateModified "2023-11-09T05:11:57Z" ; + schema1:description """The workflows in this collection are from the '16S Microbial Analysis with mothur' tutorial for analysis of 16S data (Saskia Hiltemann, Bérénice Batut, Dave Clements), adapted for piepline use on galaxy australia (Ahmed Mehdi). The workflows developed in galaxy use mothur software package developed by Schloss et al https://pubmed.ncbi.nlm.nih.gov/19801464/. 
\r +\r +Please also refer to the 16S tutorials available at Galaxy https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html and [https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop/tutorial.html\r +""" ; + schema1:isPartOf ; + schema1:keywords "Metagenomics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Workflow 2: Data Cleaning And Chimera Removal [16S Microbial Analysis With Mothur]" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/649?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2024-05-22T15:30:59.439533" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Scaffolding-HiC-VGP8/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + schema1:datePublished "2022-11-29T12:08:35.581759" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "atacseq/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.2" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """Assembly with Flye; can run alone or as part of a combined workflow for large genome assembly. 
\r +\r +* What it does: Assembles long reads with the tool Flye\r +* Inputs: long reads (may be raw, or filtered, and/or corrected); fastq.gz format\r +* Outputs: Flye assembly fasta; Fasta stats on assembly.fasta; Assembly graph image from Bandage; Bar chart of contig sizes; Quast reports of genome assembly\r +* Tools used: Flye, Fasta statistics, Bandage, Bar chart, Quast\r +* Input parameters: None required, but recommend setting assembly mode to match input sequence type\r +\r +Workflow steps:\r +* Long reads are assembled with Flye, using default tool settings. Note: the default setting for read type ("mode") is nanopore raw. Change this at runtime if required. \r +* Statistics are computed from the assembly.fasta file output, using Fasta Statistics and Quast (is genome large: Yes; distinguish contigs with more that 50% unaligned bases: no)\r +* The graphical fragment assembly file is visualized with the tool Bandage. \r +* Assembly information sent to bar chart to visualize contig sizes\r +\r +Options\r +* See other Flye options. \r +* Use a different assembler (in a different workflow). \r +* Bandage image options - change size (max size is 32767), labels - add (e.g. node lengths). You can also install Bandage on your own computer and donwload the "graphical fragment assembly" file to view in greater detail. \r +\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.225.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Assembly with Flye" ; + schema1:sdDatePublished "2024-07-12 13:36:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/225/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 567125 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14840 ; + schema1:creator ; + schema1:dateCreated "2021-11-08T05:07:16Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """Assembly with Flye; can run alone or as part of a combined workflow for large genome assembly. \r +\r +* What it does: Assembles long reads with the tool Flye\r +* Inputs: long reads (may be raw, or filtered, and/or corrected); fastq.gz format\r +* Outputs: Flye assembly fasta; Fasta stats on assembly.fasta; Assembly graph image from Bandage; Bar chart of contig sizes; Quast reports of genome assembly\r +* Tools used: Flye, Fasta statistics, Bandage, Bar chart, Quast\r +* Input parameters: None required, but recommend setting assembly mode to match input sequence type\r +\r +Workflow steps:\r +* Long reads are assembled with Flye, using default tool settings. Note: the default setting for read type ("mode") is nanopore raw. Change this at runtime if required. \r +* Statistics are computed from the assembly.fasta file output, using Fasta Statistics and Quast (is genome large: Yes; distinguish contigs with more that 50% unaligned bases: no)\r +* The graphical fragment assembly file is visualized with the tool Bandage. \r +* Assembly information sent to bar chart to visualize contig sizes\r +\r +Options\r +* See other Flye options. \r +* Use a different assembler (in a different workflow). \r +* Bandage image options - change size (max size is 32767), labels - add (e.g. node lengths). You can also install Bandage on your own computer and donwload the "graphical fragment assembly" file to view in greater detail. 
\r +\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)\r +\r +""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "Large-genome-assembly" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Assembly with Flye" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/225?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-07-12 13:19:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=8" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9540 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:15Z" ; + schema1:dateModified "2024-06-11T12:55:15Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Workflow converts one or multiple bam/cram files to fastq format" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/968?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/bamtofastq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bamtofastq" ; + schema1:sdDatePublished "2024-07-12 13:22:09 +0100" ; + schema1:url "https://workflowhub.eu/workflows/968/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10671 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:44Z" ; + schema1:dateModified "2024-06-11T12:54:44Z" ; + schema1:description "Workflow converts one or multiple bam/cram files to fastq format" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/968?version=5" ; + schema1:keywords "bamtofastq, Conversion, cramtofastq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bamtofastq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/968?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1021?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/scrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/scrnaseq" ; + schema1:sdDatePublished "2024-07-12 13:19:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1021/ro_crate?version=9" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10279 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:17Z" ; + schema1:dateModified "2024-06-11T12:55:17Z" ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:keywords "10x-genomics, 10xgenomics, alevin, bustools, Cellranger, kallisto, rna-seq, single-cell, star-solo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/scrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1021?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/987?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/funcscan" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/funcscan" ; + schema1:sdDatePublished "2024-07-12 13:21:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/987/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16170 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:54Z" ; + schema1:dateModified "2024-06-11T12:54:54Z" ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/987?version=8" ; + schema1:keywords "amp, AMR, antibiotic-resistance, antimicrobial-peptides, antimicrobial-resistance-genes, arg, Assembly, bgc, biosynthetic-gene-clusters, contigs, function, Metagenomics, natural-products, screening, secondary-metabolites" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/funcscan" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/987?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Genome-wide alternative splicing analysis v.2" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/482?version=6" ; + schema1:license "CC-BY-NC-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genome-wide alternative splicing analysis v.2" ; + schema1:sdDatePublished "2024-07-12 13:33:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/482/ro_crate?version=6" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 214419 . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 30115 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 109284 ; + schema1:creator ; + schema1:dateCreated "2023-06-11T12:35:19Z" ; + schema1:dateModified "2023-06-11T12:35:37Z" ; + schema1:description "Genome-wide alternative splicing analysis v.2" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/482?version=6" ; + schema1:keywords "Transcriptomics, Alternative splicing, isoform switching" ; + schema1:license "https://spdx.org/licenses/CC-BY-NC-4.0" ; + schema1:name "Genome-wide alternative splicing analysis v.2" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/482?version=6" ; + schema1:version 6 ; + ns1:input , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# Generic consensus building\r +\r +This workflow generates consensus sequences using a list of variants generated by [Variant Calling Workflow](https://workflowhub.eu/workflows/353). \r +\r +The workflow accepts a single input:\r +\r +- A collection of VCF files\r +\r +The workflow produces a single output:\r +\r +- Consensus sequence for each input VCF file\r +\r +The workflow can be accessed at [usegalaxy.org](https://usegalaxy.org/u/aun1/w/consensus-construction)""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/356?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Generic consensus construction from VCF calls" ; + schema1:sdDatePublished "2024-07-12 13:35:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/356/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 59786 ; + schema1:creator ; + schema1:dateCreated "2022-06-03T09:33:10Z" ; + schema1:dateModified "2023-02-13T14:06:44Z" ; + schema1:description """# Generic consensus building\r +\r +This workflow generates consensus sequences using a list of variants generated by [Variant Calling Workflow](https://workflowhub.eu/workflows/353). \r +\r +The workflow accepts a single input:\r +\r +- A collection of VCF files\r +\r +The workflow produces a single output:\r +\r +- Consensus sequence for each input VCF file\r +\r +The workflow can be accessed at [usegalaxy.org](https://usegalaxy.org/u/aun1/w/consensus-construction)""" ; + schema1:keywords "mlxv, generic" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Generic consensus construction from VCF calls" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/356?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A pipeline to demultiplex, QC and map Nanopore data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1002?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/nanoseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nanoseq" ; + schema1:sdDatePublished "2024-07-12 13:20:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1002/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5158 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:05Z" ; + schema1:dateModified "2024-06-11T12:55:05Z" ; + schema1:description "A pipeline to demultiplex, QC and map Nanopore data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1002?version=5" ; + schema1:keywords "Alignment, demultiplexing, nanopore, QC" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nanoseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1002?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Detects SNPs and INDELs." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/35?version=1" ; + schema1:isBasedOn "https://usegalaxy.eu/u/ambarishk/w/gatk4" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for COVID-19: GATK4" ; + schema1:sdDatePublished "2024-07-12 13:37:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/35/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 27629 ; + schema1:dateCreated "2020-06-18T15:49:59Z" ; + schema1:dateModified "2023-02-13T14:06:46Z" ; + schema1:description "Detects SNPs and INDELs." 
; + schema1:image ; + schema1:keywords "Galaxy, SNPs, INDELs, GATK4" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "COVID-19: GATK4" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/35?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 203637 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """In this analysis, we created an extended pathway, using the WikiPathways repository (Version 20210110) and the three -omics datasets. For this, each of the three -omics datasets was first analyzed to identify differentially expressed elements, and pathways associated with the significant miRNA-protein links were detected. A miRNA-protein link is deemed significant, and may possibly be implying causality, if both a miRNA and its target are significantly differentially expressed. \r +\r +The peptidome and the proteome datasets were quantile normalized and log2 transformed (Pan and Zhang 2018; Zhao, Wong, and Goh 2020). Before transformation, peptide IDs were mapped to protein IDs, using the information provided by the data uploaders, and were summarized into single protein-level values using geometric mean. The miRNome dataset was already normalized and transformed; thus, the information of their targeting genes was simply added to each miRNA ID, using the information provided by miTaRBase (Huang et al. 2019). As a result, all three datasets had been mapped to their appropriate gene product-level (or, protein-level) identifiers. 
""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/331?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for EJP-RD WP13 case-study: CAKUT proteome, peptidome and miRNome data analysis using WikiPathways" ; + schema1:sdDatePublished "2024-07-12 13:34:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/331/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1069 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2022-04-20T16:59:50Z" ; + schema1:dateModified "2023-01-16T13:59:42Z" ; + schema1:description """In this analysis, we created an extended pathway, using the WikiPathways repository (Version 20210110) and the three -omics datasets. For this, each of the three -omics datasets was first analyzed to identify differentially expressed elements, and pathways associated with the significant miRNA-protein links were detected. A miRNA-protein link is deemed significant, and may possibly be implying causality, if both a miRNA and its target are significantly differentially expressed. \r +\r +The peptidome and the proteome datasets were quantile normalized and log2 transformed (Pan and Zhang 2018; Zhao, Wong, and Goh 2020). Before transformation, peptide IDs were mapped to protein IDs, using the information provided by the data uploaders, and were summarized into single protein-level values using geometric mean. The miRNome dataset was already normalized and transformed; thus, the information of their targeting genes was simply added to each miRNA ID, using the information provided by miTaRBase (Huang et al. 2019). As a result, all three datasets had been mapped to their appropriate gene product-level (or, protein-level) identifiers. 
""" ; + schema1:keywords "rare diseases, Pathway Analysis, workflow, Proteomics, protein, mirna prediction" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "EJP-RD WP13 case-study: CAKUT proteome, peptidome and miRNome data analysis using WikiPathways" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/331?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.284.2" ; + schema1:isBasedOn 
"https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_complex_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/284/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7128 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-23T08:45:13Z" ; + schema1:dateModified "2023-01-16T13:58:31Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn 
"https://workflowhub.eu/workflows/284?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_complex_md_setup/python/workflow.py" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "The workflow takes a trimmed HiFi reads collection, runs Meryl to create a K-mer database, Genomescope2 to estimate genome properties and Smudgeplot to estimate ploidy. The main results are K-mer database and genome profiling plots, tables, and values useful for downstream analysis. Default K-mer length and ploidy for Genomescope are 21 and 2, respectively. " ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.603.1" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/galaxy" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ERGA Profiling HiFi v2309 (WF1)" ; + schema1:sdDatePublished "2024-07-12 13:27:16 +0100" ; + schema1:url "https://workflowhub.eu/workflows/603/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 42208 ; + schema1:creator , + ; + schema1:dateCreated "2023-10-06T13:25:56Z" ; + schema1:dateModified "2024-03-13T09:07:03Z" ; + schema1:description "The workflow takes a trimmed HiFi reads collection, runs Meryl to create a K-mer database, Genomescope2 to estimate genome properties and Smudgeplot to estimate ploidy. The main results are K-mer database and genome profiling plots, tables, and values useful for downstream analysis. 
Default K-mer length and ploidy for Genomescope are 21 and 2, respectively. " ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "ERGA, Profiling, HiFi" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ERGA Profiling HiFi v2309 (WF1)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/1.Genome_Profiling/Galaxy-Workflow-ERGA_Profiling_HiFi_v2309_(WF1).ga" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 117463 ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/1.Genome_Profiling/pics/Prof_hifi_2309.png" . + + a schema1:Dataset ; + schema1:datePublished "2024-06-25T09:57:05.229158" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/pathogen-detection-pathogfair-samples-aggregation-and-visualisation" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "pathogen-detection-pathogfair-samples-aggregation-and-visualisation/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.283.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Amber Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/283/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6933 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-23T08:41:32Z" ; + schema1:dateModified "2023-04-14T08:32:35Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/283?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Amber Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_md_setup/python/workflow.py" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-07-12 13:19:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=10" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11484 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:15Z" ; + schema1:dateModified "2024-06-11T12:55:15Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=10" ; + schema1:version 10 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/965?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/atacseq" ; + schema1:sdDatePublished "2024-07-12 13:22:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/965/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5755 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:41Z" ; + schema1:dateModified "2024-06-11T12:54:41Z" ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/965?version=8" ; + schema1:keywords "ATAC-seq, chromatin-accessibiity" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/atacseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/965?version=4" ; + schema1:version 4 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """## Summary\r +\r +The data preparation pipeline contains tasks for two distinct scenarios: [leukaemia](https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE425) that contains microarray data for 119 patients and [ovarian](https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE140082) cancer that contains next generation sequencing data for 380 patients.\r +\r +The disease outcome prediction pipeline offers two strategies for this task:\r +\r +**Graph kernel method**: It starts generating personalized networks for each patient using the interactome file provided and generate the patient network checking if each PPI of the interactome has both proteins up regulated or down regulated according to the gene expression table provided. The first step generate a set of graphs for the patients that are evaluated with 4 distinct kernels for graph classification, which are: Linear kernel between edge histograms, Linear kernel between vertex histograms and the Weisfeiler lehman. These kernels functions calculate a similarity matrix for the graphs and then this matrix is used by the support vector machine classifier. Then the predictions are delivered to the last task that exports a report with the accuracy reached by each kernel. It allows some customizations about the network parameters to be used, such as the DEG cutoff to determine up and down regulated based on the log2 fold change, which will determine the topology and the labels distribution in the specific sample graphs. It is also possible customize the type of node/edge attributes passed to the kernel function, which may be only label, only weight or both.\r +\r +**GSEA based pathway scores method**: This method is faster and do not rely on tensor inputs such as the previous method. 
It uses geneset enrichment analysis on the pathways from KEGG 2021 of Human, and uses the scores of the pathways found enriched for the samples to build the numerical features matrix, that is then delivered to the AdaBoost classifier. The user may choose balance the dataset using oversampling strategy provided by SMOTE.\r +\r +## Usage Instructions\r +### Preparation:\r +1. ````git clone https://github.com/YasCoMa/screendop.git````\r +2. ````cd screendop````\r +3. Decompress screening_ovarian/raw_expression_table.tsv.tar.xz\r +4. Create conda environment to handle dependencies: ````conda env create -f drugresponse_env.yml````\r +5. ````conda activate drugresponse_env````\r +6. Setup an environment variable named "path_workflow_screendop" with the full path to this workflow folder\r +\r +### Data preparation - File ````data_preparation_for_pipeline.py```` :\r +\r +#### Files decompression\r +\r +- Decompress data_preparation/lekaemia.tar.xz\r +- Decompress data_preparation/ovarian/GSE140082_data.tar.xz\r + - Put the decompressed file GSE140082_series_matrix.txt in data_preparation/ovarian/\r + \r +#### Pipeline parameters\r +\r +- __-rt__ or __--running_type__
\r + Use to prepare data for the desired scenario:
\r + 1 - Run with Leukaemia data
\r + 2 - Run with Ovarian cancer data\r +\r +#### Running modes examples\r +\r +1. Run for Leukaemia data:
\r +````python3 data_preparation_for_pipeline.py -rt 1 ```` \r +\r +In this case, you must have [R](https://www.r-project.org/) installed and also the library [limma](https://bioconductor.org/packages/release/bioc/html/limma.html), it is used to determine DEGs from microarray data. For this dataset, the files are already prepared in the folder.\r +\r +2. Run for Ovarian cancer data:
\r +````python3 data_preparation_for_pipeline.py -rt 2 ```` \r +\r +In this case, you must have [R](https://www.r-project.org/) installed and also the library [DESeq](https://bioconductor.org/packages/release/bioc/html/DESeq.html), because this scenario treats next generation sequencing data\r +\r +### Disease outcome prediction execution - File ````main.py````:\r +\r +#### Pipeline parameters\r +\r +- __-rt__ or __--running_step__
\r + Use to prepare data for the desired scenario:
\r + 1 - Run graph kernel method
\r + 2 - Run gsea based pathway scores method\r +\r +- __-cf__ or __--configuration_file__
\r + File with the expression values for the genes by sample/patient in tsv format
\r + \r + Example of this file: config.json\r + \r +#### Input configuration file\r +\r +- Configuration file keys (see also the example in config.json):\r + - **folder** (mandatory for both methods): working directory\r + - **identifier**: project identifier to be used in the result files\r + - **mask_expression_table** (mandatory for both methods): Gene expression values file with the result of the fold change normalized value of a certain gene for each sample, already pruned by the significance (p-value). \r + - **raw_expression_table** (mandatory for both methods): Raw gene expression values already normalized following the method pf preference of the user.\r + - **labels_file** (mandatory for both methods): File with the prognosis label for each sample\r + - **deg_cutoff_up**: Cutoff value to determine up regulated gene. Default value is 1.\r + - **deg_cutoff_down**: Cutoff value to determine down regulated gene. Default value is -1.\r + - **nodes_enrichment**: Node attributes to be used in the screening evaluation. It may be a list combining the options "label", "weight" or "all". Examples: ["all", "weight"], ["label"], ["label", "weight"]. Default value is ["all"].\r + - **edges_enrichment**: Edge attributes to be used in the screening evaluation. It may be a list combining the options "label", "weight" or "all". Examples: ["all", "weight"], ["label"], ["label", "weight"]. Default value is ["all"].\r + - **flag_balance**: Flag to indicate whether the user wants to balance the samples in each outcome class, by SMOTE oversampling. Values may be false or true. Default value is false.\r +\r +#### Running modes examples\r +1. Running disease outcome prediction by graph kernel method:
\r + ````python3 main.py -rt 1 -cf config.json````\r +\r +2. Running disease outcome prediction by gsea enriched network method:
\r + ````python3 main.py -rt 2 -cf config.json````\r +\r +## Reference\r +Martins, Y. C. (2023). Multi-task analysis of gene expression data on cancer public datasets. medRxiv, 2023-09.\r +\r +## Bug Report\r +Please, use the [Issue](https://github.com/YasCoMa/screendop/issues) tab to report any bug.""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/621?version=1" ; + schema1:isBasedOn "https://github.com/YasCoMa/screendop" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ScreenDOP - Screening of strategies for disease outcome prediction" ; + schema1:sdDatePublished "2024-07-12 13:27:10 +0100" ; + schema1:url "https://workflowhub.eu/workflows/621/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9152 ; + schema1:creator ; + schema1:dateCreated "2023-10-22T00:18:50Z" ; + schema1:dateModified "2023-10-22T00:19:15Z" ; + schema1:description """## Summary\r +\r +The data preparation pipeline contains tasks for two distinct scenarios: [leukaemia](https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE425) that contains microarray data for 119 patients and [ovarian](https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE140082) cancer that contains next generation sequencing data for 380 patients.\r +\r +The disease outcome prediction pipeline offers two strategies for this task:\r +\r +**Graph kernel method**: It starts generating personalized networks for each patient using the interactome file provided and generate the patient network checking if each PPI of the interactome has both proteins up regulated or down regulated according to the gene expression table provided. 
The first step generate a set of graphs for the patients that are evaluated with 4 distinct kernels for graph classification, which are: Linear kernel between edge histograms, Linear kernel between vertex histograms and the Weisfeiler lehman. These kernels functions calculate a similarity matrix for the graphs and then this matrix is used by the support vector machine classifier. Then the predictions are delivered to the last task that exports a report with the accuracy reached by each kernel. It allows some customizations about the network parameters to be used, such as the DEG cutoff to determine up and down regulated based on the log2 fold change, which will determine the topology and the labels distribution in the specific sample graphs. It is also possible customize the type of node/edge attributes passed to the kernel function, which may be only label, only weight or both.\r +\r +**GSEA based pathway scores method**: This method is faster and do not rely on tensor inputs such as the previous method. It uses geneset enrichment analysis on the pathways from KEGG 2021 of Human, and uses the scores of the pathways found enriched for the samples to build the numerical features matrix, that is then delivered to the AdaBoost classifier. The user may choose balance the dataset using oversampling strategy provided by SMOTE.\r +\r +## Usage Instructions\r +### Preparation:\r +1. ````git clone https://github.com/YasCoMa/screendop.git````\r +2. ````cd screendop````\r +3. Decompress screening_ovarian/raw_expression_table.tsv.tar.xz\r +4. Create conda environment to handle dependencies: ````conda env create -f drugresponse_env.yml````\r +5. ````conda activate drugresponse_env````\r +6. 
Setup an environment variable named "path_workflow_screendop" with the full path to this workflow folder\r +\r +### Data preparation - File ````data_preparation_for_pipeline.py```` :\r +\r +#### Files decompression\r +\r +- Decompress data_preparation/lekaemia.tar.xz\r +- Decompress data_preparation/ovarian/GSE140082_data.tar.xz\r + - Put the decompressed file GSE140082_series_matrix.txt in data_preparation/ovarian/\r + \r +#### Pipeline parameters\r +\r +- __-rt__ or __--running_type__
\r + Use to prepare data for the desired scenario:
\r + 1 - Run with Leukaemia data
\r + 2 - Run with Ovarian cancer data\r +\r +#### Running modes examples\r +\r +1. Run for Leukaemia data:
\r +````python3 data_preparation_for_pipeline.py -rt 1 ```` \r +\r +In this case, you must have [R](https://www.r-project.org/) installed and also the library [limma](https://bioconductor.org/packages/release/bioc/html/limma.html), it is used to determine DEGs from microarray data. For this dataset, the files are already prepared in the folder.\r +\r +2. Run for Ovarian cancer data:
\r +````python3 data_preparation_for_pipeline.py -rt 2 ```` \r +\r +In this case, you must have [R](https://www.r-project.org/) installed and also the library [DESeq](https://bioconductor.org/packages/release/bioc/html/DESeq.html), because this scenario treats next generation sequencing data\r +\r +### Disease outcome prediction execution - File ````main.py````:\r +\r +#### Pipeline parameters\r +\r +- __-rt__ or __--running_step__
\r + Use to prepare data for the desired scenario:
\r + 1 - Run graph kernel method
\r + 2 - Run gsea based pathway scores method\r +\r +- __-cf__ or __--configuration_file__
\r + File with the expression values for the genes by sample/patient in tsv format
\r + \r + Example of this file: config.json\r + \r +#### Input configuration file\r +\r +- Configuration file keys (see also the example in config.json):\r + - **folder** (mandatory for both methods): working directory\r + - **identifier**: project identifier to be used in the result files\r + - **mask_expression_table** (mandatory for both methods): Gene expression values file with the result of the fold change normalized value of a certain gene for each sample, already pruned by the significance (p-value). \r + - **raw_expression_table** (mandatory for both methods): Raw gene expression values already normalized following the method pf preference of the user.\r + - **labels_file** (mandatory for both methods): File with the prognosis label for each sample\r + - **deg_cutoff_up**: Cutoff value to determine up regulated gene. Default value is 1.\r + - **deg_cutoff_down**: Cutoff value to determine down regulated gene. Default value is -1.\r + - **nodes_enrichment**: Node attributes to be used in the screening evaluation. It may be a list combining the options "label", "weight" or "all". Examples: ["all", "weight"], ["label"], ["label", "weight"]. Default value is ["all"].\r + - **edges_enrichment**: Edge attributes to be used in the screening evaluation. It may be a list combining the options "label", "weight" or "all". Examples: ["all", "weight"], ["label"], ["label", "weight"]. Default value is ["all"].\r + - **flag_balance**: Flag to indicate whether the user wants to balance the samples in each outcome class, by SMOTE oversampling. Values may be false or true. Default value is false.\r +\r +#### Running modes examples\r +1. Running disease outcome prediction by graph kernel method:
\r + ````python3 main.py -rt 1 -cf config.json````\r +\r +2. Running disease outcome prediction by gsea enriched network method:
\r + ````python3 main.py -rt 2 -cf config.json````\r +\r +## Reference\r +Martins, Y. C. (2023). Multi-task analysis of gene expression data on cancer public datasets. medRxiv, 2023-09.\r +\r +## Bug Report\r +Please, use the [Issue](https://github.com/YasCoMa/screendop/issues) tab to report any bug.""" ; + schema1:keywords "Bioinformatics, personalized medicine, gene set enrichment analysis, disease outcome prediction, public cancer datasets exploration, data wrangling, data transformation" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ScreenDOP - Screening of strategies for disease outcome prediction" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/621?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/986?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/fetchngs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fetchngs" ; + schema1:sdDatePublished "2024-07-12 13:21:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/986/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6338 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:52Z" ; + schema1:dateModified "2024-06-11T12:54:52Z" ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/986?version=13" ; + schema1:keywords "ddbj, download, ena, FASTQ, geo, sra, synapse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fetchngs" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/986?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# COVID-19 Multiscale Modelling of the Virus and Patients’ Tissue Workflow\r +\r +## Table of Contents\r +\r +- [COVID-19 Multiscale Modelling of the Virus and Patients’ Tissue Workflow](#covid-19-multiscale-modelling-of-the-virus-and-patients-tissue-workflow)\r + - [Table of Contents](#table-of-contents)\r + - [Description](#description)\r + - [Contents](#contents)\r + - [Building Blocks](#building-blocks)\r + - [Workflows](#workflows)\r + - [Resources](#resources)\r + - [Tests](#tests)\r + - [Instructions](#instructions)\r + - [Local machine](#local-machine)\r + - [Requirements](#requirements)\r + - [Usage steps](#usage-steps)\r + - [MareNostrum 4](#marenostrum-4)\r + - [Requirements in MN4](#requirements-in-mn4)\r + - [Usage steps in MN4](#usage-steps-in-mn4)\r + - [Mahti or Puhti](#mahti-or-puhti)\r + - [Requirements](#requirements)\r + - [Steps](#steps)\r + - [License](#license)\r + - [Contact](#contact)\r +\r +## Description\r +\r +Uses multiscale simulations to predict patient-specific SARS‑CoV‑2 severity subtypes\r +(moderate, severe or control), using single-cell RNA-Seq data, MaBoSS and PhysiBoSS.\r +Boolean models are 
used to determine the behaviour of individual agents as a function\r +of extracellular conditions and the concentration of different substrates, including\r +the number of virions. Predictions of severity subtypes are based on a meta-analysis of\r +personalised model outputs simulating cellular apoptosis regulation in epithelial cells\r +infected by SARS‑CoV‑2.\r +\r +The workflow uses the following building blocks, described in order of execution:\r +\r +1. High-throughput mutant analysis\r +2. Single-cell processing\r +3. Personalise patient\r +4. PhysiBoSS\r +5. Analysis of all simulations\r +\r +For details on individual workflow steps, see the user documentation for each building block.\r +\r +[`GitHub repository`]()\r +\r +\r +## Contents\r +\r +### Building Blocks\r +\r +The ``BuildingBlocks`` folder contains the script to install the\r +Building Blocks used in the COVID-19 Workflow.\r +\r +### Workflows\r +\r +The ``Workflow`` folder contains the workflows implementations.\r +\r +Currently contains the implementation using PyCOMPSs and Snakemake (in progress).\r +\r +### Resources\r +\r +The ``Resources`` folder contains dataset files.\r +\r +### Tests\r +\r +The ``Tests`` folder contains the scripts that run each Building Block\r +used in the workflow for the given small dataset.\r +They can be executed individually for testing purposes.\r +\r +## Instructions\r +\r +### Local machine\r +\r +This section explains the requirements and usage for the COVID19 Workflow in a laptop or desktop computer.\r +\r +#### Requirements\r +\r +- [`permedcoe`](https://github.com/PerMedCoE/permedcoe) package\r +- [PyCOMPSs](https://pycompss.readthedocs.io/en/stable/Sections/00_Quickstart.html) / [Snakemake](https://snakemake.readthedocs.io/en/stable/)\r +- [Singularity](https://sylabs.io/guides/3.0/user-guide/installation.html)\r +\r +#### Usage steps\r +\r +1. 
Clone this repository:\r +\r + ```bash\r + git clone https://github.com/PerMedCoE/covid-19-workflow.git\r + ```\r +\r +2. Install the Building Blocks required for the COVID19 Workflow:\r +\r + ```bash\r + covid-19-workflow/BuildingBlocks/./install_BBs.sh\r + ```\r +\r +3. Get the required Building Block images from the project [B2DROP](https://b2drop.bsc.es/index.php/f/444350):\r +\r + - Required images:\r + - MaBoSS.singularity\r + - meta_analysis.singularity\r + - PhysiCell-COVID19.singularity\r + - single_cell.singularity\r +\r + The path where these files are stored **MUST be exported in the `PERMEDCOE_IMAGES`** environment variable.\r +\r + > :warning: **TIP**: These containers can be built manually as follows (be patient since some of them may take some time):\r + 1. Clone the `BuildingBlocks` repository\r + ```bash\r + git clone https://github.com/PerMedCoE/BuildingBlocks.git\r + ```\r + 2. Build the required Building Block images\r + ```bash\r + cd BuildingBlocks/Resources/images\r + sudo singularity build MaBoSS.sif MaBoSS.singularity\r + sudo singularity build meta_analysis.sif meta_analysis.singularity\r + sudo singularity build PhysiCell-COVID19.sif PhysiCell-COVID19.singularity\r + sudo singularity build single_cell.sif single_cell.singularity\r + cd ../../..\r + ```\r +\r +**If using PyCOMPSs in local PC** (make sure that PyCOMPSs in installed):\r +\r +4. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflows/PyCOMPSs\r + ```\r +\r +5. Execute `./run.sh`\r +\r +**If using Snakemake in local PC** (make sure that SnakeMake is installed):\r +\r +4. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflows/SnakeMake\r + ```\r +\r +5. 
Execute `./run.sh`\r + > **TIP**: If you want to run the workflow with a different dataset, please update the `run.sh` script setting the `dataset` variable to the new dataset folder and their file names.\r +\r +\r +### MareNostrum 4\r +\r +This section explains the requirements and usage for the COVID19 Workflow in the MareNostrum 4 supercomputer.\r +\r +#### Requirements in MN4\r +\r +- Access to MN4\r +\r +All Building Blocks are already installed in MN4, and the COVID19 Workflow available.\r +\r +#### Usage steps in MN4\r +\r +1. Load the `COMPSs`, `Singularity` and `permedcoe` modules\r +\r + ```bash\r + export COMPSS_PYTHON_VERSION=3\r + module load COMPSs/3.1\r + module load singularity/3.5.2\r + module use /apps/modules/modulefiles/tools/COMPSs/libraries\r + module load permedcoe\r + ```\r +\r + > **TIP**: Include the loading into your `${HOME}/.bashrc` file to load it automatically on the session start.\r +\r + This commands will load COMPSs and the permedcoe package which provides all necessary dependencies, as well as the path to the singularity container images (`PERMEDCOE_IMAGES` environment variable) and testing dataset (`COVID19WORKFLOW_DATASET` environment variable).\r +\r +2. Get a copy of the pilot workflow into your desired folder\r +\r + ```bash\r + mkdir desired_folder\r + cd desired_folder\r + get_covid19workflow\r + ```\r +\r +3. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflow/PyCOMPSs\r + ```\r +\r +4. Execute `./launch.sh`\r +\r + This command will launch a job into the job queuing system (SLURM) requesting 2 nodes (one node acting half master and half worker, and other full worker node) for 20 minutes, and is prepared to use the singularity images that are already deployed in MN4 (located into the `PERMEDCOE_IMAGES` environment variable). 
It uses the dataset located into `../../Resources/data` folder.\r +\r + > :warning: **TIP**: If you want to run the workflow with a different dataset, please edit the `launch.sh` script and define the appropriate dataset path.\r +\r + After the execution, a `results` folder will be available with with COVID19 Workflow results.\r +\r +### Mahti or Puhti\r +\r +This section explains how to run the COVID19 workflow on CSC supercomputers using SnakeMake.\r +\r +#### Requirements\r +\r +- Install snakemake (or check if there is a version installed using `module spider snakemake`)\r +- Install workflow, using the same steps as for the local machine. With the exception that containers have to be built elsewhere.\r +\r +#### Steps\r +\r +\r +1. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflow/SnakeMake\r + ```\r +\r +2. Edit `launch.sh` with the correct partition, account, and resource specifications. \r +\r +3. Execute `./launch.sh`\r +\r + > :warning: Snakemake provides a `--cluster` flag, but this functionality should be avoided as it's really not suited for HPC systems.\r +\r +## License\r +\r +[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\r +\r +## Contact\r +\r +\r +\r +This software has been developed for the [PerMedCoE project](https://permedcoe.eu/), funded by the European Commission (EU H2020 [951773](https://cordis.europa.eu/project/id/951773)).\r +\r +![](https://permedcoe.eu/wp-content/uploads/2020/11/logo_1.png "PerMedCoE")\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/480?version=1" ; + schema1:isBasedOn "https://github.com/PerMedCoE/covid-19-workflow.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PerMedCoE Covid19 Pilot workflow (Nextflow)" ; + schema1:sdDatePublished "2024-07-12 13:33:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/480/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3353 ; + schema1:creator ; + schema1:dateCreated "2023-05-23T13:23:14Z" ; + schema1:dateModified "2023-05-23T13:23:14Z" ; + schema1:description """# COVID-19 Multiscale Modelling of the Virus and Patients’ Tissue Workflow\r +\r +## Table of Contents\r +\r +- [COVID-19 Multiscale Modelling of the Virus and Patients’ Tissue Workflow](#covid-19-multiscale-modelling-of-the-virus-and-patients-tissue-workflow)\r + - [Table of Contents](#table-of-contents)\r + - [Description](#description)\r + - [Contents](#contents)\r + - [Building Blocks](#building-blocks)\r + - [Workflows](#workflows)\r + - [Resources](#resources)\r + - [Tests](#tests)\r + - [Instructions](#instructions)\r + - [Local machine](#local-machine)\r + - [Requirements](#requirements)\r + - [Usage steps](#usage-steps)\r + - [MareNostrum 4](#marenostrum-4)\r + - [Requirements in MN4](#requirements-in-mn4)\r + - [Usage steps in MN4](#usage-steps-in-mn4)\r + - [Mahti or Puhti](#mahti-or-puhti)\r + - [Requirements](#requirements)\r + - [Steps](#steps)\r + - [License](#license)\r + - [Contact](#contact)\r +\r +## Description\r +\r +Uses multiscale simulations to predict patient-specific SARS‑CoV‑2 severity subtypes\r +(moderate, severe or control), using single-cell RNA-Seq data, MaBoSS and PhysiBoSS.\r +Boolean models are used to determine the behaviour of individual agents as a function\r +of extracellular conditions and the concentration of different substrates, including\r +the number of virions. Predictions of severity subtypes are based on a meta-analysis of\r +personalised model outputs simulating cellular apoptosis regulation in epithelial cells\r +infected by SARS‑CoV‑2.\r +\r +The workflow uses the following building blocks, described in order of execution:\r +\r +1. High-throughput mutant analysis\r +2. Single-cell processing\r +3. 
Personalise patient\r +4. PhysiBoSS\r +5. Analysis of all simulations\r +\r +For details on individual workflow steps, see the user documentation for each building block.\r +\r +[`GitHub repository`]()\r +\r +\r +## Contents\r +\r +### Building Blocks\r +\r +The ``BuildingBlocks`` folder contains the script to install the\r +Building Blocks used in the COVID-19 Workflow.\r +\r +### Workflows\r +\r +The ``Workflow`` folder contains the workflows implementations.\r +\r +Currently contains the implementation using PyCOMPSs and Snakemake (in progress).\r +\r +### Resources\r +\r +The ``Resources`` folder contains dataset files.\r +\r +### Tests\r +\r +The ``Tests`` folder contains the scripts that run each Building Block\r +used in the workflow for the given small dataset.\r +They can be executed individually for testing purposes.\r +\r +## Instructions\r +\r +### Local machine\r +\r +This section explains the requirements and usage for the COVID19 Workflow in a laptop or desktop computer.\r +\r +#### Requirements\r +\r +- [`permedcoe`](https://github.com/PerMedCoE/permedcoe) package\r +- [PyCOMPSs](https://pycompss.readthedocs.io/en/stable/Sections/00_Quickstart.html) / [Snakemake](https://snakemake.readthedocs.io/en/stable/)\r +- [Singularity](https://sylabs.io/guides/3.0/user-guide/installation.html)\r +\r +#### Usage steps\r +\r +1. Clone this repository:\r +\r + ```bash\r + git clone https://github.com/PerMedCoE/covid-19-workflow.git\r + ```\r +\r +2. Install the Building Blocks required for the COVID19 Workflow:\r +\r + ```bash\r + covid-19-workflow/BuildingBlocks/./install_BBs.sh\r + ```\r +\r +3. 
Get the required Building Block images from the project [B2DROP](https://b2drop.bsc.es/index.php/f/444350):\r +\r + - Required images:\r + - MaBoSS.singularity\r + - meta_analysis.singularity\r + - PhysiCell-COVID19.singularity\r + - single_cell.singularity\r +\r + The path where these files are stored **MUST be exported in the `PERMEDCOE_IMAGES`** environment variable.\r +\r + > :warning: **TIP**: These containers can be built manually as follows (be patient since some of them may take some time):\r + 1. Clone the `BuildingBlocks` repository\r + ```bash\r + git clone https://github.com/PerMedCoE/BuildingBlocks.git\r + ```\r + 2. Build the required Building Block images\r + ```bash\r + cd BuildingBlocks/Resources/images\r + sudo singularity build MaBoSS.sif MaBoSS.singularity\r + sudo singularity build meta_analysis.sif meta_analysis.singularity\r + sudo singularity build PhysiCell-COVID19.sif PhysiCell-COVID19.singularity\r + sudo singularity build single_cell.sif single_cell.singularity\r + cd ../../..\r + ```\r +\r +**If using PyCOMPSs in local PC** (make sure that PyCOMPSs in installed):\r +\r +4. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflows/PyCOMPSs\r + ```\r +\r +5. Execute `./run.sh`\r +\r +**If using Snakemake in local PC** (make sure that SnakeMake is installed):\r +\r +4. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflows/SnakeMake\r + ```\r +\r +5. Execute `./run.sh`\r + > **TIP**: If you want to run the workflow with a different dataset, please update the `run.sh` script setting the `dataset` variable to the new dataset folder and their file names.\r +\r +\r +### MareNostrum 4\r +\r +This section explains the requirements and usage for the COVID19 Workflow in the MareNostrum 4 supercomputer.\r +\r +#### Requirements in MN4\r +\r +- Access to MN4\r +\r +All Building Blocks are already installed in MN4, and the COVID19 Workflow available.\r +\r +#### Usage steps in MN4\r +\r +1. 
Load the `COMPSs`, `Singularity` and `permedcoe` modules\r +\r + ```bash\r + export COMPSS_PYTHON_VERSION=3\r + module load COMPSs/3.1\r + module load singularity/3.5.2\r + module use /apps/modules/modulefiles/tools/COMPSs/libraries\r + module load permedcoe\r + ```\r +\r + > **TIP**: Include the loading into your `${HOME}/.bashrc` file to load it automatically on the session start.\r +\r + This commands will load COMPSs and the permedcoe package which provides all necessary dependencies, as well as the path to the singularity container images (`PERMEDCOE_IMAGES` environment variable) and testing dataset (`COVID19WORKFLOW_DATASET` environment variable).\r +\r +2. Get a copy of the pilot workflow into your desired folder\r +\r + ```bash\r + mkdir desired_folder\r + cd desired_folder\r + get_covid19workflow\r + ```\r +\r +3. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflow/PyCOMPSs\r + ```\r +\r +4. Execute `./launch.sh`\r +\r + This command will launch a job into the job queuing system (SLURM) requesting 2 nodes (one node acting half master and half worker, and other full worker node) for 20 minutes, and is prepared to use the singularity images that are already deployed in MN4 (located into the `PERMEDCOE_IMAGES` environment variable). It uses the dataset located into `../../Resources/data` folder.\r +\r + > :warning: **TIP**: If you want to run the workflow with a different dataset, please edit the `launch.sh` script and define the appropriate dataset path.\r +\r + After the execution, a `results` folder will be available with with COVID19 Workflow results.\r +\r +### Mahti or Puhti\r +\r +This section explains how to run the COVID19 workflow on CSC supercomputers using SnakeMake.\r +\r +#### Requirements\r +\r +- Install snakemake (or check if there is a version installed using `module spider snakemake`)\r +- Install workflow, using the same steps as for the local machine. 
With the exception that containers have to be built elsewhere.\r +\r +#### Steps\r +\r +\r +1. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflow/SnakeMake\r + ```\r +\r +2. Edit `launch.sh` with the correct partition, account, and resource specifications. \r +\r +3. Execute `./launch.sh`\r +\r + > :warning: Snakemake provides a `--cluster` flag, but this functionality should be avoided as it's really not suited for HPC systems.\r +\r +## License\r +\r +[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\r +\r +## Contact\r +\r +\r +\r +This software has been developed for the [PerMedCoE project](https://permedcoe.eu/), funded by the European Commission (EU H2020 [951773](https://cordis.europa.eu/project/id/951773)).\r +\r +![](https://permedcoe.eu/wp-content/uploads/2020/11/logo_1.png "PerMedCoE")\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "PerMedCoE Covid19 Pilot workflow (Nextflow)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/480?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# Pangenome databases provide superior host removal and mycobacteria classification from clinical metagenomic data\r +\r +> Hall, M, Coin, L., Pangenome databases provide superior host removal and mycobacteria classification from clinical metagenomic data. bioRxiv 2023. doi: [10.1101/2023.09.18.558339][doi]\r +\r +Benchmarking different ways of doing read (taxonomic) classification, with a focus on\r +removal of contamination and classification of _M. 
tuberculosis_ reads.\r +\r +This repository contains the code and snakemake pipeline to build/download the\r +databases, obtain all results from [the paper][doi], along with accompanying configuration\r +files.\r +\r +Custom databases have all been uploaded to Zenodo, along with the simulated reads:\r +\r +- Nanopore simulated metagenomic reads - https://doi.org/10.5281/zenodo.8339788\r +- Illumina simulated metagenomic reads - https://doi.org/10.5281/zenodo.8339790\r +- Kraken2 database built from the Human Pangenome Reference Consortium\r + genomes - https://doi.org/10.5281/zenodo.8339731\r +- Kraken2 database built from the kraken2 Human\r + library - https://doi.org/10.5281/zenodo.8339699\r +- Kraken2 database built from a *Mycobacterium* representative set of\r + genomes - https://doi.org/10.5281/zenodo.8339821\r +- A (fasta) database of representative genomes from the *Mycobacterium*\r + genus - https://doi.org/10.5281/zenodo.8339940\r +- A (fasta) database of *M. tuberculosis* genomes from a variety of\r + lineages - https://doi.org/10.5281/zenodo.8339947\r +- The fasta file built from the [Clockwork](https://github.com/iqbal-lab-org/clockwork)\r + decontamination pipeline - https://doi.org/10.5281/zenodo.8339802\r +\r +## Example usage\r +\r +We provide some usage examples showing how to download the databases and then use them\r +on your reads.\r +\r +### Human read removal\r +\r +The method we found to give the best balance of runtime, memory usage, and precision and\r +recall was kraken2 with a database built from the Human Pangenome Reference Consortium\r +genomes.\r +\r +This example has been wrapped into a standalone tool called [`nohuman`](https://github.com/mbhall88/nohuman/) which takes a fastq as input and returns a fastq with human reads removed.\r +\r +#### Download human database\r +\r +```\r +mkdir HPRC_db/\r +cd HPRC_db\r +URL="https://zenodo.org/record/8339732/files/k2_HPRC_20230810.tar.gz"\r +wget "$URL"\r +tar -xzf 
k2_HPRC_20230810.tar.gz\r +rm k2_HPRC_20230810.tar.gz\r +```\r +\r +#### Run kraken2 with HPRC database\r +\r +You'll need [kraken2](https://github.com/DerrickWood/kraken2) installed for this step.\r +\r +```\r +kraken2 --threads 4 --db HPRC_db/ --output classifications.tsv reads.fq\r +```\r +\r +If you are using Illumina reads, a slight adjustment is needed\r +\r +```\r +kraken2 --paired --threads 4 --db HPRC_db/ --output classifications.tsv reads_1.fq reads_2.fq\r +```\r +\r +#### Extract non-human reads\r +\r +You'll need [seqkit](https://github.com/shenwei356/seqkit) installed for this step\r +\r +For Nanopore data\r +\r +```\r +awk -F'\\t' '$1=="U" {print $2}' classifications.tsv | \\\r + seqkit grep -f - -o reads.depleted.fq reads.fq\r +```\r +\r +For Illumina data\r +\r +```\r +awk -F'\\t' '$1=="U" {print $2}' classifications.tsv > ids.txt\r +seqkit grep --id-regexp '^(\\S+)/[12]' -f ids.txt -o reads_1.depleted.fq reads_1.fq\r +seqkit grep --id-regexp '^(\\S+)/[12]' -f ids.txt -o reads_2.depleted.fq reads_2.fq\r +```\r +\r +### *M. tuberculosis* classification/enrichment\r +\r +For this step we recommend either [minimap2](https://github.com/lh3/minimap2) or kraken2\r +with a *Mycobacterium* genus database. 
We leave it to the user to decide which approach\r +they prefer based on the results in our manuscript.\r +\r +#### Download databases\r +\r +```\r +mkdir Mycobacterium_db\r +cd Mycobacterium_db\r +# download database for use with minimap2\r +URL="https://zenodo.org/record/8339941/files/Mycobacterium.rep.fna.gz"\r +wget "$URL"\r +IDS_URL="https://zenodo.org/record/8343322/files/mtb.ids"\r +wget "$IDS_URL"\r +# download kraken database\r +URL="https://zenodo.org/record/8339822/files/k2_Mycobacterium_20230817.tar.gz"\r +wget "$URL"\r +tar -xzf k2_Mycobacterium_20230817.tar.gz\r +rm k2_Mycobacterium_20230817.tar.gz\r +```\r +\r +#### Classify reads\r +\r +**minimap2**\r +\r +```\r +# nanopore\r +minimap2 --secondary=no -c -t 4 -x map-ont -o reads.aln.paf Mycobacterium_db/Mycobacterium.rep.fna.gz reads.depleted.fq\r +# illumina\r +minimap2 --secondary=no -c -t 4 -x sr -o reads.aln.paf Mycobacterium_db/Mycobacterium.rep.fna.gz reads_1.depleted.fq reads_2.depleted.fq\r +```\r +\r +**kraken2**\r +\r +```\r +# nanopore\r +kraken2 --db Mycobacterium_db --threads 4 --report myco.kreport --output classifications.myco.tsv reads.depleted.fq\r +# illumina\r +kraken2 --db Mycobacterium_db --paired --threads 4 --report myco.kreport --output classifications.myco.tsv reads_1.depleted.fq reads_2.depleted.fq\r +```\r +\r +#### Extract *M. 
tuberculosis* reads\r +\r +**minimap2**\r +\r +```\r +# nanopore\r +grep -Ff Mycobacterium_db/mtb.ids reads.aln.paf | cut -f1 | \\\r + seqkit grep -f - -o reads.enriched.fq reads.depleted.fq\r +# illumina\r +grep -Ff Mycobacterium_db/mtb.ids reads.aln.paf | cut -f1 > keep.ids\r +seqkit grep -f keep.ids -o reads_1.enriched.fq reads_1.depleted.fq\r +seqkit grep -f keep.ids -o reads_2.enriched.fq reads_2.depleted.fq\r +```\r +\r +**kraken2**\r +\r +We'll use\r +the [`extract_kraken_reads.py` script](https://github.com/jenniferlu717/KrakenTools#extract_kraken_readspy)\r +for this\r +\r +```\r +# nanopore\r +python extract_kraken_reads.py -k classifications.myco.tsv -1 reads.depleted.fq -o reads.enriched.fq -t 1773 -r myco.kreport --include-children\r +# illumina\r +python extract_kraken_reads.py -k classifications.myco.tsv -1 reads_1.depleted.fq -2 reads_2.depleted.fq -o reads_1.enriched.fq -o2 reads_2.enriched.fq -t 1773 -r myco.kreport --include-children\r +```\r +\r +[doi]: https://doi.org/10.1101/2023.09.18.558339 \r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.700.1" ; + schema1:isBasedOn "https://github.com/mbhall88/classification_benchmark.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Pangenome databases provide superior host removal and mycobacteria classification from clinical metagenomic data" ; + schema1:sdDatePublished "2024-07-12 13:25:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/700/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1531 ; + schema1:creator ; + schema1:dateCreated "2024-01-09T05:43:00Z" ; + schema1:dateModified "2024-01-09T20:38:42Z" ; + schema1:description """# Pangenome databases provide superior host removal and mycobacteria classification from clinical metagenomic data\r +\r +> Hall, M, Coin, L., Pangenome databases provide superior host removal and mycobacteria classification from clinical metagenomic data. bioRxiv 2023. doi: [10.1101/2023.09.18.558339][doi]\r +\r +Benchmarking different ways of doing read (taxonomic) classification, with a focus on\r +removal of contamination and classification of _M. tuberculosis_ reads.\r +\r +This repository contains the code and snakemake pipeline to build/download the\r +databases, obtain all results from [the paper][doi], along with accompanying configuration\r +files.\r +\r +Custom databases have all been uploaded to Zenodo, along with the simulated reads:\r +\r +- Nanopore simulated metagenomic reads - https://doi.org/10.5281/zenodo.8339788\r +- Illumina simulated metagenomic reads - https://doi.org/10.5281/zenodo.8339790\r +- Kraken2 database built from the Human Pangenome Reference Consortium\r + genomes - https://doi.org/10.5281/zenodo.8339731\r +- Kraken2 database built from the kraken2 Human\r + library - https://doi.org/10.5281/zenodo.8339699\r +- Kraken2 database built from a *Mycobacterium* representative set of\r + genomes - https://doi.org/10.5281/zenodo.8339821\r +- A (fasta) database of representative genomes from the *Mycobacterium*\r + genus - https://doi.org/10.5281/zenodo.8339940\r +- A (fasta) database of *M. 
tuberculosis* genomes from a variety of\r + lineages - https://doi.org/10.5281/zenodo.8339947\r +- The fasta file built from the [Clockwork](https://github.com/iqbal-lab-org/clockwork)\r + decontamination pipeline - https://doi.org/10.5281/zenodo.8339802\r +\r +## Example usage\r +\r +We provide some usage examples showing how to download the databases and then use them\r +on your reads.\r +\r +### Human read removal\r +\r +The method we found to give the best balance of runtime, memory usage, and precision and\r +recall was kraken2 with a database built from the Human Pangenome Reference Consortium\r +genomes.\r +\r +This example has been wrapped into a standalone tool called [`nohuman`](https://github.com/mbhall88/nohuman/) which takes a fastq as input and returns a fastq with human reads removed.\r +\r +#### Download human database\r +\r +```\r +mkdir HPRC_db/\r +cd HPRC_db\r +URL="https://zenodo.org/record/8339732/files/k2_HPRC_20230810.tar.gz"\r +wget "$URL"\r +tar -xzf k2_HPRC_20230810.tar.gz\r +rm k2_HPRC_20230810.tar.gz\r +```\r +\r +#### Run kraken2 with HPRC database\r +\r +You'll need [kraken2](https://github.com/DerrickWood/kraken2) installed for this step.\r +\r +```\r +kraken2 --threads 4 --db HPRC_db/ --output classifications.tsv reads.fq\r +```\r +\r +If you are using Illumina reads, a slight adjustment is needed\r +\r +```\r +kraken2 --paired --threads 4 --db HPRC_db/ --output classifications.tsv reads_1.fq reads_2.fq\r +```\r +\r +#### Extract non-human reads\r +\r +You'll need [seqkit](https://github.com/shenwei356/seqkit) installed for this step\r +\r +For Nanopore data\r +\r +```\r +awk -F'\\t' '$1=="U" {print $2}' classifications.tsv | \\\r + seqkit grep -f - -o reads.depleted.fq reads.fq\r +```\r +\r +For Illumina data\r +\r +```\r +awk -F'\\t' '$1=="U" {print $2}' classifications.tsv > ids.txt\r +seqkit grep --id-regexp '^(\\S+)/[12]' -f ids.txt -o reads_1.depleted.fq reads_1.fq\r +seqkit grep --id-regexp '^(\\S+)/[12]' -f ids.txt -o 
reads_2.depleted.fq reads_2.fq\r +```\r +\r +### *M. tuberculosis* classification/enrichment\r +\r +For this step we recommend either [minimap2](https://github.com/lh3/minimap2) or kraken2\r +with a *Mycobacterium* genus database. We leave it to the user to decide which approach\r +they prefer based on the results in our manuscript.\r +\r +#### Download databases\r +\r +```\r +mkdir Mycobacterium_db\r +cd Mycobacterium_db\r +# download database for use with minimap2\r +URL="https://zenodo.org/record/8339941/files/Mycobacterium.rep.fna.gz"\r +wget "$URL"\r +IDS_URL="https://zenodo.org/record/8343322/files/mtb.ids"\r +wget "$IDS_URL"\r +# download kraken database\r +URL="https://zenodo.org/record/8339822/files/k2_Mycobacterium_20230817.tar.gz"\r +wget "$URL"\r +tar -xzf k2_Mycobacterium_20230817.tar.gz\r +rm k2_Mycobacterium_20230817.tar.gz\r +```\r +\r +#### Classify reads\r +\r +**minimap2**\r +\r +```\r +# nanopore\r +minimap2 --secondary=no -c -t 4 -x map-ont -o reads.aln.paf Mycobacterium_db/Mycobacterium.rep.fna.gz reads.depleted.fq\r +# illumina\r +minimap2 --secondary=no -c -t 4 -x sr -o reads.aln.paf Mycobacterium_db/Mycobacterium.rep.fna.gz reads_1.depleted.fq reads_2.depleted.fq\r +```\r +\r +**kraken2**\r +\r +```\r +# nanopore\r +kraken2 --db Mycobacterium_db --threads 4 --report myco.kreport --output classifications.myco.tsv reads.depleted.fq\r +# illumina\r +kraken2 --db Mycobacterium_db --paired --threads 4 --report myco.kreport --output classifications.myco.tsv reads_1.depleted.fq reads_2.depleted.fq\r +```\r +\r +#### Extract *M. 
tuberculosis* reads\r +\r +**minimap2**\r +\r +```\r +# nanopore\r +grep -Ff Mycobacterium_db/mtb.ids reads.aln.paf | cut -f1 | \\\r + seqkit grep -f - -o reads.enriched.fq reads.depleted.fq\r +# illumina\r +grep -Ff Mycobacterium_db/mtb.ids reads.aln.paf | cut -f1 > keep.ids\r +seqkit grep -f keep.ids -o reads_1.enriched.fq reads_1.depleted.fq\r +seqkit grep -f keep.ids -o reads_2.enriched.fq reads_2.depleted.fq\r +```\r +\r +**kraken2**\r +\r +We'll use\r +the [`extract_kraken_reads.py` script](https://github.com/jenniferlu717/KrakenTools#extract_kraken_readspy)\r +for this\r +\r +```\r +# nanopore\r +python extract_kraken_reads.py -k classifications.myco.tsv -1 reads.depleted.fq -o reads.enriched.fq -t 1773 -r myco.kreport --include-children\r +# illumina\r +python extract_kraken_reads.py -k classifications.myco.tsv -1 reads_1.depleted.fq -2 reads_2.depleted.fq -o reads_1.enriched.fq -o2 reads_2.enriched.fq -t 1773 -r myco.kreport --include-children\r +```\r +\r +[doi]: https://doi.org/10.1101/2023.09.18.558339 \r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/700?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Pangenome databases provide superior host removal and mycobacteria classification from clinical metagenomic data" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/700?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """Kmer counting step, can run alone or as part of a combined workflow for large genome assembly. \r +\r +* What it does: Estimates genome size and heterozygosity based on counts of kmers\r +* Inputs: One set of short reads: e.g. 
R1.fq.gz\r +* Outputs: GenomeScope graphs\r +* Tools used: Meryl, GenomeScope\r +* Input parameters: None required\r +* Workflow steps: The tool meryl counts kmers in the input reads (k=21), then converts this into a histogram. GenomeScope: runs a model on the histogram; reports estimates. k-mer size set to 21. \r +* Options: Use a different kmer counting tool. e.g. khmer.\r +\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.223.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for kmer counting - meryl" ; + schema1:sdDatePublished "2024-07-12 13:36:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/223/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9477 ; + schema1:creator ; + schema1:dateCreated "2021-11-08T04:47:27Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """Kmer counting step, can run alone or as part of a combined workflow for large genome assembly. \r +\r +* What it does: Estimates genome size and heterozygosity based on counts of kmers\r +* Inputs: One set of short reads: e.g. R1.fq.gz\r +* Outputs: GenomeScope graphs\r +* Tools used: Meryl, GenomeScope\r +* Input parameters: None required\r +* Workflow steps: The tool meryl counts kmers in the input reads (k=21), then converts this into a histogram. GenomeScope: runs a model on the histogram; reports estimates. k-mer size set to 21. \r +* Options: Use a different kmer counting tool. e.g. 
khmer.\r +\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "Large-genome-assembly" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "kmer counting - meryl" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/223?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 368783 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """## Description\r +\r +The workflow takes an input file with Cancer Driver Genes predictions (i.e. the results provided by a participant), computes a set of metrics, and compares them against the data currently stored in OpenEBench within the TCGA community. Two assessment metrics are provided for that predicitions. Also, some plots (which are optional) that allow to visualize the performance of the tool are generated. The workflow consists in three standard steps, defined by OpenEBench. The tools needed to run these steps must be in one or more Docker images, generated from [Docker containers](https://github.com/inab/TCGA_benchmarking_dockers ). Separated instances are spawned from these images for each step:\r +1. **Validation**: the input file format is checked and, if required, the content of the file is validated (e.g check whether the submitted gene IDs exist)\r +2. **Metrics Computation**: the predictions are compared with the 'Gold Standards' provided by the community, which results in two performance metrics - precision (Positive Predictive Value) and recall(True Positive Rate).\r +3. **Results Consolidation**: the benchmark itself is performed by merging the tool metrics with the rest of TCGA data. 
The results are provided in JSON format and SVG format (scatter plot).\r +\r +![alt text](https://raw.githubusercontent.com/inab/TCGA_benchmarking_workflow/1.0.4/workflow.jpg)\r +\r +\r +## Data\r +\r +* [TCGA_sample_data](./TCGA_sample_data) folder contains all the reference data required by the steps. It is derived from the manuscript:\r +[Comprehensive Characterization of Cancer Driver Genes and Mutations](https://www.cell.com/cell/fulltext/S0092-8674%2818%2930237-X?code=cell-site), Bailey et al, 2018, Cell [![doi:10.1016/j.cell.2018.02.060](https://img.shields.io/badge/doi-10.1016%2Fj.cell.2018.02.060-green.svg)](https://doi.org/10.1016/j.cell.2018.02.060) \r +* [TCGA_sample_out](./TCGA_sample_out) folder contains an example output for a worklow run, with two cancer types / challenges selected (ACC, BRCA). Results obtained from the default execution should be similar to those ones available in this directory. Results found in [TCGA_sample_out/results](./TCGA_sample_out/results) can be visualized in the browser using [`benchmarking_workflows_results_visualizer` javascript library](https://github.com/inab/benchmarking_workflows_results_visualizer).\r +\r +\r +## Usage\r +In order to use the workflow you need to:\r +* Install [Nextflow](https://www.nextflow.io/), which depends on Java virtual machine (>=8 , <15 ). You can automate their installation for local testing using [run_local_nextflow.bash](run_local_nextflow.bash).\r +* Clone [TCGA benchmarking Docker definitions repository](https://github.com/inab/TCGA_benchmarking_dockers) from tag 1.0.3 in a separate directory, and build locally the three Docker images found in it, running the `build.sh 1.0.3` script within that repo.\r +* Run it just using either *`nextflow run main.nf -profile docker`* or *`./run_local_nextflow.bash run main.nf -profile docker`*. 
Arguments specifications:\r +\r +```\r +Usage:\r +Run the pipeline with default parameters:\r +nextflow run main.nf -profile docker\r +\r +Run with user parameters:\r +nextflow run main.nf -profile docker --predictionsFile {driver.genes.file} --public_ref_dir {validation.reference.file} --participant_name {tool.name} --metrics_ref_dir {gold.standards.dir} --cancer_types {analyzed.cancer.types} --assess_dir {benchmark.data.dir} --results_dir {output.dir}\r +\r +Mandatory arguments:\r + --input List of cancer genes prediction\r + --community_id Name or OEB permanent ID for the benchmarking community\r + --public_ref_dir Directory with list of cancer genes used to validate the predictions\r + --participant_id Name of the tool used for prediction\r + --goldstandard_dir Dir that contains metrics reference datasets for all cancer types\r + --event_id List of types of cancer selected by the user, separated by spaces\r + --assess_dir Dir where the data for the benchmark are stored\r +\r +Other options:\r + --validation_result The output file where the results from validation step will be saved\r + --assessment_results The output file where the results from the computed metrics step will be saved\r + --aggregation_results The output file where the consolidation of the benchmark will be saved\r + --statistics_results The output directory with nextflow statistics\r + --outdir The output directory where the consolidation of the benchmark will be saved\r + --statsdir The output directory with nextflow statistics\r + --data_model_export_dir The output dir where json file with benchmarking data model contents will be saved\r + --otherdir The output directory where custom results will be saved (no directory inside)\r +Flags:\r + --help Display this message\r +```\r +\r +Default input parameters and Docker images to use for each step can be specified in the [config](./nextflow.config) file\r +**NOTE: In order to make your workflow compatible with the [OpenEBench VRE Nextflow 
Executor](https://github.com/inab/vre-process_nextflow-executor), please make sure to use the same parameter names in your workflow.**""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/244?version=1" ; + schema1:isBasedOn "https://github.com/inab/TCGA_benchmarking_workflow/tree/1.0.8" ; + schema1:license "LGPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for OpenEBench assessment workflow" ; + schema1:sdDatePublished "2024-07-12 13:36:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/244/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5597 ; + schema1:creator , + , + ; + schema1:dateCreated "2021-11-23T16:55:30Z" ; + schema1:dateModified "2021-11-24T09:23:33Z" ; + schema1:description """## Description\r +\r +The workflow takes an input file with Cancer Driver Genes predictions (i.e. the results provided by a participant), computes a set of metrics, and compares them against the data currently stored in OpenEBench within the TCGA community. Two assessment metrics are provided for that predicitions. Also, some plots (which are optional) that allow to visualize the performance of the tool are generated. The workflow consists in three standard steps, defined by OpenEBench. The tools needed to run these steps must be in one or more Docker images, generated from [Docker containers](https://github.com/inab/TCGA_benchmarking_dockers ). Separated instances are spawned from these images for each step:\r +1. **Validation**: the input file format is checked and, if required, the content of the file is validated (e.g check whether the submitted gene IDs exist)\r +2. 
**Metrics Computation**: the predictions are compared with the 'Gold Standards' provided by the community, which results in two performance metrics - precision (Positive Predictive Value) and recall(True Positive Rate).\r +3. **Results Consolidation**: the benchmark itself is performed by merging the tool metrics with the rest of TCGA data. The results are provided in JSON format and SVG format (scatter plot).\r +\r +![alt text](https://raw.githubusercontent.com/inab/TCGA_benchmarking_workflow/1.0.4/workflow.jpg)\r +\r +\r +## Data\r +\r +* [TCGA_sample_data](./TCGA_sample_data) folder contains all the reference data required by the steps. It is derived from the manuscript:\r +[Comprehensive Characterization of Cancer Driver Genes and Mutations](https://www.cell.com/cell/fulltext/S0092-8674%2818%2930237-X?code=cell-site), Bailey et al, 2018, Cell [![doi:10.1016/j.cell.2018.02.060](https://img.shields.io/badge/doi-10.1016%2Fj.cell.2018.02.060-green.svg)](https://doi.org/10.1016/j.cell.2018.02.060) \r +* [TCGA_sample_out](./TCGA_sample_out) folder contains an example output for a worklow run, with two cancer types / challenges selected (ACC, BRCA). Results obtained from the default execution should be similar to those ones available in this directory. Results found in [TCGA_sample_out/results](./TCGA_sample_out/results) can be visualized in the browser using [`benchmarking_workflows_results_visualizer` javascript library](https://github.com/inab/benchmarking_workflows_results_visualizer).\r +\r +\r +## Usage\r +In order to use the workflow you need to:\r +* Install [Nextflow](https://www.nextflow.io/), which depends on Java virtual machine (>=8 , <15 ). 
You can automate their installation for local testing using [run_local_nextflow.bash](run_local_nextflow.bash).\r +* Clone [TCGA benchmarking Docker definitions repository](https://github.com/inab/TCGA_benchmarking_dockers) from tag 1.0.3 in a separate directory, and build locally the three Docker images found in it, running the `build.sh 1.0.3` script within that repo.\r +* Run it just using either *`nextflow run main.nf -profile docker`* or *`./run_local_nextflow.bash run main.nf -profile docker`*. Arguments specifications:\r +\r +```\r +Usage:\r +Run the pipeline with default parameters:\r +nextflow run main.nf -profile docker\r +\r +Run with user parameters:\r +nextflow run main.nf -profile docker --predictionsFile {driver.genes.file} --public_ref_dir {validation.reference.file} --participant_name {tool.name} --metrics_ref_dir {gold.standards.dir} --cancer_types {analyzed.cancer.types} --assess_dir {benchmark.data.dir} --results_dir {output.dir}\r +\r +Mandatory arguments:\r + --input List of cancer genes prediction\r + --community_id Name or OEB permanent ID for the benchmarking community\r + --public_ref_dir Directory with list of cancer genes used to validate the predictions\r + --participant_id Name of the tool used for prediction\r + --goldstandard_dir Dir that contains metrics reference datasets for all cancer types\r + --event_id List of types of cancer selected by the user, separated by spaces\r + --assess_dir Dir where the data for the benchmark are stored\r +\r +Other options:\r + --validation_result The output file where the results from validation step will be saved\r + --assessment_results The output file where the results from the computed metrics step will be saved\r + --aggregation_results The output file where the consolidation of the benchmark will be saved\r + --statistics_results The output directory with nextflow statistics\r + --outdir The output directory where the consolidation of the benchmark will be saved\r + --statsdir The output 
directory with nextflow statistics\r + --data_model_export_dir The output dir where json file with benchmarking data model contents will be saved\r + --otherdir The output directory where custom results will be saved (no directory inside)\r +Flags:\r + --help Display this message\r +```\r +\r +Default input parameters and Docker images to use for each step can be specified in the [config](./nextflow.config) file\r +**NOTE: In order to make your workflow compatible with the [OpenEBench VRE Nextflow Executor](https://github.com/inab/vre-process_nextflow-executor), please make sure to use the same parameter names in your workflow.**""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/244?version=3" ; + schema1:keywords "tcga, openebench, benchmarking" ; + schema1:license "https://spdx.org/licenses/LGPL-3.0" ; + schema1:name "OpenEBench assessment workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/244?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2021-12-20T17:04:47.633738" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-pe-illumina-artic-variant-calling/COVID-19-PE-ARTIC-ILLUMINA" ; + schema1:sdDatePublished "2021-12-21 03:01:00 +0000" ; + schema1:softwareVersion "v0.4.2" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "# beacon-omop-worker-workflows" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/882?version=1" ; + schema1:isBasedOn "https://github.com/Health-Informatics-UoN/beacon-omop-worker-workflows.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for beacon-workflow" ; + schema1:sdDatePublished "2024-07-12 13:22:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/882/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 6001 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 626 ; + schema1:dateCreated "2024-05-10T12:40:07Z" ; + schema1:dateModified "2024-05-10T12:40:07Z" ; + schema1:description "# beacon-omop-worker-workflows" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/882?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "beacon-workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/882?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + ; + ns1:output . + + a schema1:Dataset ; + schema1:datePublished "2023-11-03T14:21:07.903072" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Purge-duplicate-contigs-VGP6/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicate-contigs-VGP6/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:version "0.1.1" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Differential abundance analysis" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/981?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/differentialabundance" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/differentialabundance" ; + schema1:sdDatePublished "2024-07-12 13:21:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/981/ro_crate?version=7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15641 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:49Z" ; + schema1:dateModified "2024-06-11T12:54:49Z" ; + schema1:description "Differential abundance analysis" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/981?version=8" ; + schema1:keywords "ATAC-seq, ChIP-seq, deseq2, differential-abundance, differential-expression, gsea, limma, microarray, rna-seq, shiny" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/differentialabundance" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/981?version=7" ; + schema1:version 7 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Workflow for NonSpliced RNAseq data with multiple aligners.\r +\r +Steps: \r + - workflow_quality.cwl:\r + - FastQC (control)\r + - fastp (trimming)\r + - bowtie2 (read mapping)\r + - sam_to_sorted-bam\r + - featurecounts (transcript read counts)\r + - kallisto (transcript [pseudo]counts)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/77?version=1" ; + schema1:isBasedOn "https://git.wur.nl/unlock/cwl/-/blob/master/cwl/workflows/workflow_RNAseq_NonSpliced.cwl" ; + schema1:license "CC0-1.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for NonSpliced RNAseq workflow" ; + schema1:sdDatePublished "2024-07-12 13:36:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/77/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 34088 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7251 ; + schema1:creator , + ; + schema1:dateCreated "2020-11-24T11:05:56Z" ; + schema1:dateModified "2023-01-16T13:46:21Z" ; + schema1:description """Workflow for NonSpliced RNAseq data with multiple aligners.\r +\r +Steps: \r + - workflow_quality.cwl:\r + - FastQC (control)\r + - fastp (trimming)\r + - bowtie2 (read mapping)\r + - sam_to_sorted-bam\r + - featurecounts (transcript read counts)\r + - kallisto (transcript [pseudo]counts)\r +""" ; + schema1:image ; + schema1:keywords "Alignment, bowtie2, featurecounts, kallisto, nonspliced" ; + schema1:license "https://spdx.org/licenses/CC0-1.0" ; + schema1:name "NonSpliced RNAseq workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/77?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , 
+ , + , + , + , + ; + ns1:output , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2023-11-24T11:23:50.158837" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/gcms-metams" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "gcms-metams/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-07-12 13:22:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4744 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:11Z" ; + schema1:dateModified "2024-06-11T12:55:11Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. 
\r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +\r +Workflow information:\r +* Input = genome.fasta.\r +* Outputs = masked_genome.fasta and table of repeats found. \r +* Runs RepeatModeler with default settings, uses the output of this (repeat library) as input into RepeatMasker. \r +* Runs RepeatMasker with default settings except for: Skip masking of simple tandem repeats and low complexity regions. (-nolow) : default set to yes. Perform softmasking instead of hardmasking - set to yes. \r +* Workflow report displays an edited table of repeats found. Note: a known bug is that sometimes the workflow report text resets to default text. To restore, look for an earlier workflow version with correct workflow report text, and copy and paste report text into current version.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/875?version=1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Repeat masking - TSI" ; + schema1:sdDatePublished "2024-07-12 13:17:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/875/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8120 ; + schema1:creator , + ; + schema1:dateCreated "2024-05-08T03:59:07Z" ; + schema1:dateModified "2024-05-08T06:24:26Z" ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. 
\r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +\r +Workflow information:\r +* Input = genome.fasta.\r +* Outputs = masked_genome.fasta and table of repeats found. \r +* Runs RepeatModeler with default settings, uses the output of this (repeat library) as input into RepeatMasker. \r +* Runs RepeatMasker with default settings except for: Skip masking of simple tandem repeats and low complexity regions. (-nolow) : default set to yes. Perform softmasking instead of hardmasking - set to yes. \r +* Workflow report displays an edited table of repeats found. Note: a known bug is that sometimes the workflow report text resets to default text. To restore, look for an earlier workflow version with correct workflow report text, and copy and paste report text into current version.\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/875?version=2" ; + schema1:isPartOf ; + schema1:keywords "TSI-annotation" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Repeat masking - TSI" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/875?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 403507 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Summary\r +\r +Jupyter Notebook containing a tutorial to illustrate the process of ligand parameterization for a small molecule, step by step, using the BioExcel Building Blocks library (biobb). 
The particular example used is the Sulfasalazine protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +OpenBabel and ACPype packages are used to add hydrogens, energetically minimize the structure, and generate parameters for the GROMACS package. With Generalized Amber Force Field (GAFF) forcefield and AM1-BCC charges.\r +\r +\r +# Parameters\r +\r +## Inputs: \r +\r +\r +* **ligandCode**: 3-letter code of the ligand structure (e.g. IBP)\r +\r +* **mol_charge**: Molecule net charge (e.g. -1)\r +\r +* **pH**: Acidity or alkalinity for the small molecule. Hydrogen atoms will be added according to this pH. (e.g. 7.4)\r +\r +## Outputs\r +\r +\r +* **IBPparams.gro**: Structure of the parameterized ligand in gro (GROMACS) format.\r +\r +* **IBPparams.top**: Topology of the parameterized ligand, including a reference to the IBPparams.itp.\r +\r +* **IBPparams.itp**: Include Topology File (itp) of the parameterized ligand, including the parameters information: bonds, angles, dihedrals, etc.\r +\r +# Additional Resources\r +\r +* [Tutorial Page for this workflow](http://mmb.irbbarcelona.org/biobb/availability/tutorials/ligand-parameterization)\r +\r +* [GitHub repository](https://github.com/bioexcel/biobb_wf_ligand_parameterization)\r +\r +* [View in Binder](https://mybinder.org/v2/gh/bioexcel/biobb_wf_ligand_parameterization/master?filepath=biobb_wf_ligand_parameterization%2Fnotebooks%2Fbiobb_ligand_parameterization_tutorial.ipynb)\r +\r +* [Documentation](https://biobb-wf-ligand-parameterization.readthedocs.io/en/latest/index.html)""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.54.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_ligand_parameterization" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)" ; + 
schema1:sdDatePublished "2024-07-12 13:24:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/54/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14669 ; + schema1:creator , + , + ; + schema1:dateCreated "2020-09-14T10:01:04Z" ; + schema1:dateModified "2021-05-13T08:14:49Z" ; + schema1:description """# Summary\r +\r +Jupyter Notebook containing a tutorial to illustrate the process of ligand parameterization for a small molecule, step by step, using the BioExcel Building Blocks library (biobb). The particular example used is the Sulfasalazine protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +OpenBabel and ACPype packages are used to add hydrogens, energetically minimize the structure, and generate parameters for the GROMACS package. With Generalized Amber Force Field (GAFF) forcefield and AM1-BCC charges.\r +\r +\r +# Parameters\r +\r +## Inputs: \r +\r +\r +* **ligandCode**: 3-letter code of the ligand structure (e.g. IBP)\r +\r +* **mol_charge**: Molecule net charge (e.g. -1)\r +\r +* **pH**: Acidity or alkalinity for the small molecule. Hydrogen atoms will be added according to this pH. (e.g. 
7.4)\r +\r +## Outputs\r +\r +\r +* **IBPparams.gro**: Structure of the parameterized ligand in gro (GROMACS) format.\r +\r +* **IBPparams.top**: Topology of the parameterized ligand, including a reference to the IBPparams.itp.\r +\r +* **IBPparams.itp**: Include Topology File (itp) of the parameterized ligand, including the parameters information: bonds, angles, dihedrals, etc.\r +\r +# Additional Resources\r +\r +* [Tutorial Page for this workflow](http://mmb.irbbarcelona.org/biobb/availability/tutorials/ligand-parameterization)\r +\r +* [GitHub repository](https://github.com/bioexcel/biobb_wf_ligand_parameterization)\r +\r +* [View in Binder](https://mybinder.org/v2/gh/bioexcel/biobb_wf_ligand_parameterization/master?filepath=biobb_wf_ligand_parameterization%2Fnotebooks%2Fbiobb_ligand_parameterization_tutorial.ipynb)\r +\r +* [Documentation](https://biobb-wf-ligand-parameterization.readthedocs.io/en/latest/index.html)""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/54?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/54?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-05-30T07:18:31.610037" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """MGnify (http://www.ebi.ac.uk/metagenomics) provides a free to use platform for the assembly, analysis and archiving of microbiome data derived from sequencing microbial populations that are present in particular environments. Over the past 2 years, MGnify (formerly EBI Metagenomics) has more than doubled the number of publicly available analysed datasets held within the resource. Recently, an updated approach to data analysis has been unveiled (version 5.0), replacing the previous single pipeline with multiple analysis pipelines that are tailored according to the input data, and that are formally described using the Common Workflow Language, enabling greater provenance, reusability, and reproducibility. MGnify's new analysis pipelines offer additional approaches for taxonomic assertions based on ribosomal internal transcribed spacer regions (ITS1/2) and expanded protein functional annotations. Biochemical pathways and systems predictions have also been added for assembled contigs. MGnify's growing focus on the assembly of metagenomic data has also seen the number of datasets it has assembled and analysed increase six-fold. The non-redundant protein database constructed from the proteins encoded by these assemblies now exceeds 1 billion sequences. 
Meanwhile, a newly developed contig viewer provides fine-grained visualisation of the assembled contigs and their enriched annotations.\r +\r +Documentation: https://docs.mgnify.org/en/latest/analysis.html#raw-reads-analysis-pipeline""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.362.1" ; + schema1:isBasedOn "https://github.com/EBI-Metagenomics/pipeline-v5.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for MGnify - raw-reads analysis pipeline" ; + schema1:sdDatePublished "2024-07-12 13:35:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/362/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 57591 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6196 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2022-06-07T08:40:40Z" ; + schema1:dateModified "2023-01-16T14:01:12Z" ; + schema1:description """MGnify (http://www.ebi.ac.uk/metagenomics) provides a free to use platform for the assembly, analysis and archiving of microbiome data derived from sequencing microbial populations that are present in particular environments. Over the past 2 years, MGnify (formerly EBI Metagenomics) has more than doubled the number of publicly available analysed datasets held within the resource. Recently, an updated approach to data analysis has been unveiled (version 5.0), replacing the previous single pipeline with multiple analysis pipelines that are tailored according to the input data, and that are formally described using the Common Workflow Language, enabling greater provenance, reusability, and reproducibility. 
MGnify's new analysis pipelines offer additional approaches for taxonomic assertions based on ribosomal internal transcribed spacer regions (ITS1/2) and expanded protein functional annotations. Biochemical pathways and systems predictions have also been added for assembled contigs. MGnify's growing focus on the assembly of metagenomic data has also seen the number of datasets it has assembled and analysed increase six-fold. The non-redundant protein database constructed from the proteins encoded by these assemblies now exceeds 1 billion sequences. Meanwhile, a newly developed contig viewer provides fine-grained visualisation of the assembled contigs and their enriched annotations.\r +\r +Documentation: https://docs.mgnify.org/en/latest/analysis.html#raw-reads-analysis-pipeline""" ; + schema1:image ; + schema1:keywords "Workflows, CWL, Metagenomics" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "MGnify - raw-reads analysis pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/362?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# COnSensus Interaction Network InFErence Service\r +Inference framework for reconstructing networks using a consensus approach between multiple methods and data sources.\r +\r +![alt text](https://github.com/PhosphorylatedRabbits/cosifer/raw/master/docs/_static/logo.png)\r +\r +## Reference\r +[Manica, Matteo, Charlotte, Bunne, Roland, Mathis, Joris, Cadow, Mehmet Eren, Ahsen, Gustavo A, Stolovitzky, and María Rodríguez, Martínez. 
"COSIFER: a python package for the consensus inference of molecular interaction networks".Bioinformatics (2020)](https://doi.org/10.1093/bioinformatics/btaa942).""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/118?version=1" ; + schema1:isBasedOn "https://github.com/inab/ipc_workflows/tree/main/cosifer/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for COSIFER" ; + schema1:sdDatePublished "2024-07-12 13:33:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/118/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 926 ; + schema1:creator , + ; + schema1:dateCreated "2021-05-05T15:50:31Z" ; + schema1:dateModified "2023-04-21T11:04:20Z" ; + schema1:description """# COnSensus Interaction Network InFErence Service\r +Inference framework for reconstructing networks using a consensus approach between multiple methods and data sources.\r +\r +![alt text](https://github.com/PhosphorylatedRabbits/cosifer/raw/master/docs/_static/logo.png)\r +\r +## Reference\r +[Manica, Matteo, Charlotte, Bunne, Roland, Mathis, Joris, Cadow, Mehmet Eren, Ahsen, Gustavo A, Stolovitzky, and María Rodríguez, Martínez. "COSIFER: a python package for the consensus inference of molecular interaction networks".Bioinformatics (2020)](https://doi.org/10.1093/bioinformatics/btaa942).""" ; + schema1:image ; + schema1:keywords "cosifer, cancer, pediatric, rna-seq" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "COSIFER" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/118?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + ; + ns1:output . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 7284 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Description: SSP-based RCP scenario with high radiative forcing by the end of century. Following approximately RCP8.5 global forcing pathway with SSP5 socioeconomic conditions. Concentration-driven.\r +Rationale: the scenario represents the high end of plausible future pathways. SSP5 is the only SSP with emissions high enough to produce the 8.5 W/m2 level of forcing in 2100.\r +\r +This workflow is answering to the following scientific question:\r +- Is it worth investing in artificial snowmaking equipment at RATECE-PLANICA?""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/47?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for RATECE-PLANICA ski station (Slovenia) under CMIP-6 SSP585 condition" ; + schema1:sdDatePublished "2024-07-12 13:37:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/47/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 834994 ; + schema1:dateCreated "2020-07-23T18:33:44Z" ; + schema1:dateModified "2023-01-16T13:44:15Z" ; + schema1:description """Description: SSP-based RCP scenario with high radiative forcing by the end of century. Following approximately RCP8.5 global forcing pathway with SSP5 socioeconomic conditions. Concentration-driven.\r +Rationale: the scenario represents the high end of plausible future pathways. 
SSP5 is the only SSP with emissions high enough to produce the 8.5 W/m2 level of forcing in 2100.\r +\r +This workflow is answering to the following scientific question:\r +- Is it worth investing in artificial snowmaking equipment at RATECE-PLANICA?""" ; + schema1:keywords "Climate, jupyter, cmip6" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "RATECE-PLANICA ski station (Slovenia) under CMIP-6 SSP585 condition" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/47?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +****\r +\r +About this workflow:\r +\r +* Input: merged_transcriptomes.fasta. \r +* Runs TransDecoder to produce longest_transcripts.fasta\r +* (Runs both the LongOrfs and Predict parts together. Default settings except Long Orfs options: -m =20)\r +* Runs Busco on output. """ ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.879.1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Extract transcripts - TSI" ; + schema1:sdDatePublished "2024-07-12 13:22:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/879/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9310 ; + schema1:creator , + ; + schema1:dateCreated "2024-05-08T07:15:41Z" ; + schema1:dateModified "2024-05-09T04:08:31Z" ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +****\r +\r +About this workflow:\r +\r +* Input: merged_transcriptomes.fasta. \r +* Runs TransDecoder to produce longest_transcripts.fasta\r +* (Runs both the LongOrfs and Predict parts together. Default settings except Long Orfs options: -m =20)\r +* Runs Busco on output. """ ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "TSI-annotation" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Extract transcripts - TSI" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/879?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 314187 . + + a schema1:Dataset ; + schema1:datePublished "2024-07-10T15:58:48.295940" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/amr_gene_detection" ; + schema1:license "GPL-3.0-or-later" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "amr_gene_detection/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Global Run-On sequencing analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1004?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/nascent" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nascent" ; + schema1:sdDatePublished "2024-07-12 13:20:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1004/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10409 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:06Z" ; + schema1:dateModified "2024-06-11T12:55:06Z" ; + schema1:description "Global Run-On sequencing analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1004?version=4" ; + schema1:keywords "gro-seq, nascent, pro-seq, rna, transcription, tss" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nascent" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1004?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A mapping-based pipeline for creating a phylogeny from bacterial whole genome sequences" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/967?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/bactmap" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bactmap" ; + schema1:sdDatePublished "2024-07-12 13:22:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/967/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6091 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:43Z" ; + schema1:dateModified "2024-06-11T12:54:43Z" ; + schema1:description "A mapping-based pipeline for creating a phylogeny from bacterial whole genome sequences" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/967?version=2" ; + schema1:keywords "bacteria, bacterial, bacterial-genome-analysis, Genomics, mapping, phylogeny, tree" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bactmap" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/967?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-protein-ligand-complex-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). 
\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/295?version=2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_protein_complex_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:name "Research Object Crate for Galaxy Protein Ligand Complex MD Setup" ; + schema1:sdDatePublished "2024-07-12 13:35:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/295/ro_crate?version=2" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Somatic-ShortV @ NCI-Gadi is a variant calling pipeline that calls somatic short variants (SNPs and indels) from tumour and matched normal BAM files following [GATK's Best Practice Workflow](https://gatk.broadinstitute.org/hc/en-us/articles/360035894731-Somatic-short-variant-discovery-SNVs-Indels-). This workflow is designed for the National Computational Infrastructure's (NCI) Gadi supercompter, leveraging multiple nodes on NCI Gadi to run all stages of the workflow in parallel. 
\r +\r +Infrastructure\\_deployment\\_metadata: Gadi (NCI)""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.148.1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/Somatic-ShortV" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Somatic-ShortV @ NCI-Gadi" ; + schema1:sdDatePublished "2024-07-12 13:36:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/148/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 246369 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 20568 ; + schema1:creator , + , + ; + schema1:dateCreated "2021-08-18T23:14:55Z" ; + schema1:dateModified "2023-01-16T13:51:45Z" ; + schema1:description """Somatic-ShortV @ NCI-Gadi is a variant calling pipeline that calls somatic short variants (SNPs and indels) from tumour and matched normal BAM files following [GATK's Best Practice Workflow](https://gatk.broadinstitute.org/hc/en-us/articles/360035894731-Somatic-short-variant-discovery-SNVs-Indels-). This workflow is designed for the National Computational Infrastructure's (NCI) Gadi supercompter, leveraging multiple nodes on NCI Gadi to run all stages of the workflow in parallel. \r +\r +Infrastructure\\_deployment\\_metadata: Gadi (NCI)""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "GATK4, SNPs, INDELs, Somatic, variant_calling, Mutect2, NCI, NCI Gadi, Gadi, cancer, tumour, NCI-Gadi, scalable, VCF" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Somatic-ShortV @ NCI-Gadi" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/148?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-07-12 13:20:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=10" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13140 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:59Z" ; + schema1:dateModified "2024-06-11T12:54:59Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=10" ; + schema1:version 10 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.261.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_complex_md_setup/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:36:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/261/ro_crate?version=2" . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 158190 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 32274 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-09T06:44:44Z" ; + schema1:dateModified "2023-06-09T07:10:36Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/261?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_complex_md_setup/cwl/workflow.cwl" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Automated quantitative analysis of DIA proteomics mass spectrometry measurements." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/980?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/diaproteomics" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/diaproteomics" ; + schema1:sdDatePublished "2024-07-12 13:21:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/980/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6252 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "Automated quantitative analysis of DIA proteomics mass spectrometry measurements." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/980?version=6" ; + schema1:keywords "data-independent-proteomics, dia-proteomics, openms, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/diaproteomics" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/980?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 41971 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 299 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.0.0" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 298 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.0.1" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 298 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.0.2" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 301 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.0.3" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 296 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.1.0" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 299 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.1.1" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 299 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.1.2" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 298 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.1.3" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 296 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.2.0" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 300 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.2.1" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 300 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.2.2" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 300 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.2.3" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 298 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.3.0" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 304 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.3.1" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 300 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.3.2" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 299 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.3.3" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 298 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.0.0" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 293 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.0.1" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 302 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.0.2" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 301 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.0.3" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 300 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.1.0" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 297 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.1.1" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 299 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.1.2" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 299 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.1.3" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 299 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.2.0" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 297 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.2.1" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 302 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.2.2" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 300 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.2.3" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 296 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.3.0" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 297 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.3.1" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 296 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.3.2" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 297 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.3.3" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:Dataset ; + schema1:datePublished "2024-04-29T15:54:18.200332" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Purge-duplicate-contigs-VGP6/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicate-contigs-VGP6/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:version "0.3.8" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. 
Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. 
\r +- At the **biological level**, it is important to link observed **conformational ensembles**, to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDKe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.490.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_flexdyn/galaxy" ; + schema1:license "other-open" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Protein conformational ensembles generation" ; + schema1:sdDatePublished "2024-07-12 13:33:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/490/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 92310 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-01T09:50:55Z" ; + schema1:dateModified "2023-06-01T09:53:56Z" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. 
Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. 
\r +- At the **biological level**, it is important to link observed **conformational ensembles**, to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDKe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "" ; + schema1:name "Galaxy Protein conformational ensembles generation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_flexdyn/galaxy/biobb_wf_flexdyn.ga" ; + schema1:version 1 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2021-12-20T17:04:47.631291" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-ivar-analysis" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-pe-illumina-artic-ivar-analysis/SARS-COV-2-ILLUMINA-AMPLICON-IVAR-PANGOLIN-NEXTCLADE" ; + schema1:sdDatePublished "2021-12-21 03:01:00 +0000" ; + schema1:softwareVersion "v0.2.2" . 
+ + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """This Galaxy-E workflow was made from the ["Cleaning GBIF data for the use in biogeography" tutorial](https://ropensci.github.io/CoordinateCleaner/articles/Cleaning_GBIF_data_with_CoordinateCleaner.html) and allows to:\r +- Use CoordinateCleaner to automatically flag problematic records\r +- Use GBIF provided meta-data to improve coordinate quality, tailored to your downstream analyses\r +- Use automated cleaning algorithms of CoordinateCleaner to identify problematic contributing datasets\r +- Visualize data on a map""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/404?version=1" ; + schema1:isBasedOn "https://ecology.usegalaxy.eu/u/ylebras/w/gbif-data-quality-check-and-filtering-workflow-feb-2020" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for GBIF data Quality check and filtering workflow Feb-2020" ; + schema1:sdDatePublished "2024-07-12 13:34:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/404/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16297 ; + schema1:creator ; + schema1:dateCreated "2022-11-18T14:39:27Z" ; + schema1:dateModified "2023-11-09T21:03:31Z" ; + schema1:description """This Galaxy-E workflow was made from the ["Cleaning GBIF data for the use in biogeography" tutorial](https://ropensci.github.io/CoordinateCleaner/articles/Cleaning_GBIF_data_with_CoordinateCleaner.html) and allows to:\r +- Use CoordinateCleaner to automatically flag problematic records\r +- Use GBIF provided meta-data to improve coordinate quality, tailored to your downstream analyses\r +- Use automated cleaning algorithms of CoordinateCleaner to identify problematic contributing datasets\r +- Visualize data on a map""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "GBIF data Quality check and filtering workflow Feb-2020" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/404?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Genes and transcripts annotation with Isoseq using uLTRA and TAMA" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/993?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/isoseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/isoseq" ; + schema1:sdDatePublished "2024-07-12 13:21:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/993/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7742 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:57Z" ; + schema1:dateModified "2024-06-11T12:54:57Z" ; + schema1:description "Genes and transcripts annotation with Isoseq using uLTRA and TAMA" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/993?version=6" ; + schema1:keywords "isoseq, isoseq-3, rna, tama, ultra" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/isoseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/993?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Conformational Transitions calculations tutorial using BioExcel Building Blocks (biobb) and GOdMD\r +\r +This tutorial aims to illustrate the process of computing a conformational transition between two known structural conformations of a protein, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r 
+![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.550.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_godmd/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein Conformational Transitions calculations tutorial" ; + schema1:sdDatePublished "2024-07-12 13:32:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/550/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3599 ; + schema1:creator , + ; + schema1:dateCreated "2023-08-02T10:06:40Z" ; + schema1:dateModified "2023-08-02T10:10:41Z" ; + schema1:description """# Protein Conformational Transitions calculations tutorial using BioExcel Building Blocks (biobb) and GOdMD\r +\r +This tutorial aims to illustrate the process of computing a conformational transition between two known structural conformations of a protein, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r 
+![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein Conformational Transitions calculations tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_godmd/python/workflow.py" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-protein-ligand-complex-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). 
\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.295.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_protein_complex_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Protein Ligand Complex MD Setup" ; + schema1:sdDatePublished "2024-07-12 13:35:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/295/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 90663 ; + schema1:creator , + ; + schema1:dateCreated "2023-05-03T13:46:26Z" ; + schema1:dateModified "2023-05-03T13:47:32Z" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. 
Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-protein-ligand-complex-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/295?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Protein Ligand Complex MD Setup" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_protein_complex_md_setup/galaxy/biobb_wf_protein_complex_md_setup.ga" ; + schema1:version 3 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:description """# CroMaSt: A workflow for domain family curation through cross-mapping of structural instances between protein domain databases\r +\r +CroMaSt (**Cro**ss **Ma**pper of domain **St**ructural instances) is an automated iterative workflow to clarify domain definition by cross-mapping of domain structural instances between domain databases. CroMaSt (for Cross-Mapper of domain Structural instances) will classify all structural instances of a given domain type into 3 different categories (core, true and domain-like). \r +\r +\r +## Requirements\r +1. [Conda](https://docs.conda.io/projects/conda/en/latest/) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html)\r +2. [Kpax](http://kpax.loria.fr/download.php) \r +Download and install conda (or Miniconda) and Kpax by following the instructions from their official site.\r +\r +\r +## Get it running \r +(Considering the requirements are already met)\r +\r +1. Clone the repository and change the directory\r +\r +```\r +git clone https://gitlab.inria.fr/capsid.public_codes/CroMaSt.git\r +cd CroMaSt\r +```\r +\r +2. Create the conda environment for the workflow\r +```\r +conda env create --file yml/environment.yml\r +conda activate CroMaSt\r +```\r +\r +3. Change the path of variables in paramter file\r +```\r +sed -i 's/\\/home\\/hdhondge\\/CroMaSt\\//\\/YOUR\\/PATH\\/TO_CroMaSt\\//g' yml/CroMaSt_input.yml \r +```\r +\r +4. Create the directory to store files from PDB and SIFTS (if not already)\r +```\r +mkdir PDB_files SIFTS\r +```\r +\r +5. Download the source input data\r +```\r +cwl-runner Tools/download_data.cwl yml/download_data.yml\r +```\r +\r +## Basic example\r +\r +### 1. 
First, we will run the workflow for the KH domain with family identifiers `RRM_1` and `RRM` in Pfam and CATH, respectively.\r +Run the workflow -\r +\r +```\r +cwl-runner --parallel --outdir=Results/ CroMaSt.cwl yml/CroMaSt_input.yml\r +```\r +\r +### 2. Once the iteration is complete, check the `new_param.yml` file from the `outputdir` (Results), if there is any family identifier in either `pfam` or `cath`; run the next iteration using following command (Until there is no new families explored by workflow) -\r +\r +```\r +cwl-runner --parallel --outdir=Results/ CroMaSt.cwl Results/new_param.yml\r +```\r + \r +### **Extra:** Start the workflow with multiple families from one or both databases \r +If you would like to start the workflow with multiple families from one or both databases, then simply add a comma in between two family identifiers. \r +```\r +pfam: ['PF00076', 'PF08777']\r +cath: ['3.30.70.330']\r +```\r +\r +- **Pro Tip**: Don't forget to give different path to `--outdir` option while running the workflow multiple times or at least move the results to some other location after first run.\r +\r +## Run the workflow for protein domain of your choice \r +### 1. You can run the workflow for the domain of your choice by simply changing the family identifers in `yml/CroMaSt_input.yml` file.\r +\r +Simply replace the following values of family identifiers (for pfam and cath) with the family identifiers of your choice in `yml/CroMaSt_input.yml` file. \r +```\r +pfam: ['PF00076']\r +cath: ['3.30.70.330']\r +```\r +\r +\r +\r +## Data files used in current version are as follows:\r +**Files in Data directory can be downloaded as follows**:\r +\r +1. File used from Pfam database: [pdbmap.gz](http://ftp.ebi.ac.uk/pub/databases/Pfam/releases/Pfam33.0/pdbmap.gz) \r +\r +2. 
File used from CATH database: [cath-domain-description-file.txt](ftp://orengoftp.biochem.ucl.ac.uk:21/cath/releases/latest-release/cath-classification-data/cath-domain-description-file.txt) \r +\r +3. Obsolete entries from RCSB PDB\r +[obsolete_PDB_entry_ids.txt](https://data.rcsb.org/rest/v1/holdings/removed/entry_ids) \r +\r +\r +CATH Version - 4.3.0 (Ver_Date - 11-Sep-2019) [FTP site](ftp://orengoftp.biochem.ucl.ac.uk/cath/releases/latest-release/cath-classification-data/) \r +Pfam Version - 33.0 (Ver_Date - 18-Mar-2020) [FTP site](http://ftp.ebi.ac.uk/pub/databases/Pfam/releases/Pfam33.0/) \r +\r +## Reference\r +```\r +Poster - \r +1. Hrishikesh Dhondge, Isaure Chauvot de Beauchêne, Marie-Dominique Devignes. CroMaSt: A workflow for domain family curation through cross-mapping of structural instances between protein domain databases. 21st European Conference on Computational Biology, Sep 2022, Sitges, Spain. ⟨hal-03789541⟩\r +\r +```\r +\r +## Acknowledgements\r +This project has received funding from the Marie Skłodowska-Curie Innovative Training Network (MSCA-ITN) RNAct supported by European Union’s Horizon 2020 research and innovation programme under granta greement No 813239.\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.390.1" ; + schema1:isBasedOn "https://github.com/HrishiDhondge/CroMaSt.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CroMaSt: A workflow for domain family curation through cross-mapping of structural instances between protein domain databases" ; + schema1:sdDatePublished "2024-07-12 13:33:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/390/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 21416 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-09-28T11:34:54Z" ; + schema1:dateModified "2023-01-16T14:02:36Z" ; + schema1:description """# CroMaSt: A workflow for domain family curation through cross-mapping of structural instances between protein domain databases\r +\r +CroMaSt (**Cro**ss **Ma**pper of domain **St**ructural instances) is an automated iterative workflow to clarify domain definition by cross-mapping of domain structural instances between domain databases. CroMaSt (for Cross-Mapper of domain Structural instances) will classify all structural instances of a given domain type into 3 different categories (core, true and domain-like). \r +\r +\r +## Requirements\r +1. [Conda](https://docs.conda.io/projects/conda/en/latest/) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html)\r +2. [Kpax](http://kpax.loria.fr/download.php) \r +Download and install conda (or Miniconda) and Kpax by following the instructions from their official site.\r +\r +\r +## Get it running \r +(Considering the requirements are already met)\r +\r +1. Clone the repository and change the directory\r +\r +```\r +git clone https://gitlab.inria.fr/capsid.public_codes/CroMaSt.git\r +cd CroMaSt\r +```\r +\r +2. Create the conda environment for the workflow\r +```\r +conda env create --file yml/environment.yml\r +conda activate CroMaSt\r +```\r +\r +3. Change the path of variables in paramter file\r +```\r +sed -i 's/\\/home\\/hdhondge\\/CroMaSt\\//\\/YOUR\\/PATH\\/TO_CroMaSt\\//g' yml/CroMaSt_input.yml \r +```\r +\r +4. Create the directory to store files from PDB and SIFTS (if not already)\r +```\r +mkdir PDB_files SIFTS\r +```\r +\r +5. Download the source input data\r +```\r +cwl-runner Tools/download_data.cwl yml/download_data.yml\r +```\r +\r +## Basic example\r +\r +### 1. 
First, we will run the workflow for the KH domain with family identifiers `RRM_1` and `RRM` in Pfam and CATH, respectively.\r +Run the workflow -\r +\r +```\r +cwl-runner --parallel --outdir=Results/ CroMaSt.cwl yml/CroMaSt_input.yml\r +```\r +\r +### 2. Once the iteration is complete, check the `new_param.yml` file from the `outputdir` (Results), if there is any family identifier in either `pfam` or `cath`; run the next iteration using following command (Until there is no new families explored by workflow) -\r +\r +```\r +cwl-runner --parallel --outdir=Results/ CroMaSt.cwl Results/new_param.yml\r +```\r + \r +### **Extra:** Start the workflow with multiple families from one or both databases \r +If you would like to start the workflow with multiple families from one or both databases, then simply add a comma in between two family identifiers. \r +```\r +pfam: ['PF00076', 'PF08777']\r +cath: ['3.30.70.330']\r +```\r +\r +- **Pro Tip**: Don't forget to give different path to `--outdir` option while running the workflow multiple times or at least move the results to some other location after first run.\r +\r +## Run the workflow for protein domain of your choice \r +### 1. You can run the workflow for the domain of your choice by simply changing the family identifers in `yml/CroMaSt_input.yml` file.\r +\r +Simply replace the following values of family identifiers (for pfam and cath) with the family identifiers of your choice in `yml/CroMaSt_input.yml` file. \r +```\r +pfam: ['PF00076']\r +cath: ['3.30.70.330']\r +```\r +\r +\r +\r +## Data files used in current version are as follows:\r +**Files in Data directory can be downloaded as follows**:\r +\r +1. File used from Pfam database: [pdbmap.gz](http://ftp.ebi.ac.uk/pub/databases/Pfam/releases/Pfam33.0/pdbmap.gz) \r +\r +2. 
File used from CATH database: [cath-domain-description-file.txt](ftp://orengoftp.biochem.ucl.ac.uk:21/cath/releases/latest-release/cath-classification-data/cath-domain-description-file.txt) \r +\r +3. Obsolete entries from RCSB PDB\r +[obsolete_PDB_entry_ids.txt](https://data.rcsb.org/rest/v1/holdings/removed/entry_ids) \r +\r +\r +CATH Version - 4.3.0 (Ver_Date - 11-Sep-2019) [FTP site](ftp://orengoftp.biochem.ucl.ac.uk/cath/releases/latest-release/cath-classification-data/) \r +Pfam Version - 33.0 (Ver_Date - 18-Mar-2020) [FTP site](http://ftp.ebi.ac.uk/pub/databases/Pfam/releases/Pfam33.0/) \r +\r +## Reference\r +```\r +Poster - \r +1. Hrishikesh Dhondge, Isaure Chauvot de Beauchêne, Marie-Dominique Devignes. CroMaSt: A workflow for domain family curation through cross-mapping of structural instances between protein domain databases. 21st European Conference on Computational Biology, Sep 2022, Sitges, Spain. ⟨hal-03789541⟩\r +\r +```\r +\r +## Acknowledgements\r +This project has received funding from the Marie Skłodowska-Curie Innovative Training Network (MSCA-ITN) RNAct supported by European Union’s Horizon 2020 research and innovation programme under granta greement No 813239.\r +\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/390?version=1" ; + schema1:keywords "Pfam, CATH, Protein domains, data integration" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "CroMaSt: A workflow for domain family curation through cross-mapping of structural instances between protein domain databases" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/390?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 103397 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# Inclusion Body Myositis Active Subnetwork Identification Workflow\r +\r +Workflow for Creating a large disease network from various datasets and databases for IBM, and applying the active subnetwork identification method MOGAMUN.\r +\r +## Links\r +[WorkflowHub](https://workflowhub.eu/workflows/681)\r +\r +[Docker Image](https://hub.docker.com/R/jdwijnbergen/multi-omics_asi)\r +\r +[Docker Image build requirements](https://doi.org/10.5281/zenodo.10210364)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/681?version=3" ; + schema1:isBasedOn "https://github.com/jdwijnbergen/IBM_ASI_workflow.git" ; + schema1:license "notspecified" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Inclusion Body Myositis Active Subnetwork Identification Workflow" ; + schema1:sdDatePublished "2024-07-12 13:24:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/681/ro_crate?version=3" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 27177 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6953 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-27T15:59:18Z" ; + schema1:dateModified "2023-11-27T15:59:18Z" ; + schema1:description """# Inclusion Body Myositis Active Subnetwork Identification Workflow\r +\r +Workflow for Creating a large disease network from various datasets and databases for IBM, and applying the active subnetwork identification method MOGAMUN.\r +\r +## Links\r +[WorkflowHub](https://workflowhub.eu/workflows/681)\r +\r +[Docker Image](https://hub.docker.com/R/jdwijnbergen/multi-omics_asi)\r +\r +[Docker Image build requirements](https://doi.org/10.5281/zenodo.10210364)\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/681?version=6" ; + schema1:keywords "Bioinformatics, CWL, Genomics, Transcriptomics, Protein-Protein Interaction" ; + schema1:license "https://choosealicense.com/no-permission/" ; + schema1:name "Inclusion Body Myositis Active Subnetwork Identification Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/681?version=3" ; + schema1:version 3 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1021?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/scrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/scrnaseq" ; + schema1:sdDatePublished "2024-07-12 13:18:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1021/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8620 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:17Z" ; + schema1:dateModified "2024-06-11T12:55:17Z" ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:keywords "10x-genomics, 10xgenomics, alevin, bustools, Cellranger, kallisto, rna-seq, single-cell, star-solo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/scrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1021?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A Nanostring nCounter analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1003?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/nanostring" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nanostring" ; + schema1:sdDatePublished "2024-07-12 13:20:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1003/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8701 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:05Z" ; + schema1:dateModified "2024-06-11T12:55:05Z" ; + schema1:description "A Nanostring nCounter analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1003?version=4" ; + schema1:keywords "nanostring, nanostringnorm" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nanostring" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1003?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """\r +# Summary \r +\r +This notebook demonstrates how to recreate lineages published in the paper [Live imaging of remyelination in the adult mouse corpus callosum](https://www.pnas.org/content/118/28/e2025795118) and available at [idr0113-bottes-opcclones](https://idr.openmicroscopy.org/search/?query=Name:idr0113).\r +\r +The lineage is created from the metadata associated to the specified image.\r +\r +To load the data from the Image Data Resource, we use:\r +\r +* the [Python API](https://docs.openmicroscopy.org/omero/latest/developers/Python.html)\r +* the [JSON API](https://docs.openmicroscopy.org/omero/latest/developers/json-api.html)\r +\r +LPC-induced focal demyelination and in vivo imaging of genetically targeted OPCs and their progeny to describe the cellular dynamics of OPC-mediated remyelination in the CC.\r +\r +Longitudinal observation of OPCs and their progeny for up to two months reveals functional inter- and intraclonal heterogeneity and provides insights into the cell division capacity and the migration/differentiation dynamics of OPCs and their daughter cells in vivo.\r +\r +The majority 
of the clones remained quiescent or divided only few times. Some OPCs were highly proliferative. Large clones showed longer times between consecutive divisions compared to low proliferating clones.\r +\r +OPCs show distinct modes of cell division: from symmetric proliferative, to symmetric differentiating and also asymmetric cell division, where the OPC is self-renewed while the other daughter cell differentiates.\r +\r +Only 16.46% of OPC-derived cells differentiated into mature, remyelinating oligodendrocytes, with OPCs born at early divisions showing a higher probability to survive and to terminally differentiate.\r +\r +Cell death was associated with distinct cell division histories of different clones, with higher probability of death when generated at later divisions.\r +\r +Migratory behaviour was restricted to progenitors. Successfully differentiating progenitors moved shorter distances per day compared to dying cells.\r +\r +# Inputs\r +Parameters needed to configure the workflow:\r +\r +**imageId**: Identifier of an image in IDR.\r +\r +# Ouputs\r +Output file generated:\r +\r +**lineage_imageId.pdf**: A PDF with the generated lineage. Options to save as `png` or `svg` are also available.\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/267?version=1" ; + schema1:isBasedOn "https://github.com/IDR/idr0113-bottes-opcclones" ; + schema1:license "BSD-2-Clause" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Cell Lineage in the adult mouse corpus callosum" ; + schema1:sdDatePublished "2024-07-12 13:36:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/267/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 234305 ; + schema1:creator ; + schema1:dateCreated "2022-02-08T11:36:16Z" ; + schema1:dateModified "2023-01-16T13:57:29Z" ; + schema1:description """\r +# Summary \r +\r +This notebook demonstrates how to recreate lineages published in the paper [Live imaging of remyelination in the adult mouse corpus callosum](https://www.pnas.org/content/118/28/e2025795118) and available at [idr0113-bottes-opcclones](https://idr.openmicroscopy.org/search/?query=Name:idr0113).\r +\r +The lineage is created from the metadata associated to the specified image.\r +\r +To load the data from the Image Data Resource, we use:\r +\r +* the [Python API](https://docs.openmicroscopy.org/omero/latest/developers/Python.html)\r +* the [JSON API](https://docs.openmicroscopy.org/omero/latest/developers/json-api.html)\r +\r +LPC-induced focal demyelination and in vivo imaging of genetically targeted OPCs and their progeny to describe the cellular dynamics of OPC-mediated remyelination in the CC.\r +\r +Longitudinal observation of OPCs and their progeny for up to two months reveals functional inter- and intraclonal heterogeneity and provides insights into the cell division capacity and the migration/differentiation dynamics of OPCs and their daughter cells in vivo.\r +\r +The majority of the clones remained quiescent or divided only few times. Some OPCs were highly proliferative. 
Large clones showed longer times between consecutive divisions compared to low proliferating clones.\r +\r +OPCs show distinct modes of cell division: from symmetric proliferative, to symmetric differentiating and also asymmetric cell division, where the OPC is self-renewed while the other daughter cell differentiates.\r +\r +Only 16.46% of OPC-derived cells differentiated into mature, remyelinating oligodendrocytes, with OPCs born at early divisions showing a higher probability to survive and to terminally differentiate.\r +\r +Cell death was associated with distinct cell division histories of different clones, with higher probability of death when generated at later divisions.\r +\r +Migratory behaviour was restricted to progenitors. Successfully differentiating progenitors moved shorter distances per day compared to dying cells.\r +\r +# Inputs\r +Parameters needed to configure the workflow:\r +\r +**imageId**: Identifier of an image in IDR.\r +\r +# Ouputs\r +Output file generated:\r +\r +**lineage_imageId.pdf**: A PDF with the generated lineage. Options to save as `png` or `svg` are also available.\r +\r +""" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/BSD-2-Clause" ; + schema1:name "Cell Lineage in the adult mouse corpus callosum" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/IDR/idr0113-bottes-opcclones/blob/main/notebooks/idr0113_lineage.ipynb" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Generate possible metabolic routes for the production of a target molecule in an organism of choice" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/24?version=1" ; + schema1:isBasedOn "https://galaxy-synbiocad.org/u/mdulac/w/retropath2-3" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for RetroSynthesis" ; + schema1:sdDatePublished "2024-07-12 13:37:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/24/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8427 ; + schema1:creator ; + schema1:dateCreated "2020-05-29T10:08:17Z" ; + schema1:dateModified "2023-01-16T13:41:42Z" ; + schema1:description "Generate possible metabolic routes for the production of a target molecule in an organism of choice" ; + schema1:keywords "Retrosynthesis, pathway prediction, pathway design, Synthetic Biology, metabolic engineering" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "RetroSynthesis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/24?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-07-12 13:18:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5152 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:09Z" ; + schema1:dateModified "2024-06-11T12:55:09Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Simple bacterial assembly and annotation" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/966?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/bacass" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bacass" ; + schema1:sdDatePublished "2024-07-12 13:22:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/966/ro_crate?version=7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12331 ; + schema1:creator ; + schema1:dateCreated "2024-06-13T03:03:06Z" ; + schema1:dateModified "2024-06-13T03:03:06Z" ; + schema1:description "Simple bacterial assembly and annotation" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/966?version=7" ; + schema1:keywords "Assembly, bacterial-genomes, denovo, denovo-assembly, genome-assembly, hybrid-assembly, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bacass" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/966?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Exome Alignment Workflow\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/239?version=1" ; + schema1:isBasedOn "https://github.com/inab/ipc_workflows/tree/main/exome/alignment" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for exome-alignment" ; + schema1:sdDatePublished "2024-07-12 13:36:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/239/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 11999 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2241 ; + schema1:creator ; + schema1:dateCreated "2021-11-19T10:06:37Z" ; + schema1:dateModified "2023-01-16T13:55:02Z" ; + schema1:description """Exome Alignment Workflow\r +""" ; + schema1:image ; + schema1:keywords "cancer, pediatric, Alignment" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "exome-alignment" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/239?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + ; + ns1:output . + + a schema1:Dataset ; + schema1:datePublished "2023-11-09T11:09:07.998474" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/kmer-profiling-hifi-VGP1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "kmer-profiling-hifi-VGP1/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "kmer-profiling-hifi-VGP1/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/kmer-profiling-hifi-VGP1" ; + schema1:version "0.1.3" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/991?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/hlatyping" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hlatyping" ; + schema1:sdDatePublished "2024-07-12 13:18:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/991/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3699 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:56Z" ; + schema1:dateModified "2024-06-11T12:54:56Z" ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/991?version=9" ; + schema1:keywords "DNA, hla, hla-typing, immunology, optitype, personalized-medicine, rna" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hlatyping" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/991?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.195.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_dna_helparms" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Structural DNA helical parameters tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/195/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 72437 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T08:45:43Z" ; + schema1:dateModified "2023-04-14T08:47:02Z" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/195?version=4" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Structural DNA helical parameters tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_dna_helparms/master/biobb_wf_dna_helparms/notebooks/biobb_dna_helparms_tutorial.ipynb" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Simulations and figures supporting the manuscript \"Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake\"" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.511.4" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake" ; + schema1:sdDatePublished "2024-07-12 13:32:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/511/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 332 ; + schema1:creator , + , + , + , + , + ; + schema1:dateCreated "2024-01-11T09:54:04Z" ; + schema1:dateModified "2024-01-11T10:07:57Z" ; + schema1:description "Simulations and figures supporting the manuscript \"Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake\"" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/511?version=4" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/511?version=4" ; + schema1:version 4 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 75460 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Assembly, visualisation and quality control workflow for high fidelity reads built from circular consensus sequence (PacBio HiFi) data.\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/221?version=1" ; + schema1:isBasedOn "https://github.com/AustralianBioCommons/PacBio-HiFi-genome-assembly-using-hifiasm" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PacBio HiFi genome assembly using hifiasm (HiFi genome assembly stage 2)" ; + schema1:sdDatePublished "2024-07-12 13:34:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/221/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 27211 ; + schema1:creator , + ; + schema1:dateCreated "2021-10-26T00:25:23Z" ; + schema1:dateModified "2022-09-20T00:55:45Z" ; + schema1:description """Assembly, visualisation and quality control workflow for high fidelity reads built from circular consensus sequence (PacBio HiFi) data.\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/221?version=2" ; + schema1:isPartOf , + ; + schema1:keywords "FASTQ, hifiasm, HiFi, genome_assembly" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "PacBio HiFi genome assembly using hifiasm (HiFi genome assembly stage 2)" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/221?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 129485 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=17" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-07-12 13:21:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=17" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12455 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:50Z" ; + schema1:dateModified "2024-06-11T12:54:50Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=17" ; + schema1:version 17 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Metagenomic dataset taxonomic classification using kraken2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/124?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for 1: Plant virus detection with kraken2 (SE)" ; + schema1:sdDatePublished "2024-07-12 13:36:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/124/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9835 ; + schema1:dateCreated "2021-06-17T09:14:19Z" ; + schema1:dateModified "2023-02-13T14:06:45Z" ; + schema1:description "Metagenomic dataset taxonomic classification using kraken2" ; + schema1:keywords "Virology, kraken" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "1: Plant virus detection with kraken2 (SE)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/124?version=1" ; + schema1:version 1 ; + ns1:output , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Workflow converts one or multiple bam files back to the fastq format" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/968?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/bamtofastq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for qbic-pipelines/bamtofastq" ; + schema1:sdDatePublished "2024-07-12 13:22:08 +0100" ; + schema1:url "https://workflowhub.eu/workflows/968/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4298 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:44Z" ; + schema1:dateModified "2024-06-11T12:54:44Z" ; + schema1:description "Workflow converts one or multiple bam files back to the fastq format" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/968?version=5" ; + schema1:keywords "bamtofastq, Conversion, cramtofastq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "qbic-pipelines/bamtofastq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/968?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# HiFi *de novo* genome assembly workflow\r +\r +HiFi-assembly-workflow is a bioinformatics pipeline that can be used to analyse Pacbio CCS reads for *de novo* genome assembly using PacBio Circular Consensus Sequencing (CCS) reads. This workflow is implemented in Nextflow and has 3 major sections. \r + \r +Please refer to the following documentation for detailed description of each workflow section:\r + \r +- [Pre-assembly quality control (QC)](https://github.com/AusARG/hifi-assembly-workflow/blob/master/recommendations.md#stage-1-pre-assembly-quality-control)\r +- [Assembly](https://github.com/AusARG/hifi-assembly-workflow/blob/master/recommendations.md#stage-2-assembly)\r +- [Post-assembly QC](https://github.com/AusARG/hifi-assembly-workflow/blob/master/recommendations.md#stage-3-post-assembly-quality-control)\r +\r +## HiFi assembly workflow flowchart\r +\r +![](https://github.com/AusARG/hifi-assembly-workflow/blob/master/workflow.png?raw=true)\r +\r +# Quick Usage:\r +The pipeline has been tested on NCI Gadi and AGRF balder cluster. 
If needed to run on AGRF cluster, please contact us at bioinformatics@agrf.org.au.\r +Please note for running this on NCI Gadi you need access. Please refer to Gadi guidelines for account creation and usage: these can be found at https://opus.nci.org.au/display/Help/Access.\r +\r +Here is an example that can be used to run a phased assembly on Gadi:\r +\r +```\r +Module load nextflow/21.04.3\r +nextflow run Hifi_assembly.nf –bam_folder -profile gadi \r +\r +The workflow accepts 2 mandatory arguments:\r +--bam_folder -- Full Path to the CCS bam files\r +-profile -- gadi/balder/local\r +```\r +\r +Please note that you can either run jobs interactively or submit jobs to the cluster. This is determined by the -profile flag. By passing the gadi tag to the profile argument, the jobs are submitted and run on the cluster.\r +\r +# General recommendations for using the HiFi *de novo* genome assembly workflow\r +\r +## Example local profile usage\r +\r +```\r +Start a screen, submit a job, and run the workflow \r +Screen -S ‘name’\r +\r +qsub -I -qnormal -Pwz54 -lwalltime=48:00:00,ncpus=4,mem=200GB,storage=scratch/wz54+gdata/wz54,wd\r +export MODULEPATH=/apps/Modules/modulefiles:/g/data/wz54/groupResources/modules\r +\r +module load nextflow/21.04.3\r +nextflow run /g/data/wz54/groupResources/scripts/pl/hifi_assembly.nf --bam_folder -profile local\r +\r +#This load the scripts directory to the environmental PATH and load nextflow module\r +module load hifi_assembly/1.0.0 \r +```\r +\r +# Outputs\r +\r +Pipeline generates various files and folders here is a brief description: \r +The pipeline creates a folder called `secondary_analysis` that contains two sub folders named:\r +\r +- `exeReport` \r +- `Results` -- Contains preQC, assembly and postQC analysis files\r +\r +## exeReport\r +This folder contains a computation resource usage summary in various charts and a text file. 
\r +`report.html` provides a comprehensive summary.\r +\r +## Results\r +The `Results` folder contains three sub-directories preQC, assembly and postqc. As the name suggests, outputs from the respective workflow sections are placed in each of these folders.\r +\r +### preQC\r +The following table contains list of files and folder from preQC results\r +\r +| Output folder/file | File | Description |\r +| ------------------ | ---------------- | ------------------------------------------------------------------------------ |\r +| .fa | | Bam files converted to fasta format |\r +| kmer\\_analysis | | Folder containing kmer analysis outputs |\r +| | .jf | k-mer counts from each sample |\r +| | .histo | histogram of k-mer occurrence |\r +| genome\\_profiling | | genomescope profiling outputs |\r +| | summary.txt | Summary metrics of genome scope outputs |\r +| | linear\\_plot.png | Plot showing no. of times a k-mer observed by no. of k-mers with that coverage |\r +\r +\r +### Assembly\r +This folder contains final assembly results in format.\r +\r +- `_primary.fa` - Fasta file containing primary contigs\r +- `_associate.fa` - Fasta file containing associated contigs\r +\r +### postqc\r + \r +The postqc folder contains two sub folders \r +\r +- `assembly_completeness`\r +- `assembly_evaluation`\r +\r +#### assembly_completeness\r +This contains BUSCO evaluation results for primary and associate contig.\r +\r +#### assembly_evaluation\r +Assembly evaluation folder contains various file formats, here is a brief description for each of the outputs.\r +\r +| File | Description |\r +| ----------- | ----------------------------------------------------------------------------------------- |\r +| report.txt | Assessment summary in plain text format |\r +| report.tsv | Tab-separated version of the summary, suitable for spreadsheets (Google Docs, Excel, etc) |\r +| report.tex | LaTeX version of the summary |\r +| icarus.html | Icarus main menu with links to interactive viewers |\r 
+| report.html | HTML version of the report with interactive plots inside |\r +\r +\r +# Infrastructure usage and recommendations\r +\r +### NCI facility access\r +One should have a user account set with NCI to access gadi high performance computational facility. Setting up a NCI account is mentioned in detail at the following URL: https://opus.nci.org.au/display/Help/Setting+up+your+NCI+Account \r + \r +Documentation for a specific infrastructure should go into a infrastructure documentation template\r +https://github.com/AustralianBioCommons/doc_guidelines/blob/master/infrastructure_optimisation.md\r +\r +\r +## Compute resource usage across tested infrastructures\r +\r +| | Computational resource for plant case study |\r +| ------------------------------------- | ------------------------------------------- |\r +| | Time | CPU | Memory | I/O |\r +| Process | duration | realtime | %cpu | peak\\_rss | peak\\_vmem | rchar | wchar |\r +| Converting bam to fasta for sample | 12m 54s | 12m 48s | 99.80% | 5.2 MB | 197.7 MB | 43.3 GB | 50.1 GB |\r +| Generating k-mer counts and histogram | 26m 43s | 26m 36s | 1725.30% | 19.5 GB | 21 GB | 77.2 GB | 27.1 GB |\r +| Profiling genome characteristics | 34.7s | 13.2s | 89.00% | 135 MB | 601.2 MB | 8.5 MB | 845.9 KB |\r +| Denovo assembly | 6h 51m 15s | 6h 51m 11s | 4744.40% | 84.7 GB | 225.6 GB | 1.4 TB | 456 GB |\r +| evaluate\\_assemblies | 5m 18s | 4m 54s | 98.20% | 1.6 GB | 1.9 GB | 13.6 GB | 2.8 GB |\r +| assemblies\\_completeness | 25m 57s | 25m 53s | 2624.20% | 22 GB | 25.2 GB | 624.9 GB | 2.9 GB |\r +\r +\r +| | Computational resource for bird case study |\r +| ------------------------------------- | ------------------------------------------ |\r +| | Time | CPU | Memory | I/O |\r +| Process | duration | realtime | %cpu | peak\\_rss | peak\\_vmem | rchar | wchar |\r +| Converting bam to fasta for sample | 12m 54s | 7m 9s | 86.40% | 5.2 MB | 197.8 MB | 21.5 GB | 27.4 GB |\r +| Generating k-mer counts and histogram | 26m 
43s | 15m 34s | 1687.70% | 10.1 GB | 11.7 GB | 44 GB | 16.6 GB |\r +| Profiling genome characteristics | 34.7s | 1m 15s | 15.30% | 181.7 MB | 562.2 MB | 8.5 MB | 819.1 KB |\r +| De novo assembly | 6h 51m 15s | 9h 2m 47s | 1853.50% | 67.3 GB | 98.4 GB | 1 TB | 395.6 GB |\r +| evaluate assemblies | 5m 18s | 2m 48s | 97.50% | 1.1 GB | 1.4 GB | 8.7 GB | 1.8 GB |\r +| assemblies completeness | 25m 57s | 22m 36s | 2144.00% | 22.2 GB | 25 GB | 389.7 GB | 1.4 GB |\r +\r +\r +# Workflow summaries\r +\r +## Metadata\r +\r +| Metadata field | Pre-assembly quality control | Primary assembly | Post-assembly quality control |\r +| ---------------- | --------------------------------------------------------------------------------- | ------------------ | ----------------------------- |\r +| Version | 1.0 | 1.0 | 1.0 |\r +| Maturity | Production | Production | production |\r +| Creators | Naga, Kenneth | Naga, Kenneth | Naga, Kenneth |\r +| Source | [AusARG/hifi-assembly-workflow](https://github.com/AusARG/hifi-assembly-workflow) |\r +| License | MIT License | MIT License | MIT License |\r +| Workflow manager | NextFlow | NextFlow | NextFlow |\r +| Container | No containers used | No containers used | No containers used |\r +| Install method | Manual | Manual | Manual |\r +\r +\r +## Component tools\r +​\r +| Workflow element | Workflow element version | Workflow title |\r +| --------------------------------- | ------------------------ | ----------------------------- |\r +| Samtools, jellyfish, genomescope | 1.0 | Pre-assembly quality control |\r +| Improved phased assembler (pbipa) | 1.0 | Primary assembly |\r +| Quast and busco | 1.0 | Post-assembly quality control |\r +\r +\r +## Required (minimum) inputs/parameters\r + \r +PATH to HIFI bam folder is the minimum requirement for the processing the pipeline.\r +\r +## Third party tools / dependencies\r +\r +The following packages are used by the pipeline.\r +\r +- `nextflow/21.04.3`\r +- `samtools/1.12`\r +- `jellyfish/2.3.0`\r +- 
`genomescope/2.0`\r +- `ipa/1.3.1`\r +- `quast/5.0.2`\r +- `busco/5.2.2`\r +\r +The following paths contain all modules required for the pipeline.\r +\r +- `/apps/Modules/modulefiles`\r +- `/g/data/wz54/groupResources/modules`\r +\r +---\r +\r +# Help/FAQ/Troubleshooting\r +\r +Direct training and help is available if you are new to HPC and/or new to NCI/Gadi.\r +\r +- Basic information to get started with the NCI Gadi for bioinformatics can be found at https://github.com/AusARG/ABLeS/wiki/temppage.\r +- For NCI support, contact the NCI helpdesk directly at https://www.nci.org.au/users/nci-helpdesk\r +- Queue limits and structure explained at https://opus.nci.org.au/display/Help/4.+PBS+Jobs\r +\r +---\r +\r +# 3rd party Tutorials \r +\r +A tutorial by Andrew Severin on running GenomeScope 1.0 is available here:\r +https://github.com/AusARG/hifi-assembly-workflow.git\r +\r +Improved Phased Assembler tutorial is available at \r +https://github.com/PacificBiosciences/pbbioconda/wiki/Improved-Phased-Assembler\r +\r +Busco tutorial\r +https://wurmlab.com/genomicscourse/2016-SIB/practicals/busco/busco_tutorial\r +\r +---\r +\r +# Licence(s)\r +\r +MIT License\r +\r +Copyright (c) 2022 AusARG\r +\r +Permission is hereby granted, free of charge, to any person obtaining a copy\r +of this software and associated documentation files (the "Software"), to deal\r +in the Software without restriction, including without limitation the rights\r +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r +copies of the Software, and to permit persons to whom the Software is\r +furnished to do so, subject to the following conditions:\r +\r +The above copyright notice and this permission notice shall be included in all\r +copies or substantial portions of the Software.\r +\r +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r +FITNESS FOR A PARTICULAR PURPOSE AND 
NONINFRINGEMENT. IN NO EVENT SHALL THE\r +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r +SOFTWARE.\r +\r +---\r +\r +# Acknowledgements/citations/credits\r +\r +> Jung, H. et al. Twelve quick steps for genome assembly and annotation in the classroom. PLoS Comput. Biol. 16, 1–25 (2020).\r +\r +> 2020, G. A. W. No Title. https://ucdavis-bioinformatics-training.github.io/2020-Genome_Assembly_Workshop/kmers/kmers.\r +\r +> Sović, I. et al. Improved Phased Assembly using HiFi Data. (2020).\r +\r +> Gurevich, A., Saveliev, V., Vyahhi, N. & Tesler, G. QUAST: Quality assessment tool for genome assemblies. Bioinformatics 29, 1072–1075 (2013).\r +\r +> Waterhouse, R. M. et al. BUSCO applications from quality assessments to gene prediction and phylogenomics. Mol. Biol. Evol. 35, 543–548 (2018).\r +\r +---\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/340?version=1" ; + schema1:isBasedOn "https://github.com/AusARG/hifi-assembly-workflow.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for HiFi de novo genome assembly workflow" ; + schema1:sdDatePublished "2024-07-12 13:35:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/340/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2978 ; + schema1:dateCreated "2022-05-10T23:42:38Z" ; + schema1:dateModified "2023-01-16T13:59:52Z" ; + schema1:description """# HiFi *de novo* genome assembly workflow\r +\r +HiFi-assembly-workflow is a bioinformatics pipeline that can be used to analyse Pacbio CCS reads for *de novo* genome assembly using PacBio Circular Consensus Sequencing (CCS) reads. 
This workflow is implemented in Nextflow and has 3 major sections. \r + \r +Please refer to the following documentation for detailed description of each workflow section:\r + \r +- [Pre-assembly quality control (QC)](https://github.com/AusARG/hifi-assembly-workflow/blob/master/recommendations.md#stage-1-pre-assembly-quality-control)\r +- [Assembly](https://github.com/AusARG/hifi-assembly-workflow/blob/master/recommendations.md#stage-2-assembly)\r +- [Post-assembly QC](https://github.com/AusARG/hifi-assembly-workflow/blob/master/recommendations.md#stage-3-post-assembly-quality-control)\r +\r +## HiFi assembly workflow flowchart\r +\r +![](https://github.com/AusARG/hifi-assembly-workflow/blob/master/workflow.png?raw=true)\r +\r +# Quick Usage:\r +The pipeline has been tested on NCI Gadi and AGRF balder cluster. If needed to run on AGRF cluster, please contact us at bioinformatics@agrf.org.au.\r +Please note for running this on NCI Gadi you need access. Please refer to Gadi guidelines for account creation and usage: these can be found at https://opus.nci.org.au/display/Help/Access.\r +\r +Here is an example that can be used to run a phased assembly on Gadi:\r +\r +```\r +Module load nextflow/21.04.3\r +nextflow run Hifi_assembly.nf –bam_folder -profile gadi \r +\r +The workflow accepts 2 mandatory arguments:\r +--bam_folder -- Full Path to the CCS bam files\r +-profile -- gadi/balder/local\r +```\r +\r +Please note that you can either run jobs interactively or submit jobs to the cluster. This is determined by the -profile flag. 
By passing the gadi tag to the profile argument, the jobs are submitted and run on the cluster.\r +\r +# General recommendations for using the HiFi *de novo* genome assembly workflow\r +\r +## Example local profile usage\r +\r +```\r +Start a screen, submit a job, and run the workflow \r +Screen -S ‘name’\r +\r +qsub -I -qnormal -Pwz54 -lwalltime=48:00:00,ncpus=4,mem=200GB,storage=scratch/wz54+gdata/wz54,wd\r +export MODULEPATH=/apps/Modules/modulefiles:/g/data/wz54/groupResources/modules\r +\r +module load nextflow/21.04.3\r +nextflow run /g/data/wz54/groupResources/scripts/pl/hifi_assembly.nf --bam_folder -profile local\r +\r +#This load the scripts directory to the environmental PATH and load nextflow module\r +module load hifi_assembly/1.0.0 \r +```\r +\r +# Outputs\r +\r +Pipeline generates various files and folders here is a brief description: \r +The pipeline creates a folder called `secondary_analysis` that contains two sub folders named:\r +\r +- `exeReport` \r +- `Results` -- Contains preQC, assembly and postQC analysis files\r +\r +## exeReport\r +This folder contains a computation resource usage summary in various charts and a text file. \r +`report.html` provides a comprehensive summary.\r +\r +## Results\r +The `Results` folder contains three sub-directories preQC, assembly and postqc. 
As the name suggests, outputs from the respective workflow sections are placed in each of these folders.\r +\r +### preQC\r +The following table contains list of files and folder from preQC results\r +\r +| Output folder/file | File | Description |\r +| ------------------ | ---------------- | ------------------------------------------------------------------------------ |\r +| .fa | | Bam files converted to fasta format |\r +| kmer\\_analysis | | Folder containing kmer analysis outputs |\r +| | .jf | k-mer counts from each sample |\r +| | .histo | histogram of k-mer occurrence |\r +| genome\\_profiling | | genomescope profiling outputs |\r +| | summary.txt | Summary metrics of genome scope outputs |\r +| | linear\\_plot.png | Plot showing no. of times a k-mer observed by no. of k-mers with that coverage |\r +\r +\r +### Assembly\r +This folder contains final assembly results in format.\r +\r +- `_primary.fa` - Fasta file containing primary contigs\r +- `_associate.fa` - Fasta file containing associated contigs\r +\r +### postqc\r + \r +The postqc folder contains two sub folders \r +\r +- `assembly_completeness`\r +- `assembly_evaluation`\r +\r +#### assembly_completeness\r +This contains BUSCO evaluation results for primary and associate contig.\r +\r +#### assembly_evaluation\r +Assembly evaluation folder contains various file formats, here is a brief description for each of the outputs.\r +\r +| File | Description |\r +| ----------- | ----------------------------------------------------------------------------------------- |\r +| report.txt | Assessment summary in plain text format |\r +| report.tsv | Tab-separated version of the summary, suitable for spreadsheets (Google Docs, Excel, etc) |\r +| report.tex | LaTeX version of the summary |\r +| icarus.html | Icarus main menu with links to interactive viewers |\r +| report.html | HTML version of the report with interactive plots inside |\r +\r +\r +# Infrastructure usage and recommendations\r +\r +### NCI facility 
access\r +One should have a user account set with NCI to access gadi high performance computational facility. Setting up a NCI account is mentioned in detail at the following URL: https://opus.nci.org.au/display/Help/Setting+up+your+NCI+Account \r + \r +Documentation for a specific infrastructure should go into a infrastructure documentation template\r +https://github.com/AustralianBioCommons/doc_guidelines/blob/master/infrastructure_optimisation.md\r +\r +\r +## Compute resource usage across tested infrastructures\r +\r +| | Computational resource for plant case study |\r +| ------------------------------------- | ------------------------------------------- |\r +| | Time | CPU | Memory | I/O |\r +| Process | duration | realtime | %cpu | peak\\_rss | peak\\_vmem | rchar | wchar |\r +| Converting bam to fasta for sample | 12m 54s | 12m 48s | 99.80% | 5.2 MB | 197.7 MB | 43.3 GB | 50.1 GB |\r +| Generating k-mer counts and histogram | 26m 43s | 26m 36s | 1725.30% | 19.5 GB | 21 GB | 77.2 GB | 27.1 GB |\r +| Profiling genome characteristics | 34.7s | 13.2s | 89.00% | 135 MB | 601.2 MB | 8.5 MB | 845.9 KB |\r +| Denovo assembly | 6h 51m 15s | 6h 51m 11s | 4744.40% | 84.7 GB | 225.6 GB | 1.4 TB | 456 GB |\r +| evaluate\\_assemblies | 5m 18s | 4m 54s | 98.20% | 1.6 GB | 1.9 GB | 13.6 GB | 2.8 GB |\r +| assemblies\\_completeness | 25m 57s | 25m 53s | 2624.20% | 22 GB | 25.2 GB | 624.9 GB | 2.9 GB |\r +\r +\r +| | Computational resource for bird case study |\r +| ------------------------------------- | ------------------------------------------ |\r +| | Time | CPU | Memory | I/O |\r +| Process | duration | realtime | %cpu | peak\\_rss | peak\\_vmem | rchar | wchar |\r +| Converting bam to fasta for sample | 12m 54s | 7m 9s | 86.40% | 5.2 MB | 197.8 MB | 21.5 GB | 27.4 GB |\r +| Generating k-mer counts and histogram | 26m 43s | 15m 34s | 1687.70% | 10.1 GB | 11.7 GB | 44 GB | 16.6 GB |\r +| Profiling genome characteristics | 34.7s | 1m 15s | 15.30% | 181.7 MB | 562.2 MB | 
8.5 MB | 819.1 KB |\r +| De novo assembly | 6h 51m 15s | 9h 2m 47s | 1853.50% | 67.3 GB | 98.4 GB | 1 TB | 395.6 GB |\r +| evaluate assemblies | 5m 18s | 2m 48s | 97.50% | 1.1 GB | 1.4 GB | 8.7 GB | 1.8 GB |\r +| assemblies completeness | 25m 57s | 22m 36s | 2144.00% | 22.2 GB | 25 GB | 389.7 GB | 1.4 GB |\r +\r +\r +# Workflow summaries\r +\r +## Metadata\r +\r +| Metadata field | Pre-assembly quality control | Primary assembly | Post-assembly quality control |\r +| ---------------- | --------------------------------------------------------------------------------- | ------------------ | ----------------------------- |\r +| Version | 1.0 | 1.0 | 1.0 |\r +| Maturity | Production | Production | production |\r +| Creators | Naga, Kenneth | Naga, Kenneth | Naga, Kenneth |\r +| Source | [AusARG/hifi-assembly-workflow](https://github.com/AusARG/hifi-assembly-workflow) |\r +| License | MIT License | MIT License | MIT License |\r +| Workflow manager | NextFlow | NextFlow | NextFlow |\r +| Container | No containers used | No containers used | No containers used |\r +| Install method | Manual | Manual | Manual |\r +\r +\r +## Component tools\r +​\r +| Workflow element | Workflow element version | Workflow title |\r +| --------------------------------- | ------------------------ | ----------------------------- |\r +| Samtools, jellyfish, genomescope | 1.0 | Pre-assembly quality control |\r +| Improved phased assembler (pbipa) | 1.0 | Primary assembly |\r +| Quast and busco | 1.0 | Post-assembly quality control |\r +\r +\r +## Required (minimum) inputs/parameters\r + \r +PATH to HIFI bam folder is the minimum requirement for the processing the pipeline.\r +\r +## Third party tools / dependencies\r +\r +The following packages are used by the pipeline.\r +\r +- `nextflow/21.04.3`\r +- `samtools/1.12`\r +- `jellyfish/2.3.0`\r +- `genomescope/2.0`\r +- `ipa/1.3.1`\r +- `quast/5.0.2`\r +- `busco/5.2.2`\r +\r +The following paths contain all modules required for the pipeline.\r +\r 
+- `/apps/Modules/modulefiles`\r +- `/g/data/wz54/groupResources/modules`\r +\r +---\r +\r +# Help/FAQ/Troubleshooting\r +\r +Direct training and help is available if you are new to HPC and/or new to NCI/Gadi.\r +\r +- Basic information to get started with the NCI Gadi for bioinformatics can be found at https://github.com/AusARG/ABLeS/wiki/temppage.\r +- For NCI support, contact the NCI helpdesk directly at https://www.nci.org.au/users/nci-helpdesk\r +- Queue limits and structure explained at https://opus.nci.org.au/display/Help/4.+PBS+Jobs\r +\r +---\r +\r +# 3rd party Tutorials \r +\r +A tutorial by Andrew Severin on running GenomeScope 1.0 is available here:\r +https://github.com/AusARG/hifi-assembly-workflow.git\r +\r +Improved Phased Assembler tutorial is available at \r +https://github.com/PacificBiosciences/pbbioconda/wiki/Improved-Phased-Assembler\r +\r +Busco tutorial\r +https://wurmlab.com/genomicscourse/2016-SIB/practicals/busco/busco_tutorial\r +\r +---\r +\r +# Licence(s)\r +\r +MIT License\r +\r +Copyright (c) 2022 AusARG\r +\r +Permission is hereby granted, free of charge, to any person obtaining a copy\r +of this software and associated documentation files (the "Software"), to deal\r +in the Software without restriction, including without limitation the rights\r +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r +copies of the Software, and to permit persons to whom the Software is\r +furnished to do so, subject to the following conditions:\r +\r +The above copyright notice and this permission notice shall be included in all\r +copies or substantial portions of the Software.\r +\r +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\r +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r +SOFTWARE.\r +\r +---\r +\r +# Acknowledgements/citations/credits\r +\r +> Jung, H. et al. Twelve quick steps for genome assembly and annotation in the classroom. PLoS Comput. Biol. 16, 1–25 (2020).\r +\r +> 2020, G. A. W. No Title. https://ucdavis-bioinformatics-training.github.io/2020-Genome_Assembly_Workshop/kmers/kmers.\r +\r +> Sović, I. et al. Improved Phased Assembly using HiFi Data. (2020).\r +\r +> Gurevich, A., Saveliev, V., Vyahhi, N. & Tesler, G. QUAST: Quality assessment tool for genome assemblies. Bioinformatics 29, 1072–1075 (2013).\r +\r +> Waterhouse, R. M. et al. BUSCO applications from quality assessments to gene prediction and phylogenomics. Mol. Biol. Evol. 35, 543–548 (2018).\r +\r +---\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "HiFi de novo genome assembly workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/340?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "# beacon-omop-worker-workflows" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/882?version=2" ; + schema1:isBasedOn "https://github.com/Health-Informatics-UoN/beacon-omop-worker-workflows.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for beacon-workflow" ; + schema1:sdDatePublished "2024-07-12 13:22:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/882/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 2831 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 362 ; + schema1:dateCreated "2024-05-22T13:18:49Z" ; + schema1:dateModified "2024-05-22T13:18:49Z" ; + schema1:description "# beacon-omop-worker-workflows" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/882?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "beacon-workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/882?version=2" ; + schema1:version 2 ; + ns1:input ; + ns1:output . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. 
We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.55.4" ; + 
schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_pmx_tutorial" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)" ; + schema1:sdDatePublished "2024-07-12 13:24:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/55/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 52945 ; + schema1:creator , + ; + schema1:dateCreated "2022-09-16T09:35:38Z" ; + schema1:dateModified "2023-01-16T13:44:51Z" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. 
Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/55?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/biobb_wf_pmx_tutorial.ipynb" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "An example workflow for the Specimen Data Refinery tool, allowing an individual tool to be used" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.375.1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for HTR-Collections-test" ; + schema1:sdDatePublished "2024-07-12 13:35:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/375/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15329 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-07-08T13:05:11Z" ; + schema1:dateModified "2023-01-16T14:02:00Z" ; + schema1:description "An example workflow for the Specimen Data Refinery tool, allowing an individual tool to be used" ; + schema1:keywords "Default-SDR, multi-specimen-input, collections, validated-2022-06-29" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "HTR-Collections-test" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/375?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description "This repository contains the python code to reproduce the experiments in Dłotko, Gurnari \"Euler Characteristic Curves and Profiles: a stable shape invariant for big data problems\"" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.576.1" ; + schema1:isBasedOn "https://github.com/dioscuri-tda/ecp_experiments" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ECP experiments" ; + schema1:sdDatePublished "2024-07-12 13:27:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/576/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 185 ; + schema1:creator ; + schema1:dateCreated "2023-09-25T12:10:08Z" ; + schema1:dateModified "2023-09-25T14:35:37Z" ; + schema1:description "This repository contains the python code to reproduce the experiments in Dłotko, Gurnari \"Euler Characteristic Curves and Profiles: a stable shape invariant for big data problems\"" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ECP experiments" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/576?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/963?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/airrflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bcellmagic" ; + schema1:sdDatePublished "2024-07-12 13:22:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/963/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6709 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:36Z" ; + schema1:dateModified "2024-06-11T12:54:36Z" ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/963?version=13" ; + schema1:keywords "airr, b-cell, immcantation, immunorepertoire, repseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bcellmagic" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/963?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.195.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_dna_helparms" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Structural DNA helical parameters tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/195/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 72225 ; + schema1:creator , + ; + schema1:dateCreated "2021-09-28T08:43:19Z" ; + schema1:dateModified "2022-09-15T12:36:32Z" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/195?version=4" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Structural DNA helical parameters tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/195?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.281.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_protein_complex_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:34:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/281/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8747 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-17T09:57:03Z" ; + schema1:dateModified "2022-11-22T09:59:54Z" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/281?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher 
; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_protein_complex_md_setup/python/workflow.py" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "The workflow takes a trimmed Illumina WGS paired-end reads collection, Collapsed contigs, and the values for transition parameter and max coverage depth (calculated from WF1) to run Purge_Dups. It produces purged Collapsed contigs assemblies, and runs all the QC analysis (gfastats, BUSCO, and Merqury). " ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/701?version=1" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/galaxy" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ERGA ONT+Illumina Collapsed Purge+QC v2311 (WF3)" ; + schema1:sdDatePublished "2024-07-12 13:25:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/701/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 43492 ; + schema1:creator , + ; + schema1:dateCreated "2024-01-09T10:40:06Z" ; + schema1:dateModified "2024-01-09T10:44:51Z" ; + schema1:description "The workflow takes a trimmed Illumina WGS paired-end reads collection, Collapsed contigs, and the values for transition parameter and max coverage depth (calculated from WF1) to run Purge_Dups. It produces purged Collapsed contigs assemblies, and runs all the QC analysis (gfastats, BUSCO, and Merqury). 
" ; + schema1:image ; + schema1:isPartOf , + ; + schema1:keywords "name:ASSEMBLY+QC, ERGA, illumina" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ERGA ONT+Illumina Collapsed Purge+QC v2311 (WF3)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/3.Purging/Galaxy-Workflow-ERGA_Illumina_Collapsed_Purge_QC_v2311_(WF3).ga" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + ; + ns1:output . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 128917 ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/3.Purging/pics/Purge_illumina_2311.png" . + + a schema1:Dataset ; + schema1:datePublished "2024-02-16T10:01:26.347512" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Purge-duplicate-contigs-VGP6/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicate-contigs-VGP6/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:version "0.3.4" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """The containerised pipeline for profiling shotgun metagenomic data is derived from the [MGnify](https://www.ebi.ac.uk/metagenomics/) pipeline raw-reads analyses, a well-established resource used for analyzing microbiome data.\r +Key components:\r +- Quality control and decontamination\r +- rRNA and ncRNA detection using Rfam database\r +- Taxonomic classification of SSU and LSU regions \r +- Abundance analysis with mOTUs""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/450?version=1" ; + schema1:isBasedOn "https://github.com/EBI-Metagenomics/motus_pipeline" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for MGnify raw reads taxonomic profiling pipeline" ; + schema1:sdDatePublished "2024-07-12 13:34:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/450/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 385320 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 131 ; + schema1:creator , + ; + schema1:dateCreated "2023-03-30T20:49:19Z" ; + schema1:dateModified "2023-03-30T20:58:56Z" ; + schema1:description """The containerised pipeline for profiling shotgun metagenomic data is derived from the [MGnify](https://www.ebi.ac.uk/metagenomics/) pipeline raw-reads analyses, a well-established resource used for analyzing microbiome data.\r +Key components:\r +- Quality control and decontamination\r +- rRNA and ncRNA detection using Rfam database\r +- Taxonomic classification of SSU and LSU regions \r +- Abundance analysis with mOTUs""" ; + schema1:image ; + schema1:keywords "Nextflow, Metagenomics" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "MGnify raw reads taxonomic profiling pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/450?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# IndexReferenceFasta-nf\r +===========\r +\r + - [Description](#description)\r + - [Diagram](#diagram)\r + - [User guide](#user-guide)\r + - [Benchmarking](#benchmarking)\r + - [Workflow summaries](#workflow-summaries)\r + - [Metadata](#metadata)\r + - [Component tools](#component-tools)\r + - [Required (minimum)\r + inputs/parameters](#required-minimum-inputsparameters)\r + - [Additional notes](#additional-notes)\r + - [Help/FAQ/Troubleshooting](#helpfaqtroubleshooting)\r + - [Acknowledgements/citations/credits](#acknowledgementscitationscredits)\r +\r +---\r +\r +## Description\r +This is a flexible pipeline for generating common reference genome index files for WGS data analysis. 
IndexReferenceFasta-nf is a Nextflow (DSL2) pipeline that runs the following tools using Singularity containers:\r +* Samtools faidx\r +* BWA index\r +* GATK CreateSequenceDictionary \r +\r +## Diagram\r +

\r +\r +

\r +\r +## User guide\r +**1. Set up**\r +\r +Clone this repository by running:\r +```\r +git clone https://github.com/Sydney-Informatics-Hub/IndexReferenceFasta-nf.git\r +cd IndexReferenceFasta-nf\r +``` \r +\r +**2. Generate indexes** \r +\r +Users can specify which index files to create by using the `--samtools`, `--bwa`, and/or `--gatk` flags. All are optional. Run the pipeline with:\r +\r +```\r +nextflow run main.nf /path/to/ref.fasta --bwa --samtools --gatk \r +```\r +\r +## Benchmarking\r +\r +### Human hg38 reference assembly @ Pawsey's Nimbus (NCPU/task = 1)\r +|task_id|hash |native_id|name |status |exit|submit |duration |realtime |%cpu |peak_rss|peak_vmem|rchar |wchar |\r +|-------|---------|---------|--------------|---------|----|-------|----------|----------|-------|--------|---------|-------|-------|\r +|3 |27/33fffc|131621 |samtools_index|COMPLETED|0 |55:44.9|12.2s |12s |99.20% |6.3 MB |11.8 MB |3 GB |19.1 KB|\r +|1 |80/f03e46|131999 |gatk_index |COMPLETED|0 |55:46.7|22.6s |22.3s |231.90%|3.8 GB |37.1 GB |3.1 GB |726 KB |\r +|2 |ea/e29535|131594 |bwa_index |COMPLETED|0 |55:44.9|1h 50m 16s|1h 50m 15s|99.50% |4.5 GB |4.5 GB |12.1 GB|8.2 GB |\r +\r +## Workflow summaries\r +\r +### Metadata\r +|metadata field | workflow_name / workflow_version |\r +|-------------------|:---------------------------------:|\r +|Version | workflow_version |\r +|Maturity | under development |\r +|Creators | Georgie Samaha |\r +|Source | NA |\r +|License | GPL-3.0 license |\r +|Workflow manager | NextFlow |\r +|Container | None |\r +|Install method | Manual |\r +|GitHub | Sydney-Informatics-Hub/IndexReferenceFasta-nf |\r +|bio.tools | NA |\r +|BioContainers | NA | \r +|bioconda | NA |\r +\r +### Component tools\r +\r +* samtools/1.15.1\r +* gatk/4.2.6.1 \r +* bwa/0.7.17\r +\r +### Required (minimum) inputs/parameters\r +\r +* A reference genome file in fasta format.\r +\r +## Additional notes\r +\r +### Help/FAQ/Troubleshooting\r +\r +## Acknowledgements/citations/credits\r 
+### Authors \r +- Georgie Samaha (Sydney Informatics Hub, University of Sydney) \r +\r +### Acknowledgements \r +\r +- This pipeline was built using the [Nextflow DSL2 template](https://github.com/Sydney-Informatics-Hub/Nextflow_DSL2_template). \r +- Documentation was created following the [Australian BioCommons documentation guidelines](https://github.com/AustralianBioCommons/doc_guidelines). \r +\r +### Cite us to support us! \r +Acknowledgements (and co-authorship, where appropriate) are an important way for us to demonstrate the value we bring to your research. Your research outcomes are vital for ongoing funding of the Sydney Informatics Hub and national compute facilities. We suggest including the following acknowledgement in any publications that follow from this work: \r +\r +The authors acknowledge the technical assistance provided by the Sydney Informatics Hub, a Core Research Facility of the University of Sydney and the Australian BioCommons which is enabled by NCRIS via Bioplatforms Australia. \r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.393.1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/IndexReferenceFasta-nf" ; + schema1:license "LGPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for IndexReferenceFasta-nf" ; + schema1:sdDatePublished "2024-07-12 13:35:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/393/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2496 ; + schema1:creator ; + schema1:dateCreated "2022-10-12T02:34:32Z" ; + schema1:dateModified "2023-01-16T14:02:39Z" ; + schema1:description """# IndexReferenceFasta-nf\r +===========\r +\r + - [Description](#description)\r + - [Diagram](#diagram)\r + - [User guide](#user-guide)\r + - [Benchmarking](#benchmarking)\r + - [Workflow summaries](#workflow-summaries)\r + - [Metadata](#metadata)\r + - [Component tools](#component-tools)\r + - [Required (minimum)\r + inputs/parameters](#required-minimum-inputsparameters)\r + - [Additional notes](#additional-notes)\r + - [Help/FAQ/Troubleshooting](#helpfaqtroubleshooting)\r + - [Acknowledgements/citations/credits](#acknowledgementscitationscredits)\r +\r +---\r +\r +## Description\r +This is a flexible pipeline for generating common reference genome index files for WGS data analysis. IndexReferenceFasta-nf is a Nextflow (DSL2) pipeline that runs the following tools using Singularity containers:\r +* Samtools faidx\r +* BWA index\r +* GATK CreateSequenceDictionary \r +\r +## Diagram\r +

\r +\r +

\r +\r +## User guide\r +**1. Set up**\r +\r +Clone this repository by running:\r +```\r +git clone https://github.com/Sydney-Informatics-Hub/IndexReferenceFasta-nf.git\r +cd IndexReferenceFasta-nf\r +``` \r +\r +**2. Generate indexes** \r +\r +Users can specify which index files to create by using the `--samtools`, `--bwa`, and/or `--gatk` flags. All are optional. Run the pipeline with:\r +\r +```\r +nextflow run main.nf /path/to/ref.fasta --bwa --samtools --gatk \r +```\r +\r +## Benchmarking\r +\r +### Human hg38 reference assembly @ Pawsey's Nimbus (NCPU/task = 1)\r +|task_id|hash |native_id|name |status |exit|submit |duration |realtime |%cpu |peak_rss|peak_vmem|rchar |wchar |\r +|-------|---------|---------|--------------|---------|----|-------|----------|----------|-------|--------|---------|-------|-------|\r +|3 |27/33fffc|131621 |samtools_index|COMPLETED|0 |55:44.9|12.2s |12s |99.20% |6.3 MB |11.8 MB |3 GB |19.1 KB|\r +|1 |80/f03e46|131999 |gatk_index |COMPLETED|0 |55:46.7|22.6s |22.3s |231.90%|3.8 GB |37.1 GB |3.1 GB |726 KB |\r +|2 |ea/e29535|131594 |bwa_index |COMPLETED|0 |55:44.9|1h 50m 16s|1h 50m 15s|99.50% |4.5 GB |4.5 GB |12.1 GB|8.2 GB |\r +\r +## Workflow summaries\r +\r +### Metadata\r +|metadata field | workflow_name / workflow_version |\r +|-------------------|:---------------------------------:|\r +|Version | workflow_version |\r +|Maturity | under development |\r +|Creators | Georgie Samaha |\r +|Source | NA |\r +|License | GPL-3.0 license |\r +|Workflow manager | NextFlow |\r +|Container | None |\r +|Install method | Manual |\r +|GitHub | Sydney-Informatics-Hub/IndexReferenceFasta-nf |\r +|bio.tools | NA |\r +|BioContainers | NA | \r +|bioconda | NA |\r +\r +### Component tools\r +\r +* samtools/1.15.1\r +* gatk/4.2.6.1 \r +* bwa/0.7.17\r +\r +### Required (minimum) inputs/parameters\r +\r +* A reference genome file in fasta format.\r +\r +## Additional notes\r +\r +### Help/FAQ/Troubleshooting\r +\r +## Acknowledgements/citations/credits\r 
+### Authors \r +- Georgie Samaha (Sydney Informatics Hub, University of Sydney) \r +\r +### Acknowledgements \r +\r +- This pipeline was built using the [Nextflow DSL2 template](https://github.com/Sydney-Informatics-Hub/Nextflow_DSL2_template). \r +- Documentation was created following the [Australian BioCommons documentation guidelines](https://github.com/AustralianBioCommons/doc_guidelines). \r +\r +### Cite us to support us! \r +Acknowledgements (and co-authorship, where appropriate) are an important way for us to demonstrate the value we bring to your research. Your research outcomes are vital for ongoing funding of the Sydney Informatics Hub and national compute facilities. We suggest including the following acknowledgement in any publications that follow from this work: \r +\r +The authors acknowledge the technical assistance provided by the Sydney Informatics Hub, a Core Research Facility of the University of Sydney and the Australian BioCommons which is enabled by NCRIS via Bioplatforms Australia. \r +""" ; + schema1:keywords "Bioinformatics, Nextflow, WGS, index, referencegenome, SAMTools, GATK, BWA, Genomics" ; + schema1:license "https://spdx.org/licenses/LGPL-3.0" ; + schema1:name "IndexReferenceFasta-nf" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/393?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=14" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-07-12 13:19:04 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=14" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15644 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:15Z" ; + schema1:dateModified "2024-06-11T12:55:15Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=14" ; + schema1:version 14 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=15" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-07-12 13:21:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=15" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11341 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:50Z" ; + schema1:dateModified "2024-06-11T12:54:50Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=15" ; + schema1:version 15 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """\r +Author: AMBARISH KUMAR er.ambarish@gmail.com; ambari73_sit@jnu.ac.in\r +\r +This is a proposed standard operating procedure for genomic variant detection using VARSCAN.\r +\r +It is hoped to be effective and useful for getting SARS-CoV-2 genome variants.\r +\r +\r +\r +It uses Illumina RNASEQ reads and genome sequence.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/31?version=1" ; + schema1:isBasedOn "https://github.com/ambarishK/bio-cwl-tools/blob/release/varscanW.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genomic variants - SNPs and INDELs detection using VARSCAN2." ; + schema1:sdDatePublished "2024-07-12 13:37:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/31/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 57754 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2917 ; + schema1:creator ; + schema1:dateCreated "2020-06-17T06:24:44Z" ; + schema1:dateModified "2023-01-16T13:42:27Z" ; + schema1:description """\r +Author: AMBARISH KUMAR er.ambarish@gmail.com; ambari73_sit@jnu.ac.in\r +\r +This is a proposed standard operating procedure for genomic variant detection using VARSCAN.\r +\r +It is hoped to be effective and useful for getting SARS-CoV-2 genome variants.\r +\r +\r +\r +It uses Illumina RNASEQ reads and genome sequence.\r +""" ; + schema1:image ; + schema1:keywords "CWL, SNPs, INDELs, VARSCAN2" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Genomic variants - SNPs and INDELs detection using VARSCAN2." ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/31?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + ; + ns1:output , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# ProGFASTAGen\r +\r +The ProGFASTAGen (**Pro**tein-**G**raph-**FASTA**-**Gen**erator or **Pro**t**G**raph-**FASTA**-**Gen**erator) repository contains workflows to generate so-called precursor-specific-FASTAs (using the precursors from MGF-files) including feature-peptides, like VARIANTs or CONFLICTs if desired, or global-FASTAs (as described in [ProtGraph](https://github.com/mpc-bioinformatics/ProtGraph)). The single workflow scripts have been implemented with [Nextflow-DSL-2](https://www.nextflow.io/docs/latest/dsl2.html) and are independent to each other. Each of these workflows can be used on their own or can be imported to other workflows for other use-cases. Further, we included three main-workflows, to show how the single workflows can be chained together. 
The `main_workflow_protein_fasta.nf`-workflow converts Thermo-RAW-files into MGF, searches with Comet (and Percolator) and the identification results are then further summarized. The workflows `main_workflow_global_fasta.nf` and `main_workflow_precursor_specific_fasta.nf` generate specific FASTA-files before search-engine-identification. Below are example nextflow-calls, which can be used.\r +\r +Regarding the precursor-specific-FASTA-generation: The source-code of the C++ implementation for traversal can be found in `bin`. There, four implementations are present: `Float/Int`-Versions as well as `DryRun/VarLimitter`-Versions of the traversal. The `Float/Int`-Versions can be faster/slower depending on th processor-architecture and can be used via a flag in the `create_precursor_specific_fasta.nf`-workflow. The `DryRun`-Version does not generate a FASTA but tests the used system (depending on a query-timeout) to determine the maximum number of variants which can be used, while not timing out. The actual FASTA-generation happens in the `VarLimitter`-Version using the generated protein-graphs at hand.\r +\r +in **Prerequisites** a small description of dependencies and how to set up the host system is given. **Individual steps** describes the single workflows and how they can be called, while **Main Workflow Scripts** shows example-calls of the main workflows. In **Regenerate Results from Publication**, the calls and parameters are shown, which were used in the publication. Using the same FASTA or UniProt flat file format with a similar server-setting should yield similar results as used in the publication.\r +\r +## Prerequisites\r +\r +### Executing on Linux\r +\r +This workflow can be only executed on linux (tested on Ubuntu 22.04 and ArchLinux). Before setting up the `bin`-folder, some requiered binaries need to be present on the OS. 
(Focusing on Ubuntu:) The following packages need to be installed on Ubuntu (via `apt`), if not already:\r +\r +```text\r +build-essential\r +wget\r +curl\r +unzip\r +cmake\r +mono-complete\r +python3-pip (or any environment with Python3, where pip is available)\r +python-is-python3 (needed for ubuntu, so that python points to python3)\r +```\r +\r +If all packages are installed (and the python environment is set up), the setup-script needs to be executed, which downloads needed dependencies and compiles the source-code located in the `bin`-folder:\r +\r +```shell\r +chmod +x compile_and_setup_depencies.sh # In case this file is not executable\r +./compile_and_setup_depencies.sh # Downloads dependencies, compiles the C++-implementation and sets all binaries in the bin-folder as executable\r +```\r +\r +If the script exits without errors, the provided workflows can be executed with the command `nextflow`.\r +\r +### Executing in Docker\r +\r +Alternatively, docker can be used. For this, please follow the [installation guide](https://docs.docker.com/engine/install/ubuntu/) for docker. After installing docker, a local docker-container can be build with all needed dependencies for the workflows. We provide a `Dockerfile` in the `docker`-folder. To build it, execute (while beeing with a shell in the root-folder of this repository) the following:\r +\r +```shell\r +docker build -t progfastagen:local . -f docker/Dockerfile\r +```\r +\r +This command builds a local docker container, tagging it with `progfastagen:local`, which can be later used by nextflow. To use it with nextflow, make sure that `nextflow` is installed on the host-system. For each of the workflow example calls below, the `-with-docker progfastagen:local` then needs to be appended, to let `nextflow` know to use the local docker-container.\r +\r +## Individual Steps\r +\r +Each step has been implemented in such a way, that it can be executed on its own. 
Each subsection below, provides a brief overview and an example call of the required parameters to demonstrate how the workflow can be called. If you are interested for all the available parameters within a workflow and want modify or tune them, then please refer to the source of the workflows, where each parameter is described briefly.\r +\r +### Converting RAW-files to MGF\r +\r +The workflow `convert_to_mgf.nf` is a wrapper around the ThermoRawFileParser and converts RAW-files to the MGF-format. The `ctm_raws` parameter needs to be set, in order to generate the MGF-files:\r +\r +```text\r +nextflow run convert_to_mgf.nf \\\r + --ctm_raws < Folder containing RAW-files > \\\r + --ctm_outdir < Output-Folder, where the MGFs should be stored >\r +```\r +\r +### Generating a Precursor-Specific-FASTA\r +\r +The workflow `create_precursor_specific_fasta.nf` generates a precursor-specific-FASTA-file, tailored to a set of MGF-files. Here, Protein-Graphs are generated, using the UniProt flat file format (which can be downloaded from [UniProt](https://www.uniprot.org/) by selecting `Text` as format) and a python script prepares the queries, by extracting the MS2-precursors from the MGF-files (using a tolerance, in ppm). Using the Protein-Graphs and a `DryRun`-Version of the traversal, the maximum-variant-limits are determined for each Protein-Graph (and mass-query-range) using a binary-search. 
These limits are then used for the actual ms2-specific-FASTA-generation in conjunction with the extracted MS2-precursors and a compacted FASTA is returned, which is tailored to the MGF-files.\r +\r +Altough of the complexity, the workflow only requires the following parameters to generate such a FASTA:\r +\r +```text\r +nextflow run create_precursor_specific_fasta.nf \\\r + --cmf_mgf_files < Folder containing MGF-files > \\\r + --cmf_sp_embl_file < Path to a SP-EMBL-File (UniProt flat file format) > \\\r + --cmf_outdir \r +```\r +\r +The optional parameter: `cmf_pg_additional_params` is added to ProtGraph directly, allowing every parameter, ProtGraph provides to be set there (e.g. useful if the digestion should be changed or features/PTMs should be included/excluded, etc...), allowing arbitrary settings to generate Protein-Graphs if desired. It defaults to use all features, ProtGraph can parse.\r +\r +**Note regarding PTMs/Tolerance**: The FASTA is tailored to the MS2-precursors, therefore variable and fixed modifications need to be set to the same settings as for the actual identification. This workflow defaults to carbamidomethylation (C, fixed) and oxidation (M, variable). See ProtGraph (and the workflow-parameter `cmf_pg_additional_params`) to set the PTMs accordingly in the Protein-Graphs. The same applies for the MS2-precursor-tolereance which can be set with `cmf_query_ppm` and defaults to `5ppm`.\r +\r +**Note regarding Limits**: This workflows defaults to allow up to 5 seconds per query and limits peptides to contain at most 5 variants (with a maximum of 5000 Da per peptide), resulting into FASTA-files which can be 15-200GB large (depending on dataset and species). Changing these settings can drastically increase/decrease the runtime/memory usage/disk usage. 
We advise to change those settings slightly and to pay attention on the runtime/memory usage/disk usage if run with the newly set limits (and dataset + species) the first time.\r +\r +**Note regarding identification**: If digestion is enabled (default is `Trypsin`), the resulting FASTA contains already digested entries, thus searching with a search-engine, the digestion should be set to `off/no_cut`.\r +\r +### Generating a Global-FASTA\r +\r +This workflow generates a so called global-FASTA, using ProtGraph, the UniProt flat file format and some global limits for writing out peptides/proteins. Global-FASTAs can be generated with the `create_global_fasta.nf`-workflow. To generate a global-FASTA, only a path to a single SP-EMBL-file (UniProt flat file format) is required. Such a file can be downloaded from [UniProt](https://www.uniprot.org/) directly, by selecting `Text` instead of `FASTA` as the download format.\r +\r +```text\r +nextflow run create_global_fasta.nf \\\r + --cgf_sp_embl_file < Path to a SP-EMBL-File (UniProt flat file format) > \\\r + --cgf_outdir < The output-folder, where the gloabl-FASTA and some Protein-Graph-statistics should be saved >\r +```\r +\r +Per default, this workflow does not export feature-peptides and is set to only export peptides with up to 5000 Da mass and maximum of two miscleavages. It is possible to generate global-FASTA with some specific features (like containing, `SIGNAL`, `PEPTIDE` or others) and other limits. The parameters `cgf_features_in_graphs` and `cgf_peptide_limits` can be set accordingly. These are added to ProtGraph directly, hence every parameter ProtGraph provides, can be set here (including different digestion settings).\r +\r +**Note**: A dry run with ProtGraph to generate statistics how many peptide would be theoretically exported is advised prior for testing. Some Protein-Graphs with some features (e.g. 
P53 using variants) can contain to many peptides, which could result to very long runtimes and huge FASTAs.\r +\r +**Note regarding identification**: If digestion is enabled (default is `Trypsin`), the resulting FASTA contains already digested entries, thus searching with a search-engine, the digestion should be set to `off/no_cut`.\r +\r +### Identification via Coment (and Percolator)\r +\r +We provide an identification workflow to showcase, that the generated FASTAs can be used with search-engines. The workflow `identification_via_comet.nf` identifies MGF-files individually, using custom search-settings for Comet (and if desired rescores the results with Percolator), applies an FDR-cutoff using the q-value (for each file) and exposes the identification results into an output-folder.\r +\r +Three parameters are required, to execute the workflow:\r +\r +1. The MGFs which should be identified\r +2. The Comet-Parameter file to set the search-settings\r +3. The FASTA-file which should be used for identification\r +\r +Below is an example call with all required parameters (Percolator is enabled by default):\r +\r +```text\r +nextflow run identification_via_comet.nf \\\r + --idc_mgf_folder < Folder containing MGF-files > \\\r + --idc_fasta_file < The FASTA which should be used for identification > \\\r + --idc_search_parameter_file < The Comet-Parameters file (Search Configuration) > \\\r + --idc_outdir < Output-Folder where the results of the identification files are stored >\r +```\r +\r +Here is another example call with all required parameters (this time, turning Percolator off):\r +\r +```text\r +nextflow run identification_via_comet.nf \\\r + --idc_mgf_folder < Folder containing MGF-files > \\\r + --idc_fasta_file < The FASTA which should be used for identification > \\\r + --idc_search_parameter_file < The Comet-Parameters file (Search Configuration) > \\\r + --idc_outdir < Output-Folder where the results of the identification files are stored > \\\r + 
--idc_use_percolator 0\r +```\r +\r +**Note**: This identification-workflow defaults to an FDR-cutoff (q-value) of `--idc_fdr "0.01"`, reporting only 1% filtered PSMs. Arbitrary and multiple FDR-cutoffs can be set and can be changed to the desired value.\r +\r +### Summarization of results\r +\r +The `summarize_ident_results.nf`-workflow genereates convenient summarization of the identification results. Here, the identification-results are binned into 4 groups:\r +\r +1. Unique PSMs (a match, which can only originate from one protein)\r +2. Shared PSMs (a match, which can originate from multiple proteins)\r +3. Unique Feature PSMs (as 1., but only containing peptides, which can be explained by a features)\r +4. Shared Feature PSMs (as 2., but only can be explained by features from all originating proteins)\r +\r +Furthermore, heatmaps are generated to provide an overview of found peptides across all MGFs/RAW-files.\r +\r +To call this method, a `glob` needs to be specified in this workflow:\r +\r +```text\r +nextflow run summarize_ident_results.nf \\\r + --sir_identified_files_glob < The glob matching the desired output from the identification results >\r + --sir_outdir < The output directory where the summarized results should be saved >\r +```\r +\r +In case, the identification workflow was executed using an FDR of 0.01, you could use the following `glob`:\r +\r +```text\r +nextflow run summarize_ident_results.nf \\\r + --sir_identified_files_glob "/*qvalue_no_decoys_fdr_0.01.tsv"\r + --sir_outdir < The output directory where the summarized results should be saved >\r +```\r +\r +**Note**: This step can be used only if specific columns are present in the tables. Furthermore, it distinguishes between the identification results from a FASTA by UniProt or by ProtGraph. 
The additional parameters control, whether to bin results in group 3 and 4, decide if variable modifications should be considered as unique, as well as if a peptide, which originates multiple times to the same protein should be considered as unique. The main-workflows set these parameters accordingly and can be used as an example.\r +\r +## Main Workflow Scripts\r +\r +Each individual step described above, is also imported and chained into three main-workflows:\r +\r +1. `main_workflow_protein_fasta.nf` (UniProt-FASTA-search)\r +2. `main_workflow_global_fasta.nf` (Generation of a global-FASTA and search)\r +3. `main_workflow_precursor_specific_fasta.nf` (Generation of a precursor-specific-FASTA and search)\r +\r +generating summarized identification results across multiple RAW-files.\r +\r +In each of these workflows, it is possible to modify the parameters of the imported subworkflows, by using the imported subworkflows parameters directly (as shown in the **Individual Steps** above).\r +\r +For protein-FASTA identification, only three parameters are required:\r +\r +```text\r +nextflow run main_workflow_protein_fasta.nf \\\r + --main_fasta_file < The FASTA-file, to be used for identification > \\\r + --main_raw_files_folder < The folder containing RAW-files > \\\r + --main_comet_params < The parameters file for comet (for identification) > \\\r + --main_outdir < Output-Folder where all the results from the workflows should be saved >\r +```\r +\r +This is also true for the other two workflows, where instead of a FASTA-file, the UniProt flat file format needs to be provided. 
Such a file can be downloaded from [UniProt](https://www.uniprot.org/) directly, by selecting the format `Text` instead of the format `FASTA`.\r +\r +Here are the correpsonding calls for global-FASTA and precurosr-specific-FASTA generation and identification:\r +\r +```text\r +# global-FASTA\r +nextflow run main_workflow_global_fasta.nf \\\r + --main_sp_embl_file < The SP-EMBL-file used for Protein-Graph- and FASTA-generation (UniProt flat file format) > \\\r + --main_raw_files_folder < The folder containing RAW-files > \\\r + --main_comet_params< The parameters file for comet (for identification) > \\\r + --main_outdir < Output-Folder where all the results from the workflows should be saved >\r +\r +# precursor-specific-FASTA\r +nextflow run main_workflow_precursor_specific_fasta.nf \\\r + --main_sp_embl_file < The SP-EMBL-file used for Protein-Graph- and FASTA-generation (UniProt flat file format) > \\\r + --main_raw_files_folder < The folder containing RAW-files > \\\r + --main_comet_params < The parameters file for comet (for identification) > \\\r + --main_outdir < Output-Folder where all the results from the workflows should be saved >\r +```\r +\r +**Note**: Only defining the required parameters, uses the default parameters for every other setting. For all workflows, this would mean, that the FDR-cutoff (q-value) is set to `0.01` resulting into both FDRs considered. Furthermore, the global-FASTA and precursor-specific-FASTA workflows assume Trypsin digestion. For the global-FASTA-workflow, no features are exported by default, which may not be desired, if someone whishes to search for peptide-features (like `SIGNAL`, etc..). For the precursor-specific-FASTA-workflow, the PTMs carbamidomethylation (C, fixed) and oxidation (M, variable) are assumed, which may need to be modified.\r +\r +**Note regarding example calls**: Further below you can find the calls as used in the publication. 
These set the most minimal parameters for a correct execution on custom datasets and can be used as an example.\r +\r +## Regenerate Results from Publication\r +\r +In this subsection you can find the nextflow-calls which were used to execute the 3 workflows. Executing this with the same UniProt flat file/FASTA-file should yield the similar/same results. For generated precursor-specific-FASTAs it may happen, that these are generated with slightly different variant-limits, therefore a slightly different FASTA to search with and slightly different identification results.\r +\r +The FASTA/UniProt flat file used for identification can be found [here](https://cloud.mpc.rub.de/s/LJ2bgGNmsxzSaod). The Comet configuration files are provided in the `example_configuration`-folder. The datasets can be retrieved from [PRIDE](https://www.ebi.ac.uk/pride/).\r +\r +### PXD002171\r +\r +```shell\r +# PXD002171 Precursor-Specific\r +nextflow run main_workflow_precursor_specific_fasta.nf \\\r + -with-report "PXD002171_results_precursor_specific/nextflow_report.html" \\\r + -with-timeline "PXD002171_results_precursor_specific/nextflow_timeline.html" \\\r + --main_sp_embl_file 20230619_homo_sapiens_proteome.txt \\\r + --main_raw_files_folder PXD002171 \\\r + --main_comet_params example_configurations/PXD002171_no_dig.txt \\\r + --main_outdir PXD002171_results_precursor_specific \\\r + --cmf_max_precursor_da 5000 \\\r + --cmf_query_ppm 5 \\\r + --cmf_timeout_for_single_query 5 \\\r + --cmf_maximum_variant_limit 5 \\\r + --cmf_pg_additional_params "-ft VARIANT -ft SIGNAL -ft INIT_MET -ft CONFLICT -ft VAR_SEQ -ft PEPTIDE -ft PROPEP -ft CHAIN -vm 'M:15.994915' -vm 'C:71.037114'" \\\r + --idc_fdr "0.01"\r + \r +# PXD002171 Global digested FASTA\r +nextflow run main_workflow_global_fasta.nf \\\r + -with-report "PXD002171_global_fasta/nextflow_report.html" \\\r + -with-timeline "PXD002171_global_fasta/nextflow_timeline.html" \\\r + --main_sp_embl_file 20230619_homo_sapiens_proteome.txt \\\r 
+ --main_raw_files_folder PXD002171 \\\r + --main_comet_params example_configurations/PXD002171_no_dig.txt \\\r + --main_outdir PXD002171_global_fasta \\\r + --cgf_features_in_graphs "-ft None" \\\r + --cgf_peptide_limits "--pep_miscleavages 2 --pep_min_pep_length 5" \\\r + --idc_fdr "0.01"\r +\r +# PXD002171 Protein FASTA\r +nextflow run main_workflow_protein_fasta.nf \\\r + -with-report "PXD002171_protein_fasta/nextflow_report.html" \\\r + -with-timeline "PXD002171_protein_fasta/nextflow_timeline.html" \\\r + --main_fasta_file 20230619_homo_sapiens_proteome.fasta \\\r + --main_raw_files_folder PXD002171 \\\r + --main_comet_params example_configurations/PXD002171_trypsin_dig.txt \\\r + --main_outdir PXD002171_protein_fasta \\\r + --idc_fdr "0.01"\r +```\r +\r +### PXD028605\r +\r +```shell\r +# PXD028605 Precursor-Specific\r +nextflow run main_workflow_precursor_specific_fasta.nf \\\r + -with-report "PXD028605_results_precursor_specific/nextflow_report.html" \\\r + -with-timeline "PXD028605_results_precursor_specific/nextflow_timeline.html" \\\r + --main_sp_embl_file 20230619_homo_sapiens_proteome.txt \\\r + --main_raw_files_folder PXD028605 \\\r + --main_comet_params example_configurations/PXD028605_no_dig.txt \\\r + --main_outdir PXD028605_results_precursor_specific \\\r + --cmf_max_precursor_da 5000 \\\r + --cmf_query_ppm 20 \\\r + --cmf_timeout_for_single_query 5 \\\r + --cmf_maximum_variant_limit 5 \\\r + --cmf_pg_additional_params "-ft VARIANT -ft SIGNAL -ft INIT_MET -ft CONFLICT -ft VAR_SEQ -ft PEPTIDE -ft PROPEP -ft CHAIN -fm 'C:57.021464' -vm 'M:15.9949'" \\\r + --idc_fdr "0.01"\r +\r +# PXD028605 Global digested FASTA\r +nextflow run main_workflow_global_fasta.nf \\\r + -with-report "PXD028605_global_fasta/nextflow_report.html" \\\r + -with-timeline "PXD028605_global_fasta/nextflow_timeline.html" \\\r + --main_sp_embl_file 20230619_homo_sapiens_proteome.txt \\\r + --main_raw_files_folder PXD028605 \\\r + --main_comet_params 
example_configurations/PXD028605_no_dig.txt \\\r + --main_outdir PXD028605_global_fasta \\\r + --cgf_features_in_graphs "-ft None" \\\r + --cgf_peptide_limits "--pep_miscleavages 2 --pep_min_pep_length 5" \\\r + --idc_fdr "0.01"\r +\r +# PXD028605 Protein FASTA\r +nextflow run main_workflow_protein_fasta.nf \\\r + -with-report "PXD028605_protein_fasta/nextflow_report.html" \\\r + -with-timeline "PXD028605_protein_fasta/nextflow_timeline.html" \\\r + --main_fasta_file 20230619_homo_sapiens_proteome.fasta \\\r + --main_raw_files_folder PXD028605 \\\r + --main_comet_params example_configurations/PXD028605_trypsin_dig.txt \\\r + --main_outdir PXD028605_protein_fasta \\\r + --idc_fdr "0.01"\r +```\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.837.1" ; + schema1:isBasedOn "https://github.com/mpc-bioinformatics/ProGFASTAGen" ; + schema1:license "BSD-3-Clause" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ProGFASTAGen - Protein-Graph FASTA Generation (and Identification) Workflows" ; + schema1:sdDatePublished "2024-07-12 13:23:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/837/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2815 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-26T09:54:07Z" ; + schema1:dateModified "2024-04-26T09:54:51Z" ; + schema1:description """# ProGFASTAGen\r +\r +The ProGFASTAGen (**Pro**tein-**G**raph-**FASTA**-**Gen**erator or **Pro**t**G**raph-**FASTA**-**Gen**erator) repository contains workflows to generate so-called precursor-specific-FASTAs (using the precursors from MGF-files) including feature-peptides, like VARIANTs or CONFLICTs if desired, or global-FASTAs (as described in [ProtGraph](https://github.com/mpc-bioinformatics/ProtGraph)). 
The single workflow scripts have been implemented with [Nextflow-DSL-2](https://www.nextflow.io/docs/latest/dsl2.html) and are independent to each other. Each of these workflows can be used on their own or can be imported to other workflows for other use-cases. Further, we included three main-workflows, to show how the single workflows can be chained together. The `main_workflow_protein_fasta.nf`-workflow converts Thermo-RAW-files into MGF, searches with Comet (and Percolator) and the identification results are then further summarized. The workflows `main_workflow_global_fasta.nf` and `main_workflow_precursor_specific_fasta.nf` generate specific FASTA-files before search-engine-identification. Below are example nextflow-calls, which can be used.\r +\r +Regarding the precursor-specific-FASTA-generation: The source-code of the C++ implementation for traversal can be found in `bin`. There, four implementations are present: `Float/Int`-Versions as well as `DryRun/VarLimitter`-Versions of the traversal. The `Float/Int`-Versions can be faster/slower depending on th processor-architecture and can be used via a flag in the `create_precursor_specific_fasta.nf`-workflow. The `DryRun`-Version does not generate a FASTA but tests the used system (depending on a query-timeout) to determine the maximum number of variants which can be used, while not timing out. The actual FASTA-generation happens in the `VarLimitter`-Version using the generated protein-graphs at hand.\r +\r +in **Prerequisites** a small description of dependencies and how to set up the host system is given. **Individual steps** describes the single workflows and how they can be called, while **Main Workflow Scripts** shows example-calls of the main workflows. In **Regenerate Results from Publication**, the calls and parameters are shown, which were used in the publication. 
Using the same FASTA or UniProt flat file format with a similar server-setting should yield similar results as used in the publication.\r +\r +## Prerequisites\r +\r +### Executing on Linux\r +\r +This workflow can be only executed on linux (tested on Ubuntu 22.04 and ArchLinux). Before setting up the `bin`-folder, some requiered binaries need to be present on the OS. (Focusing on Ubuntu:) The following packages need to be installed on Ubuntu (via `apt`), if not already:\r +\r +```text\r +build-essential\r +wget\r +curl\r +unzip\r +cmake\r +mono-complete\r +python3-pip (or any environment with Python3, where pip is available)\r +python-is-python3 (needed for ubuntu, so that python points to python3)\r +```\r +\r +If all packages are installed (and the python environment is set up), the setup-script needs to be executed, which downloads needed dependencies and compiles the source-code located in the `bin`-folder:\r +\r +```shell\r +chmod +x compile_and_setup_depencies.sh # In case this file is not executable\r +./compile_and_setup_depencies.sh # Downloads dependencies, compiles the C++-implementation and sets all binaries in the bin-folder as executable\r +```\r +\r +If the script exits without errors, the provided workflows can be executed with the command `nextflow`.\r +\r +### Executing in Docker\r +\r +Alternatively, docker can be used. For this, please follow the [installation guide](https://docs.docker.com/engine/install/ubuntu/) for docker. After installing docker, a local docker-container can be build with all needed dependencies for the workflows. We provide a `Dockerfile` in the `docker`-folder. To build it, execute (while beeing with a shell in the root-folder of this repository) the following:\r +\r +```shell\r +docker build -t progfastagen:local . -f docker/Dockerfile\r +```\r +\r +This command builds a local docker container, tagging it with `progfastagen:local`, which can be later used by nextflow. 
To use it with nextflow, make sure that `nextflow` is installed on the host-system. For each of the workflow example calls below, the `-with-docker progfastagen:local` then needs to be appended, to let `nextflow` know to use the local docker-container.\r +\r +## Individual Steps\r +\r +Each step has been implemented in such a way, that it can be executed on its own. Each subsection below, provides a brief overview and an example call of the required parameters to demonstrate how the workflow can be called. If you are interested for all the available parameters within a workflow and want modify or tune them, then please refer to the source of the workflows, where each parameter is described briefly.\r +\r +### Converting RAW-files to MGF\r +\r +The workflow `convert_to_mgf.nf` is a wrapper around the ThermoRawFileParser and converts RAW-files to the MGF-format. The `ctm_raws` parameter needs to be set, in order to generate the MGF-files:\r +\r +```text\r +nextflow run convert_to_mgf.nf \\\r + --ctm_raws < Folder containing RAW-files > \\\r + --ctm_outdir < Output-Folder, where the MGFs should be stored >\r +```\r +\r +### Generating a Precursor-Specific-FASTA\r +\r +The workflow `create_precursor_specific_fasta.nf` generates a precursor-specific-FASTA-file, tailored to a set of MGF-files. Here, Protein-Graphs are generated, using the UniProt flat file format (which can be downloaded from [UniProt](https://www.uniprot.org/) by selecting `Text` as format) and a python script prepares the queries, by extracting the MS2-precursors from the MGF-files (using a tolerance, in ppm). Using the Protein-Graphs and a `DryRun`-Version of the traversal, the maximum-variant-limits are determined for each Protein-Graph (and mass-query-range) using a binary-search. 
These limits are then used for the actual ms2-specific-FASTA-generation in conjunction with the extracted MS2-precursors and a compacted FASTA is returned, which is tailored to the MGF-files.\r +\r +Altough of the complexity, the workflow only requires the following parameters to generate such a FASTA:\r +\r +```text\r +nextflow run create_precursor_specific_fasta.nf \\\r + --cmf_mgf_files < Folder containing MGF-files > \\\r + --cmf_sp_embl_file < Path to a SP-EMBL-File (UniProt flat file format) > \\\r + --cmf_outdir \r +```\r +\r +The optional parameter: `cmf_pg_additional_params` is added to ProtGraph directly, allowing every parameter, ProtGraph provides to be set there (e.g. useful if the digestion should be changed or features/PTMs should be included/excluded, etc...), allowing arbitrary settings to generate Protein-Graphs if desired. It defaults to use all features, ProtGraph can parse.\r +\r +**Note regarding PTMs/Tolerance**: The FASTA is tailored to the MS2-precursors, therefore variable and fixed modifications need to be set to the same settings as for the actual identification. This workflow defaults to carbamidomethylation (C, fixed) and oxidation (M, variable). See ProtGraph (and the workflow-parameter `cmf_pg_additional_params`) to set the PTMs accordingly in the Protein-Graphs. The same applies for the MS2-precursor-tolereance which can be set with `cmf_query_ppm` and defaults to `5ppm`.\r +\r +**Note regarding Limits**: This workflows defaults to allow up to 5 seconds per query and limits peptides to contain at most 5 variants (with a maximum of 5000 Da per peptide), resulting into FASTA-files which can be 15-200GB large (depending on dataset and species). Changing these settings can drastically increase/decrease the runtime/memory usage/disk usage. 
We advise to change those settings slightly and to pay attention on the runtime/memory usage/disk usage if run with the newly set limits (and dataset + species) the first time.\r +\r +**Note regarding identification**: If digestion is enabled (default is `Trypsin`), the resulting FASTA contains already digested entries, thus searching with a search-engine, the digestion should be set to `off/no_cut`.\r +\r +### Generating a Global-FASTA\r +\r +This workflow generates a so called global-FASTA, using ProtGraph, the UniProt flat file format and some global limits for writing out peptides/proteins. Global-FASTAs can be generated with the `create_global_fasta.nf`-workflow. To generate a global-FASTA, only a path to a single SP-EMBL-file (UniProt flat file format) is required. Such a file can be downloaded from [UniProt](https://www.uniprot.org/) directly, by selecting `Text` instead of `FASTA` as the download format.\r +\r +```text\r +nextflow run create_global_fasta.nf \\\r + --cgf_sp_embl_file < Path to a SP-EMBL-File (UniProt flat file format) > \\\r + --cgf_outdir < The output-folder, where the gloabl-FASTA and some Protein-Graph-statistics should be saved >\r +```\r +\r +Per default, this workflow does not export feature-peptides and is set to only export peptides with up to 5000 Da mass and maximum of two miscleavages. It is possible to generate global-FASTA with some specific features (like containing, `SIGNAL`, `PEPTIDE` or others) and other limits. The parameters `cgf_features_in_graphs` and `cgf_peptide_limits` can be set accordingly. These are added to ProtGraph directly, hence every parameter ProtGraph provides, can be set here (including different digestion settings).\r +\r +**Note**: A dry run with ProtGraph to generate statistics how many peptide would be theoretically exported is advised prior for testing. Some Protein-Graphs with some features (e.g. 
P53 using variants) can contain too many peptides, which could result in very long runtimes and huge FASTAs.\r +\r +**Note regarding identification**: If digestion is enabled (default is `Trypsin`), the resulting FASTA contains already digested entries, thus searching with a search-engine, the digestion should be set to `off/no_cut`.\r +\r +### Identification via Comet (and Percolator)\r +\r +We provide an identification workflow to showcase, that the generated FASTAs can be used with search-engines. The workflow `identification_via_comet.nf` identifies MGF-files individually, using custom search-settings for Comet (and if desired rescores the results with Percolator), applies an FDR-cutoff using the q-value (for each file) and exposes the identification results into an output-folder.\r +\r +Three parameters are required, to execute the workflow:\r +\r +1. The MGFs which should be identified\r +2. The Comet-Parameter file to set the search-settings\r +3. The FASTA-file which should be used for identification\r +\r +Below is an example call with all required parameters (Percolator is enabled by default):\r +\r +```text\r +nextflow run identification_via_comet.nf \\\r + --idc_mgf_folder < Folder containing MGF-files > \\\r + --idc_fasta_file < The FASTA which should be used for identification > \\\r + --idc_search_parameter_file < The Comet-Parameters file (Search Configuration) > \\\r + --idc_outdir < Output-Folder where the results of the identification files are stored >\r +```\r +\r +Here is another example call with all required parameters (this time, turning Percolator off):\r +\r +```text\r +nextflow run identification_via_comet.nf \\\r + --idc_mgf_folder < Folder containing MGF-files > \\\r + --idc_fasta_file < The FASTA which should be used for identification > \\\r + --idc_search_parameter_file < The Comet-Parameters file (Search Configuration) > \\\r + --idc_outdir < Output-Folder where the results of the identification files are stored > \\\r + 
--idc_use_percolator 0\r +```\r +\r +**Note**: This identification-workflow defaults to an FDR-cutoff (q-value) of `--idc_fdr "0.01"`, reporting only 1% filtered PSMs. Arbitrary and multiple FDR-cutoffs can be set and can be changed to the desired value.\r +\r +### Summarization of results\r +\r +The `summarize_ident_results.nf`-workflow generates convenient summarization of the identification results. Here, the identification-results are binned into 4 groups:\r +\r +1. Unique PSMs (a match, which can only originate from one protein)\r +2. Shared PSMs (a match, which can originate from multiple proteins)\r +3. Unique Feature PSMs (as 1., but only containing peptides, which can be explained by a feature)\r +4. Shared Feature PSMs (as 2., but only can be explained by features from all originating proteins)\r +\r +Furthermore, heatmaps are generated to provide an overview of found peptides across all MGFs/RAW-files.\r +\r +To call this method, a `glob` needs to be specified in this workflow:\r +\r +```text\r +nextflow run summarize_ident_results.nf \\\r + --sir_identified_files_glob < The glob matching the desired output from the identification results >\r + --sir_outdir < The output directory where the summarized results should be saved >\r +```\r +\r +In case, the identification workflow was executed using an FDR of 0.01, you could use the following `glob`:\r +\r +```text\r +nextflow run summarize_ident_results.nf \\\r + --sir_identified_files_glob "/*qvalue_no_decoys_fdr_0.01.tsv"\r + --sir_outdir < The output directory where the summarized results should be saved >\r +```\r +\r +**Note**: This step can be used only if specific columns are present in the tables. Furthermore, it distinguishes between the identification results from a FASTA by UniProt or by ProtGraph. 
The additional parameters control, whether to bin results in group 3 and 4, decide if variable modifications should be considered as unique, as well as if a peptide, which originates multiple times to the same protein should be considered as unique. The main-workflows set these parameters accordingly and can be used as an example.\r +\r +## Main Workflow Scripts\r +\r +Each individual step described above, is also imported and chained into three main-workflows:\r +\r +1. `main_workflow_protein_fasta.nf` (UniProt-FASTA-search)\r +2. `main_workflow_global_fasta.nf` (Generation of a global-FASTA and search)\r +3. `main_workflow_precursor_specific_fasta.nf` (Generation of a precursor-specific-FASTA and search)\r +\r +generating summarized identification results across multiple RAW-files.\r +\r +In each of these workflows, it is possible to modify the parameters of the imported subworkflows, by using the imported subworkflows parameters directly (as shown in the **Individual Steps** above).\r +\r +For protein-FASTA identification, only three parameters are required:\r +\r +```text\r +nextflow run main_workflow_protein_fasta.nf \\\r + --main_fasta_file < The FASTA-file, to be used for identification > \\\r + --main_raw_files_folder < The folder containing RAW-files > \\\r + --main_comet_params < The parameters file for comet (for identification) > \\\r + --main_outdir < Output-Folder where all the results from the workflows should be saved >\r +```\r +\r +This is also true for the other two workflows, where instead of a FASTA-file, the UniProt flat file format needs to be provided. 
Such a file can be downloaded from [UniProt](https://www.uniprot.org/) directly, by selecting the format `Text` instead of the format `FASTA`.\r +\r +Here are the corresponding calls for global-FASTA and precursor-specific-FASTA generation and identification:\r +\r +```text\r +# global-FASTA\r +nextflow run main_workflow_global_fasta.nf \\\r + --main_sp_embl_file < The SP-EMBL-file used for Protein-Graph- and FASTA-generation (UniProt flat file format) > \\\r + --main_raw_files_folder < The folder containing RAW-files > \\\r + --main_comet_params < The parameters file for comet (for identification) > \\\r + --main_outdir < Output-Folder where all the results from the workflows should be saved >\r +\r +# precursor-specific-FASTA\r +nextflow run main_workflow_precursor_specific_fasta.nf \\\r + --main_sp_embl_file < The SP-EMBL-file used for Protein-Graph- and FASTA-generation (UniProt flat file format) > \\\r + --main_raw_files_folder < The folder containing RAW-files > \\\r + --main_comet_params < The parameters file for comet (for identification) > \\\r + --main_outdir < Output-Folder where all the results from the workflows should be saved >\r +```\r +\r +**Note**: Only defining the required parameters, uses the default parameters for every other setting. For all workflows, this would mean, that the FDR-cutoff (q-value) is set to `0.01` resulting into both FDRs considered. Furthermore, the global-FASTA and precursor-specific-FASTA workflows assume Trypsin digestion. For the global-FASTA-workflow, no features are exported by default, which may not be desired, if someone wishes to search for peptide-features (like `SIGNAL`, etc..). For the precursor-specific-FASTA-workflow, the PTMs carbamidomethylation (C, fixed) and oxidation (M, variable) are assumed, which may need to be modified.\r +\r +**Note regarding example calls**: Further below you can find the calls as used in the publication. 
These set the most minimal parameters for a correct execution on custom datasets and can be used as an example.\r +\r +## Regenerate Results from Publication\r +\r +In this subsection you can find the nextflow-calls which were used to execute the 3 workflows. Executing this with the same UniProt flat file/FASTA-file should yield the similar/same results. For generated precursor-specific-FASTAs it may happen, that these are generated with slightly different variant-limits, therefore a slightly different FASTA to search with and slightly different identification results.\r +\r +The FASTA/UniProt flat file used for identification can be found [here](https://cloud.mpc.rub.de/s/LJ2bgGNmsxzSaod). The Comet configuration files are provided in the `example_configuration`-folder. The datasets can be retrieved from [PRIDE](https://www.ebi.ac.uk/pride/).\r +\r +### PXD002171\r +\r +```shell\r +# PXD002171 Precursor-Specific\r +nextflow run main_workflow_precursor_specific_fasta.nf \\\r + -with-report "PXD002171_results_precursor_specific/nextflow_report.html" \\\r + -with-timeline "PXD002171_results_precursor_specific/nextflow_timeline.html" \\\r + --main_sp_embl_file 20230619_homo_sapiens_proteome.txt \\\r + --main_raw_files_folder PXD002171 \\\r + --main_comet_params example_configurations/PXD002171_no_dig.txt \\\r + --main_outdir PXD002171_results_precursor_specific \\\r + --cmf_max_precursor_da 5000 \\\r + --cmf_query_ppm 5 \\\r + --cmf_timeout_for_single_query 5 \\\r + --cmf_maximum_variant_limit 5 \\\r + --cmf_pg_additional_params "-ft VARIANT -ft SIGNAL -ft INIT_MET -ft CONFLICT -ft VAR_SEQ -ft PEPTIDE -ft PROPEP -ft CHAIN -vm 'M:15.994915' -vm 'C:71.037114'" \\\r + --idc_fdr "0.01"\r + \r +# PXD002171 Global digested FASTA\r +nextflow run main_workflow_global_fasta.nf \\\r + -with-report "PXD002171_global_fasta/nextflow_report.html" \\\r + -with-timeline "PXD002171_global_fasta/nextflow_timeline.html" \\\r + --main_sp_embl_file 20230619_homo_sapiens_proteome.txt \\\r 
+ --main_raw_files_folder PXD002171 \\\r + --main_comet_params example_configurations/PXD002171_no_dig.txt \\\r + --main_outdir PXD002171_global_fasta \\\r + --cgf_features_in_graphs "-ft None" \\\r + --cgf_peptide_limits "--pep_miscleavages 2 --pep_min_pep_length 5" \\\r + --idc_fdr "0.01"\r +\r +# PXD002171 Protein FASTA\r +nextflow run main_workflow_protein_fasta.nf \\\r + -with-report "PXD002171_protein_fasta/nextflow_report.html" \\\r + -with-timeline "PXD002171_protein_fasta/nextflow_timeline.html" \\\r + --main_fasta_file 20230619_homo_sapiens_proteome.fasta \\\r + --main_raw_files_folder PXD002171 \\\r + --main_comet_params example_configurations/PXD002171_trypsin_dig.txt \\\r + --main_outdir PXD002171_protein_fasta \\\r + --idc_fdr "0.01"\r +```\r +\r +### PXD028605\r +\r +```shell\r +# PXD028605 Precursor-Specific\r +nextflow run main_workflow_precursor_specific_fasta.nf \\\r + -with-report "PXD028605_results_precursor_specific/nextflow_report.html" \\\r + -with-timeline "PXD028605_results_precursor_specific/nextflow_timeline.html" \\\r + --main_sp_embl_file 20230619_homo_sapiens_proteome.txt \\\r + --main_raw_files_folder PXD028605 \\\r + --main_comet_params example_configurations/PXD028605_no_dig.txt \\\r + --main_outdir PXD028605_results_precursor_specific \\\r + --cmf_max_precursor_da 5000 \\\r + --cmf_query_ppm 20 \\\r + --cmf_timeout_for_single_query 5 \\\r + --cmf_maximum_variant_limit 5 \\\r + --cmf_pg_additional_params "-ft VARIANT -ft SIGNAL -ft INIT_MET -ft CONFLICT -ft VAR_SEQ -ft PEPTIDE -ft PROPEP -ft CHAIN -fm 'C:57.021464' -vm 'M:15.9949'" \\\r + --idc_fdr "0.01"\r +\r +# PXD028605 Global digested FASTA\r +nextflow run main_workflow_global_fasta.nf \\\r + -with-report "PXD028605_global_fasta/nextflow_report.html" \\\r + -with-timeline "PXD028605_global_fasta/nextflow_timeline.html" \\\r + --main_sp_embl_file 20230619_homo_sapiens_proteome.txt \\\r + --main_raw_files_folder PXD028605 \\\r + --main_comet_params 
example_configurations/PXD028605_no_dig.txt \\\r + --main_outdir PXD028605_global_fasta \\\r + --cgf_features_in_graphs "-ft None" \\\r + --cgf_peptide_limits "--pep_miscleavages 2 --pep_min_pep_length 5" \\\r + --idc_fdr "0.01"\r +\r +# PXD028605 Protein FASTA\r +nextflow run main_workflow_protein_fasta.nf \\\r + -with-report "PXD028605_protein_fasta/nextflow_report.html" \\\r + -with-timeline "PXD028605_protein_fasta/nextflow_timeline.html" \\\r + --main_fasta_file 20230619_homo_sapiens_proteome.fasta \\\r + --main_raw_files_folder PXD028605 \\\r + --main_comet_params example_configurations/PXD028605_trypsin_dig.txt \\\r + --main_outdir PXD028605_protein_fasta \\\r + --idc_fdr "0.01"\r +```\r +""" ; + schema1:keywords "Bioinformatics, Proteomics" ; + schema1:license "https://spdx.org/licenses/BSD-3-Clause" ; + schema1:name "ProGFASTAGen - Protein-Graph FASTA Generation (and Identification) Workflows" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/837?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:description """### - deprecated - \r +\r +Workflow for sequencing with ONT Nanopore, from basecalling to assembly.\r + - Guppy (basecalling of raw reads)\r + - MinIONQC (quality check)\r + - FASTQ merging from multi into one file\r + - Kraken2 (taxonomic classification)\r + - Krona (classification visualization)\r + - Flye (de novo assembly)\r + - Medaka (assembly polishing)\r + - QUAST (assembly quality reports)\r +\r +**All tool CWL files and other workflows can be found here:**
\r + Tools: https://git.wur.nl/unlock/cwl/-/tree/master/cwl
\r + Workflows: https://git.wur.nl/unlock/cwl/-/tree/master/cwl/workflows
\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/253?version=1" ; + schema1:isBasedOn "https://git.wur.nl/unlock/cwl/-/raw/dev/cwl/workflows/workflow_nanopore.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Nanopore Guppy Basecalling Assembly Workflow" ; + schema1:sdDatePublished "2024-07-12 13:34:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/253/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 18286 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6073 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-01-06T07:36:38Z" ; + schema1:dateModified "2023-01-16T13:56:37Z" ; + schema1:description """### - deprecated - \r +\r +Workflow for sequencing with ONT Nanopore, from basecalling to assembly.\r + - Guppy (basecalling of raw reads)\r + - MinIONQC (quality check)\r + - FASTQ merging from multi into one file\r + - Kraken2 (taxonomic classification)\r + - Krona (classification visualization)\r + - Flye (de novo assembly)\r + - Medaka (assembly polishing)\r + - QUAST (assembly quality reports)\r +\r +**All tool CWL files and other workflows can be found here:**
\r + Tools: https://git.wur.nl/unlock/cwl/-/tree/master/cwl
\r + Workflows: https://git.wur.nl/unlock/cwl/-/tree/master/cwl/workflows
\r +\r +""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Nanopore Guppy Basecalling Assembly Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/253?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """PAIRED-END workflow. Align reads on fasta reference/assembly using bwa mem, get a consensus, variants, mutation explanations.\r +\r +IMPORTANT: \r +* For "bcftools call" consensus step, the --ploidy file is in "Données partagées" (Shared Data) and must be imported in your history to use the worflow by providing this file (tells bcftools to consider haploid variant calling). \r +* SELECT THE MOST ADAPTED VADR MODEL for annotation (see vadr parameters).\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/518?version=1" ; + schema1:isBasedOn "https://github.com/ANSES-Ploufragan/vvv2_display" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for VVV2_align_PE" ; + schema1:sdDatePublished "2024-07-12 13:27:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/518/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 31003 ; + schema1:creator ; + schema1:dateCreated "2023-06-28T09:52:35Z" ; + schema1:dateModified "2023-10-16T11:59:29Z" ; + schema1:description """PAIRED-END workflow. 
Align reads on fasta reference/assembly using bwa mem, get a consensus, variants, mutation explanations.\r +\r +IMPORTANT: \r +* For "bcftools call" consensus step, the --ploidy file is in "Données partagées" (Shared Data) and must be imported in your history to use the worflow by providing this file (tells bcftools to consider haploid variant calling). \r +* SELECT THE MOST ADAPTED VADR MODEL for annotation (see vadr parameters).\r +""" ; + schema1:image ; + schema1:keywords "paired-end, variant_calling, Annotation, Virus, Alignment, Bioinformatics, Galaxy, SNPs, covid-19, variant calling, workflow" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "VVV2_align_PE" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/518?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 236295 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "16S rRNA amplicon sequencing analysis workflow using QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-07-12 13:18:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4145 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:38Z" ; + schema1:dateModified "2024-06-11T12:54:38Z" ; + schema1:description "16S rRNA amplicon sequencing analysis workflow using QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """Combined workflow for large genome assembly\r +\r +The tutorial document for this workflow is here: https://doi.org/10.5281/zenodo.5655813\r +\r +\r +What it does: A workflow for genome assembly, containing subworkflows:\r +* Data QC\r +* Kmer counting\r +* Trim and filter reads\r +* Assembly with Flye\r +* Assembly polishing\r +* Assess genome quality\r +\r +Inputs: \r +* long reads and short reads in fastq format\r +* reference genome for Quast\r +\r +Outputs: \r +* Data information - QC, kmers\r +* Filtered, trimmed reads\r +* Genome assembly, assembly graph, stats\r +* Polished assembly, stats\r +* Quality metrics - Busco, Quast\r +\r +Options\r +* Omit some steps - e.g. Data QC and kmer counting\r +* Replace a module with one using a different tool - e.g. 
change assembly tool\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.230.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Combined workflows for large genome assembly" ; + schema1:sdDatePublished "2024-07-12 13:36:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/230/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 440166 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 191181 ; + schema1:creator ; + schema1:dateCreated "2021-11-08T06:08:25Z" ; + schema1:dateModified "2023-01-16T13:54:36Z" ; + schema1:description """Combined workflow for large genome assembly\r +\r +The tutorial document for this workflow is here: https://doi.org/10.5281/zenodo.5655813\r +\r +\r +What it does: A workflow for genome assembly, containing subworkflows:\r +* Data QC\r +* Kmer counting\r +* Trim and filter reads\r +* Assembly with Flye\r +* Assembly polishing\r +* Assess genome quality\r +\r +Inputs: \r +* long reads and short reads in fastq format\r +* reference genome for Quast\r +\r +Outputs: \r +* Data information - QC, kmers\r +* Filtered, trimmed reads\r +* Genome assembly, assembly graph, stats\r +* Polished assembly, stats\r +* Quality metrics - Busco, Quast\r +\r +Options\r +* Omit some steps - e.g. Data QC and kmer counting\r +* Replace a module with one using a different tool - e.g. 
change assembly tool\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "Large-genome-assembly" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Combined workflows for large genome assembly" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/230?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Genes and transcripts annotation with Isoseq using uLTRA and TAMA" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/993?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/isoseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/isoseq" ; + schema1:sdDatePublished "2024-07-12 13:21:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/993/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8438 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:58Z" ; + schema1:dateModified "2024-06-11T12:54:58Z" ; + schema1:description "Genes and transcripts annotation with Isoseq using uLTRA and TAMA" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/993?version=6" ; + schema1:keywords "isoseq, isoseq-3, rna, tama, ultra" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/isoseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/993?version=5" ; + schema1:version 5 . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-11-16T14:32:18.215710" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Scaffolding-HiC-VGP8/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """This workflow can be used to fit dose-response curves from normalised biochemical assay data (%Inhibition) using the HCS extension. This workflow needs R-Server to run in the back-end. Start R and run the following command: library(Rserve); Rserve(args = "--vanilla")\r +IC50 values will not be extrapolated outside the tested concentration range\r +For activity classification the following criteria are applied:\r +- maximum (average % inhibion) >25 % and slope is >0 and IC50 > 5 µM or\r +- minimum (average % inhibion) >75 %\r +Results are formatted for upload to the European Chemical Biology Database (ECBD)""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/389?version=1" ; + schema1:isBasedOn "https://hub.knime.com/fraunhoferitmp/spaces/Public/latest/Dose_Response_Biochemical/DRCfit_biochemical_ECBD~6NLZB5Jkgn6j5a6Y" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for DRC_biochemical_toECBD" ; + schema1:sdDatePublished "2024-07-12 13:35:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/389/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 49286 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10087717 ; + schema1:creator ; + schema1:dateCreated "2022-09-26T10:15:37Z" ; + schema1:dateModified "2023-01-16T14:02:33Z" ; + schema1:description """This workflow can be used to fit dose-response curves from normalised biochemical assay data (%Inhibition) using the HCS extension. This workflow needs R-Server to run in the back-end. Start R and run the following command: library(Rserve); Rserve(args = "--vanilla")\r +IC50 values will not be extrapolated outside the tested concentration range\r +For activity classification the following criteria are applied:\r +- maximum (average % inhibion) >25 % and slope is >0 and IC50 > 5 µM or\r +- minimum (average % inhibion) >75 %\r +Results are formatted for upload to the European Chemical Biology Database (ECBD)""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "DRC_biochemical_toECBD" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/389?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-07-12 13:19:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6470 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:14Z" ; + schema1:dateModified "2024-06-11T12:55:14Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """The Regulatory Mendelian Mutation (ReMM) score was created for relevance prediction of non-coding variations (SNVs and small InDels) in the human genome (GRCh37) in terms of Mendelian diseases. This project updates the ReMM score for the genome build GRCh38 and combines GRCh37 and GRCh38 into one workflow.\r +\r +## Pre-requirements\r +\r +### Conda\r +We use Conda as software and dependency management tool. Conda installation guidelines can be found here:\r +\r +https://conda.io/projects/conda/en/latest/user-guide/install/index.html\r +\r +### Additional programs\r +These programs are used during the workflow. 
They usually need to be compiled, however, the repository already contains the executables or generated files.\r +\r +- [AttributeDB](https://github.com/visze/attributedb)\r +- [Jannovar](https://github.com/charite/jannovar) \r +- [parSMURF](https://github.com/AnacletoLAB/parSMURF)\r +\r +### Snakemake\r +\r +The workflow is managed by Snakemake - a workflow management system used to create reproducible and scalable data analyses. To install Snakemake as well as all other required packages, you need to create a working environment according to the description in the file env/ReMM.yaml. For that, first\r +\r +Clone the repository\r +```\r +git clone https://github.com/kircherlab/ReMM\r +cd ReMM\r +```\r +\r +Create a working environment and activate it\r +\r +```\r +conda env create -n ReMM --file workflow/envs/ReMM.yaml\r +conda activate ReMM\r +```\r +\r +All paths are relative to the Snakemake file so you do not need to change any path variables. Additionally, Snakemake creates all missing directories, so no need to create any aditional folders either.\r +\r +## Workflow\r +\r +The workflow consists of four main parts:\r +\r +- Download of feature data\r +- Data processing and cleaning\r +- Model training and validation\r +- Calculation of ReMM for the whole genome\r +\r +The `workflow` folder contains a graph of the workflow and more detailed information on the most important steps.\r +\r +To launch a snakemake workflow, you need to tell snakemake which file you want to generate. We defined all rules for multiple steps. They can be found here: `workflow/Snakefile`. For example, you want to generate all feature sets defined in a config file you can run:\r +\r +```\r +snakemake -c1 all_feature_sets\r +```\r +\r +To execute any step separately (see `README.md` in the `workflow` folder for details on workflow steps), you need to look up the name of the desired output file in the scripts and call Snakemake with the exact name. 
Using a flag `-n`, you can initiate a 'dry run': Snakemake will check the consistency of all rules and files and show the number of steps. However, a clean dry run does not necessarily mean that no errors will occur during a normal run. ReMM score is not allele-specific so that you get only one score independent of the variant itself. The workflow from the download of data up to computing the scores may take several days or weeks depending on the computing power and internet connection.\r +\r +\r +### The config files\r +\r +The main config file can be found in `config/config.yaml`. This config file was used to generate the ReMM score. Here most of the configuration magic happens. There is a second config file `config/features.yaml` where all features are listed (with additional description). Config files are controled via [json-schema](http://json-schema.org). \r +\r +We also provide a slurm config file for runtimes, memory and number of threads per rule: `config/slurm.yaml`.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.414.1" ; + schema1:isBasedOn "https://github.com/kircherlab/ReMM.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ReMM score" ; + schema1:sdDatePublished "2024-07-12 13:34:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/414/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6006 ; + schema1:creator ; + schema1:dateCreated "2023-01-03T09:09:05Z" ; + schema1:dateModified "2023-01-16T14:04:58Z" ; + schema1:description """The Regulatory Mendelian Mutation (ReMM) score was created for relevance prediction of non-coding variations (SNVs and small InDels) in the human genome (GRCh37) in terms of Mendelian diseases. 
This project updates the ReMM score for the genome build GRCh38 and combines GRCh37 and GRCh38 into one workflow.\r +\r +## Pre-requirements\r +\r +### Conda\r +We use Conda as software and dependency management tool. Conda installation guidelines can be found here:\r +\r +https://conda.io/projects/conda/en/latest/user-guide/install/index.html\r +\r +### Additional programs\r +These programs are used during the workflow. They usually need to be compiled, however, the repository already contains the executables or generated files.\r +\r +- [AttributeDB](https://github.com/visze/attributedb)\r +- [Jannovar](https://github.com/charite/jannovar) \r +- [parSMURF](https://github.com/AnacletoLAB/parSMURF)\r +\r +### Snakemake\r +\r +The workflow is managed by Snakemake - a workflow management system used to create reproducible and scalable data analyses. To install Snakemake as well as all other required packages, you need to create a working environment according to the description in the file env/ReMM.yaml. For that, first\r +\r +Clone the repository\r +```\r +git clone https://github.com/kircherlab/ReMM\r +cd ReMM\r +```\r +\r +Create a working environment and activate it\r +\r +```\r +conda env create -n ReMM --file workflow/envs/ReMM.yaml\r +conda activate ReMM\r +```\r +\r +All paths are relative to the Snakemake file so you do not need to change any path variables. Additionally, Snakemake creates all missing directories, so no need to create any aditional folders either.\r +\r +## Workflow\r +\r +The workflow consists of four main parts:\r +\r +- Download of feature data\r +- Data processing and cleaning\r +- Model training and validation\r +- Calculation of ReMM for the whole genome\r +\r +The `workflow` folder contains a graph of the workflow and more detailed information on the most important steps.\r +\r +To launch a snakemake workflow, you need to tell snakemake which file you want to generate. We defined all rules for multiple steps. 
They can be found here: `workflow/Snakefile`. For example, you want to generate all feature sets defined in a config file you can run:\r +\r +```\r +snakemake -c1 all_feature_sets\r +```\r +\r +To execute any step separately (see `README.md` in the `workflow` folder for details on workflow steps), you need to look up the name of the desired output file in the scripts and call Snakemake with the exact name. Using a flag `-n`, you can initiate a 'dry run': Snakemake will check the consistency of all rules and files and show the number of steps. However, a clean dry run does not necessarily mean that no errors will occur during a normal run. ReMM score is not allele-specific so that you get only one score independent of the variant itself. The workflow from the download of data up to computing the scores may take several days or weeks depending on the computing power and internet connection.\r +\r +\r +### The config files\r +\r +The main config file can be found in `config/config.yaml`. This config file was used to generate the ReMM score. Here most of the configuration magic happens. There is a second config file `config/features.yaml` where all features are listed (with additional description). Config files are controled via [json-schema](http://json-schema.org). \r +\r +We also provide a slurm config file for runtimes, memory and number of threads per rule: `config/slurm.yaml`.\r +""" ; + schema1:keywords "non-coding, pathogenicity score, variant pathogenicity prediction, Snakemake, ReMM, Regulatory Mendelian Mutation score" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ReMM score" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/414?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/protein-ligand-docking).\r +***\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), 
see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.296.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_virtual_screening/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Protein-ligand Docking tutorial (Fpocket)" ; + schema1:sdDatePublished "2024-07-12 13:33:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/296/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 26367 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-22T10:13:05Z" ; + schema1:dateModified "2023-01-16T13:58:43Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/protein-ligand-docking).\r +***\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/296?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Protein-ligand Docking tutorial (Fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_virtual_screening/galaxy/biobb_wf_virtual_screening.ga" ; + schema1:version 2 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "De novo assembly pipeline for 10X linked-reads, used at the SciLifeLab National Genomics Infrastructure." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1005?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/neutronstar" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/neutronstar" ; + schema1:sdDatePublished "2024-07-12 13:20:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1005/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3657 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:06Z" ; + schema1:dateModified "2024-06-11T12:55:06Z" ; + schema1:description "De novo assembly pipeline for 10X linked-reads, used at the SciLifeLab National Genomics Infrastructure." ; + schema1:keywords "10x-genomics, 10xgenomics, denovo-assembly, genome-assembly, linked-reads, Supernova" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/neutronstar" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1005?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """## Description\r +\r +The workflow takes an input file with Cancer Driver Genes predictions (i.e. the results provided by a participant), computes a set of metrics, and compares them against the data currently stored in OpenEBench within the TCGA community. 
Two assessment metrics are provided for that predicitions. Also, some plots (which are optional) that allow to visualize the performance of the tool are generated. The workflow consists in three standard steps, defined by OpenEBench. The tools needed to run these steps must be in one or more Docker images, generated from [Docker containers](https://github.com/inab/TCGA_benchmarking_dockers ). Separated instances are spawned from these images for each step:\r +1. **Validation**: the input file format is checked and, if required, the content of the file is validated (e.g check whether the submitted gene IDs exist)\r +2. **Metrics Computation**: the predictions are compared with the 'Gold Standards' provided by the community, which results in two performance metrics - precision (Positive Predictive Value) and recall(True Positive Rate).\r +3. **Results Consolidation**: the benchmark itself is performed by merging the tool metrics with the rest of TCGA data. The results are provided in JSON format and SVG format (scatter plot).\r +\r +![alt text](https://raw.githubusercontent.com/inab/TCGA_benchmarking_workflow/1.0.7/workflow.jpg)\r +\r +\r +## Data\r +\r +* [TCGA_sample_data](./TCGA_sample_data) folder contains all the reference data required by the steps. It is derived from the manuscript:\r +[Comprehensive Characterization of Cancer Driver Genes and Mutations](https://www.cell.com/cell/fulltext/S0092-8674%2818%2930237-X?code=cell-site), Bailey et al, 2018, Cell [![doi:10.1016/j.cell.2018.02.060](https://img.shields.io/badge/doi-10.1016%2Fj.cell.2018.02.060-green.svg)](https://doi.org/10.1016/j.cell.2018.02.060) \r +* [TCGA_sample_out](./TCGA_sample_out) folder contains an example output for a worklow run, with two cancer types / challenges selected (ACC, BRCA). Results obtained from the default execution should be similar to those ones available in this directory. 
Results found in [TCGA_sample_out/results](./TCGA_sample_out/results) can be visualized in the browser using [`benchmarking_workflows_results_visualizer` javascript library](https://github.com/inab/benchmarking_workflows_results_visualizer).\r +\r +\r +## Usage\r +In order to use the workflow you need to:\r +* Install [Nextflow](https://www.nextflow.io/), which depends on Java virtual machine (>=8 , <15 ). You can automate their installation for local testing using [run_local_nextflow.bash](run_local_nextflow.bash).\r +* Clone [TCGA benchmarking Docker definitions repository](https://github.com/inab/TCGA_benchmarking_dockers) from tag 1.0.3 in a separate directory, and build locally the three Docker images found in it, running the `build.sh 1.0.3` script within that repo.\r +* Run it just using either *`nextflow run main.nf -profile docker`* or *`./run_local_nextflow.bash run main.nf -profile docker`*. Arguments specifications:\r +\r +```\r +Usage:\r +Run the pipeline with default parameters:\r +nextflow run main.nf -profile docker\r +\r +Run with user parameters:\r +nextflow run main.nf -profile docker --predictionsFile {driver.genes.file} --public_ref_dir {validation.reference.file} --participant_name {tool.name} --metrics_ref_dir {gold.standards.dir} --cancer_types {analyzed.cancer.types} --assess_dir {benchmark.data.dir} --results_dir {output.dir}\r +\r +Mandatory arguments:\r + --input List of cancer genes prediction\r + --community_id Name or OEB permanent ID for the benchmarking community\r + --public_ref_dir Directory with list of cancer genes used to validate the predictions\r + --participant_id Name of the tool used for prediction\r + --goldstandard_dir Dir that contains metrics reference datasets for all cancer types\r + --event_id List of types of cancer selected by the user, separated by spaces\r + --assess_dir Dir where the data for the benchmark are stored\r +\r +Other options:\r + --validation_result The output file where the results from validation 
step will be saved\r + --assessment_results The output file where the results from the computed metrics step will be saved\r + --aggregation_results The output file where the consolidation of the benchmark will be saved\r + --statistics_results The output directory with nextflow statistics\r + --outdir The output directory where the consolidation of the benchmark will be saved\r + --statsdir The output directory with nextflow statistics\r + --data_model_export_dir The output dir where json file with benchmarking data model contents will be saved\r + --otherdir The output directory where custom results will be saved (no directory inside)\r +Flags:\r + --help Display this message\r +```\r +\r +Default input parameters and Docker images to use for each step can be specified in the [config](./nextflow.config) file\r +**NOTE: In order to make your workflow compatible with the [OpenEBench VRE Nextflow Executor](https://github.com/inab/vre-process_nextflow-executor), please make sure to use the same parameter names in your workflow.**""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/244?version=3" ; + schema1:isBasedOn "https://github.com/inab/TCGA_benchmarking_workflow/tree/1.0.8" ; + schema1:license "LGPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for OpenEBench assessment workflow" ; + schema1:sdDatePublished "2024-07-12 13:36:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/244/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6150 ; + schema1:creator , + , + ; + schema1:dateCreated "2021-11-25T13:31:19Z" ; + schema1:dateModified "2021-11-26T09:08:21Z" ; + schema1:description """## Description\r +\r +The workflow takes an input file with Cancer Driver Genes predictions (i.e. 
the results provided by a participant), computes a set of metrics, and compares them against the data currently stored in OpenEBench within the TCGA community. Two assessment metrics are provided for that predicitions. Also, some plots (which are optional) that allow to visualize the performance of the tool are generated. The workflow consists in three standard steps, defined by OpenEBench. The tools needed to run these steps must be in one or more Docker images, generated from [Docker containers](https://github.com/inab/TCGA_benchmarking_dockers ). Separated instances are spawned from these images for each step:\r +1. **Validation**: the input file format is checked and, if required, the content of the file is validated (e.g check whether the submitted gene IDs exist)\r +2. **Metrics Computation**: the predictions are compared with the 'Gold Standards' provided by the community, which results in two performance metrics - precision (Positive Predictive Value) and recall(True Positive Rate).\r +3. **Results Consolidation**: the benchmark itself is performed by merging the tool metrics with the rest of TCGA data. The results are provided in JSON format and SVG format (scatter plot).\r +\r +![alt text](https://raw.githubusercontent.com/inab/TCGA_benchmarking_workflow/1.0.7/workflow.jpg)\r +\r +\r +## Data\r +\r +* [TCGA_sample_data](./TCGA_sample_data) folder contains all the reference data required by the steps. It is derived from the manuscript:\r +[Comprehensive Characterization of Cancer Driver Genes and Mutations](https://www.cell.com/cell/fulltext/S0092-8674%2818%2930237-X?code=cell-site), Bailey et al, 2018, Cell [![doi:10.1016/j.cell.2018.02.060](https://img.shields.io/badge/doi-10.1016%2Fj.cell.2018.02.060-green.svg)](https://doi.org/10.1016/j.cell.2018.02.060) \r +* [TCGA_sample_out](./TCGA_sample_out) folder contains an example output for a worklow run, with two cancer types / challenges selected (ACC, BRCA). 
Results obtained from the default execution should be similar to those ones available in this directory. Results found in [TCGA_sample_out/results](./TCGA_sample_out/results) can be visualized in the browser using [`benchmarking_workflows_results_visualizer` javascript library](https://github.com/inab/benchmarking_workflows_results_visualizer).\r +\r +\r +## Usage\r +In order to use the workflow you need to:\r +* Install [Nextflow](https://www.nextflow.io/), which depends on Java virtual machine (>=8 , <15 ). You can automate their installation for local testing using [run_local_nextflow.bash](run_local_nextflow.bash).\r +* Clone [TCGA benchmarking Docker definitions repository](https://github.com/inab/TCGA_benchmarking_dockers) from tag 1.0.3 in a separate directory, and build locally the three Docker images found in it, running the `build.sh 1.0.3` script within that repo.\r +* Run it just using either *`nextflow run main.nf -profile docker`* or *`./run_local_nextflow.bash run main.nf -profile docker`*. 
Arguments specifications:\r +\r +```\r +Usage:\r +Run the pipeline with default parameters:\r +nextflow run main.nf -profile docker\r +\r +Run with user parameters:\r +nextflow run main.nf -profile docker --predictionsFile {driver.genes.file} --public_ref_dir {validation.reference.file} --participant_name {tool.name} --metrics_ref_dir {gold.standards.dir} --cancer_types {analyzed.cancer.types} --assess_dir {benchmark.data.dir} --results_dir {output.dir}\r +\r +Mandatory arguments:\r + --input List of cancer genes prediction\r + --community_id Name or OEB permanent ID for the benchmarking community\r + --public_ref_dir Directory with list of cancer genes used to validate the predictions\r + --participant_id Name of the tool used for prediction\r + --goldstandard_dir Dir that contains metrics reference datasets for all cancer types\r + --event_id List of types of cancer selected by the user, separated by spaces\r + --assess_dir Dir where the data for the benchmark are stored\r +\r +Other options:\r + --validation_result The output file where the results from validation step will be saved\r + --assessment_results The output file where the results from the computed metrics step will be saved\r + --aggregation_results The output file where the consolidation of the benchmark will be saved\r + --statistics_results The output directory with nextflow statistics\r + --outdir The output directory where the consolidation of the benchmark will be saved\r + --statsdir The output directory with nextflow statistics\r + --data_model_export_dir The output dir where json file with benchmarking data model contents will be saved\r + --otherdir The output directory where custom results will be saved (no directory inside)\r +Flags:\r + --help Display this message\r +```\r +\r +Default input parameters and Docker images to use for each step can be specified in the [config](./nextflow.config) file\r +**NOTE: In order to make your workflow compatible with the [OpenEBench VRE Nextflow 
Executor](https://github.com/inab/vre-process_nextflow-executor), please make sure to use the same parameter names in your workflow.**""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/244?version=3" ; + schema1:keywords "tcga, openebench, benchmarking" ; + schema1:license "https://spdx.org/licenses/LGPL-3.0" ; + schema1:name "OpenEBench assessment workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/244?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-ont-artic-variant-calling" ; + schema1:mainEntity ; + schema1:name "COVID-19-ARTIC-ONT (v0.1)" ; + schema1:sdDatePublished "2021-03-12 13:41:26 +0000" ; + schema1:softwareVersion "v0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 45258 ; + schema1:name "COVID-19-ARTIC-ONT" ; + schema1:programmingLanguage . + + a schema1:Dataset ; + schema1:datePublished "2024-05-27T12:41:08.866362" ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/qiime2-I-import" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + , + , + ; + schema1:name "qiime2-I-import/Ia-import-multiplexed-se" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "qiime2-I-import/Ib-import-multiplexed-pe" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/qiime2-I-import" ; + schema1:version "0.2" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "qiime2-I-import/Ic-import-demultiplexed-se" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/qiime2-I-import" ; + schema1:version "0.2" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "qiime2-I-import/Id-import-demultiplexed-pe" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/qiime2-I-import" ; + schema1:version "0.2" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly and annotation of metatranscriptomic data, both prokaryotic and eukaryotic" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/997?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/metatdenovo" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/metatdenovo" ; + schema1:sdDatePublished "2024-07-12 13:20:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/997/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11793 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:00Z" ; + schema1:dateModified "2024-06-11T12:55:00Z" ; + schema1:description "Assembly and annotation of metatranscriptomic data, both prokaryotic and eukaryotic" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/997?version=1" ; + schema1:keywords "eukaryotes, Metagenomics, metatranscriptomics, prokaryotes, viruses" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/metatdenovo" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/997?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """\r +# BridgeDb tutorial: Gene HGNC name to Ensembl identifier\r +\r +This tutorial explains how to use the BridgeDb identifier mapping service to translate HGNC names to Ensembl identifiers. This step is part of the OpenRiskNet use case to link Adverse Outcome Pathways to [WikiPathways](https://wikipathways.org/).\r +\r +First we need to load the Python library to allow calls to the [BridgeDb REST webservice](http://bridgedb.prod.openrisknet.org/swagger/):\r +\r +\r +```python\r +import requests\r +```\r +\r +Let's assume we're interested in the gene with HGNC MECP2 (FIXME: look up a gene in AOPWiki), the API call to make mappings is given below as `callUrl`. 
Here, the `H` indicates that the query (`MECP2`) is an HGNC symbol:\r +\r +\r +```python\r +callUrl = 'http://bridgedb.prod.openrisknet.org/Human/xrefs/H/MECP2'\r +```\r +\r +The default call returns all identifiers, not just for Ensembl:\r +\r +\r +```python\r +response = requests.get(callUrl)\r +response.text\r +```\r +\r +\r +\r +\r + 'GO:0001964\\tGeneOntology\\nuc065cav.1\\tUCSC Genome Browser\\n312750\\tOMIM\\nGO:0042551\\tGeneOntology\\nuc065car.1\\tUCSC Genome Browser\\nA0A087X1U4\\tUniprot-TrEMBL\\n4204\\tWikiGenes\\nGO:0043524\\tGeneOntology\\nILMN_1702715\\tIllumina\\n34355_at\\tAffy\\nGO:0007268\\tGeneOntology\\nMECP2\\tHGNC\\nuc065caz.1\\tUCSC Genome Browser\\nA_33_P3339036\\tAgilent\\nGO:0006576\\tGeneOntology\\nuc065cbg.1\\tUCSC Genome Browser\\nGO:0006342\\tGeneOntology\\n300496\\tOMIM\\nGO:0035176\\tGeneOntology\\nuc065cbc.1\\tUCSC Genome Browser\\nGO:0033555\\tGeneOntology\\nGO:0045892\\tGeneOntology\\nA_23_P114361\\tAgilent\\nGO:0045893\\tGeneOntology\\nENSG00000169057\\tEnsembl\\nGO:0090063\\tGeneOntology\\nGO:0005515\\tGeneOntology\\nGO:0002087\\tGeneOntology\\nGO:0005634\\tGeneOntology\\nGO:0007416\\tGeneOntology\\nGO:0008104\\tGeneOntology\\nGO:0042826\\tGeneOntology\\nGO:0007420\\tGeneOntology\\nGO:0035067\\tGeneOntology\\n300005\\tOMIM\\nNP_001104262\\tRefSeq\\nA0A087WVW7\\tUniprot-TrEMBL\\nNP_004983\\tRefSeq\\nGO:0046470\\tGeneOntology\\nGO:0010385\\tGeneOntology\\n11722682_at\\tAffy\\nGO:0051965\\tGeneOntology\\nNM_001316337\\tRefSeq\\nuc065caw.1\\tUCSC Genome Browser\\nA0A0D9SFX7\\tUniprot-TrEMBL\\nA0A140VKC4\\tUniprot-TrEMBL\\nGO:0003723\\tGeneOntology\\nGO:0019233\\tGeneOntology\\nGO:0001666\\tGeneOntology\\nGO:0003729\\tGeneOntology\\nGO:0021591\\tGeneOntology\\nuc065cas.1\\tUCSC Genome Browser\\nGO:0019230\\tGeneOntology\\nGO:0003682\\tGeneOntology\\nGO:0001662\\tGeneOntology\\nuc065cbh.1\\tUCSC Genome Browser\\nX99687_at\\tAffy\\nGO:0008344\\tGeneOntology\\nGO:0009791\\tGeneOntology\\nuc065cbd.1\\tUCSC Genome 
Browser\\nGO:0019904\\tGeneOntology\\nGO:0030182\\tGeneOntology\\nGO:0035197\\tGeneOntology\\n8175998\\tAffy\\nGO:0016358\\tGeneOntology\\nNM_004992\\tRefSeq\\nGO:0003714\\tGeneOntology\\nGO:0005739\\tGeneOntology\\nGO:0005615\\tGeneOntology\\nGO:0005737\\tGeneOntology\\nuc004fjv.3\\tUCSC Genome Browser\\n202617_s_at\\tAffy\\nGO:0050905\\tGeneOntology\\nGO:0008327\\tGeneOntology\\nD3YJ43\\tUniprot-TrEMBL\\nGO:0003677\\tGeneOntology\\nGO:0006541\\tGeneOntology\\nGO:0040029\\tGeneOntology\\nA_33_P3317211\\tAgilent\\nNP_001303266\\tRefSeq\\n11722683_a_at\\tAffy\\nGO:0008211\\tGeneOntology\\nGO:0051151\\tGeneOntology\\nNM_001110792\\tRefSeq\\nX89430_at\\tAffy\\nGO:2000820\\tGeneOntology\\nuc065cat.1\\tUCSC Genome Browser\\nGO:0003700\\tGeneOntology\\nGO:0047485\\tGeneOntology\\n4204\\tEntrez Gene\\nGO:0009405\\tGeneOntology\\nA0A0D9SEX1\\tUniprot-TrEMBL\\nGO:0098794\\tGeneOntology\\n3C2I\\tPDB\\nHs.200716\\tUniGene\\nGO:0000792\\tGeneOntology\\nuc065cax.1\\tUCSC Genome Browser\\n300055\\tOMIM\\n5BT2\\tPDB\\nGO:0006020\\tGeneOntology\\nGO:0031175\\tGeneOntology\\nuc065cbe.1\\tUCSC Genome Browser\\nGO:0008284\\tGeneOntology\\nuc065cba.1\\tUCSC Genome Browser\\nGO:0060291\\tGeneOntology\\n202618_s_at\\tAffy\\nGO:0016573\\tGeneOntology\\n17115453\\tAffy\\nA0A1B0GTV0\\tUniprot-TrEMBL\\nuc065cbi.1\\tUCSC Genome Browser\\nGO:0048167\\tGeneOntology\\nGO:0007616\\tGeneOntology\\nGO:0016571\\tGeneOntology\\nuc004fjw.3\\tUCSC Genome Browser\\nGO:0007613\\tGeneOntology\\nGO:0007612\\tGeneOntology\\nGO:0021549\\tGeneOntology\\n11722684_a_at\\tAffy\\nGO:0001078\\tGeneOntology\\nX94628_rna1_s_at\\tAffy\\nGO:0007585\\tGeneOntology\\nGO:0010468\\tGeneOntology\\nGO:0031061\\tGeneOntology\\nA_24_P237486\\tAgilent\\nGO:0050884\\tGeneOntology\\nGO:0000930\\tGeneOntology\\nGO:0005829\\tGeneOntology\\nuc065cau.1\\tUCSC Genome Browser\\nH7BY72\\tUniprot-TrEMBL\\n202616_s_at\\tAffy\\nGO:0006355\\tGeneOntology\\nuc065cay.1\\tUCSC Genome 
Browser\\nGO:0010971\\tGeneOntology\\n300673\\tOMIM\\nGO:0008542\\tGeneOntology\\nGO:0060079\\tGeneOntology\\nuc065cbf.1\\tUCSC Genome Browser\\nGO:0006122\\tGeneOntology\\nuc065cbb.1\\tUCSC Genome Browser\\nGO:0007052\\tGeneOntology\\nC9JH89\\tUniprot-TrEMBL\\nB5MCB4\\tUniprot-TrEMBL\\nGO:0032048\\tGeneOntology\\nGO:0050432\\tGeneOntology\\nGO:0001976\\tGeneOntology\\nI6LM39\\tUniprot-TrEMBL\\nGO:0005813\\tGeneOntology\\nILMN_1682091\\tIllumina\\nP51608\\tUniprot-TrEMBL\\n1QK9\\tPDB\\nGO:0006349\\tGeneOntology\\nGO:1900114\\tGeneOntology\\nGO:0000122\\tGeneOntology\\nGO:0006351\\tGeneOntology\\nGO:0008134\\tGeneOntology\\nILMN_1824898\\tIllumina\\n300260\\tOMIM\\n0006510725\\tIllumina\\n'\r +\r +\r +\r +You can also see the results are returned as a TSV file, consisting of two columns, the identifier and the matching database.\r +\r +We will want to convert this reply into a Python dictionary (with the identifier as key, as one database may have multiple identifiers):\r +\r +\r +```python\r +lines = response.text.split("\\n")\r +mappings = {}\r +for line in lines:\r + if ('\\t' in line):\r + tuple = line.split('\\t')\r + identifier = tuple[0]\r + database = tuple[1]\r + if (database == "Ensembl"):\r + mappings[identifier] = database\r +\r +print(mappings)\r +```\r +\r + {'ENSG00000169057': 'Ensembl'}\r +\r +\r +Alternatively, we can restrivct the return values from the BridgeDb webservice to just return Ensembl identifiers (system code `En`). 
For this, we add the `?dataSource=En` call parameter:\r +\r +\r +```python\r +callUrl = 'http://bridgedb-swagger.prod.openrisknet.org/Human/xrefs/H/MECP2?dataSource=En'\r +response = requests.get(callUrl)\r +response.text\r +```\r +\r +\r +\r +\r + 'ENSG00000169057\\tEnsembl\\n'\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.326.3" ; + schema1:isBasedOn "https://github.com/OpenRiskNet/notebooks.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for BridgeDb tutorial: Gene HGNC name to Ensembl identifier" ; + schema1:sdDatePublished "2024-07-12 13:35:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/326/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8112 ; + schema1:creator , + ; + schema1:dateCreated "2022-04-06T13:12:39Z" ; + schema1:dateModified "2023-01-16T13:59:41Z" ; + schema1:description """\r +# BridgeDb tutorial: Gene HGNC name to Ensembl identifier\r +\r +This tutorial explains how to use the BridgeDb identifier mapping service to translate HGNC names to Ensembl identifiers. This step is part of the OpenRiskNet use case to link Adverse Outcome Pathways to [WikiPathways](https://wikipathways.org/).\r +\r +First we need to load the Python library to allow calls to the [BridgeDb REST webservice](http://bridgedb.prod.openrisknet.org/swagger/):\r +\r +\r +```python\r +import requests\r +```\r +\r +Let's assume we're interested in the gene with HGNC MECP2 (FIXME: look up a gene in AOPWiki), the API call to make mappings is given below as `callUrl`. 
Here, the `H` indicates that the query (`MECP2`) is an HGNC symbol:\r +\r +\r +```python\r +callUrl = 'http://bridgedb.prod.openrisknet.org/Human/xrefs/H/MECP2'\r +```\r +\r +The default call returns all identifiers, not just for Ensembl:\r +\r +\r +```python\r +response = requests.get(callUrl)\r +response.text\r +```\r +\r +\r +\r +\r + 'GO:0001964\\tGeneOntology\\nuc065cav.1\\tUCSC Genome Browser\\n312750\\tOMIM\\nGO:0042551\\tGeneOntology\\nuc065car.1\\tUCSC Genome Browser\\nA0A087X1U4\\tUniprot-TrEMBL\\n4204\\tWikiGenes\\nGO:0043524\\tGeneOntology\\nILMN_1702715\\tIllumina\\n34355_at\\tAffy\\nGO:0007268\\tGeneOntology\\nMECP2\\tHGNC\\nuc065caz.1\\tUCSC Genome Browser\\nA_33_P3339036\\tAgilent\\nGO:0006576\\tGeneOntology\\nuc065cbg.1\\tUCSC Genome Browser\\nGO:0006342\\tGeneOntology\\n300496\\tOMIM\\nGO:0035176\\tGeneOntology\\nuc065cbc.1\\tUCSC Genome Browser\\nGO:0033555\\tGeneOntology\\nGO:0045892\\tGeneOntology\\nA_23_P114361\\tAgilent\\nGO:0045893\\tGeneOntology\\nENSG00000169057\\tEnsembl\\nGO:0090063\\tGeneOntology\\nGO:0005515\\tGeneOntology\\nGO:0002087\\tGeneOntology\\nGO:0005634\\tGeneOntology\\nGO:0007416\\tGeneOntology\\nGO:0008104\\tGeneOntology\\nGO:0042826\\tGeneOntology\\nGO:0007420\\tGeneOntology\\nGO:0035067\\tGeneOntology\\n300005\\tOMIM\\nNP_001104262\\tRefSeq\\nA0A087WVW7\\tUniprot-TrEMBL\\nNP_004983\\tRefSeq\\nGO:0046470\\tGeneOntology\\nGO:0010385\\tGeneOntology\\n11722682_at\\tAffy\\nGO:0051965\\tGeneOntology\\nNM_001316337\\tRefSeq\\nuc065caw.1\\tUCSC Genome Browser\\nA0A0D9SFX7\\tUniprot-TrEMBL\\nA0A140VKC4\\tUniprot-TrEMBL\\nGO:0003723\\tGeneOntology\\nGO:0019233\\tGeneOntology\\nGO:0001666\\tGeneOntology\\nGO:0003729\\tGeneOntology\\nGO:0021591\\tGeneOntology\\nuc065cas.1\\tUCSC Genome Browser\\nGO:0019230\\tGeneOntology\\nGO:0003682\\tGeneOntology\\nGO:0001662\\tGeneOntology\\nuc065cbh.1\\tUCSC Genome Browser\\nX99687_at\\tAffy\\nGO:0008344\\tGeneOntology\\nGO:0009791\\tGeneOntology\\nuc065cbd.1\\tUCSC Genome 
Browser\\nGO:0019904\\tGeneOntology\\nGO:0030182\\tGeneOntology\\nGO:0035197\\tGeneOntology\\n8175998\\tAffy\\nGO:0016358\\tGeneOntology\\nNM_004992\\tRefSeq\\nGO:0003714\\tGeneOntology\\nGO:0005739\\tGeneOntology\\nGO:0005615\\tGeneOntology\\nGO:0005737\\tGeneOntology\\nuc004fjv.3\\tUCSC Genome Browser\\n202617_s_at\\tAffy\\nGO:0050905\\tGeneOntology\\nGO:0008327\\tGeneOntology\\nD3YJ43\\tUniprot-TrEMBL\\nGO:0003677\\tGeneOntology\\nGO:0006541\\tGeneOntology\\nGO:0040029\\tGeneOntology\\nA_33_P3317211\\tAgilent\\nNP_001303266\\tRefSeq\\n11722683_a_at\\tAffy\\nGO:0008211\\tGeneOntology\\nGO:0051151\\tGeneOntology\\nNM_001110792\\tRefSeq\\nX89430_at\\tAffy\\nGO:2000820\\tGeneOntology\\nuc065cat.1\\tUCSC Genome Browser\\nGO:0003700\\tGeneOntology\\nGO:0047485\\tGeneOntology\\n4204\\tEntrez Gene\\nGO:0009405\\tGeneOntology\\nA0A0D9SEX1\\tUniprot-TrEMBL\\nGO:0098794\\tGeneOntology\\n3C2I\\tPDB\\nHs.200716\\tUniGene\\nGO:0000792\\tGeneOntology\\nuc065cax.1\\tUCSC Genome Browser\\n300055\\tOMIM\\n5BT2\\tPDB\\nGO:0006020\\tGeneOntology\\nGO:0031175\\tGeneOntology\\nuc065cbe.1\\tUCSC Genome Browser\\nGO:0008284\\tGeneOntology\\nuc065cba.1\\tUCSC Genome Browser\\nGO:0060291\\tGeneOntology\\n202618_s_at\\tAffy\\nGO:0016573\\tGeneOntology\\n17115453\\tAffy\\nA0A1B0GTV0\\tUniprot-TrEMBL\\nuc065cbi.1\\tUCSC Genome Browser\\nGO:0048167\\tGeneOntology\\nGO:0007616\\tGeneOntology\\nGO:0016571\\tGeneOntology\\nuc004fjw.3\\tUCSC Genome Browser\\nGO:0007613\\tGeneOntology\\nGO:0007612\\tGeneOntology\\nGO:0021549\\tGeneOntology\\n11722684_a_at\\tAffy\\nGO:0001078\\tGeneOntology\\nX94628_rna1_s_at\\tAffy\\nGO:0007585\\tGeneOntology\\nGO:0010468\\tGeneOntology\\nGO:0031061\\tGeneOntology\\nA_24_P237486\\tAgilent\\nGO:0050884\\tGeneOntology\\nGO:0000930\\tGeneOntology\\nGO:0005829\\tGeneOntology\\nuc065cau.1\\tUCSC Genome Browser\\nH7BY72\\tUniprot-TrEMBL\\n202616_s_at\\tAffy\\nGO:0006355\\tGeneOntology\\nuc065cay.1\\tUCSC Genome 
Browser\\nGO:0010971\\tGeneOntology\\n300673\\tOMIM\\nGO:0008542\\tGeneOntology\\nGO:0060079\\tGeneOntology\\nuc065cbf.1\\tUCSC Genome Browser\\nGO:0006122\\tGeneOntology\\nuc065cbb.1\\tUCSC Genome Browser\\nGO:0007052\\tGeneOntology\\nC9JH89\\tUniprot-TrEMBL\\nB5MCB4\\tUniprot-TrEMBL\\nGO:0032048\\tGeneOntology\\nGO:0050432\\tGeneOntology\\nGO:0001976\\tGeneOntology\\nI6LM39\\tUniprot-TrEMBL\\nGO:0005813\\tGeneOntology\\nILMN_1682091\\tIllumina\\nP51608\\tUniprot-TrEMBL\\n1QK9\\tPDB\\nGO:0006349\\tGeneOntology\\nGO:1900114\\tGeneOntology\\nGO:0000122\\tGeneOntology\\nGO:0006351\\tGeneOntology\\nGO:0008134\\tGeneOntology\\nILMN_1824898\\tIllumina\\n300260\\tOMIM\\n0006510725\\tIllumina\\n'\r +\r +\r +\r +You can also see the results are returned as a TSV file, consisting of two columns, the identifier and the matching database.\r +\r +We will want to convert this reply into a Python dictionary (with the identifier as key, as one database may have multiple identifiers):\r +\r +\r +```python\r +lines = response.text.split("\\n")\r +mappings = {}\r +for line in lines:\r + if ('\\t' in line):\r + tuple = line.split('\\t')\r + identifier = tuple[0]\r + database = tuple[1]\r + if (database == "Ensembl"):\r + mappings[identifier] = database\r +\r +print(mappings)\r +```\r +\r + {'ENSG00000169057': 'Ensembl'}\r +\r +\r +Alternatively, we can restrivct the return values from the BridgeDb webservice to just return Ensembl identifiers (system code `En`). 
For this, we add the `?dataSource=En` call parameter:\r +\r +\r +```python\r +callUrl = 'http://bridgedb-swagger.prod.openrisknet.org/Human/xrefs/H/MECP2?dataSource=En'\r +response = requests.get(callUrl)\r +response.text\r +```\r +\r +\r +\r +\r + 'ENSG00000169057\\tEnsembl\\n'\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/326?version=2" ; + schema1:keywords "Toxicology, jupyter" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "BridgeDb tutorial: Gene HGNC name to Ensembl identifier" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/326?version=3" ; + schema1:version 3 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 1640625 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Workflow to take DataOne data packages (raw datasets + metadata written in Ecological Metadata Standard) as input and create a DwC occurence.csv file almost ready to put in a Dawrin core Archive using eml-annotations at the attribute level" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/117?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Workflow constructed from history 'test dwc from PNDB Data package EML DwC annotations'" ; + schema1:sdDatePublished "2024-07-12 13:37:04 +0100" ; + schema1:url "https://workflowhub.eu/workflows/117/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 49971 ; + schema1:creator ; + schema1:dateCreated "2021-04-09T15:45:27Z" ; + schema1:dateModified "2023-11-09T21:04:38Z" ; + schema1:description "Workflow to take DataOne data packages (raw datasets + metadata written in Ecological Metadata Standard) as input and create a DwC occurence.csv file almost ready to put in a Dawrin core Archive using eml-annotations at the attribute level" ; + schema1:keywords "DataOne, Data package, EML, Ecological metadata language, eml-annotation, Darwin core, Galaxy-E, Galaxy" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Workflow constructed from history 'test dwc from PNDB Data package EML DwC annotations'" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/117?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2024-06-25T09:57:05.226299" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/gene-based-pathogen-identification" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "gene-based-pathogen-identification/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """### Workflow (hybrid) metagenomic assembly and binning + GEMs\r +_Accepts both Illumina and Long reads (ONT/PacBio)_\r +\r +- **Workflow Illumina Quality:** https://workflowhub.eu/workflows/336?version=1 \r +- **Workflow LongRead Quality:** https://workflowhub.eu/workflows/337\r + \r +- Kraken2 taxonomic classification of FASTQ reads\r +- SPAdes/Flye (Assembly)\r +- QUAST (Assembly quality report)\r +\r +**Workflow binning** https://workflowhub.eu/workflows/64?version=11 (optional)\r +- Metabat2/MaxBin2/SemiBin\r +- DAS Tool\r +- CheckM\r +- BUSCO\r +- GTDB-Tk\r + \r +**Workflow Genome-scale metabolic models** https://workflowhub.eu/workflows/372 (optional)\r +- CarveMe (GEM generation)\r +- MEMOTE (GEM test suite)\r +- SMETANA (Species METabolic interaction ANAlysis)\r +\r +Other UNLOCK workflows on WorkflowHub: https://workflowhub.eu/projects/16/workflows?view=default\r +\r +**All tool CWL files and other workflows can be found here:**
\r +https://gitlab.com/m-unlock/cwl
\r +\r +**How to setup and use an UNLOCK workflow:**
\r +https://m-unlock.gitlab.io/docs/setup/setup.html
\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/367?version=1" ; + schema1:isBasedOn "https://gitlab.com/m-unlock/cwl/-/blob/master/cwl/workflows/workflow_metagenomics_assembly.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for (Hybrid) Metagenomics workflow" ; + schema1:sdDatePublished "2024-07-12 13:34:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/367/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 175595 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 34941 ; + schema1:creator , + ; + schema1:dateCreated "2022-06-14T08:14:10Z" ; + schema1:dateModified "2024-01-02T12:20:13Z" ; + schema1:description """### Workflow (hybrid) metagenomic assembly and binning + GEMs\r +_Accepts both Illumina and Long reads (ONT/PacBio)_\r +\r +- **Workflow Illumina Quality:** https://workflowhub.eu/workflows/336?version=1 \r +- **Workflow LongRead Quality:** https://workflowhub.eu/workflows/337\r + \r +- Kraken2 taxonomic classification of FASTQ reads\r +- SPAdes/Flye (Assembly)\r +- QUAST (Assembly quality report)\r +\r +**Workflow binning** https://workflowhub.eu/workflows/64?version=11 (optional)\r +- Metabat2/MaxBin2/SemiBin\r +- DAS Tool\r +- CheckM\r +- BUSCO\r +- GTDB-Tk\r + \r +**Workflow Genome-scale metabolic models** https://workflowhub.eu/workflows/372 (optional)\r +- CarveMe (GEM generation)\r +- MEMOTE (GEM test suite)\r +- SMETANA (Species METabolic interaction ANAlysis)\r +\r +Other UNLOCK workflows on WorkflowHub: https://workflowhub.eu/projects/16/workflows?view=default\r +\r +**All tool CWL files and other workflows can be found here:**
\r +https://gitlab.com/m-unlock/cwl
\r +\r +**How to setup and use an UNLOCK workflow:**
\r +https://m-unlock.gitlab.io/docs/setup/setup.html
\r +""" ; + schema1:image ; + schema1:keywords "Metagenomics, Assembly, illumina, binning" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "(Hybrid) Metagenomics workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/367?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis of ribosome profiling, or Ribo-seq (also named ribosome footprinting)" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1016?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/riboseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/riboseq" ; + schema1:sdDatePublished "2024-07-12 13:19:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1016/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12175 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:09Z" ; + schema1:dateModified "2024-06-11T12:55:09Z" ; + schema1:description "Analysis of ribosome profiling, or Ribo-seq (also named ribosome footprinting)" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1016?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/riboseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1016?version=1" ; + schema1:version 1 . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 7130329 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + schema1:datePublished "2024-05-09T20:55:41.110246" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Purge-duplicates-one-haplotype-VGP6b/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=11" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-07-12 13:18:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=11" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9397 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:39Z" ; + schema1:dateModified "2024-06-11T12:54:39Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=11" ; + schema1:version 11 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "call and score variants from WGS/WES of rare disease patients" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1014?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/raredisease" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/raredisease" ; + schema1:sdDatePublished "2024-07-12 13:19:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1014/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11550 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:08Z" ; + schema1:dateModified "2024-06-11T12:55:08Z" ; + schema1:description "call and score variants from WGS/WES of rare disease patients" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1014?version=5" ; + schema1:keywords "diagnostics, rare-disease, snv, structural-variants, variant-annotation, variant-calling, wes, WGS" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/raredisease" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1014?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# CLAWS (CNAG's Long-read Assembly Workflow in Snakemake)\r + Snakemake Pipeline used for de novo genome assembly @CNAG. It has been developed for Snakemake v6.0.5.\r +\r +It accepts Oxford Nanopore Technologies (ONT) reads, PacBio HiFi reads, illumina paired-end data, illumina 10X data and Hi-C reads. It does the preprocessing of the reads, assembly, polishing, purge_dups, scaffolding and different evaluation steps. By default it will preprocess the reads, run Flye + Hypo + purge_dups + yahs and evaluate the resulting assemblies with BUSCO, MERQURY, Nseries and assembly_stats. \r +It needs a config file and a spec file (json file with instructions on which resources should slurm use for each of the jobs). Both files are created by the script "create_config_assembly.py" that is located in the bin directory.
To check all the options accepted by the script, do:\r +\r +```\r +bin/create_config_assembly.py -h\r +```\r +\r +Once the 2 config files are produced, the pipeline can be launched using snakemake like this:\r +\r +``snakemake --notemp -j 999 --snakefile assembly_pipeline.smk --configfile assembly.config --is --cluster-conf assembly.spec --use-conda --use-envmodules``\r +\r +If you are using an HPC cluster, please check how should you run snakemake to launch the jobs to the cluster. \r +\r +Most of the tools used will be installed via conda using the environments of the "envs" directory after providing the "--use-conda" option to snakemake. However, a few tools cannot be installed via conda and will have to be available in your PATH, or as a module in the cluster. Those tools are:\r +\r +- NextDenovo/2.5.0\r +- NextPolish/1.4.1\r +\r +# How to provide input data:\r +\r +There are several ways of providing the reads.\r +\r +### 1- ONT reads\r +\r +1.1 Using the option ``--ont-dir {DIR}`` in create_config_assembly.py.\r +\r +If you do so, it will look for all the files in the directory that end in '.fastq.gz' and will add the basenames to "ONT_wildcards". These wildcards will be processed by the pipeline that will:\r +\r +- Concatenate all the files into a single file\r +\r +- Run filtlong with the default or specified parameters. \r +\r +- Use the resulting file for assembly, polishing and/or purging.\r +\r +You can also specify the basenames of the files that you want to use with the ``--ont-list `` option. 
In this case, the pipeline will use the wildcards that you're providing instead of merging all the files in the directory.\r +\r +1.2 Using the option ```--ont-reads {FILE}``` in create_config_assembly.py.\r +\r +If you do so, it will consider that you already have all the reads in one file and will: \r +\r +- Run filtlong with the default or specified parameters.\r +\r +- Use the resulting file for assembly, polishing and/or purging.\r +\r +1.3 Using the option ```--ont-filt {FILE}```. It will use this file as the output from filtlong. Hence, it will skip the preprocessing steps and directly use it for assembly, polishing and/or purging. \r +\r +\r +\r +### 2-Illumina 10X-linked data\r +\r +2.1 Using the ```--raw-10X {DIR:list}``` option. \r +\r +Dictionary with 10X raw read directories, it has to be the mkfastq dir. You must specify as well the sampleIDs from this run. Example: '{"mkfastq- dir":"sample1,sample2,sample3"}'...\r +\r +It will take each basename in the list to get the fastqs from the corresponding directory and run longranger on each sample. Afterwards, it will build meryldbs for each "barcoded" file. Finally, it will concatenate all the meryldbs and "barcoded" files. Resulting "barcoded" file will be used for polishing. \r +\r +2.2 Using the ``--processed-10X {DIR}`` parameter. \r +\r +This directory can already be there or be produced by the pipeline as described in step 2.1. Once all the "barcoded" fastq files are there, meryldbs will be built for each "barcoded" file. Finally, it will concatenate all the meryldbs and "barcoded" files. Resulting "barcoded" file will be used for polishing. \r +\r +2.3 Using the ``--10X`` option. \r +\r +The argument to this is the path to the concatenated ".barcoded" file that needs to be used for polishing. If the pre-concatenated files are not given, meryldbs will be directly generated with this file, but it may run out of memory. 
\r +\r +### 3- Illumina short-read data\r +\r +3.1 Using the ``--illumina-dir {DIR}`` option, that will look for all the files in the directory that end in '.1.fastq.gz' and will add the basenames to "illumina_wildcards". These wildcards will be processed by the pipeline that will: \r +\r +- Trim adaptors with Trimgalore\r +\r +- Concatenate all the trimmed *.1.fastq.gz and the *2.fastq.gz in one file per pair. \r +\r +- The resulting reads will be used for building meryldbs and polishing. \r +\r +3.2 Using the ``--processed-illumina`` option. If the directory exists and contains files, the pipeline will look for all the files in the directory that end in '.1.fastq.gz' and will add the basenames to "illumina_wildcards". These wildcards will be processed by the pipeline that will:\r +\r +- Concatenate all the trimmed *.1.fastq.gz and the *2.fastq.gz in one file per pair. \r +\r +- The resulting reads will be used for building meryldbs and polishing. \r +\r +3.3 Using the ``--pe1 {FILE} and --pe2 {FILE}`` options. That will consider that these are the paired files containing all the illumina reads ready to be used and will build meryldbs and polish with them.\r +\r +### 4- Input assemblies\r +\r +If you want to polish an already assembled assembly, you can give it to the pipeline by using the option ``--assembly-in ASSEMBLY_IN [ASSEMBLY_IN ...]\r + Dictionary with assemblies that need to be polished but not assembled and directory where they should\r + be polished. Example: '{"assembly1":"polishing_dir1"}' '{"assembly2"="polishing_dir2"}' ...``\r + \r +If you want to improve an already polished assembly, you can give it to the pipeline by using the option ``--postpolish-assemblies POSTPOLISH_ASSEMBLIES [POSTPOLISH_ASSEMBLIES ...]\r + Dictionary with assemblies for whic postpolishing steps need to be run but that are not assembled and\r + base step for the directory where the first postpolishing step should be run. 
Example:\r + '{"assembly1":"s04.1_p03.1"}' '{"assembly2"="s04.2_p03.2"}' ...``\r +\r +\r +\r +# Description of implemented rules\r +\r +1- Preprocessing:\r + \r +- **Read concatenation:**\r +\r +``zcat {input.fastqs} | pigz -p {threads} -c > {output.final_fastq}``\r + \r +- **Longranger for 10X reads**: it uses the Longranger version installed in the path specified in the configfile\r +\r +``longranger basic --id={params.sample} --sample={params.sample} --fastqs={input.mkfastq_dir} --localcores={threads}``\r +\r +- **Trimgalore:** By default it gives the ``--gzip -q 20 --paired --retain_unpaired`` options, but it can be changed with the ``--trim-galore-opts `` argument. \r +\r +``trim_galore -j {threads} {params.opts} {input.read1} {input.read2}``\r +\r +- **Filtlong:** it uses the Filtlong version installed in the path specified in the configfile. By default it gives the min_length and min_mean_q parameters, but extra parameters can be added with the ``--filtlong-opts`` option.\r +\r +``filtlong --min_length {params.minlen} --min_mean_q {params.min_mean_q} {params.opts} {input.reads} | pigz -p {threads} -c > {output.outreads}``\r + \r +- **Build meryldb** (with processed 10X reads or illumina reads): it uses the merqury conda environment specified in the configfile. It takes as argument the `--mery-k` value that needs to be estimated first for the genime size. \r +\r +``meryl k={params.kmer} count output {output.out_dir} {input.fastq}``\r + \r +- Concat meryldbs: with the merqury conda environment specified in the configfile\r +\r +``meryl union-sum output {output.meryl_all} {input.input_run}``\r + \r +- **Align ONT (Minimap2):** it aligns the reads using minimap2 and outputs the alignment either in bam or in paf.gz formats. 
It uses the minimap2 conda environment specified in the configfile\r +\r +``minimap2 -{params.align_opts} -t {threads} {input.genome} {input.reads} ``\r +\r +- **Align Illumina (BWA-MEM):** it aligns the reads with BWA-mem and outputs a bam file\r +\r +``bwa mem -Y {params.options} -t {threads} {input.genome} {input.reads} | samtools view -Sb - | samtools sort -@ {threads} -o {output.mapping} -``\r +\r +2- Assembly\r +\r +- **Flye (default)**. It is run by default, if you don't want the pipeline to run it, you can give `--no-flye` option when creating the config. It uses the conda environment specified in the config. By default it is set to 2 polishing iterations and gives the genome-size estimate that has been given when creating the config. Extra options can be provided with the `--flye-opts`.\r +\r +``flye --{params.readtype} {input.reads} -o {params.outdir}out -t {threads} -i {params.pol_iterations} {params.other_flye_opts} ``\r + \r +- **Nextdenovo (if ``run-nextdenovo``):** It uses the cluster module specified in the config. If nextdenovo option is turned on, the create_config script will also create the nextdenovo config file. Check the create_config help to see which options can be modified on it. \r +\r +``nextDenovo {input.config}``\r +\r +3- Polishing\r +\r +- **Hypo (default):** It is the polisher that the pipeline uses by default, it can be turned off specifying ``--no-hypo`` when creating the config. If selected, the reads will be aligned in previous rules and then hypo will be run, it requires illumina data. It uses the conda environment specified in the config. \r +\r +``hypo -r @short_reads.list.txt -d {input.genome} -b {input.sr_bam} -c {coverage} -s {params.genome_size} -B {input.lr_bam} -t {threads} -o {output.polished} -p {params.proc} {params.opts} ``\r + \r +- **Racon (if turned on):** to run racon, specify ``--racon-rounds `` and the number of rounds of it you want to run. It uses the conda environment specified in the config file. 
\r +\r +``{params.racon_env}/scripts/racon_wrapper.py -u {params.opts} -t {threads} reads4racon.fastq.gz {input.mapping} {input.assembly} > {output.polished} ``\r + \r +- **Medaka (if turned on):** to run medaka, specify ``--medaka-rounds`` and the nummber of rounds of it you want to run. It uses the conda environment specified in the config file. It'll run after racon and before pilon, if they are also selected. \r +\r +`` medaka consensus {input.mapping} {wildcards.directory}/rmp/{wildcards.base}.medaka{wildcards.param}.hdf --threads {medaka_threads} --model {params.model} {params.consensus_opts};\r +medaka stitch --threads {threads} {wildcards.directory}/rmp/{wildcards.base}.medaka{wildcards.param}.hdf {input.assembly} {output.polished}``\r + \r +- **Pilon (if turned on):** to run Pilon, specify ``--pilon-rounds`` and the number of rounds of it you want to run. If it's a big genome, the pipeline will split the consensus step in several jobs, each of them running on certain scaffolds. It uses the version installed in the path specified in the config. \r +\r +``{scripts_dir}split_bam.py assembly.len {input.mapping} {params.chunks} {threads};\r +java {params.java_opts} -jar {params.path} --genome {input.assembly} --frags {input.alignment} {params.opts} --threads {threads} --output {basename}; \r +{scripts_dir}/concat_pilon.py {params.splitdir} {params.chunks} > {output.polished}``\r +\r + \r +- **Nextpolish ont (if turned on):** to run nextpolish with ONT reads, specify ``--nextpolish-ont-rounds`` and the number of rounds you want to run of it. \r +\r +``"python /apps/NEXTPOLISH/1.3.1/lib/nextpolish2.py -g {input.genome} -p {threads} -l lgs.fofn -r {params.lrtype} > {output.polished}``\r + \r +- **Nextpolish illumina (if turned on):** to run nextpolish with ONT reads, specify ``--nextpolish-ill-rounds`` and the number of rounds you want to run of it. 
\r +\r +``"python /apps/NEXTPOLISH/1.3.1/lib/nextpolish1.py -g {input.genome} -p {threads} -s {input.bam} -t {params.task} > {output.polished}``\r +\r +4- Post-assembly\r +\r +- **Purge_dups (by default):** select ``--no-purgedups`` if you don't want to run it. If no manual cutoffs are given, it'll run purgedups with automatic cutoffs and then will rerun it selecting the mean cutoff as 0.75\\*cov. It uses the version installed in the cluster module specified in the config. \r +\r +5- Evaluations\r + \r +- **Merqury:** It runs on each 'terminal' assembly. This is, the base assembly and the resulting assembly from each branch of the pipeline. \r + \r +- **Busco:** It can be run only in the terminal assemblies or on all the assemblies produced by the pipeline. It uses the conda environment specified in the config as well as the parameters specified. \r + \r +- **Nseries:** This is run during the *finalize* on all the assemblies that are evaluated. After it, that rule combines the statistics produced by all the evaluation rules. 
\r +\r +# Description of all options\r +```\r +bin/create_config_assembly.py -h\r +usage: create_configuration_file [-h] [--configFile configFile]\r + [--specFile specFile]\r + [--ndconfFile ndconfFile]\r + [--concat-cores concat_cores]\r + [--genome-size genome_size]\r + [--lr-type lr_type] [--basename base_name]\r + [--species species] [--keep-intermediate]\r + [--preprocess-lr-step PREPROCESS_ONT_STEP]\r + [--preprocess-10X-step PREPROCESS_10X_STEP]\r + [--preprocess-illumina-step PREPROCESS_ILLUMINA_STEP]\r + [--preprocess-hic-step PREPROCESS_HIC_STEP]\r + [--flye-step FLYE_STEP] [--no-flye]\r + [--nextdenovo-step NEXTDENOVO_STEP]\r + [--run-nextdenovo]\r + [--nextpolish-cores nextpolish_cores]\r + [--minimap2-cores minimap2_cores]\r + [--bwa-cores bwa_cores]\r + [--hypo-cores hypo_cores]\r + [--pairtools-cores pairtools_cores]\r + [--busco-cores busco_cores]\r + [--nextpolish-ont-rounds nextpolish_ont_rounds]\r + [--nextpolish-ill-rounds nextpolish_ill_rounds]\r + [--hypo-rounds hypo_rounds]\r + [--longranger-cores longranger_cores]\r + [--longranger-path longranger_path]\r + [--genomescope-opts genomescope_additional]\r + [--no-purgedups] [--ploidy ploidy]\r + [--run-tigmint] [--run-kraken2] [--no-yahs]\r + [--scripts-dir SCRIPTS_DIR]\r + [--ont-reads ONT_READS] [--ont-dir ONT_DIR]\r + [--ont-filt ONT_FILTERED] [--pe1 PE1]\r + [--pe2 PE2]\r + [--processed-illumina PROCESSED_ILLUMINA]\r + [--raw-10X RAW_10X [RAW_10X ...]]\r + [--processed-10X PROCESSED_10X] [--10X R10X]\r + [--illumina-dir ILLUMINA_DIR]\r + [--assembly-in ASSEMBLY_IN [ASSEMBLY_IN ...]]\r + [--postpolish-assemblies POSTPOLISH_ASSEMBLIES [POSTPOLISH_ASSEMBLIES ...]]\r + [--hic-dir HIC_DIR]\r + [--pipeline-workdir PIPELINE_WORKDIR]\r + [--filtlong-dir FILTLONG_DIR]\r + [--concat-hic-dir CONCAT_HIC_DIR]\r + [--flye-dir FLYE_DIR]\r + [--nextdenovo-dir NEXTDENOVO_DIR]\r + [--flye-polishing-dir POLISH_FLYE_DIR]\r + [--nextdenovo-polishing-dir POLISH_NEXTDENOVO_DIR]\r + [--eval-dir eval_dir] 
[--stats-out stats_out]\r + [--hic-qc-dir hic_qc_dir]\r + [--filtlong-minlen filtlong_minlen]\r + [--filtlong-min-mean-q filtlong_min_mean_q]\r + [--filtlong-opts filtlong_opts]\r + [--kraken2-db kraken2_db]\r + [--kraken2-kmer kraken2_kmers]\r + [--kraken2-opts additional_kraken2_opts]\r + [--kraken2-cores kraken2_threads]\r + [--trim-galore-opts trim_galore_opts]\r + [--trim-Illumina-cores Trim_Illumina_cores]\r + [--flye-cores flye_cores]\r + [--flye-polishing-iterations flye_pol_it]\r + [--other-flye-opts other_flye_opts]\r + [--nextdenovo-cores nextdenovo_cores]\r + [--nextdenovo-jobtype nextdenovo_type]\r + [--nextdenovo-task nextdenovo_task]\r + [--nextdenovo-rewrite nextdenovo_rewrite]\r + [--nextdenovo-parallel_jobs nextdenovo_parallel_jobs]\r + [--nextdenovo-minreadlen nextdenovo_minreadlen]\r + [--nextdenovo-seeddepth nextdenovo_seeddepth]\r + [--nextdenovo-seedcutoff nextdenovo_seedcutoff]\r + [--nextdenovo-blocksize nextdenovo_blocksize]\r + [--nextdenovo-pa-correction nextdenovo_pa_correction]\r + [--nextdenovo-minimap_raw nextdenovo_minimap_raw]\r + [--nextdenovo-minimap_cns nextdenovo_minimap_cns]\r + [--nextdenovo-minimap_map nextdenovo_minimap_map]\r + [--nextdenovo-sort nextdenovo_sort]\r + [--nextdenovo-correction_opts nextdenovo_correction_opts]\r + [--nextdenovo-nextgraph_opt nextdenovo_nextgraph_opt]\r + [--sr-cov ill_cov]\r + [--hypo-proc hypo_processes] [--hypo-no-lr]\r + [--hypo-opts hypo_opts]\r + [--purgedups-cores purgedups_cores]\r + [--purgedups-calcuts-opts calcuts_opts]\r + [--tigmint-cores tigmint_cores]\r + [--tigmint-opts tigmint_opts] [--hic-qc]\r + [--no-pretext] [--assembly-qc assembly_qc]\r + [--yahs-cores yahs_cores] [--yahs-mq yahs_mq]\r + [--yahs-opts yahs_opts]\r + [--hic-map-opts hic_map_opts]\r + [--mq mq [mq ...]]\r + [--hic-qc-assemblylen hic_qc_assemblylen]\r + [--blast-cores blast_cores]\r + [--hic-blastdb blastdb]\r + [--hic-readsblast hic_readsblast]\r + [--no-final-evals]\r + [--busco-lin busco_lineage]\r + 
[--merqury-db merqury_db] [--meryl-k meryl_k]\r + [--meryl-threads meryl_threads]\r + [--ont-list ONT_wildcards]\r + [--illumina-list illumina_wildcards]\r + [--r10X-list r10X_wildcards]\r + [--hic-list hic_wildcards]\r +\r +Create a configuration json file for the assembly pipeline.\r +\r +optional arguments:\r + -h, --help show this help message and exit\r +\r +General Parameters:\r + --configFile configFile\r + Configuration JSON to be generated. Default\r + assembly.config\r + --specFile specFile Cluster specifications JSON fileto be generated.\r + Default assembly.spec\r + --ndconfFile ndconfFile\r + Name pf the nextdenovo config file. Default\r + nextdenovo.config\r + --concat-cores concat_cores\r + Number of threads to concatenate reads and to run\r + filtlong. Default 4\r + --genome-size genome_size\r + Approximate genome size. Example: 615m or 2.6g.\r + Default None\r + --lr-type lr_type Type of long reads (options are flye read-type\r + options). Default nano-raw\r + --basename base_name Base name for the project. Default None\r + --species species Name of the species to be assembled. Default None\r + --keep-intermediate Set this to True if you do not want intermediate files\r + to be removed. Default False\r + --preprocess-lr-step PREPROCESS_ONT_STEP\r + Step for preprocessing long-reads. Default 02.1\r + --preprocess-10X-step PREPROCESS_10X_STEP\r + Step for preprocessing 10X reads. Default 02.2\r + --preprocess-illumina-step PREPROCESS_ILLUMINA_STEP\r + Step for preprocessing illumina reads. Default 02.2\r + --preprocess-hic-step PREPROCESS_HIC_STEP\r + Step for preprocessing hic reads. Default 02.3\r + --flye-step FLYE_STEP\r + Step for running flye. Default 03.1\r + --no-flye Give this option if you do not want to run Flye.\r + --nextdenovo-step NEXTDENOVO_STEP\r + Step for running nextdenovo. 
Default 03.2\r + --run-nextdenovo Give this option if you do want to run Nextdenovo.\r + --nextpolish-cores nextpolish_cores\r + Number of threads to run the nextpolish step. Default\r + 24\r + --minimap2-cores minimap2_cores\r + Number of threads to run the alignment with minimap2.\r + Default 32\r + --bwa-cores bwa_cores\r + Number of threads to run the alignments with BWA-Mem2.\r + Default 16\r + --hypo-cores hypo_cores\r + Number of threads to run the hypo step. Default 24\r + --pairtools-cores pairtools_cores\r + Number of threads to run the pairtools step. Default\r + 64\r + --busco-cores busco_cores\r + Number of threads to run BUSCO. Default 32\r + --nextpolish-ont-rounds nextpolish_ont_rounds\r + Number of rounds to run the Nextpolish with ONT step.\r + Default 0\r + --nextpolish-ill-rounds nextpolish_ill_rounds\r + Number of rounds to run the Nextpolish with illumina\r + step. Default 0\r + --hypo-rounds hypo_rounds\r + Number of rounds to run the Hypostep. Default 1\r + --longranger-cores longranger_cores\r + Number of threads to run longranger. Default 16\r + --longranger-path longranger_path\r + Path to longranger executable. Default\r + /scratch/project/devel/aateam/src/10X/longranger-2.2.2\r + --genomescope-opts genomescope_additional\r + Additional options to run Genomescope2 with. Default\r + --no-purgedups Give this option if you do not want to run Purgedups.\r + --ploidy ploidy Expected ploidy. Default 2\r + --run-tigmint Give this option if you want to run the scaffolding\r + with 10X reads step.\r + --run-kraken2 Give this option if you want to run Kraken2 on the\r + input reads.\r + --no-yahs Give this option if you do not want to run yahs.\r +\r +Inputs:\r + --scripts-dir SCRIPTS_DIR\r + Directory with the different scripts for the pipeline.\r + Default bin/../scripts/\r + --ont-reads ONT_READS\r + File with all the ONT reads. Default None\r + --ont-dir ONT_DIR Directory where the ONT fastqs are stored. 
Default\r + None\r + --ont-filt ONT_FILTERED\r + File with the ONT reads after running filtlong on\r + them. Default None\r + --pe1 PE1 File with the illumina paired-end fastqs, already\r + trimmed, pair 1.\r + --pe2 PE2 File with the illumina paired-end fastqs, already\r + trimmed, pair 2.\r + --processed-illumina PROCESSED_ILLUMINA\r + Directory to Processed illumina reads. Already there\r + or to be produced by the pipeline.\r + --raw-10X RAW_10X [RAW_10X ...]\r + Dictionary with 10X raw read directories, it has to be\r + the mkfastq dir. You must specify as well the\r + sampleIDs from this run. Example: '{"mkfastq-\r + dir":"sample1,sample2,sample3"}'...\r + --processed-10X PROCESSED_10X\r + Directory to Processed 10X reads. Already there or to\r + be produced by the pipeline.\r + --10X R10X File with barcoded 10X reads in fastq.gz format,\r + concatenated.\r + --illumina-dir ILLUMINA_DIR\r + Directory where the raw illumina fastqs are stored.\r + Default None\r + --assembly-in ASSEMBLY_IN [ASSEMBLY_IN ...]\r + Dictionary with assemblies that need to be polished\r + but not assembled and directory where they should be\r + polished. Example: '{"assembly1":"polishing_dir1"}'\r + '{"assembly2"="polishing_dir2"}' ...\r + --postpolish-assemblies POSTPOLISH_ASSEMBLIES [POSTPOLISH_ASSEMBLIES ...]\r + Dictionary with assemblies for whic postpolishing\r + steps need to be run but that are not assembled and\r + base step for the directory where the first\r + postpolishing step should be run. Example:\r + '{"assembly1":"s04.1_p03.1"}'\r + '{"assembly2":"s04.2_p03.2"}' ...\r + --hic-dir HIC_DIR Directory where the HiC fastqs are stored. Default\r + None\r +\r +Outputs:\r + --pipeline-workdir PIPELINE_WORKDIR\r + Base directory for the pipeline run. 
Default /software\r + /assembly/pipelines/Assembly_pipeline/v2.1/GLAWS/\r + --filtlong-dir FILTLONG_DIR\r + Directory to process the ONT reads with filtlong.\r + Default s02.1_p01.1_Filtlong\r + --concat-hic-dir CONCAT_HIC_DIR\r + Directory to concatenate the HiC reads. Default\r + s02.3_p01.1_Concat_HiC\r + --flye-dir FLYE_DIR Directory to run flye. Default s03.1_p02.1_flye/\r + --nextdenovo-dir NEXTDENOVO_DIR\r + Directory to run nextdenovo. Default\r + s03.2_p02.1_nextdenovo/\r + --flye-polishing-dir POLISH_FLYE_DIR\r + Directory to polish the flye assembly. Default\r + s04.1_p03.1_polishing/\r + --nextdenovo-polishing-dir POLISH_NEXTDENOVO_DIR\r + Directory to run nextdenovo. Default\r + s04.2_p03.2_polishing/\r + --eval-dir eval_dir Base directory for the evaluations. Default\r + evaluations/\r + --stats-out stats_out\r + Path to the file with the final statistics.\r + --hic-qc-dir hic_qc_dir\r + Directory to run the hic_qc. Default hic_qc/\r +\r +Filtlong:\r + --filtlong-minlen filtlong_minlen\r + Minimum read length to use with Filtlong. Default 1000\r + --filtlong-min-mean-q filtlong_min_mean_q\r + Minimum mean quality to use with Filtlong. Default 80\r + --filtlong-opts filtlong_opts\r + Extra options to run Filtlong (eg. -t 4000000000)\r +\r +Kraken2:\r + --kraken2-db kraken2_db\r + Database to be used for running Kraken2. Default None\r + --kraken2-kmer kraken2_kmers\r + Database to be used for running Kraken2. Default None\r + --kraken2-opts additional_kraken2_opts\r + Optional parameters for the rule Kraken2. Default\r + --kraken2-cores kraken2_threads\r + Number of threads to run the Kraken2 step. Default 16\r +\r +Trim_Galore:\r + --trim-galore-opts trim_galore_opts\r + Optional parameters for the rule trim_galore. 
Default\r + --gzip -q 20 --paired --retain_unpaired\r + --trim-Illumina-cores Trim_Illumina_cores\r + Number of threads to run the Illumina trimming step.\r + Default 8\r +\r +Flye:\r + --flye-cores flye_cores\r + Number of threads to run FLYE. Default 128\r + --flye-polishing-iterations flye_pol_it\r + Number of polishing iterations to use with FLYE.\r + Default 2\r + --other-flye-opts other_flye_opts\r + Additional options to run Flye. Default --scaffold\r +\r +Nextdenovo:\r + --nextdenovo-cores nextdenovo_cores\r + Number of threads to run nextdenovo. Default 2\r + --nextdenovo-jobtype nextdenovo_type\r + Job_type for nextdenovo. Default slurm\r + --nextdenovo-task nextdenovo_task\r + Task need to run. Default all\r + --nextdenovo-rewrite nextdenovo_rewrite\r + Overwrite existing directory. Default yes\r + --nextdenovo-parallel_jobs nextdenovo_parallel_jobs\r + Number of tasks used to run in parallel. Default 50\r + --nextdenovo-minreadlen nextdenovo_minreadlen\r + Filter reads with length < minreadlen. Default 1k\r + --nextdenovo-seeddepth nextdenovo_seeddepth\r + Expected seed depth, used to calculate seed_cutoff,\r + co-use with genome_size, you can try to set it 30-45\r + to get a better assembly result. Default 45\r + --nextdenovo-seedcutoff nextdenovo_seedcutoff\r + Minimum seed length, <=0 means calculate it\r + automatically using bin/seq_stat. Default 0\r + --nextdenovo-blocksize nextdenovo_blocksize\r + Block size for parallel running, split non-seed reads\r + into small files, the maximum size of each file is\r + blocksize. Default 1g\r + --nextdenovo-pa-correction nextdenovo_pa_correction\r + number of corrected tasks used to run in parallel,\r + each corrected task requires ~TOTAL_INPUT_BASES/4\r + bytes of memory usage, overwrite parallel_jobs only\r + for this step. Default 100\r + --nextdenovo-minimap_raw nextdenovo_minimap_raw\r + minimap2 options, used to find overlaps between raw\r + reads, see minimap2-nd for details. 
Default -t 30\r + --nextdenovo-minimap_cns nextdenovo_minimap_cns\r + minimap2 options, used to find overlaps between\r + corrected reads. Default -t 30\r + --nextdenovo-minimap_map nextdenovo_minimap_map\r + minimap2 options, used to map reads back to the\r + assembly. Default -t 30 --no-kalloc\r + --nextdenovo-sort nextdenovo_sort\r + sort options, see ovl_sort for details. Default -m\r + 400g -t 20\r + --nextdenovo-correction_opts nextdenovo_correction_opts\r + Correction options. Default -p 30 -dbuf\r + --nextdenovo-nextgraph_opt nextdenovo_nextgraph_opt\r + nextgraph options, see nextgraph for details. Default\r + -a 1\r +\r +Hypo:\r + --sr-cov ill_cov Approximate short read coverage for hypo Default 0\r + --hypo-proc hypo_processes\r + Number of contigs to be processed in parallel by HyPo.\r + Default 6\r + --hypo-no-lr Set this to false if you don¡t want to run hypo with\r + long reads. Default True\r + --hypo-opts hypo_opts\r + Additional options to run Hypo. Default None\r +\r +Purge_dups:\r + --purgedups-cores purgedups_cores\r + Number of threads to run purgedups. Default 8\r + --purgedups-calcuts-opts calcuts_opts\r + Adjusted values to run calcuts for purgedups. Default\r + None\r +\r +Scaffold_with_10X:\r + --tigmint-cores tigmint_cores\r + Number of threads to run the 10X scaffolding step.\r + Default 12\r + --tigmint-opts tigmint_opts\r + Adjusted values to run the scaffolding with 10X reads.\r + Default None\r +\r +HiC:\r + --hic-qc Give this option if only QC of the HiC data needs to\r + be done.\r + --no-pretext Give this option if you do not want to generate the\r + pretext file\r + --assembly-qc assembly_qc\r + Path to the assembly to be used perfom the QC of the\r + HiC reads.\r + --yahs-cores yahs_cores\r + Number of threads to run YAHS. 
Default 48\r + --yahs-mq yahs_mq Mapping quality to use when running yahs.Default 40\r + --yahs-opts yahs_opts\r + Additional options to give to YAHS.Default\r + --hic-map-opts hic_map_opts\r + Options to use with bwa mem when aligning the HiC\r + reads. Deafault -5SP -T0\r + --mq mq [mq ...] Mapping qualities to use for processing the hic\r + mappings. Default [0, 40]\r + --hic-qc-assemblylen hic_qc_assemblylen\r + Lentgh of the assembly to be used for HiC QC\r + --blast-cores blast_cores\r + Number of threads to run blast with the HiC unmapped\r + reads.Default 8\r + --hic-blastdb blastdb\r + BLAST Database to use to classify the hic unmapped\r + reads. Default\r + /scratch_isilon/groups/assembly/data/blastdbs\r + --hic-readsblast hic_readsblast\r + Number of unmapped hic reads to classify with blast.\r + Default 100\r +\r +Finalize:\r + --no-final-evals If specified, do not run evaluations on final\r + assemblies. Default True\r + --busco-lin busco_lineage\r + Path to the lineage directory to run Busco with.\r + Default None\r + --merqury-db merqury_db\r + Meryl database. Default None\r + --meryl-k meryl_k Kmer length to build the meryl database. Default None\r + --meryl-threads meryl_threads\r + Number of threads to run meryl and merqury. Default 4\r +\r +Wildcards:\r + --ont-list ONT_wildcards\r + List with basename of the ONT fastqs that will be\r + used. Default None\r + --illumina-list illumina_wildcards\r + List with basename of the illumina fastqs. Default\r + None\r + --r10X-list r10X_wildcards\r + List with basename of the raw 10X fastqs. Default None\r + --hic-list hic_wildcards\r + List with basename of the raw hic fastqs. 
Default None\r +```\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.567.1" ; + schema1:isBasedOn "https://github.com/cnag-aat/assembly_pipeline.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CLAWS (CNAG's long-read assembly workflow in Snakemake)" ; + schema1:sdDatePublished "2024-07-12 13:24:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/567/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3888 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2023-09-12T13:23:11Z" ; + schema1:dateModified "2023-09-26T16:54:53Z" ; + schema1:description """# CLAWS (CNAG's Long-read Assembly Workflow in Snakemake)\r + Snakemake Pipeline used for de novo genome assembly @CNAG. It has been developed for Snakemake v6.0.5.\r +\r +It accepts Oxford Nanopore Technologies (ONT) reads, PacBio HFi reads, illumina paired-end data, illumina 10X data and Hi-C reads. It does the preprocessing of the reads, assembly, polishing, purge_dups, scaffodling and different evaluation steps. By default it will preprocess the reads, run Flye + Hypo + purge_dups + yahs and evaluate the resulting assemblies with BUSCO, MERQURY, Nseries and assembly_stats. \r +It needs a config file and a spec file (json file with instructions on which resources should slurm use for each of the jobs). Both files are created by the script "create_config_assembly.py" that is located in the bin directory. 
To check all the options accepted by the script, do:\r +\r +```\r +bin/create_config_assembly.py -h\r +```\r +\r +Once the 2 config files are produced, the pipeline can be launched using snakemake like this:\r +\r +``snakemake --notemp -j 999 --snakefile assembly_pipeline.smk --configfile assembly.config --is --cluster-conf assembly.spec --use-conda --use-envmodules``\r +\r +If you are using an HPC cluster, please check how should you run snakemake to launch the jobs to the cluster. \r +\r +Most of the tools used will be installed via conda using the environments of the "envs" directory after providing the "--use-conda" option to snakemake. However, a few tools cannot be installed via conda and will have to be available in your PATH, or as a module in the cluster. Those tools are:\r +\r +- NextDenovo/2.5.0\r +- NextPolish/1.4.1\r +\r +# How to provide input data:\r +\r +There are several ways of providing the reads.\r +\r +### 1- ONT reads\r +\r +1.1 Using the option ``--ont-dir {DIR}`` in create_config_assembly.py.\r +\r +If you do so, it will look for all the files in the directory that end in '.fastq.gz' and will add the basenames to "ONT_wildcards". These wildcards will be processed by the pipeline that will:\r +\r +- Concatenate all the files into a single file\r +\r +- Run filtlong with the default or specified parameters. \r +\r +- Use the resulting file for assembly, polishing and/or purging.\r +\r +You can also specify the basenames of the files that you want to use with the ``--ont-list `` option. 
In this case, the pipeline will use the wildcards that you're providing instead of merging all the files in the directory.\r +\r +1.2 Using the option ```--ont-reads {FILE}``` in create_config_assembly.py.\r +\r +If you do so, it will consider that you already have all the reads in one file and will: \r +\r +- Run filtlong with the default or specified parameters.\r +\r +- Use the resulting file for assembly, polishing and/or purging.\r +\r +1.3 Using the option ```--ont-filt {FILE}```. It will use this file as the output from filtlong. Hence, it will skip the preprocessing steps and directly use it for assembly, polishing and/or purging. \r +\r +\r +\r +### 2-Illumina 10X-linked data\r +\r +2.1 Using the ```--raw-10X {DIR:list}``` option. \r +\r +Dictionary with 10X raw read directories, it has to be the mkfastq dir. You must specify as well the sampleIDs from this run. Example: '{"mkfastq- dir":"sample1,sample2,sample3"}'...\r +\r +It will take each basename in the list to get the fastqs from the corresponding directory and run longranger on each sample. Afterwards, it will build meryldbs for each "barcoded" file. Finally, it will concatenate all the meryldbs and "barcoded" files. Resulting "barcoded" file will be used for polishing. \r +\r +2.2 Using the ``--processed-10X {DIR}`` parameter. \r +\r +This directory can already be there or be produced by the pipeline as described in step 2.1. Once all the "barcoded" fastq files are there, meryldbs will be built for each "barcoded" file. Finally, it will concatenate all the meryldbs and "barcoded" files. Resulting "barcoded" file will be used for polishing. \r +\r +2.3 Using the ``--10X`` option. \r +\r +The argument to this is the path to the concatenated ".barcoded" file that needs to be used for polishing. If the pre-concatenated files are not given, meryldbs will be directly generated with this file, but it may run out of memory. 
\r +\r +### 3- Illumina short-read data\r +\r +3.1 Using the ``--illumina-dir {DIR}`` option, that will look for all the files in the directory that end in '.1.fastq.gz' and will add the basenames to "illumina_wildcards". These wildcards will be processed by the pipeline that will: \r +\r +- Trim adaptors with Trimgalore\r +\r +- Concatenate all the trimmed *.1.fastq.gz and the *2.fastq.gz in one file per pair. \r +\r +- The resulting reads will be used for building meryldbs and polishing. \r +\r +3.2 Using the ``--processed-illumina`` option. If the directory exists and contains files, the pipeline will look for all the files in the directory that end in '.1.fastq.gz' and will add the basenames to "illumina_wildcards". These wildcards will be processed by the pipeline that will:\r +\r +- Concatenate all the trimmed *.1.fastq.gz and the *2.fastq.gz in one file per pair. \r +\r +- The resulting reads will be used for building meryldbs and polishing. \r +\r +3.3 Using the ``--pe1 {FILE} and --pe2 {FILE}`` options. That will consider that these are the paired files containing all the illumina reads ready to be used and will build meryldbs and polish with them.\r +\r +### 4- Input assemblies\r +\r +If you want to polish an already assembled assembly, you can give it to the pipeline by using the option ``--assembly-in ASSEMBLY_IN [ASSEMBLY_IN ...]\r + Dictionary with assemblies that need to be polished but not assembled and directory where they should\r + be polished. Example: '{"assembly1":"polishing_dir1"}' '{"assembly2"="polishing_dir2"}' ...``\r + \r +If you want to improve an already polished assembly, you can give it to the pipeline by using the option ``--postpolish-assemblies POSTPOLISH_ASSEMBLIES [POSTPOLISH_ASSEMBLIES ...]\r + Dictionary with assemblies for whic postpolishing steps need to be run but that are not assembled and\r + base step for the directory where the first postpolishing step should be run. 
Example:\r + '{"assembly1":"s04.1_p03.1"}' '{"assembly2"="s04.2_p03.2"}' ...``\r +\r +\r +\r +# Description of implemented rules\r +\r +1- Preprocessing:\r + \r +- **Read concatenation:**\r +\r +``zcat {input.fastqs} | pigz -p {threads} -c > {output.final_fastq}``\r + \r +- **Longranger for 10X reads**: it uses the Longranger version installed in the path specified in the configfile\r +\r +``longranger basic --id={params.sample} --sample={params.sample} --fastqs={input.mkfastq_dir} --localcores={threads}``\r +\r +- **Trimgalore:** By default it gives the ``--gzip -q 20 --paired --retain_unpaired`` options, but it can be changed with the ``--trim-galore-opts `` argument. \r +\r +``trim_galore -j {threads} {params.opts} {input.read1} {input.read2}``\r +\r +- **Filtlong:** it uses the Filtlong version installed in the path specified in the configfile. By default it gives the min_length and min_mean_q parameters, but extra parameters can be added with the ``--filtlong-opts`` option.\r +\r +``filtlong --min_length {params.minlen} --min_mean_q {params.min_mean_q} {params.opts} {input.reads} | pigz -p {threads} -c > {output.outreads}``\r + \r +- **Build meryldb** (with processed 10X reads or illumina reads): it uses the merqury conda environment specified in the configfile. It takes as argument the `--mery-k` value that needs to be estimated first for the genime size. \r +\r +``meryl k={params.kmer} count output {output.out_dir} {input.fastq}``\r + \r +- Concat meryldbs: with the merqury conda environment specified in the configfile\r +\r +``meryl union-sum output {output.meryl_all} {input.input_run}``\r + \r +- **Align ONT (Minimap2):** it aligns the reads using minimap2 and outputs the alignment either in bam or in paf.gz formats. 
It uses the minimap2 conda environment specified in the configfile\r +\r +``minimap2 -{params.align_opts} -t {threads} {input.genome} {input.reads} ``\r +\r +- **Align Illumina (BWA-MEM):** it aligns the reads with BWA-mem and outputs a bam file\r +\r +``bwa mem -Y {params.options} -t {threads} {input.genome} {input.reads} | samtools view -Sb - | samtools sort -@ {threads} -o {output.mapping} -``\r +\r +2- Assembly\r +\r +- **Flye (default)**. It is run by default, if you don't want the pipeline to run it, you can give `--no-flye` option when creating the config. It uses the conda environment specified in the config. By default it is set to 2 polishing iterations and gives the genome-size estimate that has been given when creating the config. Extra options can be provided with the `--flye-opts`.\r +\r +``flye --{params.readtype} {input.reads} -o {params.outdir}out -t {threads} -i {params.pol_iterations} {params.other_flye_opts} ``\r + \r +- **Nextdenovo (if ``run-nextdenovo``):** It uses the cluster module specified in the config. If nextdenovo option is turned on, the create_config script will also create the nextdenovo config file. Check the create_config help to see which options can be modified on it. \r +\r +``nextDenovo {input.config}``\r +\r +3- Polishing\r +\r +- **Hypo (default):** It is the polisher that the pipeline uses by default, it can be turned off specifying ``--no-hypo`` when creating the config. If selected, the reads will be aligned in previous rules and then hypo will be run, it requires illumina data. It uses the conda environment specified in the config. \r +\r +``hypo -r @short_reads.list.txt -d {input.genome} -b {input.sr_bam} -c {coverage} -s {params.genome_size} -B {input.lr_bam} -t {threads} -o {output.polished} -p {params.proc} {params.opts} ``\r + \r +- **Racon (if turned on):** to run racon, specify ``--racon-rounds `` and the number of rounds of it you want to run. It uses the conda environment specified in the config file. 
\r +\r +``{params.racon_env}/scripts/racon_wrapper.py -u {params.opts} -t {threads} reads4racon.fastq.gz {input.mapping} {input.assembly} > {output.polished} ``\r + \r +- **Medaka (if turned on):** to run medaka, specify ``--medaka-rounds`` and the nummber of rounds of it you want to run. It uses the conda environment specified in the config file. It'll run after racon and before pilon, if they are also selected. \r +\r +`` medaka consensus {input.mapping} {wildcards.directory}/rmp/{wildcards.base}.medaka{wildcards.param}.hdf --threads {medaka_threads} --model {params.model} {params.consensus_opts};\r +medaka stitch --threads {threads} {wildcards.directory}/rmp/{wildcards.base}.medaka{wildcards.param}.hdf {input.assembly} {output.polished}``\r + \r +- **Pilon (if turned on):** to run Pilon, specify ``--pilon-rounds`` and the number of rounds of it you want to run. If it's a big genome, the pipeline will split the consensus step in several jobs, each of them running on certain scaffolds. It uses the version installed in the path specified in the config. \r +\r +``{scripts_dir}split_bam.py assembly.len {input.mapping} {params.chunks} {threads};\r +java {params.java_opts} -jar {params.path} --genome {input.assembly} --frags {input.alignment} {params.opts} --threads {threads} --output {basename}; \r +{scripts_dir}/concat_pilon.py {params.splitdir} {params.chunks} > {output.polished}``\r +\r + \r +- **Nextpolish ont (if turned on):** to run nextpolish with ONT reads, specify ``--nextpolish-ont-rounds`` and the number of rounds you want to run of it. \r +\r +``"python /apps/NEXTPOLISH/1.3.1/lib/nextpolish2.py -g {input.genome} -p {threads} -l lgs.fofn -r {params.lrtype} > {output.polished}``\r + \r +- **Nextpolish illumina (if turned on):** to run nextpolish with ONT reads, specify ``--nextpolish-ill-rounds`` and the number of rounds you want to run of it. 
\r +\r +``"python /apps/NEXTPOLISH/1.3.1/lib/nextpolish1.py -g {input.genome} -p {threads} -s {input.bam} -t {params.task} > {output.polished}``\r +\r +4- Post-assembly\r +\r +- **Purge_dups (by default):** select ``--no-purgedups`` if you don't want to run it. If no manual cutoffs are given, it'll run purgedups with automatic cutoffs and then will rerun it selecting the mean cutoff as 0.75\\*cov. It uses the version installed in the cluster module specified in the config. \r +\r +5- Evaluations\r + \r +- **Merqury:** It runs on each 'terminal' assembly. This is, the base assembly and the resulting assembly from each branch of the pipeline. \r + \r +- **Busco:** It can be run only in the terminal assemblies or on all the assemblies produced by the pipeline. It uses the conda environment specified in the config as well as the parameters specified. \r + \r +- **Nseries:** This is run during the *finalize* on all the assemblies that are evaluated. After it, that rule combines the statistics produced by all the evaluation rules. 
\r +\r +# Description of all options\r +```\r +bin/create_config_assembly.py -h\r +usage: create_configuration_file [-h] [--configFile configFile]\r + [--specFile specFile]\r + [--ndconfFile ndconfFile]\r + [--concat-cores concat_cores]\r + [--genome-size genome_size]\r + [--lr-type lr_type] [--basename base_name]\r + [--species species] [--keep-intermediate]\r + [--preprocess-lr-step PREPROCESS_ONT_STEP]\r + [--preprocess-10X-step PREPROCESS_10X_STEP]\r + [--preprocess-illumina-step PREPROCESS_ILLUMINA_STEP]\r + [--preprocess-hic-step PREPROCESS_HIC_STEP]\r + [--flye-step FLYE_STEP] [--no-flye]\r + [--nextdenovo-step NEXTDENOVO_STEP]\r + [--run-nextdenovo]\r + [--nextpolish-cores nextpolish_cores]\r + [--minimap2-cores minimap2_cores]\r + [--bwa-cores bwa_cores]\r + [--hypo-cores hypo_cores]\r + [--pairtools-cores pairtools_cores]\r + [--busco-cores busco_cores]\r + [--nextpolish-ont-rounds nextpolish_ont_rounds]\r + [--nextpolish-ill-rounds nextpolish_ill_rounds]\r + [--hypo-rounds hypo_rounds]\r + [--longranger-cores longranger_cores]\r + [--longranger-path longranger_path]\r + [--genomescope-opts genomescope_additional]\r + [--no-purgedups] [--ploidy ploidy]\r + [--run-tigmint] [--run-kraken2] [--no-yahs]\r + [--scripts-dir SCRIPTS_DIR]\r + [--ont-reads ONT_READS] [--ont-dir ONT_DIR]\r + [--ont-filt ONT_FILTERED] [--pe1 PE1]\r + [--pe2 PE2]\r + [--processed-illumina PROCESSED_ILLUMINA]\r + [--raw-10X RAW_10X [RAW_10X ...]]\r + [--processed-10X PROCESSED_10X] [--10X R10X]\r + [--illumina-dir ILLUMINA_DIR]\r + [--assembly-in ASSEMBLY_IN [ASSEMBLY_IN ...]]\r + [--postpolish-assemblies POSTPOLISH_ASSEMBLIES [POSTPOLISH_ASSEMBLIES ...]]\r + [--hic-dir HIC_DIR]\r + [--pipeline-workdir PIPELINE_WORKDIR]\r + [--filtlong-dir FILTLONG_DIR]\r + [--concat-hic-dir CONCAT_HIC_DIR]\r + [--flye-dir FLYE_DIR]\r + [--nextdenovo-dir NEXTDENOVO_DIR]\r + [--flye-polishing-dir POLISH_FLYE_DIR]\r + [--nextdenovo-polishing-dir POLISH_NEXTDENOVO_DIR]\r + [--eval-dir eval_dir] 
[--stats-out stats_out]\r + [--hic-qc-dir hic_qc_dir]\r + [--filtlong-minlen filtlong_minlen]\r + [--filtlong-min-mean-q filtlong_min_mean_q]\r + [--filtlong-opts filtlong_opts]\r + [--kraken2-db kraken2_db]\r + [--kraken2-kmer kraken2_kmers]\r + [--kraken2-opts additional_kraken2_opts]\r + [--kraken2-cores kraken2_threads]\r + [--trim-galore-opts trim_galore_opts]\r + [--trim-Illumina-cores Trim_Illumina_cores]\r + [--flye-cores flye_cores]\r + [--flye-polishing-iterations flye_pol_it]\r + [--other-flye-opts other_flye_opts]\r + [--nextdenovo-cores nextdenovo_cores]\r + [--nextdenovo-jobtype nextdenovo_type]\r + [--nextdenovo-task nextdenovo_task]\r + [--nextdenovo-rewrite nextdenovo_rewrite]\r + [--nextdenovo-parallel_jobs nextdenovo_parallel_jobs]\r + [--nextdenovo-minreadlen nextdenovo_minreadlen]\r + [--nextdenovo-seeddepth nextdenovo_seeddepth]\r + [--nextdenovo-seedcutoff nextdenovo_seedcutoff]\r + [--nextdenovo-blocksize nextdenovo_blocksize]\r + [--nextdenovo-pa-correction nextdenovo_pa_correction]\r + [--nextdenovo-minimap_raw nextdenovo_minimap_raw]\r + [--nextdenovo-minimap_cns nextdenovo_minimap_cns]\r + [--nextdenovo-minimap_map nextdenovo_minimap_map]\r + [--nextdenovo-sort nextdenovo_sort]\r + [--nextdenovo-correction_opts nextdenovo_correction_opts]\r + [--nextdenovo-nextgraph_opt nextdenovo_nextgraph_opt]\r + [--sr-cov ill_cov]\r + [--hypo-proc hypo_processes] [--hypo-no-lr]\r + [--hypo-opts hypo_opts]\r + [--purgedups-cores purgedups_cores]\r + [--purgedups-calcuts-opts calcuts_opts]\r + [--tigmint-cores tigmint_cores]\r + [--tigmint-opts tigmint_opts] [--hic-qc]\r + [--no-pretext] [--assembly-qc assembly_qc]\r + [--yahs-cores yahs_cores] [--yahs-mq yahs_mq]\r + [--yahs-opts yahs_opts]\r + [--hic-map-opts hic_map_opts]\r + [--mq mq [mq ...]]\r + [--hic-qc-assemblylen hic_qc_assemblylen]\r + [--blast-cores blast_cores]\r + [--hic-blastdb blastdb]\r + [--hic-readsblast hic_readsblast]\r + [--no-final-evals]\r + [--busco-lin busco_lineage]\r + 
[--merqury-db merqury_db] [--meryl-k meryl_k]\r + [--meryl-threads meryl_threads]\r + [--ont-list ONT_wildcards]\r + [--illumina-list illumina_wildcards]\r + [--r10X-list r10X_wildcards]\r + [--hic-list hic_wildcards]\r +\r +Create a configuration json file for the assembly pipeline.\r +\r +optional arguments:\r + -h, --help show this help message and exit\r +\r +General Parameters:\r + --configFile configFile\r + Configuration JSON to be generated. Default\r + assembly.config\r + --specFile specFile Cluster specifications JSON fileto be generated.\r + Default assembly.spec\r + --ndconfFile ndconfFile\r + Name pf the nextdenovo config file. Default\r + nextdenovo.config\r + --concat-cores concat_cores\r + Number of threads to concatenate reads and to run\r + filtlong. Default 4\r + --genome-size genome_size\r + Approximate genome size. Example: 615m or 2.6g.\r + Default None\r + --lr-type lr_type Type of long reads (options are flye read-type\r + options). Default nano-raw\r + --basename base_name Base name for the project. Default None\r + --species species Name of the species to be assembled. Default None\r + --keep-intermediate Set this to True if you do not want intermediate files\r + to be removed. Default False\r + --preprocess-lr-step PREPROCESS_ONT_STEP\r + Step for preprocessing long-reads. Default 02.1\r + --preprocess-10X-step PREPROCESS_10X_STEP\r + Step for preprocessing 10X reads. Default 02.2\r + --preprocess-illumina-step PREPROCESS_ILLUMINA_STEP\r + Step for preprocessing illumina reads. Default 02.2\r + --preprocess-hic-step PREPROCESS_HIC_STEP\r + Step for preprocessing hic reads. Default 02.3\r + --flye-step FLYE_STEP\r + Step for running flye. Default 03.1\r + --no-flye Give this option if you do not want to run Flye.\r + --nextdenovo-step NEXTDENOVO_STEP\r + Step for running nextdenovo. 
Default 03.2\r + --run-nextdenovo Give this option if you do want to run Nextdenovo.\r + --nextpolish-cores nextpolish_cores\r + Number of threads to run the nextpolish step. Default\r + 24\r + --minimap2-cores minimap2_cores\r + Number of threads to run the alignment with minimap2.\r + Default 32\r + --bwa-cores bwa_cores\r + Number of threads to run the alignments with BWA-Mem2.\r + Default 16\r + --hypo-cores hypo_cores\r + Number of threads to run the hypo step. Default 24\r + --pairtools-cores pairtools_cores\r + Number of threads to run the pairtools step. Default\r + 64\r + --busco-cores busco_cores\r + Number of threads to run BUSCO. Default 32\r + --nextpolish-ont-rounds nextpolish_ont_rounds\r + Number of rounds to run the Nextpolish with ONT step.\r + Default 0\r + --nextpolish-ill-rounds nextpolish_ill_rounds\r + Number of rounds to run the Nextpolish with illumina\r + step. Default 0\r + --hypo-rounds hypo_rounds\r + Number of rounds to run the Hypostep. Default 1\r + --longranger-cores longranger_cores\r + Number of threads to run longranger. Default 16\r + --longranger-path longranger_path\r + Path to longranger executable. Default\r + /scratch/project/devel/aateam/src/10X/longranger-2.2.2\r + --genomescope-opts genomescope_additional\r + Additional options to run Genomescope2 with. Default\r + --no-purgedups Give this option if you do not want to run Purgedups.\r + --ploidy ploidy Expected ploidy. Default 2\r + --run-tigmint Give this option if you want to run the scaffolding\r + with 10X reads step.\r + --run-kraken2 Give this option if you want to run Kraken2 on the\r + input reads.\r + --no-yahs Give this option if you do not want to run yahs.\r +\r +Inputs:\r + --scripts-dir SCRIPTS_DIR\r + Directory with the different scripts for the pipeline.\r + Default bin/../scripts/\r + --ont-reads ONT_READS\r + File with all the ONT reads. Default None\r + --ont-dir ONT_DIR Directory where the ONT fastqs are stored. 
Default\r + None\r + --ont-filt ONT_FILTERED\r + File with the ONT reads after running filtlong on\r + them. Default None\r + --pe1 PE1 File with the illumina paired-end fastqs, already\r + trimmed, pair 1.\r + --pe2 PE2 File with the illumina paired-end fastqs, already\r + trimmed, pair 2.\r + --processed-illumina PROCESSED_ILLUMINA\r + Directory to Processed illumina reads. Already there\r + or to be produced by the pipeline.\r + --raw-10X RAW_10X [RAW_10X ...]\r + Dictionary with 10X raw read directories, it has to be\r + the mkfastq dir. You must specify as well the\r + sampleIDs from this run. Example: '{"mkfastq-\r + dir":"sample1,sample2,sample3"}'...\r + --processed-10X PROCESSED_10X\r + Directory to Processed 10X reads. Already there or to\r + be produced by the pipeline.\r + --10X R10X File with barcoded 10X reads in fastq.gz format,\r + concatenated.\r + --illumina-dir ILLUMINA_DIR\r + Directory where the raw illumina fastqs are stored.\r + Default None\r + --assembly-in ASSEMBLY_IN [ASSEMBLY_IN ...]\r + Dictionary with assemblies that need to be polished\r + but not assembled and directory where they should be\r + polished. Example: '{"assembly1":"polishing_dir1"}'\r + '{"assembly2"="polishing_dir2"}' ...\r + --postpolish-assemblies POSTPOLISH_ASSEMBLIES [POSTPOLISH_ASSEMBLIES ...]\r + Dictionary with assemblies for whic postpolishing\r + steps need to be run but that are not assembled and\r + base step for the directory where the first\r + postpolishing step should be run. Example:\r + '{"assembly1":"s04.1_p03.1"}'\r + '{"assembly2":"s04.2_p03.2"}' ...\r + --hic-dir HIC_DIR Directory where the HiC fastqs are stored. Default\r + None\r +\r +Outputs:\r + --pipeline-workdir PIPELINE_WORKDIR\r + Base directory for the pipeline run. 
Default /software\r + /assembly/pipelines/Assembly_pipeline/v2.1/GLAWS/\r + --filtlong-dir FILTLONG_DIR\r + Directory to process the ONT reads with filtlong.\r + Default s02.1_p01.1_Filtlong\r + --concat-hic-dir CONCAT_HIC_DIR\r + Directory to concatenate the HiC reads. Default\r + s02.3_p01.1_Concat_HiC\r + --flye-dir FLYE_DIR Directory to run flye. Default s03.1_p02.1_flye/\r + --nextdenovo-dir NEXTDENOVO_DIR\r + Directory to run nextdenovo. Default\r + s03.2_p02.1_nextdenovo/\r + --flye-polishing-dir POLISH_FLYE_DIR\r + Directory to polish the flye assembly. Default\r + s04.1_p03.1_polishing/\r + --nextdenovo-polishing-dir POLISH_NEXTDENOVO_DIR\r + Directory to run nextdenovo. Default\r + s04.2_p03.2_polishing/\r + --eval-dir eval_dir Base directory for the evaluations. Default\r + evaluations/\r + --stats-out stats_out\r + Path to the file with the final statistics.\r + --hic-qc-dir hic_qc_dir\r + Directory to run the hic_qc. Default hic_qc/\r +\r +Filtlong:\r + --filtlong-minlen filtlong_minlen\r + Minimum read length to use with Filtlong. Default 1000\r + --filtlong-min-mean-q filtlong_min_mean_q\r + Minimum mean quality to use with Filtlong. Default 80\r + --filtlong-opts filtlong_opts\r + Extra options to run Filtlong (eg. -t 4000000000)\r +\r +Kraken2:\r + --kraken2-db kraken2_db\r + Database to be used for running Kraken2. Default None\r + --kraken2-kmer kraken2_kmers\r + Database to be used for running Kraken2. Default None\r + --kraken2-opts additional_kraken2_opts\r + Optional parameters for the rule Kraken2. Default\r + --kraken2-cores kraken2_threads\r + Number of threads to run the Kraken2 step. Default 16\r +\r +Trim_Galore:\r + --trim-galore-opts trim_galore_opts\r + Optional parameters for the rule trim_galore. 
Default\r + --gzip -q 20 --paired --retain_unpaired\r + --trim-Illumina-cores Trim_Illumina_cores\r + Number of threads to run the Illumina trimming step.\r + Default 8\r +\r +Flye:\r + --flye-cores flye_cores\r + Number of threads to run FLYE. Default 128\r + --flye-polishing-iterations flye_pol_it\r + Number of polishing iterations to use with FLYE.\r + Default 2\r + --other-flye-opts other_flye_opts\r + Additional options to run Flye. Default --scaffold\r +\r +Nextdenovo:\r + --nextdenovo-cores nextdenovo_cores\r + Number of threads to run nextdenovo. Default 2\r + --nextdenovo-jobtype nextdenovo_type\r + Job_type for nextdenovo. Default slurm\r + --nextdenovo-task nextdenovo_task\r + Task need to run. Default all\r + --nextdenovo-rewrite nextdenovo_rewrite\r + Overwrite existing directory. Default yes\r + --nextdenovo-parallel_jobs nextdenovo_parallel_jobs\r + Number of tasks used to run in parallel. Default 50\r + --nextdenovo-minreadlen nextdenovo_minreadlen\r + Filter reads with length < minreadlen. Default 1k\r + --nextdenovo-seeddepth nextdenovo_seeddepth\r + Expected seed depth, used to calculate seed_cutoff,\r + co-use with genome_size, you can try to set it 30-45\r + to get a better assembly result. Default 45\r + --nextdenovo-seedcutoff nextdenovo_seedcutoff\r + Minimum seed length, <=0 means calculate it\r + automatically using bin/seq_stat. Default 0\r + --nextdenovo-blocksize nextdenovo_blocksize\r + Block size for parallel running, split non-seed reads\r + into small files, the maximum size of each file is\r + blocksize. Default 1g\r + --nextdenovo-pa-correction nextdenovo_pa_correction\r + number of corrected tasks used to run in parallel,\r + each corrected task requires ~TOTAL_INPUT_BASES/4\r + bytes of memory usage, overwrite parallel_jobs only\r + for this step. Default 100\r + --nextdenovo-minimap_raw nextdenovo_minimap_raw\r + minimap2 options, used to find overlaps between raw\r + reads, see minimap2-nd for details. 
Default -t 30\r + --nextdenovo-minimap_cns nextdenovo_minimap_cns\r + minimap2 options, used to find overlaps between\r + corrected reads. Default -t 30\r + --nextdenovo-minimap_map nextdenovo_minimap_map\r + minimap2 options, used to map reads back to the\r + assembly. Default -t 30 --no-kalloc\r + --nextdenovo-sort nextdenovo_sort\r + sort options, see ovl_sort for details. Default -m\r + 400g -t 20\r + --nextdenovo-correction_opts nextdenovo_correction_opts\r + Correction options. Default -p 30 -dbuf\r + --nextdenovo-nextgraph_opt nextdenovo_nextgraph_opt\r + nextgraph options, see nextgraph for details. Default\r + -a 1\r +\r +Hypo:\r + --sr-cov ill_cov Approximate short read coverage for hypo Default 0\r + --hypo-proc hypo_processes\r + Number of contigs to be processed in parallel by HyPo.\r + Default 6\r + --hypo-no-lr Set this to false if you don¡t want to run hypo with\r + long reads. Default True\r + --hypo-opts hypo_opts\r + Additional options to run Hypo. Default None\r +\r +Purge_dups:\r + --purgedups-cores purgedups_cores\r + Number of threads to run purgedups. Default 8\r + --purgedups-calcuts-opts calcuts_opts\r + Adjusted values to run calcuts for purgedups. Default\r + None\r +\r +Scaffold_with_10X:\r + --tigmint-cores tigmint_cores\r + Number of threads to run the 10X scaffolding step.\r + Default 12\r + --tigmint-opts tigmint_opts\r + Adjusted values to run the scaffolding with 10X reads.\r + Default None\r +\r +HiC:\r + --hic-qc Give this option if only QC of the HiC data needs to\r + be done.\r + --no-pretext Give this option if you do not want to generate the\r + pretext file\r + --assembly-qc assembly_qc\r + Path to the assembly to be used perfom the QC of the\r + HiC reads.\r + --yahs-cores yahs_cores\r + Number of threads to run YAHS. 
Default 48\r + --yahs-mq yahs_mq Mapping quality to use when running yahs.Default 40\r + --yahs-opts yahs_opts\r + Additional options to give to YAHS.Default\r + --hic-map-opts hic_map_opts\r + Options to use with bwa mem when aligning the HiC\r + reads. Deafault -5SP -T0\r + --mq mq [mq ...] Mapping qualities to use for processing the hic\r + mappings. Default [0, 40]\r + --hic-qc-assemblylen hic_qc_assemblylen\r + Lentgh of the assembly to be used for HiC QC\r + --blast-cores blast_cores\r + Number of threads to run blast with the HiC unmapped\r + reads.Default 8\r + --hic-blastdb blastdb\r + BLAST Database to use to classify the hic unmapped\r + reads. Default\r + /scratch_isilon/groups/assembly/data/blastdbs\r + --hic-readsblast hic_readsblast\r + Number of unmapped hic reads to classify with blast.\r + Default 100\r +\r +Finalize:\r + --no-final-evals If specified, do not run evaluations on final\r + assemblies. Default True\r + --busco-lin busco_lineage\r + Path to the lineage directory to run Busco with.\r + Default None\r + --merqury-db merqury_db\r + Meryl database. Default None\r + --meryl-k meryl_k Kmer length to build the meryl database. Default None\r + --meryl-threads meryl_threads\r + Number of threads to run meryl and merqury. Default 4\r +\r +Wildcards:\r + --ont-list ONT_wildcards\r + List with basename of the ONT fastqs that will be\r + used. Default None\r + --illumina-list illumina_wildcards\r + List with basename of the illumina fastqs. Default\r + None\r + --r10X-list r10X_wildcards\r + List with basename of the raw 10X fastqs. Default None\r + --hic-list hic_wildcards\r + List with basename of the raw hic fastqs. 
Default None\r +```\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/567?version=1" ; + schema1:keywords "Bioinformatics, Genomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "CLAWS (CNAG's long-read assembly workflow in Snakemake)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/567?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Complete workflow for TANGO as reported in Lecomte et al (2024),\r +"Revealing the dynamics and mechanisms of bacterial interactions in\r +cheese production with metabolic modelling", Metabolic Eng. 83:24-38\r +https://doi.org/10.1016/j.ymben.2024.02.014\r +\r +1. Parameters for individual models are obtained by optimization\r +2. Individual dynamics and community dynamics are simulated\r +3. Figures for the manuscript are assembled from the results.""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/873?version=1" ; + schema1:isBasedOn "https://forgemia.inra.fr/tango/tango_models.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Tango: Numerical reconciliation of bacterial fermentation in cheese production" ; + schema1:sdDatePublished "2024-07-12 13:23:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/873/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 58757 ; + schema1:dateCreated "2024-05-03T07:03:12Z" ; + schema1:dateModified "2024-05-03T07:08:20Z" ; + schema1:description """Complete workflow for TANGO as reported in Lecomte et al (2024),\r +"Revealing the dynamics and mechanisms of bacterial interactions in\r +cheese production with metabolic modelling", Metabolic Eng. 
83:24-38\r +https://doi.org/10.1016/j.ymben.2024.02.014\r +\r +1. Parameters for individual models are obtained by optimization\r +2. Individual dynamics and community dynamics are simulated\r +3. Figures for the manuscript are assembled from the results.""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Tango: Numerical reconciliation of bacterial fermentation in cheese production" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/873?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 11509 . + + a schema1:Dataset ; + schema1:datePublished "2024-04-22T11:45:22.529281" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "parallel-accession-download/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:Dataset ; + schema1:datePublished "2022-02-04T14:56:46.372304" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-variation-reporting" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-variation-reporting/COVID-19-VARIATION-REPORTING" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "The workflow runs the RetroSynthesis algorithm to generate a collection of heterologous pathways in a host organism of choice, converts them to SBML files, performs analysis on the pathways to then rank the theoretical best performing ones." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/25?version=1" ; + schema1:isBasedOn "https://galaxy-synbiocad.org/u/mdulac/w/rpranker-3" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Pathway Ranker" ; + schema1:sdDatePublished "2024-07-12 13:37:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/25/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14605 ; + schema1:creator ; + schema1:dateCreated "2020-05-29T11:19:05Z" ; + schema1:dateModified "2023-01-16T13:41:47Z" ; + schema1:description "The workflow runs the RetroSynthesis algorithm to generate a collection of heterologous pathways in a host organism of choice, converts them to SBML files, performs analysis on the pathways to then rank the theoretical best performing ones." ; + schema1:keywords "pathway prediction, pathway design, metabolic engineering, Synthetic Biology, Retrosynthesis" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Pathway Ranker" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/25?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-04-29T16:50:16.825885" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/972?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/circdna" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/circdna" ; + schema1:sdDatePublished "2024-07-12 13:22:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/972/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9237 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:45Z" ; + schema1:dateModified "2024-06-11T12:54:45Z" ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/972?version=7" ; + schema1:keywords "ampliconarchitect, ampliconsuite, circle-seq, circular, DNA, eccdna, ecdna, extrachromosomal-circular-dna, Genomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/circdna" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/972?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Simple bacterial assembly and annotation pipeline." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/966?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/bacass" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bacass" ; + schema1:sdDatePublished "2024-07-12 13:22:17 +0100" ; + schema1:url "https://workflowhub.eu/workflows/966/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3698 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:42Z" ; + schema1:dateModified "2024-06-11T12:54:42Z" ; + schema1:description "Simple bacterial assembly and annotation pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/966?version=7" ; + schema1:keywords "Assembly, bacterial-genomes, denovo, denovo-assembly, genome-assembly, hybrid-assembly, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bacass" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/966?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# The Polygenic Score Catalog Calculator (`pgsc_calc`)\r +\r +[![Documentation Status](https://readthedocs.org/projects/pgsc-calc/badge/?version=latest)](https://pgsc-calc.readthedocs.io/en/latest/?badge=latest)\r +[![pgscatalog/pgsc_calc CI](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml/badge.svg)](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A521.04.0-23aa62.svg?labelColor=000000)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +\r +## Introduction\r +\r +`pgsc_calc` is a bioinformatics best-practice analysis pipeline for calculating\r +polygenic [risk] scores on samples with imputed genotypes using existing scoring\r +files from the [Polygenic Score (PGS) Catalog](https://www.pgscatalog.org/)\r +and/or user-defined PGS/PRS.\r +\r +## Pipeline summary\r +\r +

\r + \r +

\r +\r +* Downloading scoring files using the PGS Catalog API in a specified genome build (GRCh37 and GRCh38).\r +* Reading custom scoring files (and performing a liftover if genotyping data is in a different build).\r +* Automatically combines and creates scoring files for efficient parallel computation of multiple PGS\r + - Matching variants in the scoring files against variants in the target dataset (in plink bfile/pfile or VCF format)\r +* Calculates PGS for all samples (linear sum of weights and dosages)\r +* Creates a summary report to visualize score distributions and pipeline metadata (variant matching QC)\r +\r +### Features in development\r +\r +- *Genetic Ancestry*: calculate similarity of target samples to populations in a\r + reference dataset (e.g. [1000 Genomes (1000G)](http://www.nature.com/nature/journal/v526/n7571/full/nature15393.html), \r + [Human Genome Diversity Project (HGDP)](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7115999/)) using principal components analysis (PCA).\r +- *PGS Normalization*: Using reference population data and/or PCA projections to report\r + individual-level PGS predictions (e.g. percentiles, z-scores) that account for genetic ancestry.\r +\r +## Quick start\r +\r +1. Install\r +[`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation)\r +(`>=21.04.0`)\r +\r +2. Install [`Docker`](https://docs.docker.com/engine/installation/) or\r +[`Singularity (v3.8.3 minimum)`](https://www.sylabs.io/guides/3.0/user-guide/)\r +(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort)\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile test,\r + ```\r +\r +4. 
Start running your own analysis!\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile --input samplesheet.csv --pgs_id PGS001229\r + ```\r +\r +See [getting\r +started](https://pgsc-calc.readthedocs.io/en/latest/getting-started.html) for more\r +details.\r +\r +## Documentation\r +\r +[Full documentation is available on Read the Docs](https://pgsc-calc.readthedocs.io/)\r +\r +## Credits\r +\r +pgscatalog/pgsc_calc is developed as part of the PGS Catalog project, a\r +collaboration between the University of Cambridge’s Department of Public Health\r +and Primary Care (Michael Inouye, Samuel Lambert) and the European\r +Bioinformatics Institute (Helen Parkinson, Laura Harris).\r +\r +The pipeline seeks to provide a standardized workflow for PGS calculation and\r +ancestry inference implemented in nextflow derived from an existing set of\r +tools/scripts developed by Inouye lab (Rodrigo Canovas, Scott Ritchie, Jingqin\r +Wu) and PGS Catalog teams (Samuel Lambert, Laurent Gil).\r +\r +The adaptation of the codebase, nextflow implementation, and PGS Catalog features\r +are written by Benjamin Wingfield, Samuel Lambert, Laurent Gil with additional input\r +from Aoife McMahon (EBI). Development of new features, testing, and code review\r +is ongoing including Inouye lab members (Rodrigo Canovas, Scott Ritchie) and others. A\r +manuscript describing the tool is in preparation. In the meantime if you use the\r +tool we ask you to cite the repo and the paper describing the PGS Catalog\r +resource:\r +\r +- >PGS Catalog Calculator _(in development)_. PGS Catalog\r + Team. [https://github.com/PGScatalog/pgsc_calc](https://github.com/PGScatalog/pgsc_calc)\r +- >Lambert _et al._ (2021) The Polygenic Score Catalog as an open database for\r +reproducibility and systematic evaluation. Nature Genetics. 
53:420–425\r +doi:[10.1038/s41588-021-00783-5](https://doi.org/10.1038/s41588-021-00783-5).\r +\r +This pipeline is distrubuted under an [Apache License](LICENSE) amd uses code and \r +infrastructure developed and maintained by the [nf-core](https://nf-co.re) community \r +(Ewels *et al. Nature Biotech* (2020) doi:[10.1038/s41587-020-0439-x](https://doi.org/10.1038/s41587-020-0439-x)), \r +reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +Additional references of open-source tools and data used in this pipeline are described in\r +[`CITATIONS.md`](CITATIONS.md).\r +\r +This work has received funding from EMBL-EBI core funds, the Baker Institute,\r +the University of Cambridge, Health Data Research UK (HDRUK), and the European\r +Union’s Horizon 2020 research and innovation programme under grant agreement No\r +101016775 INTERVENE.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/556?version=3" ; + schema1:isBasedOn "https://github.com/PGScatalog/pgsc_calc" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for The Polygenic Score Catalog Calculator" ; + schema1:sdDatePublished "2024-07-12 13:27:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/556/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1673 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-08-10T09:10:39Z" ; + schema1:dateModified "2023-08-10T09:10:39Z" ; + schema1:description """# The Polygenic Score Catalog Calculator (`pgsc_calc`)\r +\r +[![Documentation Status](https://readthedocs.org/projects/pgsc-calc/badge/?version=latest)](https://pgsc-calc.readthedocs.io/en/latest/?badge=latest)\r +[![pgscatalog/pgsc_calc CI](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml/badge.svg)](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A521.04.0-23aa62.svg?labelColor=000000)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +\r +## Introduction\r +\r +`pgsc_calc` is a bioinformatics best-practice analysis pipeline for calculating\r +polygenic [risk] scores on samples with imputed genotypes using existing scoring\r +files from the [Polygenic Score (PGS) Catalog](https://www.pgscatalog.org/)\r +and/or user-defined PGS/PRS.\r +\r +## Pipeline summary\r +\r +

\r + \r +

\r +\r +* Downloading scoring files using the PGS Catalog API in a specified genome build (GRCh37 and GRCh38).\r +* Reading custom scoring files (and performing a liftover if genotyping data is in a different build).\r +* Automatically combines and creates scoring files for efficient parallel computation of multiple PGS\r + - Matching variants in the scoring files against variants in the target dataset (in plink bfile/pfile or VCF format)\r +* Calculates PGS for all samples (linear sum of weights and dosages)\r +* Creates a summary report to visualize score distributions and pipeline metadata (variant matching QC)\r +\r +### Features in development\r +\r +- *Genetic Ancestry*: calculate similarity of target samples to populations in a\r + reference dataset (e.g. [1000 Genomes (1000G)](http://www.nature.com/nature/journal/v526/n7571/full/nature15393.html), \r + [Human Genome Diversity Project (HGDP)](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7115999/)) using principal components analysis (PCA).\r +- *PGS Normalization*: Using reference population data and/or PCA projections to report\r + individual-level PGS predictions (e.g. percentiles, z-scores) that account for genetic ancestry.\r +\r +## Quick start\r +\r +1. Install\r +[`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation)\r +(`>=21.04.0`)\r +\r +2. Install [`Docker`](https://docs.docker.com/engine/installation/) or\r +[`Singularity (v3.8.3 minimum)`](https://www.sylabs.io/guides/3.0/user-guide/)\r +(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort)\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile test,\r + ```\r +\r +4. 
Start running your own analysis!\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile --input samplesheet.csv --pgs_id PGS001229\r + ```\r +\r +See [getting\r +started](https://pgsc-calc.readthedocs.io/en/latest/getting-started.html) for more\r +details.\r +\r +## Documentation\r +\r +[Full documentation is available on Read the Docs](https://pgsc-calc.readthedocs.io/)\r +\r +## Credits\r +\r +pgscatalog/pgsc_calc is developed as part of the PGS Catalog project, a\r +collaboration between the University of Cambridge’s Department of Public Health\r +and Primary Care (Michael Inouye, Samuel Lambert) and the European\r +Bioinformatics Institute (Helen Parkinson, Laura Harris).\r +\r +The pipeline seeks to provide a standardized workflow for PGS calculation and\r +ancestry inference implemented in nextflow derived from an existing set of\r +tools/scripts developed by Inouye lab (Rodrigo Canovas, Scott Ritchie, Jingqin\r +Wu) and PGS Catalog teams (Samuel Lambert, Laurent Gil).\r +\r +The adaptation of the codebase, nextflow implementation, and PGS Catalog features\r +are written by Benjamin Wingfield, Samuel Lambert, Laurent Gil with additional input\r +from Aoife McMahon (EBI). Development of new features, testing, and code review\r +is ongoing including Inouye lab members (Rodrigo Canovas, Scott Ritchie) and others. A\r +manuscript describing the tool is in preparation. In the meantime if you use the\r +tool we ask you to cite the repo and the paper describing the PGS Catalog\r +resource:\r +\r +- >PGS Catalog Calculator _(in development)_. PGS Catalog\r + Team. [https://github.com/PGScatalog/pgsc_calc](https://github.com/PGScatalog/pgsc_calc)\r +- >Lambert _et al._ (2021) The Polygenic Score Catalog as an open database for\r +reproducibility and systematic evaluation. Nature Genetics. 
53:420–425\r +doi:[10.1038/s41588-021-00783-5](https://doi.org/10.1038/s41588-021-00783-5).\r +\r +This pipeline is distrubuted under an [Apache License](LICENSE) amd uses code and \r +infrastructure developed and maintained by the [nf-core](https://nf-co.re) community \r +(Ewels *et al. Nature Biotech* (2020) doi:[10.1038/s41587-020-0439-x](https://doi.org/10.1038/s41587-020-0439-x)), \r +reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +Additional references of open-source tools and data used in this pipeline are described in\r +[`CITATIONS.md`](CITATIONS.md).\r +\r +This work has received funding from EMBL-EBI core funds, the Baker Institute,\r +the University of Cambridge, Health Data Research UK (HDRUK), and the European\r +Union’s Horizon 2020 research and innovation programme under grant agreement No\r +101016775 INTERVENE.\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/556?version=3" ; + schema1:keywords "Nextflow, Workflows, polygenic risk score, polygenic score, prediction, GWAS, genomic ancestry" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "The Polygenic Score Catalog Calculator" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/556?version=3" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# metaGOflow: A workflow for marine Genomic Observatories' data analysis\r +\r +![logo](https://raw.githubusercontent.com/hariszaf/metaGOflow-use-case/gh-pages/assets/img/metaGOflow_logo_italics.png)\r +\r +\r +## An EOSC-Life project\r +\r +The workflows developed in the framework of this project are based on `pipeline-v5` of the MGnify resource.\r +\r +> This branch is a child of the [`pipeline_5.1`](https://github.com/hariszaf/pipeline-v5/tree/pipeline_5.1) branch\r +> that contains all CWL descriptions of the MGnify pipeline version 5.1.\r +\r +## Dependencies\r +\r +To run metaGOflow you need to make sure you have the following set on your computing environmnet first:\r +\r +- python3 [v 3.8+]\r +- [Docker](https://www.docker.com) [v 19.+] or [Singularity](https://apptainer.org) [v 3.7.+]/[Apptainer](https://apptainer.org) [v 1.+]\r +- [cwltool](https://github.com/common-workflow-language/cwltool) [v 3.+]\r +- [rdflib](https://rdflib.readthedocs.io/en/stable/) [v 6.+]\r +- [rdflib-jsonld](https://pypi.org/project/rdflib-jsonld/) [v 0.6.2]\r +- [ro-crate-py](https://github.com/ResearchObject/ro-crate-py) [v 0.7.0]\r +- [pyyaml](https://pypi.org/project/PyYAML/) [v 6.0]\r +- [Node.js](https://nodejs.org/) [v 10.24.0+]\r +- Available storage ~235GB for databases\r +\r +### Storage while running\r +\r +Depending on the analysis you are about to run, disk requirements vary.\r +Indicatively, you may have a look at the metaGOflow publication for computing resources used in various cases.\r +\r +## Installation\r +\r +### Get the EOSC-Life marine GOs workflow\r +\r +```bash\r +git clone https://github.com/emo-bon/MetaGOflow\r +cd MetaGOflow\r +```\r +\r +### Download necessary databases (~235GB)\r +\r +You can download databases for the EOSC-Life GOs workflow by running the\r +`download_dbs.sh` script under the `Installation` folder.\r 
+\r +```bash\r +bash Installation/download_dbs.sh -f [Output Directory e.g. ref-dbs] \r +```\r +If you have one or more already in your system, then create a symbolic link pointing\r +at the `ref-dbs` folder or at one of its subfolders/files.\r +\r +The final structure of the DB directory should be like the following:\r +\r +````bash\r +user@server:~/MetaGOflow: ls ref-dbs/\r +db_kofam/ diamond/ eggnog/ GO-slim/ interproscan-5.57-90.0/ kegg_pathways/ kofam_ko_desc.tsv Rfam/ silva_lsu/ silva_ssu/\r +````\r +\r +## How to run\r +\r +### Ensure that `Node.js` is installed on your system before running metaGOflow\r +\r +If you have root access on your system, you can run the commands below to install it:\r +\r +##### DEBIAN/UBUNTU\r +```bash\r +sudo apt-get update -y\r +sudo apt-get install -y nodejs\r +```\r +\r +##### RH/CentOS\r +```bash\r +sudo yum install rh-nodejs (e.g. rh-nodejs10)\r +```\r +\r +### Set up the environment\r +\r +#### Run once - Setup environment\r +\r +- ```bash\r + conda create -n EOSC-CWL python=3.8\r + ```\r +\r +- ```bash\r + conda activate EOSC-CWL\r + ```\r +\r +- ```bash\r + pip install cwlref-runner cwltool[all] rdflib-jsonld rocrate pyyaml\r +\r + ```\r +\r +#### Run every time\r +\r +```bash\r +conda activate EOSC-CWL\r +``` \r +\r +### Run the workflow\r +\r +- Edit the `config.yml` file to set the parameter values of your choice. For selecting all the steps, then set to `true` the variables in lines [2-6].\r +\r +#### Using Singularity\r +\r +##### Standalone\r +- run:\r + ```bash\r + ./run_wf.sh -s -n osd-short -d short-test-case -f test_input/wgs-paired-SRR1620013_1.fastq.gz -r test_input/wgs-paired-SRR1620013_2.fastq.gz\r + ``\r +\r +##### Using a cluster with a queueing system (e.g. SLURM)\r +\r +- Create a job file (e.g., SBATCH file)\r +\r +- Enable Singularity, e.g. 
module load Singularity & all other dependencies \r +\r +- Add the run line to the job file\r +\r +\r +#### Using Docker\r +\r +##### Standalone\r +- run:\r + ``` bash\r + ./run_wf.sh -n osd-short -d short-test-case -f test_input/wgs-paired-SRR1620013_1.fastq.gz -r test_input/wgs-paired-SRR1620013_2.fastq.gz\r + ```\r + HINT: If you are using Docker, you may need to run the above command without the `-s' flag.\r +\r +## Testing samples\r +The samples are available in the `test_input` folder.\r +\r +We provide metaGOflow with partial samples from the Human Metagenome Project ([SRR1620013](https://www.ebi.ac.uk/ena/browser/view/SRR1620013) and [SRR1620014](https://www.ebi.ac.uk/ena/browser/view/SRR1620014))\r +They are partial as only a small part of their sequences have been kept, in terms for the pipeline to test in a fast way. \r +\r +\r +## Hints and tips\r +\r +1. In case you are using Docker, it is strongly recommended to **avoid** installing it through `snap`.\r +\r +2. `RuntimeError`: slurm currently does not support shared caching, because it does not support cleaning up a worker\r + after the last job finishes.\r + Set the `--disableCaching` flag if you want to use this batch system.\r +\r +3. In case you are having errors like:\r +\r +```\r +cwltool.errors.WorkflowException: Singularity is not available for this tool\r +```\r +\r +You may run the following command:\r +\r +```\r +singularity pull --force --name debian:stable-slim.sif docker://debian:stable-sli\r +```\r +\r +## Contribution\r +\r +To make contribution to the project a bit easier, all the MGnify `conditionals` and `subworkflows` under\r +the `workflows/` directory that are not used in the metaGOflow framework, have been removed. 
\r +However, all the MGnify `tools/` and `utils/` are available in this repo, even if they are not invoked in the current\r +version of metaGOflow.\r +This way, we hope we encourage people to implement their own `conditionals` and/or `subworkflows` by exploiting the\r +currently supported `tools` and `utils` as well as by developing new `tools` and/or `utils`.\r +\r +\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/384?version=2" ; + schema1:isBasedOn "https://github.com/emo-bon/MetaGOflow.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for A workflow for marine Genomic Observatories data analysis" ; + schema1:sdDatePublished "2024-07-12 13:33:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/384/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7006 ; + schema1:creator , + ; + schema1:dateCreated "2023-05-16T20:38:36Z" ; + schema1:dateModified "2023-05-16T20:38:36Z" ; + schema1:description """# metaGOflow: A workflow for marine Genomic Observatories' data analysis\r +\r +![logo](https://raw.githubusercontent.com/hariszaf/metaGOflow-use-case/gh-pages/assets/img/metaGOflow_logo_italics.png)\r +\r +\r +## An EOSC-Life project\r +\r +The workflows developed in the framework of this project are based on `pipeline-v5` of the MGnify resource.\r +\r +> This branch is a child of the [`pipeline_5.1`](https://github.com/hariszaf/pipeline-v5/tree/pipeline_5.1) branch\r +> that contains all CWL descriptions of the MGnify pipeline version 5.1.\r +\r +## Dependencies\r +\r +To run metaGOflow you need to make sure you have the following set on your computing environmnet first:\r +\r +- python3 [v 3.8+]\r +- [Docker](https://www.docker.com) [v 19.+] or [Singularity](https://apptainer.org) [v 3.7.+]/[Apptainer](https://apptainer.org) [v 1.+]\r 
+- [cwltool](https://github.com/common-workflow-language/cwltool) [v 3.+]\r +- [rdflib](https://rdflib.readthedocs.io/en/stable/) [v 6.+]\r +- [rdflib-jsonld](https://pypi.org/project/rdflib-jsonld/) [v 0.6.2]\r +- [ro-crate-py](https://github.com/ResearchObject/ro-crate-py) [v 0.7.0]\r +- [pyyaml](https://pypi.org/project/PyYAML/) [v 6.0]\r +- [Node.js](https://nodejs.org/) [v 10.24.0+]\r +- Available storage ~235GB for databases\r +\r +### Storage while running\r +\r +Depending on the analysis you are about to run, disk requirements vary.\r +Indicatively, you may have a look at the metaGOflow publication for computing resources used in various cases.\r +\r +## Installation\r +\r +### Get the EOSC-Life marine GOs workflow\r +\r +```bash\r +git clone https://github.com/emo-bon/MetaGOflow\r +cd MetaGOflow\r +```\r +\r +### Download necessary databases (~235GB)\r +\r +You can download databases for the EOSC-Life GOs workflow by running the\r +`download_dbs.sh` script under the `Installation` folder.\r +\r +```bash\r +bash Installation/download_dbs.sh -f [Output Directory e.g. ref-dbs] \r +```\r +If you have one or more already in your system, then create a symbolic link pointing\r +at the `ref-dbs` folder or at one of its subfolders/files.\r +\r +The final structure of the DB directory should be like the following:\r +\r +````bash\r +user@server:~/MetaGOflow: ls ref-dbs/\r +db_kofam/ diamond/ eggnog/ GO-slim/ interproscan-5.57-90.0/ kegg_pathways/ kofam_ko_desc.tsv Rfam/ silva_lsu/ silva_ssu/\r +````\r +\r +## How to run\r +\r +### Ensure that `Node.js` is installed on your system before running metaGOflow\r +\r +If you have root access on your system, you can run the commands below to install it:\r +\r +##### DEBIAN/UBUNTU\r +```bash\r +sudo apt-get update -y\r +sudo apt-get install -y nodejs\r +```\r +\r +##### RH/CentOS\r +```bash\r +sudo yum install rh-nodejs (e.g. 
rh-nodejs10)\r +```\r +\r +### Set up the environment\r +\r +#### Run once - Setup environment\r +\r +- ```bash\r + conda create -n EOSC-CWL python=3.8\r + ```\r +\r +- ```bash\r + conda activate EOSC-CWL\r + ```\r +\r +- ```bash\r + pip install cwlref-runner cwltool[all] rdflib-jsonld rocrate pyyaml\r +\r + ```\r +\r +#### Run every time\r +\r +```bash\r +conda activate EOSC-CWL\r +``` \r +\r +### Run the workflow\r +\r +- Edit the `config.yml` file to set the parameter values of your choice. For selecting all the steps, then set to `true` the variables in lines [2-6].\r +\r +#### Using Singularity\r +\r +##### Standalone\r +- run:\r + ```bash\r + ./run_wf.sh -s -n osd-short -d short-test-case -f test_input/wgs-paired-SRR1620013_1.fastq.gz -r test_input/wgs-paired-SRR1620013_2.fastq.gz\r + ``\r +\r +##### Using a cluster with a queueing system (e.g. SLURM)\r +\r +- Create a job file (e.g., SBATCH file)\r +\r +- Enable Singularity, e.g. module load Singularity & all other dependencies \r +\r +- Add the run line to the job file\r +\r +\r +#### Using Docker\r +\r +##### Standalone\r +- run:\r + ``` bash\r + ./run_wf.sh -n osd-short -d short-test-case -f test_input/wgs-paired-SRR1620013_1.fastq.gz -r test_input/wgs-paired-SRR1620013_2.fastq.gz\r + ```\r + HINT: If you are using Docker, you may need to run the above command without the `-s' flag.\r +\r +## Testing samples\r +The samples are available in the `test_input` folder.\r +\r +We provide metaGOflow with partial samples from the Human Metagenome Project ([SRR1620013](https://www.ebi.ac.uk/ena/browser/view/SRR1620013) and [SRR1620014](https://www.ebi.ac.uk/ena/browser/view/SRR1620014))\r +They are partial as only a small part of their sequences have been kept, in terms for the pipeline to test in a fast way. \r +\r +\r +## Hints and tips\r +\r +1. In case you are using Docker, it is strongly recommended to **avoid** installing it through `snap`.\r +\r +2. 
`RuntimeError`: slurm currently does not support shared caching, because it does not support cleaning up a worker\r + after the last job finishes.\r + Set the `--disableCaching` flag if you want to use this batch system.\r +\r +3. In case you are having errors like:\r +\r +```\r +cwltool.errors.WorkflowException: Singularity is not available for this tool\r +```\r +\r +You may run the following command:\r +\r +```\r +singularity pull --force --name debian:stable-slim.sif docker://debian:stable-sli\r +```\r +\r +## Contribution\r +\r +To make contribution to the project a bit easier, all the MGnify `conditionals` and `subworkflows` under\r +the `workflows/` directory that are not used in the metaGOflow framework, have been removed. \r +However, all the MGnify `tools/` and `utils/` are available in this repo, even if they are not invoked in the current\r +version of metaGOflow.\r +This way, we hope we encourage people to implement their own `conditionals` and/or `subworkflows` by exploiting the\r +currently supported `tools` and `utils` as well as by developing new `tools` and/or `utils`.\r +\r +\r +\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/384?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "A workflow for marine Genomic Observatories data analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/384?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# RNA-Seq pipeline\r +Here we provide the tools to perform paired end or single read RNA-Seq analysis including raw data quality control, differential expression (DE) analysis and functional annotation. As input files you may use either zipped fastq-files (.fastq.gz) or mapped read data (.bam files). 
In case of paired end reads, corresponding fastq files should be named using *.R1.fastq.gz* and *.R2.fastq.gz* suffixes.\r +\r +\r +## Pipeline Workflow\r +All analysis steps are illustrated in the pipeline [flowchart](https://www.draw.io/?lightbox=1&highlight=0000ff&edit=_blank&layers=1&nav=1&title=NGSpipe2go_RNAseq_pipeline.html#R7R1Zk5s489e4Knmwi%2Ft4nDOz2WSyO5OtbPYlJZCw%2BYLBATzXr%2F90AOYQNtgY8Ex2UjuDACG1Wn13ayJfLJ8%2BhGC1%2BBxA5E0kAT5N5MuJJImKJE3IPwE%2BsxZd11nDPHRh8tCm4d59QUmjkLSuXYiiwoNxEHixuyo22oHvIzsutIEwDB6LjzmBV%2FzqCsxRpeHeBl619ZsL40XSKmrm5sYNcueL5NOGlMzPAvbPeRis%2FeR7fuAjdmcJ0m6SOUYLAIPHXJN8NZEvwiCI2V%2FLpwvkEbCmEGPvXdfczYYcIj9u8sLiRdX%2F%2FXT3%2FOR9Xj7NbwX7r3%2FtaTa4%2BDmFBYIYNMllEMaLYB74wLvatJ7T%2BSLSrYCv%2FrdertLn52CFWzZvfQoC3HApksdQHD8nKw%2FWcYCbFvHSS%2B6iJzf%2Bl3Q4U5Or77k7l0%2FJt%2BjFc3rhx%2BFz7iVy%2BT1%2Fb%2FMavUrfi%2BIw%2BJmtM16H8yooE%2BhGwTq0E7g82Ld%2F339bf1xfvvz39Y%2Bv19%2Bjh%2F%2BmcoIgMQjnKN7yoJIAmkA394lkqT6gYInwGPEDIfJA7D4UERMk%2BD3PntssNP4jWWv%2But8IH53FrRF7D4s76%2Fbm%2FvrHy8003XgPwFsnn5pImodncA7dB%2FJFz5379Ib2a01Q9Dyk6J9d4r%2FmyW%2F6mhWWW%2FDAaF9pawnPcqvvAQt559lWugi8IKQPydf0P%2B6iOYEfJ%2FgkamTcIFpQpBQLKEquHNfz8p1K5CfrNL1D9658Pg8BdDEilJrtYOnaCQbNPRBFKTal21qgn43x4gUEcFNT2IZZDyiM0dNWTEjuGmpCaxLaOjU1dv2Yo1RGQoAXOSKlpIt8CPZwkVl80%2BhzpZGfY6BPB9giGUIBWzIWm8MWRTCr2KJ1gS0v3%2BLLr8K%2F%2F328vrsKlyL8%2BuH5j6l8MIt5SwylIT%2BRlaH4CXeNVfG1LzLy4RmRNPGlTfYv2c2k8dr10uFU8aBAVHpACnkopBDlx9sb9zE4%2B3O%2B%2BvLjW%2FDxXBGnEodJ9I8kXJAcdx0S%2FtgD3LcNk8OcCdMrLEDKjsmNaURBeIYfEJXVE4dXX0iTs%2FMlcH0CP3eFPBfzNtx4vp2Ns68Wm5mYsO9AuplO0ku0An5FHCk3ALLublz6TuiDCP2apbCYYQEkeHjOfypdqbQFqMCWFc1GpiVA3QKCaiqm4uiOo5qaIAlT2wRQRbbqiFDSZR0auqBrjgUdVRUECymKAnUgI6vwkUWInMJnFnFMVOczgkjS9dyNF2trhmUQfOEuLSfACIz%2FvP1wT4YuzQN8YXmBRdANRDHCq3idTirCf9%2FdnuF54j92T5iBrfa5FCcAB08qQOdiE2%2B5apCsLEKOBvWmlCK5MbDw27m9JAkQOa7vUh1CEt5Z5M771ptr1PP2AgAjSgujFbIpNZQEOlFiNQng2iMod%2FrzQ1GEGYwLCLkGPswmNyXTdh0sP1AmBDDNJ%2Fttjwl3gINPyF7HBN45HKwZSb3%2B1YXaYooFtcWoai2yztFaROlYOq6yW3jButuK%2FInp6gpreWTyTBZJRUAjbc
hshwoXXFu5eGMYihWQpcDJQyxtayyKJN%2F7K3AptiUfm0pqYcH0YgeB40RYRCovQjboA2wP5pCaRvr394LWsUvTKOgZG7WjB3VSa6o5DKU4bBt1rXWpSvGs4IkQPNefM5JnBSFE4RQ34wsmIyX7ghJEAW8CNbuTN%2BKT2xNJRgr5yZ5YAQizvqVt%2FKMiw%2FDEFb4waWNyG2LpK5rFTwVLWVmGhKYGRV2Fmg5EYAiKaIgSFiNFxZIMgGQw1R1bMETTAVARJB0CSVJkU9ZsQVRtLGealiqogoNFxkFkyNp5MiCVb2%2BTGGtkwcoi8PlnKn5V0akeV8qsFS0LQLQxduNB2RRZch0KlacylMo9JdXjVWwF8LnSWBEIY1hucdMGvGD405dXVBwB3nPk4lXB%2B1OYMxGMeq6EFPoE6ckSCkwmwM9pYEmcO74VrdgssFQ%2FY1D488tD9O0rnsq7P79Mv319P8vB3eWsRXWU1RausFt9jDRWodAGLh%2Fwzl%2FhSfhYEosYNcM7zs%2BoZhkPxzqhhv2%2FkYGRRt6WIe1s29fItUTecm3gnSUejJhICOepP8NDDnkrwE85HjUGOtT%2Bl5cVHhdujO4xESA9PoZgtY2DtxCRi4Z9WZQ48p5YFfiMo3mB1N%2BSWHNJLF2u0Yhi%2F0Qo%2FGL9j4RTYCpPXGhvR9QqUvZaQUu1DV3XHMlxREUxVUeVbRGopmTqjgMdxZqKGJUtyTItLFxZQFZMXVRUJBkOhIajiwKybSQpUB1G0KqZJQMQn7n9FrJaCVkRlom8zGSyN6Nzy%2F1RYaS1zNGaQ2Pe1eHgQwSgjbc2oSekZzYHISd3vlv77q81xYa1NcXUlawNIReU2cDg%2FbYpH0tMmTP5rysozHPiJJGqHTLbApr0ObkQrTwszcSdLnPWJaGXR5pUO%2Fltu5wi5RhoIticoMCXBSPuEvjEPYQBfJmTB1oIgZLxWwhsLgSmRuSTEQJXXM69xMPHGjJlzsJqI%2Fex9indQeSelruHkT6eJrvqjLJnn%2Fk9dkl3f22cYzx3SUUM4VKdVbltUXlqr6ky8kBuGnxR9ZwKuW3l08x3NHsAYdTArWxZqmBAKIuyqEhYaIVAFIGlSw6CyNEgmiITmhDaClINBe9QoBu6ZGL6AQSkmo4sQQgVU5Hso0uqmYdvI6funC0DWd1jjWXXRONogybHQYoSR12FwYMLCYLnfYY8ZN%2FKYukIKHRv8YsFIBVmzvkue43MleubFN4Fq3jWXjrLgXMnl5a2c%2Bnx8eMs6WEXPzb11mR5f3bMi%2F3p1W2ZfnC0fkuluIrFDo7nt5R%2B%2By0369W%2F%2FMPHBimNhE6wQUv3T9oFk%2BiStzZIUe3IKMYviErJ6spmXOnoLAzBc%2B6xFXkg2jJgVeMOuHZcpefTcW2Qm42gW1TnxU6MUZ60A99x5%2BuQ5mzkg454tq1RiJCgkVSGBflZ6K1mPiFltZLZdCOdQfRAMvquGUBIO3lsxi5rBTL%2BQ7XiWI0dkRt%2BGfKnXydB9wGSVYjA0sKiay08OE%2FsYVftSQbtEav4xukg8OpBWbpbC8aq2NkCywYT%2BCsNNOqTUKTHIPxJAghp3z6YU%2Bl7m9zNazukIUSM5UUtvvuuuwFiQIC1RxiYm4wggWXkrcOCF4BELrjEuOoH5HEfISIo4cXb%2FRGCv0noY5LTlXbrBTbRvXIL2Ghe7ydpiCfvLkfNyihF9iJBeGKCwQNImBFWusji%2F0SI4OQGMFg%2BjV1%2FHu2MsihpX6NWplRZ52gBHDVA1EoSWmfu7DSgcUDVSWkLxVenOnGT1MXqQgyapD6MMtVVLlnjZDLtKDpYW41HF4wC1kmatlXjKT8vG%2BohGs8uA3mmKVyDKP77gkoBTWzTv9aYCMfPkyTijhJ%2F6isMwSOFKU0iIPPAktPPZHN0aX3Fj%2BH%2FO%2F
jGL3vGmjN5a6exjiFRrbGORyZLPCfx33%2BiV5eyVsNpaJZ20rEw4eT1HcSE1DJuyZzUeo1DUTWhHssPMOnxU2sPJn49kbrjkSylIclSjmM2akuylFIOvpxYmepIVuV5xZwcQLKaodWOPArkWcHjb5Saqp3XbekEpRRVOj6K7CgO9BtFEqpjjhNFxD5QZHvdhzeCIsd2U6hiaWlTZ1LH3oUKI9J6YESpQjL%2B0iGd1nlpTF06V8MOK%2BazPd7rJDZ8ppNnF6xTaZdS3sdy68OQFE3rh6QoRrckhWsw0oekKCNztXeHmnLncs6eQYmZWeX%2B69ldY4vLHUrcKqsVDTs%2Fom0lc3JFGKjtjStKDqzHNK4ct4SDlNZrGMiYwodt1ciPF2t6T1ZL2EQGlKgHiTXIQTkJNShapwgUy6HkSxdCSlxKC7JpuUvAIE9qygnSipI04J3t%2FITe4mGp5%2FgfBt0FIRfqJfGIqefi5hr%2FI4%2BH8UXg466BS1cWYWx9RFE8qVZA7AAHZIN8vIAFSgUJuH4dWWmNBA1XvL6YwG5fLy2cknh8hT2CT76s49Wa5sMEHiQoc1ahYfjmL3ucoSeZuzLIpuEynyXL6dmMu0n05%2BEpF3mETUt4XoOl6xFcu0HeAyK9TvpwX%2BpKsSirrKrN8HwPYtcQz%2FWR4blQQfUQRWsvHmmcVT5zLdo2nGN8HLqOg8Is9Bs9rTCsIlYErAq1Xob04QsZiB%2B69mKJ%2FM33x7vVj7CtVaEqw3C39dGS7FN9e8Tb2gvmI93TeGTTlGXZgedhYZHGCDH2hYC9mGyCcl49MityjzwqejGMp2vBVeX%2FxPnl8i5a3vijO22gnaq%2Bs%2F4vvko%2BScTpxICCQpdm0dxnCetdK%2FBpqPlOBd4czJS4ddyjIW1VyTxERIEaKXXbN7nw8opNa3a3hPleKzmF0BINZDuSbkuOYQFoObZkAmSqOhJMQZyKliWZJoIahrJsAMG0LUMVBc2EDrB1UVQUAxi6WvzIMXIKabQw%2Fp0t13W0cP3nH6wK7Q%2FWjDffDxZleV0HAQa90t22IdxtY9d5CzSj48fDmC2Qt0JhNLvbtlJANGwk6tAwdFs2bVGWbVWWoQmRIUBoy1PbkgQJiQCJoi7aUHdMVdaBaMqGZTqKSRZMEnTdGuFK7QBFumT8x8Yafv8XHiim9BmFIXQoJIDMlwKmksssh0FZkO%2FnACsMhFBTDBVYGQ%2FM1mMiz9jrKKZdbaq8bXI5cwHHeHmjBSZ%2BWc2zNIR6z6Di16fUa5wqtPygZFk6nEfyg78OL6%2BQl5gSo2L%2FIlPy4Y28dFS%2F%2FJHiwDixpsX0wTRwa5cTrcOwU9cnhoOnFRUz%2FbixNwQSarBkhIZYhylNSLO%2FWWAqDUf1XCsECSIf3V1Smkxrz4nSV1jqccNQZXFYzwmfEh2evzwcJSq5%2F9uSpQMokToMJWoaIdQhJao0RGuLhLFLWLaLn1eIr0W19N5SKy2rGsbK02LaQYSZ5xXqh0SVJ9WeRqmvg0aZI6RRqRNx%2FKFkYzhqTGlaHF4Z7KwxbnSPNuQin1p0T%2BNFlo8TeNY6XiylF1n61o7cidLz6mG5E425XcIHfjC34aFczEEgXof9srD2nEs7Sc5VjkuS1RFyrh3VgcYRVju6XNbUXrOTuKXVecYTulhpsMDygljHyEnlhwjKNt7btDar5c6%2FufM%2Bkkjx0O1k6O2Jiv4qiIpqGrOqd3VwsiKfAlnpVNwxBhN3DqUIH778uMrCbRpv902ETtHin6u6PUe09E4fokVhDu1pgfE6aMHA5rvzG1G8mL%2B4yvJGhnfXcPnpCU5PpvrgcKa646Rx7jTVZTFm%2FZnq4Hp1ByAI9%2FIW4Jdp%2FfckOpEKG33QFvzdkAy6PVkxT5KslC1uysD5FPzSPIcnb55caZ4CheqSAqlNA7lGWppH
3WGrGao0DxGAzgP4jPUrqTHJC1NTTaqTCeSvRJwiH0T%2BnMRb9EH6yCfJYRh4MFJr8qfmz6I%2FXfKnjtFsIw9anOfkHA6Nk6CNnpSzZkzuZCowjcLh0DideJwOB0XbzsSGcjhcXv24vLpHv5pzsMstSUUbS0EvojvCv9tzLuV1JEIr8rCca6tFfDTB99W8ojgE9s%2BRxt4z4zqZcjLIxOD24EaksGOUqMdvMWWOawk%2FWp4RP1bt1YcINMhGasHUj37MRqnas14%2BHaPxMRumVOior2M29OKxGeM4ZsMY9TEbd4gKHzbRlT8gP1jy3ZoNSWT%2FeVW0tgw5CB6iJ5o0Oi9Mopzlk0VwCLSWv0vpP%2F3GPHb6Ti%2BpJIxsStAnuSL5OvP0rPtdh6vtdeDXqEvO62K15LyYRjUWsjuEDvgUfwcPXnI%2BoyFvt%2BQ8P1N5e13L3u3aRVNPJkb0LVMMlOFscoIYucsmjivD2ayvPURPSucQfit4IiefUzgS%2Bm4FIdaQprg54wH156ZbWBeZUyxMVSoZKeQneyI7EJ3clrbxjpzhIUmHpWi5XRt7a%2Be%2FM5AU6yPhX%2B8wiuNVY7UmWF5nMak4zdX8iQWF8P3mGksO1FaT1XxhSViAohrvyACWt5WL%2B4hitIryJ8M0Odv7WKePt%2Bz%2FjQxs0upo81M4qdxMi4emibMm5xyF1OLUuWjFNwEMWgN0P89M5%2By6Sx4simqVCQ96dnkzHpyNexBL5x9%2Bkyoj4BGCGIzT0klPiJlsSv%2B9e3FXKwTfbxtZLzqtD5YshQJzwweiu1LHBjPDroDLuC9KRKGmNQ16HH8ElisPibM7cUZhPJu%2FjHuc0h7jPGnTdslgkF7urAZxNHtB6kgasdOGlFcuFBEZESmzwHJSKvD4plwzvIrM%2FRaAO%2FnC7HqfoRSpH3l3vEznhxQdmsxAqzHdJUWemgYsJAAhHJ3Zqikvx3v7JwweWSlUViyJjProWZJJPSkyg9aRC2Ye%2FqcbuaArVYLRZ%2BTCjfDRWdwasfewuLNub%2B6vf7zcTOsDFyqoRegsl4%2BUDUwKj118QnMiPm5Ql3VXYRdWLbtI6sbnFolb451wiVJTpdR8bWl5HlYUSWYnnEQuchK5ghdKGq%2FJcwMcwki4SCBXkIAeRj%2FZlHNt4EbZveW6AJ1Rio8wqluK50Epe7Y7A90hXmOtLEPm7K0pNDeNFK48BQNrFESBsyLyK1fxbrM4g8uKewqIbYxxR0CuzNiRxy6Fh13qkbCrT4%2FmVvR%2Buw5NLlhO0BOVGMOvmVNly3Z8a16nrcB4s26V0Q5sQH9PByym7NvhyS88DtPFgT5cSjbkOScuM%2BdfByVb2Cij2N6AhauMnKJYPW6KL%2F9oR8LO%2BuM6Mkv5HzEGp50PbN8Y0el6riOaaJggW1aX%2BdUql2UhVpOaLeLRtMuqmf2S5fxSsxNiQZOQJMETGYMXl0HDMN7QkvEsy%2BnBvgXfiHn4knEtyydT36K7HL1qgGHtyuZNzd9N7%2BNX948r6%2F7%2BKXgOnj9%2B%2F%2FLPtKmlOT2OaOjM83Isr1zIJN%2F5vKL1k3meHDpEbRyfyDFJjat4Batnuh7RT%2FIJesKSgNEvF2uWNKYSyBFt4ks3ImoN8FGwJtfJrKjFLbONS1zreD2ynZxxvJyQrgvDFvzipipzzC0Dxy23oo34IgsnLtPLtod17%2BmJ42Js05zm1H8yksR1aWTVWTrEhhL3lPtHhqHz1o3iQbspi%2BOytOrx72ViVpal9y5C1UxcS%2FW0V4iZJTo1MJniQ38oKrVt1I1cBuvQez4nKdco3s30N%2FjCrmKWlS1fTs2WkG8uJSi6XDoBO9uoBd2oKibIegc%2BGf5yH17A5IhFuqS21UFbVQhstJn62SQ1qkm5HKB5nJRmXSyqQJrZjwr0ee3F7t
8XzYOA1sslCAkKSQJ%2BLYvX26Hf4CHMQm8184n7rFbHmW70HIgekFdQc1gx4yUZ7y97W8hPPbqcnFJTjvjhHXnWp1KzNauv1nlZDT0I6UjbGcULbsQSvcxBnSL7eebfTA11E0m%2Bpv9xaVbePq6VaViRT%2BWtf7jTK4381JkK5yGALl7%2BUjNGM5IOQingnCSHpMR0ASCllQKPIXaAT6YszLRiLQRFUbKmPAtUjZnICRkSDXV2LEYo1aeC1CFSbQwLfWvtlVu8zJb%2Fd9ksTA9i9AKW1sfCGWmJQY9Roi2Y6VWi7DctyZESJO6apUNEZFWjuOhh2NbDPxf3hM6mRRoEjBiPzANRrMCz79B%2BrYEfk2PldhfF2dbbp%2FQkOhom5BEhgfG%2FCEXRshg0tHVU9MQomjJF14BUxcJvFwa3o4sPrAQkc%2BrmSkQ2evme5lOkggOCPqIDwbt%2BTQ7iYyeUW6Tjz5f3OZRhqZ4LBGK81vsvyraqYO8ur95POGXEk3MB6cgoMoMo3n8EV7tLlmf5r3t%2B4gxCWoEDeMVPuL7trSHbJ5TRp%2BkKdC%2B2%2FR5urBKAHYzkcBdQIi4M6wAySwGhilgNC5BEns9uj7hGfImlsDgvmuJpLT4HkEidV%2F8H). Specify the desired analysis details for your data in the *essential.vars.groovy* file (see below) and run the pipeline *rnaseq.pipeline.groovy* as described [here](https://gitlab.rlp.net/imbforge/NGSpipe2go/-/blob/master/README.md). A markdown file *DEreport.Rmd* will be generated in the output reports folder after running the pipeline. 
Subsequently, the *DEreport.Rmd* file can be converted to a final html report using the *knitr* R-package.\r +\r +\r +### The pipelines includes\r +- quality control of rawdata with FastQC and MultiQC\r +- Read mapping to the reference genome using STAR\r +- generation of bigWig tracks for visualisation of alignment with deeptools\r +- Characterization of insert size for paired-end libraries\r +- Read quantification with featureCounts (Subread) \r +- Library complexity assessment with dupRadar\r +- RNA class representation\r +- Check for strand specificity\r +- Visualization of gene body coverage\r +- Illustration of sample relatedness with MDS plots and heatmaps\r +- Differential Expression Analysis for depicted group comparisons with DESeq2\r +- Enrichment analysis for DE results with clusterProfiler and ReactomePA\r +- Additional DE analysis including multimapped reads\r +\r +\r +### Pipeline parameter settings\r +- targets.txt: tab-separated txt-file giving information about the analysed samples. The following columns are required \r + - sample: sample identifier for use in plots and and tables\r + - file: read counts file name (a unique sub-string of the file name is sufficient, this sub-string is grebbed against the count file names produced by the pipeline) \r + - group: variable for sample grouping (e.g. by condition)\r + - replicate: replicate number of samples belonging to the same group\r +- contrasts.txt: indicate intended group comparisions for differential expression analysis, e.g. *KOvsWT=(KO-WT)* if targets.txt contains the groups *KO* and *WT*. Give 1 contrast per line. 
\r +- essential.vars.groovy: essential parameter describing the experiment including: \r + - ESSENTIAL_PROJECT: your project folder name\r + - ESSENTIAL_STAR_REF: path to STAR indexed reference genome\r + - ESSENTIAL_GENESGTF: genome annotation file in gtf-format\r + - ESSENTIAL_PAIRED: either paired end ("yes") or single read ("no") design\r + - ESSENTIAL_STRANDED: strandness of library (no|yes|reverse)\r + - ESSENTIAL_ORG: UCSC organism name\r + - ESSENTIAL_READLENGTH: read length of library\r + - ESSENTIAL_THREADS: number of threads for parallel tasks\r +- additional (more specialized) parameter can be given in the var.groovy-files of the individual pipeline modules \r +\r +\r +## Programs required\r +- Bedtools\r +- DEseq2\r +- deeptools\r +- dupRadar (provided by another project from imbforge)\r +- FastQC\r +- MultiQC\r +- Picard\r +- R packages DESeq2, clusterProfiler, ReactomePA\r +- RSeQC\r +- Samtools\r +- STAR\r +- Subread\r +- UCSC utilities\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/58?version=1" ; + schema1:isBasedOn "https://gitlab.rlp.net/imbforge/NGSpipe2go/-/tree/master/pipelines/RNAseq" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for RNA-Seq" ; + schema1:sdDatePublished "2024-07-12 13:36:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/58/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2519 ; + schema1:creator ; + schema1:dateCreated "2020-10-07T07:38:07Z" ; + schema1:dateModified "2023-01-16T13:44:52Z" ; + schema1:description """# RNA-Seq pipeline\r +Here we provide the tools to perform paired end or single read RNA-Seq analysis including raw data quality control, differential expression (DE) analysis and functional annotation. 
As input files you may use either zipped fastq-files (.fastq.gz) or mapped read data (.bam files). In case of paired end reads, corresponding fastq files should be named using *.R1.fastq.gz* and *.R2.fastq.gz* suffixes.\r +\r +\r +## Pipeline Workflow\r +All analysis steps are illustrated in the pipeline [flowchart](https://www.draw.io/?lightbox=1&highlight=0000ff&edit=_blank&layers=1&nav=1&title=NGSpipe2go_RNAseq_pipeline.html#R7R1Zk5s489e4Knmwi%2Ft4nDOz2WSyO5OtbPYlJZCw%2BYLBATzXr%2F90AOYQNtgY8Ex2UjuDACG1Wn13ayJfLJ8%2BhGC1%2BBxA5E0kAT5N5MuJJImKJE3IPwE%2BsxZd11nDPHRh8tCm4d59QUmjkLSuXYiiwoNxEHixuyo22oHvIzsutIEwDB6LjzmBV%2FzqCsxRpeHeBl619ZsL40XSKmrm5sYNcueL5NOGlMzPAvbPeRis%2FeR7fuAjdmcJ0m6SOUYLAIPHXJN8NZEvwiCI2V%2FLpwvkEbCmEGPvXdfczYYcIj9u8sLiRdX%2F%2FXT3%2FOR9Xj7NbwX7r3%2FtaTa4%2BDmFBYIYNMllEMaLYB74wLvatJ7T%2BSLSrYCv%2FrdertLn52CFWzZvfQoC3HApksdQHD8nKw%2FWcYCbFvHSS%2B6iJzf%2Bl3Q4U5Or77k7l0%2FJt%2BjFc3rhx%2BFz7iVy%2BT1%2Fb%2FMavUrfi%2BIw%2BJmtM16H8yooE%2BhGwTq0E7g82Ld%2F339bf1xfvvz39Y%2Bv19%2Bjh%2F%2BmcoIgMQjnKN7yoJIAmkA394lkqT6gYInwGPEDIfJA7D4UERMk%2BD3PntssNP4jWWv%2But8IH53FrRF7D4s76%2Fbm%2FvrHy8003XgPwFsnn5pImodncA7dB%2FJFz5379Ib2a01Q9Dyk6J9d4r%2FmyW%2F6mhWWW%2FDAaF9pawnPcqvvAQt559lWugi8IKQPydf0P%2B6iOYEfJ%2FgkamTcIFpQpBQLKEquHNfz8p1K5CfrNL1D9658Pg8BdDEilJrtYOnaCQbNPRBFKTal21qgn43x4gUEcFNT2IZZDyiM0dNWTEjuGmpCaxLaOjU1dv2Yo1RGQoAXOSKlpIt8CPZwkVl80%2BhzpZGfY6BPB9giGUIBWzIWm8MWRTCr2KJ1gS0v3%2BLLr8K%2F%2F328vrsKlyL8%2BuH5j6l8MIt5SwylIT%2BRlaH4CXeNVfG1LzLy4RmRNPGlTfYv2c2k8dr10uFU8aBAVHpACnkopBDlx9sb9zE4%2B3O%2B%2BvLjW%2FDxXBGnEodJ9I8kXJAcdx0S%2FtgD3LcNk8OcCdMrLEDKjsmNaURBeIYfEJXVE4dXX0iTs%2FMlcH0CP3eFPBfzNtx4vp2Ns68Wm5mYsO9AuplO0ku0An5FHCk3ALLublz6TuiDCP2apbCYYQEkeHjOfypdqbQFqMCWFc1GpiVA3QKCaiqm4uiOo5qaIAlT2wRQRbbqiFDSZR0auqBrjgUdVRUECymKAnUgI6vwkUWInMJnFnFMVOczgkjS9dyNF2trhmUQfOEuLSfACIz%2FvP1wT4YuzQN8YXmBRdANRDHCq3idTirCf9%2FdnuF54j92T5iBrfa5FCcAB08qQOdiE2%2B5apCsLEKOBvWmlCK5MbDw27m9JAkQOa7vUh1CEt5Z5M771ptr1PP2AgAjSgujFbIpNZQEOlFiNQng2iMod%2FrzQ1GEGYwLCLkGPswmNyXTdh0sP
1AmBDDNJ%2Fttjwl3gINPyF7HBN45HKwZSb3%2B1YXaYooFtcWoai2yztFaROlYOq6yW3jButuK%2FInp6gpreWTyTBZJRUAjbchshwoXXFu5eGMYihWQpcDJQyxtayyKJN%2F7K3AptiUfm0pqYcH0YgeB40RYRCovQjboA2wP5pCaRvr394LWsUvTKOgZG7WjB3VSa6o5DKU4bBt1rXWpSvGs4IkQPNefM5JnBSFE4RQ34wsmIyX7ghJEAW8CNbuTN%2BKT2xNJRgr5yZ5YAQizvqVt%2FKMiw%2FDEFb4waWNyG2LpK5rFTwVLWVmGhKYGRV2Fmg5EYAiKaIgSFiNFxZIMgGQw1R1bMETTAVARJB0CSVJkU9ZsQVRtLGealiqogoNFxkFkyNp5MiCVb2%2BTGGtkwcoi8PlnKn5V0akeV8qsFS0LQLQxduNB2RRZch0KlacylMo9JdXjVWwF8LnSWBEIY1hucdMGvGD405dXVBwB3nPk4lXB%2B1OYMxGMeq6EFPoE6ckSCkwmwM9pYEmcO74VrdgssFQ%2FY1D488tD9O0rnsq7P79Mv319P8vB3eWsRXWU1RausFt9jDRWodAGLh%2Fwzl%2FhSfhYEosYNcM7zs%2BoZhkPxzqhhv2%2FkYGRRt6WIe1s29fItUTecm3gnSUejJhICOepP8NDDnkrwE85HjUGOtT%2Bl5cVHhdujO4xESA9PoZgtY2DtxCRi4Z9WZQ48p5YFfiMo3mB1N%2BSWHNJLF2u0Yhi%2F0Qo%2FGL9j4RTYCpPXGhvR9QqUvZaQUu1DV3XHMlxREUxVUeVbRGopmTqjgMdxZqKGJUtyTItLFxZQFZMXVRUJBkOhIajiwKybSQpUB1G0KqZJQMQn7n9FrJaCVkRlom8zGSyN6Nzy%2F1RYaS1zNGaQ2Pe1eHgQwSgjbc2oSekZzYHISd3vlv77q81xYa1NcXUlawNIReU2cDg%2FbYpH0tMmTP5rysozHPiJJGqHTLbApr0ObkQrTwszcSdLnPWJaGXR5pUO%2Fltu5wi5RhoIticoMCXBSPuEvjEPYQBfJmTB1oIgZLxWwhsLgSmRuSTEQJXXM69xMPHGjJlzsJqI%2Fex9indQeSelruHkT6eJrvqjLJnn%2Fk9dkl3f22cYzx3SUUM4VKdVbltUXlqr6ky8kBuGnxR9ZwKuW3l08x3NHsAYdTArWxZqmBAKIuyqEhYaIVAFIGlSw6CyNEgmiITmhDaClINBe9QoBu6ZGL6AQSkmo4sQQgVU5Hso0uqmYdvI6funC0DWd1jjWXXRONogybHQYoSR12FwYMLCYLnfYY8ZN%2FKYukIKHRv8YsFIBVmzvkue43MleubFN4Fq3jWXjrLgXMnl5a2c%2Bnx8eMs6WEXPzb11mR5f3bMi%2F3p1W2ZfnC0fkuluIrFDo7nt5R%2B%2By0369W%2F%2FMPHBimNhE6wQUv3T9oFk%2BiStzZIUe3IKMYviErJ6spmXOnoLAzBc%2B6xFXkg2jJgVeMOuHZcpefTcW2Qm42gW1TnxU6MUZ60A99x5%2BuQ5mzkg454tq1RiJCgkVSGBflZ6K1mPiFltZLZdCOdQfRAMvquGUBIO3lsxi5rBTL%2BQ7XiWI0dkRt%2BGfKnXydB9wGSVYjA0sKiay08OE%2FsYVftSQbtEav4xukg8OpBWbpbC8aq2NkCywYT%2BCsNNOqTUKTHIPxJAghp3z6YU%2Bl7m9zNazukIUSM5UUtvvuuuwFiQIC1RxiYm4wggWXkrcOCF4BELrjEuOoH5HEfISIo4cXb%2FRGCv0noY5LTlXbrBTbRvXIL2Ghe7ydpiCfvLkfNyihF9iJBeGKCwQNImBFWusji%2F0SI4OQGMFg%2BjV1%2FHu2MsihpX6NWplRZ52gBHDVA1EoSWmfu7DSgcUDVSWkLxVenOnGT1MXqQgyapD6MMtVVLlnjZDLtKDpYW41HF4wC1
kmatlXjKT8vG%2BohGs8uA3mmKVyDKP77gkoBTWzTv9aYCMfPkyTijhJ%2F6isMwSOFKU0iIPPAktPPZHN0aX3Fj%2BH%2FO%2FjGL3vGmjN5a6exjiFRrbGORyZLPCfx33%2BiV5eyVsNpaJZ20rEw4eT1HcSE1DJuyZzUeo1DUTWhHssPMOnxU2sPJn49kbrjkSylIclSjmM2akuylFIOvpxYmepIVuV5xZwcQLKaodWOPArkWcHjb5Saqp3XbekEpRRVOj6K7CgO9BtFEqpjjhNFxD5QZHvdhzeCIsd2U6hiaWlTZ1LH3oUKI9J6YESpQjL%2B0iGd1nlpTF06V8MOK%2BazPd7rJDZ8ppNnF6xTaZdS3sdy68OQFE3rh6QoRrckhWsw0oekKCNztXeHmnLncs6eQYmZWeX%2B69ldY4vLHUrcKqsVDTs%2Fom0lc3JFGKjtjStKDqzHNK4ct4SDlNZrGMiYwodt1ciPF2t6T1ZL2EQGlKgHiTXIQTkJNShapwgUy6HkSxdCSlxKC7JpuUvAIE9qygnSipI04J3t%2FITe4mGp5%2FgfBt0FIRfqJfGIqefi5hr%2FI4%2BH8UXg466BS1cWYWx9RFE8qVZA7AAHZIN8vIAFSgUJuH4dWWmNBA1XvL6YwG5fLy2cknh8hT2CT76s49Wa5sMEHiQoc1ahYfjmL3ucoSeZuzLIpuEynyXL6dmMu0n05%2BEpF3mETUt4XoOl6xFcu0HeAyK9TvpwX%2BpKsSirrKrN8HwPYtcQz%2FWR4blQQfUQRWsvHmmcVT5zLdo2nGN8HLqOg8Is9Bs9rTCsIlYErAq1Xob04QsZiB%2B69mKJ%2FM33x7vVj7CtVaEqw3C39dGS7FN9e8Tb2gvmI93TeGTTlGXZgedhYZHGCDH2hYC9mGyCcl49MityjzwqejGMp2vBVeX%2FxPnl8i5a3vijO22gnaq%2Bs%2F4vvko%2BScTpxICCQpdm0dxnCetdK%2FBpqPlOBd4czJS4ddyjIW1VyTxERIEaKXXbN7nw8opNa3a3hPleKzmF0BINZDuSbkuOYQFoObZkAmSqOhJMQZyKliWZJoIahrJsAMG0LUMVBc2EDrB1UVQUAxi6WvzIMXIKabQw%2Fp0t13W0cP3nH6wK7Q%2FWjDffDxZleV0HAQa90t22IdxtY9d5CzSj48fDmC2Qt0JhNLvbtlJANGwk6tAwdFs2bVGWbVWWoQmRIUBoy1PbkgQJiQCJoi7aUHdMVdaBaMqGZTqKSRZMEnTdGuFK7QBFumT8x8Yafv8XHiim9BmFIXQoJIDMlwKmksssh0FZkO%2FnACsMhFBTDBVYGQ%2FM1mMiz9jrKKZdbaq8bXI5cwHHeHmjBSZ%2BWc2zNIR6z6Di16fUa5wqtPygZFk6nEfyg78OL6%2BQl5gSo2L%2FIlPy4Y28dFS%2F%2FJHiwDixpsX0wTRwa5cTrcOwU9cnhoOnFRUz%2FbixNwQSarBkhIZYhylNSLO%2FWWAqDUf1XCsECSIf3V1Smkxrz4nSV1jqccNQZXFYzwmfEh2evzwcJSq5%2F9uSpQMokToMJWoaIdQhJao0RGuLhLFLWLaLn1eIr0W19N5SKy2rGsbK02LaQYSZ5xXqh0SVJ9WeRqmvg0aZI6RRqRNx%2FKFkYzhqTGlaHF4Z7KwxbnSPNuQin1p0T%2BNFlo8TeNY6XiylF1n61o7cidLz6mG5E425XcIHfjC34aFczEEgXof9srD2nEs7Sc5VjkuS1RFyrh3VgcYRVju6XNbUXrOTuKXVecYTulhpsMDygljHyEnlhwjKNt7btDar5c6%2FufM%2Bkkjx0O1k6O2Jiv4qiIpqGrOqd3VwsiKfAlnpVNwxBhN3DqUIH778uMrCbRpv902ETtHin6u6PUe09E4fokVhDu1pgfE6aMHA5rvzG1G8mL%2B4yvJGhnfXcPnpCU5PpvrgcKa646Rx7jT
VZTFm%2FZnq4Hp1ByAI9%2FIW4Jdp%2FfckOpEKG33QFvzdkAy6PVkxT5KslC1uysD5FPzSPIcnb55caZ4CheqSAqlNA7lGWppH3WGrGao0DxGAzgP4jPUrqTHJC1NTTaqTCeSvRJwiH0T%2BnMRb9EH6yCfJYRh4MFJr8qfmz6I%2FXfKnjtFsIw9anOfkHA6Nk6CNnpSzZkzuZCowjcLh0DideJwOB0XbzsSGcjhcXv24vLpHv5pzsMstSUUbS0EvojvCv9tzLuV1JEIr8rCca6tFfDTB99W8ojgE9s%2BRxt4z4zqZcjLIxOD24EaksGOUqMdvMWWOawk%2FWp4RP1bt1YcINMhGasHUj37MRqnas14%2BHaPxMRumVOior2M29OKxGeM4ZsMY9TEbd4gKHzbRlT8gP1jy3ZoNSWT%2FeVW0tgw5CB6iJ5o0Oi9Mopzlk0VwCLSWv0vpP%2F3GPHb6Ti%2BpJIxsStAnuSL5OvP0rPtdh6vtdeDXqEvO62K15LyYRjUWsjuEDvgUfwcPXnI%2BoyFvt%2BQ8P1N5e13L3u3aRVNPJkb0LVMMlOFscoIYucsmjivD2ayvPURPSucQfit4IiefUzgS%2Bm4FIdaQprg54wH156ZbWBeZUyxMVSoZKeQneyI7EJ3clrbxjpzhIUmHpWi5XRt7a%2Be%2FM5AU6yPhX%2B8wiuNVY7UmWF5nMak4zdX8iQWF8P3mGksO1FaT1XxhSViAohrvyACWt5WL%2B4hitIryJ8M0Odv7WKePt%2Bz%2FjQxs0upo81M4qdxMi4emibMm5xyF1OLUuWjFNwEMWgN0P89M5%2By6Sx4simqVCQ96dnkzHpyNexBL5x9%2Bkyoj4BGCGIzT0klPiJlsSv%2B9e3FXKwTfbxtZLzqtD5YshQJzwweiu1LHBjPDroDLuC9KRKGmNQ16HH8ElisPibM7cUZhPJu%2FjHuc0h7jPGnTdslgkF7urAZxNHtB6kgasdOGlFcuFBEZESmzwHJSKvD4plwzvIrM%2FRaAO%2FnC7HqfoRSpH3l3vEznhxQdmsxAqzHdJUWemgYsJAAhHJ3Zqikvx3v7JwweWSlUViyJjProWZJJPSkyg9aRC2Ye%2FqcbuaArVYLRZ%2BTCjfDRWdwasfewuLNub%2B6vf7zcTOsDFyqoRegsl4%2BUDUwKj118QnMiPm5Ql3VXYRdWLbtI6sbnFolb451wiVJTpdR8bWl5HlYUSWYnnEQuchK5ghdKGq%2FJcwMcwki4SCBXkIAeRj%2FZlHNt4EbZveW6AJ1Rio8wqluK50Epe7Y7A90hXmOtLEPm7K0pNDeNFK48BQNrFESBsyLyK1fxbrM4g8uKewqIbYxxR0CuzNiRxy6Fh13qkbCrT4%2FmVvR%2Buw5NLlhO0BOVGMOvmVNly3Z8a16nrcB4s26V0Q5sQH9PByym7NvhyS88DtPFgT5cSjbkOScuM%2BdfByVb2Cij2N6AhauMnKJYPW6KL%2F9oR8LO%2BuM6Mkv5HzEGp50PbN8Y0el6riOaaJggW1aX%2BdUql2UhVpOaLeLRtMuqmf2S5fxSsxNiQZOQJMETGYMXl0HDMN7QkvEsy%2BnBvgXfiHn4knEtyydT36K7HL1qgGHtyuZNzd9N7%2BNX948r6%2F7%2BKXgOnj9%2B%2F%2FLPtKmlOT2OaOjM83Isr1zIJN%2F5vKL1k3meHDpEbRyfyDFJjat4Batnuh7RT%2FIJesKSgNEvF2uWNKYSyBFt4ks3ImoN8FGwJtfJrKjFLbONS1zreD2ynZxxvJyQrgvDFvzipipzzC0Dxy23oo34IgsnLtPLtod17%2BmJ42Js05zm1H8yksR1aWTVWTrEhhL3lPtHhqHz1o3iQbspi%2BOytOrx72ViVpal9y5C1UxcS%2FW0V4iZJTo1MJniQ38oKrVt1I1cBuvQez4nKdco3s30N%2FjCrmKWl
S1fTs2WkG8uJSi6XDoBO9uoBd2oKibIegc%2BGf5yH17A5IhFuqS21UFbVQhstJn62SQ1qkm5HKB5nJRmXSyqQJrZjwr0ee3F7t8XzYOA1sslCAkKSQJ%2BLYvX26Hf4CHMQm8184n7rFbHmW70HIgekFdQc1gx4yUZ7y97W8hPPbqcnFJTjvjhHXnWp1KzNauv1nlZDT0I6UjbGcULbsQSvcxBnSL7eebfTA11E0m%2Bpv9xaVbePq6VaViRT%2BWtf7jTK4381JkK5yGALl7%2BUjNGM5IOQingnCSHpMR0ASCllQKPIXaAT6YszLRiLQRFUbKmPAtUjZnICRkSDXV2LEYo1aeC1CFSbQwLfWvtlVu8zJb%2Fd9ksTA9i9AKW1sfCGWmJQY9Roi2Y6VWi7DctyZESJO6apUNEZFWjuOhh2NbDPxf3hM6mRRoEjBiPzANRrMCz79B%2BrYEfk2PldhfF2dbbp%2FQkOhom5BEhgfG%2FCEXRshg0tHVU9MQomjJF14BUxcJvFwa3o4sPrAQkc%2BrmSkQ2evme5lOkggOCPqIDwbt%2BTQ7iYyeUW6Tjz5f3OZRhqZ4LBGK81vsvyraqYO8ur95POGXEk3MB6cgoMoMo3n8EV7tLlmf5r3t%2B4gxCWoEDeMVPuL7trSHbJ5TRp%2BkKdC%2B2%2FR5urBKAHYzkcBdQIi4M6wAySwGhilgNC5BEns9uj7hGfImlsDgvmuJpLT4HkEidV%2F8H). Specify the desired analysis details for your data in the *essential.vars.groovy* file (see below) and run the pipeline *rnaseq.pipeline.groovy* as described [here](https://gitlab.rlp.net/imbforge/NGSpipe2go/-/blob/master/README.md). A markdown file *DEreport.Rmd* will be generated in the output reports folder after running the pipeline. 
Subsequently, the *DEreport.Rmd* file can be converted to a final html report using the *knitr* R-package.\r +\r +\r +### The pipeline includes\r +- quality control of rawdata with FastQC and MultiQC\r +- Read mapping to the reference genome using STAR\r +- generation of bigWig tracks for visualisation of alignment with deeptools\r +- Characterization of insert size for paired-end libraries\r +- Read quantification with featureCounts (Subread) \r +- Library complexity assessment with dupRadar\r +- RNA class representation\r +- Check for strand specificity\r +- Visualization of gene body coverage\r +- Illustration of sample relatedness with MDS plots and heatmaps\r +- Differential Expression Analysis for depicted group comparisons with DESeq2\r +- Enrichment analysis for DE results with clusterProfiler and ReactomePA\r +- Additional DE analysis including multimapped reads\r +\r +\r +### Pipeline parameter settings\r +- targets.txt: tab-separated txt-file giving information about the analysed samples. The following columns are required \r + - sample: sample identifier for use in plots and tables\r + - file: read counts file name (a unique sub-string of the file name is sufficient, this sub-string is grepped against the count file names produced by the pipeline) \r + - group: variable for sample grouping (e.g. by condition)\r + - replicate: replicate number of samples belonging to the same group\r +- contrasts.txt: indicate intended group comparisons for differential expression analysis, e.g. *KOvsWT=(KO-WT)* if targets.txt contains the groups *KO* and *WT*. Give 1 contrast per line. 
\r +- essential.vars.groovy: essential parameter describing the experiment including: \r + - ESSENTIAL_PROJECT: your project folder name\r + - ESSENTIAL_STAR_REF: path to STAR indexed reference genome\r + - ESSENTIAL_GENESGTF: genome annotation file in gtf-format\r + - ESSENTIAL_PAIRED: either paired end ("yes") or single read ("no") design\r + - ESSENTIAL_STRANDED: strandness of library (no|yes|reverse)\r + - ESSENTIAL_ORG: UCSC organism name\r + - ESSENTIAL_READLENGTH: read length of library\r + - ESSENTIAL_THREADS: number of threads for parallel tasks\r +- additional (more specialized) parameter can be given in the var.groovy-files of the individual pipeline modules \r +\r +\r +## Programs required\r +- Bedtools\r +- DEseq2\r +- deeptools\r +- dupRadar (provided by another project from imbforge)\r +- FastQC\r +- MultiQC\r +- Picard\r +- R packages DESeq2, clusterProfiler, ReactomePA\r +- RSeQC\r +- Samtools\r +- STAR\r +- Subread\r +- UCSC utilities\r +""" ; + schema1:keywords "rna-seq, bpipe, groovy" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "RNA-Seq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/58?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "This workflow correspond to the Genome-wide alternative splicing analysis training. It allows to analyze isoform switching by making use of IsoformSwitchAnalyzeR." ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/472?version=2" ; + schema1:license "CC-BY-NC-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genome-wide alternative splicing analysis" ; + schema1:sdDatePublished "2024-07-12 13:33:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/472/ro_crate?version=2" . 
+ + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 17028 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 71945 ; + schema1:creator ; + schema1:dateCreated "2023-05-24T23:22:29Z" ; + schema1:dateModified "2023-05-24T23:23:44Z" ; + schema1:description "This workflow correspond to the Genome-wide alternative splicing analysis training. It allows to analyze isoform switching by making use of IsoformSwitchAnalyzeR." ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/472?version=3" ; + schema1:keywords "Transcriptomics, alternative-splicing, GTN" ; + schema1:license "https://spdx.org/licenses/CC-BY-NC-4.0" ; + schema1:name "Genome-wide alternative splicing analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/472?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 46064 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/991?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/hlatyping" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hlatyping" ; + schema1:sdDatePublished "2024-07-12 13:18:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/991/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3422 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:55Z" ; + schema1:dateModified "2024-06-11T12:54:55Z" ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/991?version=9" ; + schema1:keywords "DNA, hla, hla-typing, immunology, optitype, personalized-medicine, rna" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hlatyping" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/991?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 
2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.283.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Amber Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/283/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6933 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T08:32:45Z" ; + schema1:dateModified "2023-04-14T08:34:33Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in 
Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/283?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Amber Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_md_setup/python/workflow.py" ; + schema1:version 3 . + + a schema1:Dataset ; + schema1:datePublished "2024-02-07T16:26:57.747008" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Mitogenome-assembly-VGP0" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Mitogenome-assembly-VGP0/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.20" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/991?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/hlatyping" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hlatyping" ; + schema1:sdDatePublished "2024-07-12 13:18:30 +0100" ; + schema1:url "https://workflowhub.eu/workflows/991/ro_crate?version=8" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4297 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:56Z" ; + schema1:dateModified "2024-06-11T12:54:56Z" ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/991?version=9" ; + schema1:keywords "DNA, hla, hla-typing, immunology, optitype, personalized-medicine, rna" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hlatyping" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/991?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/965?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/atacseq" ; + schema1:sdDatePublished "2024-07-12 13:22:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/965/ro_crate?version=8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11439 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:42Z" ; + schema1:dateModified "2024-06-11T12:54:42Z" ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/965?version=8" ; + schema1:keywords "ATAC-seq, chromatin-accessibiity" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/atacseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/965?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Automated quantitative analysis of DIA proteomics mass spectrometry measurements." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/980?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/diaproteomics" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/diaproteomics" ; + schema1:sdDatePublished "2024-07-12 13:21:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/980/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6543 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "Automated quantitative analysis of DIA proteomics mass spectrometry measurements." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/980?version=6" ; + schema1:keywords "data-independent-proteomics, dia-proteomics, openms, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/diaproteomics" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/980?version=5" ; + schema1:version 5 . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-05-01T16:03:28.967443" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + , + ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-atac-cutandrun" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "1.0" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-pe" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "1.0" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:Dataset ; + schema1:datePublished "2023-11-16T14:32:18.217599" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/kmer-profiling-hifi-VGP1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "kmer-profiling-hifi-VGP1/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "kmer-profiling-hifi-VGP1/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/kmer-profiling-hifi-VGP1" ; + schema1:version "0.1.4" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Evaluation of Swin Transformer and knowledge transfer for denoising of super-resolution structured illumination microscopy data\r +\r +In recent years, convolutional neural network (CNN)-based methods have shown remarkable performance in the denoising and reconstruction of super-resolved structured illumination microscopy (SR-SIM) data. Therefore, CNN-based architectures have been the main focus of existing studies. Recently, however, an alternative and highly\r +competitive deep learning architecture, Swin Transformer, has been proposed for image restoration tasks. In this work, we present SwinT-fairSIM, a novel method for restoring SR-SIM images with low signal-to-noise ratio (SNR) based on Swin Transformer. The experimental results show that SwinT-fairSIM outperforms previous CNN-based denoising methods. Furthermore, the generalization capabilities of deep learning methods for image restoration tasks on real fluorescence microscopy data have not been fully explored yet, i.e., the extent to which trained artificial neural networks are limited to specific types of cell structures and noise. Therefore, as a second contribution, we benchmark two types of transfer learning, i.e., direct transfer and fine-tuning, in combination with SwinT-fairSIM and two CNN-based methods for denoising SR-SIM data. Direct transfer does not prove to be a viable strategy, but fine-tuning achieves results comparable to conventional training from scratch while saving computational time and potentially reducing the amount of required training data. As a third contribution, we published four datasets of raw SIM images and already reconstructed SR-SIM images. These datasets cover two types of cell structures, tubulin filaments and vesicle structures. Different noise levels are available for the tubulin filaments. 
These datasets are structured in such a way that they can be easily used by the research community for research on denoising, super-resolution, and transfer learning strategies.\r +\r +The SIM microscopy datasets that were used during this work can be downloaded through this link: http://dx.doi.org/10.5524/102461\r +\r +\r +## Installation:\r +\r +This implementation requires the Tensorflow-GPU2.5 version. To avoid package conflicts, we recommend you create a new environment by using our provided environment.yml file. To create a new environment please run the following script:\r +\r +> conda env create -f environment.yml\r +\r +## How to use this code:\r +\r +This code can be used to train a denoising model from scratch or to fine-tune a pretrained model. After the installation of the Python environment from the yml file, the next step is to set the input parameters in the JSON parameter file (i.e., ParameterFile.json). Most of the input parameters are self-explanatory but below we will discuss some of the important input parameters from the JSON file:\r +\r +- TrainNetworkfromScratch: This input parameter will train the model from scratch If set to True, otherwise, for fine-tuning, It should be False.\r +- ActivateTrainandTestModel: This parameter will be set to False If you want to use this code for evaluation of the trained model or the reproducibility of the results by using pretrained models.\r +- PretrainedmodelPath: This parameter is mandatory in case of fine-tuning or evaluation of a pretrained model.\r +- FineTuneStartingpoint and FineTuneEndingpoint: These two input parameters are essential in the fine-tuning of a pretrained model. All the layers between the starting and ending points will be frozen during the fine-tuning of the pretrained model.\r +\r +After the assignment of the input parameters. 
You can run the following script from the command line to start training the model:\r +\r +> python MainModule.py 'ParameterFile.json'\r +\r +## Reproducibility and evaluation:\r +\r +To reproduce the results of the paper all the trained models used in this work are available in the 'Models' directory at [zenodo](https://doi.org/10.5281/zenodo.7626173). This code is capable of performing all the necessary steps for the training and test phases. It will automatically evaluate the model and generate a result directory to write all the results. Similarly, during the training process, It will also create a model directory and save the trained model along with the best checkpoints in the model directory. \r +\r +## Important Note:\r +\r +This code will work with at least one GPU.\r +\r +## Reference:\r +\r +Please cite our paper in case you use this code for any scientific publication. We will soon upload the citation index!\r +\r +\r +\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.675.1" ; + schema1:isBasedOn "https://github.com/ZafranShah/SwinT-fairSIM-and-knowledge-transfer" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Evaluation of Swin Transformer and knowledge transfer for denoising of super-resolution structured illumination microscopy data" ; + schema1:sdDatePublished "2024-07-12 13:26:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/675/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 35149 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7370 ; + schema1:creator ; + schema1:dateCreated "2023-11-21T09:02:46Z" ; + schema1:dateModified "2023-11-21T10:28:24Z" ; + schema1:description """# Evaluation of Swin Transformer and knowledge transfer for denoising of super-resolution structured illumination microscopy data\r +\r +In recent years, convolutional neural network (CNN)-based methods have shown remarkable performance in the denoising and reconstruction of super-resolved structured illumination microscopy (SR-SIM) data. Therefore, CNN-based architectures have been the main focus of existing studies. Recently, however, an alternative and highly\r +competitive deep learning architecture, Swin Transformer, has been proposed for image restoration tasks. In this work, we present SwinT-fairSIM, a novel method for restoring SR-SIM images with low signal-to-noise ratio (SNR) based on Swin Transformer. The experimental results show that SwinT-fairSIM outperforms previous CNN-based denoising methods. Furthermore, the generalization capabilities of deep learning methods for image restoration tasks on real fluorescence microscopy data have not been fully explored yet, i.e., the extent to which trained artificial neural networks are limited to specific types of cell structures and noise. Therefore, as a second contribution, we benchmark two types of transfer learning, i.e., direct transfer and fine-tuning, in combination with SwinT-fairSIM and two CNN-based methods for denoising SR-SIM data. Direct transfer does not prove to be a viable strategy, but fine-tuning achieves results comparable to conventional training from scratch while saving computational time and potentially reducing the amount of required training data. As a third contribution, we published four datasets of raw SIM images and already reconstructed SR-SIM images. 
These datasets cover two types of cell structures, tubulin filaments and vesicle structures. Different noise levels are available for the tubulin filaments. These datasets are structured in such a way that they can be easily used by the research community for research on denoising, super-resolution, and transfer learning strategies.\r +\r +The SIM microscopy datasets that were used during this work can be downloaded through this link: http://dx.doi.org/10.5524/102461\r +\r +\r +## Installation:\r +\r +This implementation requires the Tensorflow-GPU2.5 version. To avoid package conflicts, we recommend you create a new environment by using our provided environment.yml file. To create a new environment please run the following script:\r +\r +> conda env create -f environment.yml\r +\r +## How to use this code:\r +\r +This code can be used to train a denoising model from scratch or to fine-tune a pretrained model. After the installation of the Python environment from the yml file, the next step is to set the input parameters in the JSON parameter file (i.e., ParameterFile.json). Most of the input parameters are self-explanatory but below we will discuss some of the important input parameters from the JSON file:\r +\r +- TrainNetworkfromScratch: This input parameter will train the model from scratch If set to True, otherwise, for fine-tuning, It should be False.\r +- ActivateTrainandTestModel: This parameter will be set to False If you want to use this code for evaluation of the trained model or the reproducibility of the results by using pretrained models.\r +- PretrainedmodelPath: This parameter is mandatory in case of fine-tuning or evaluation of a pretrained model.\r +- FineTuneStartingpoint and FineTuneEndingpoint: These two input parameters are essential in the fine-tuning of a pretrained model. All the layers between the starting and ending points will be frozen during the fine-tuning of the pretrained model.\r +\r +After the assignment of the input parameters. 
You can run the following script from the command line to start training the model:\r +\r +> python MainModule.py 'ParameterFile.json'\r +\r +## Reproducibility and evaluation:\r +\r +To reproduce the results of the paper all the trained models used in this work are available in the 'Models' directory at [zenodo](https://doi.org/10.5281/zenodo.7626173). This code is capable of performing all the necessary steps for the training and test phases. It will automatically evaluate the model and generate a result directory to write all the results. Similarly, during the training process, It will also create a model directory and save the trained model along with the best checkpoints in the model directory. \r +\r +## Important Note:\r +\r +This code will work with at least one GPU.\r +\r +## Reference:\r +\r +Please cite our paper in case you use this code for any scientific publication. We will soon upload the citation index!\r +\r +\r +\r +\r +""" ; + schema1:keywords "Machine Learning, Python, image processing, SIM, microscopy, Deep learning" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Evaluation of Swin Transformer and knowledge transfer for denoising of super-resolution structured illumination microscopy data" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/675?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2021-07-26T10:22:23.311704" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-wgs-variant-calling" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:sdDatePublished "2021-10-27 15:45:09 +0100" ; + schema1:softwareVersion "v0.2.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.6" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/991?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/hlatyping" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hlatyping" ; + schema1:sdDatePublished "2024-07-12 13:18:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/991/ro_crate?version=9" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4780 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:57Z" ; + schema1:dateModified "2024-06-11T12:54:57Z" ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/991?version=9" ; + schema1:keywords "DNA, hla, hla-typing, immunology, optitype, personalized-medicine, rna" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hlatyping" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/991?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Coprolite Identification" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/974?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/coproid" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/coproid" ; + schema1:sdDatePublished "2024-07-12 13:22:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/974/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4232 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:45Z" ; + schema1:dateModified "2024-06-11T12:54:45Z" ; + schema1:description "Coprolite Identification" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/974?version=2" ; + schema1:keywords "adna, ancient-dna, coprolite, microbiome" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/coproid" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/974?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-04-22T15:13:48.408861" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Purge-duplicate-contigs-VGP6/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicate-contigs-VGP6/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:version "0.3.7" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 1908023 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "MARS-seq v2 preprocessing pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/996?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/marsseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/marsseq" ; + schema1:sdDatePublished "2024-07-12 13:20:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/996/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9527 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:00Z" ; + schema1:dateModified "2024-06-11T12:55:00Z" ; + schema1:description "MARS-seq v2 preprocessing pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/996?version=4" ; + schema1:keywords "facs-sorting, MARS-seq, single-cell, single-cell-rna-seq, star-solo, transcriptional-dynamics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/marsseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/996?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-07-12 13:18:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5461 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:09Z" ; + schema1:dateModified "2024-06-11T12:55:09Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-07T21:34:48.828460" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/kmer-profiling-hifi-VGP1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "kmer-profiling-hifi-VGP1/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "kmer-profiling-hifi-VGP1/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/kmer-profiling-hifi-VGP1" ; + schema1:version "0.1.2" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Deprecated" ; + schema1:description "A workflow for mapping and consensus generation of SARS-CoV2 whole genome amplicon nanopore data implemented in the Nextflow framework. Reads are mapped to a reference genome using Minimap2 after trimming the amplicon primers with a fixed length at both ends of the amplicons using Cutadapt. The consensus is called using Pysam based on a majority read support threshold per position of the Minimap2 alignment and positions with less than 30x coverage are masked using ‘N’." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/104?version=1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ENA SARS-CoV-2 Nanopore Amplicon Sequencing Analysis Workflow" ; + schema1:sdDatePublished "2024-07-12 13:26:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/104/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2826 ; + schema1:creator , + ; + schema1:dateCreated "2021-02-12T11:46:24Z" ; + schema1:dateModified "2023-11-24T10:02:56Z" ; + schema1:description "A workflow for mapping and consensus generation of SARS-CoV2 whole genome amplicon nanopore data implemented in the Nextflow framework. Reads are mapped to a reference genome using Minimap2 after trimming the amplicon primers with a fixed length at both ends of the amplicons using Cutadapt. The consensus is called using Pysam based on a majority read support threshold per position of the Minimap2 alignment and positions with less than 30x coverage are masked using ‘N’." ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "ENA SARS-CoV-2 Nanopore Amplicon Sequencing Analysis Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/104?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=17" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-07-12 13:19:08 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=17" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17563 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:16Z" ; + schema1:dateModified "2024-06-11T12:55:16Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=17" ; + schema1:version 17 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=22" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-07-12 13:20:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=22" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12338 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:04Z" ; + schema1:dateModified "2024-06-11T12:55:04Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=22" ; + schema1:version 22 . + + a schema1:Dataset ; + schema1:datePublished "2024-04-22T15:45:17.824364" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Purge-duplicates-one-haplotype-VGP6b/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "This portion of the workflow produces sets of feature Counts ready for analysis by limma/etc." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/688?version=1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for mRNA-Seq BY-COVID Pipeline: Counts" ; + schema1:sdDatePublished "2024-07-12 13:25:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/688/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 26083 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2023-12-19T10:10:00Z" ; + schema1:dateModified "2024-01-24T09:42:47Z" ; + schema1:description "This portion of the workflow produces sets of feature Counts ready for analysis by limma/etc." ; + schema1:image ; + schema1:keywords "BY-COVID, covid-19" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "mRNA-Seq BY-COVID Pipeline: Counts" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/688?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 772694 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Continuous flexibility analysis of SARS-CoV-2 Spike prefusion structures" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/73?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Scipion workflow for Cryo electron microscopy of SARS-CoV-2 stabilized spike in prefusion state" ; + schema1:sdDatePublished "2024-07-12 13:37:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/73/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 38059 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1387470 ; + schema1:creator , + , + ; + schema1:dateCreated "2020-11-19T13:34:09Z" ; + schema1:dateModified "2023-01-16T13:46:12Z" ; + schema1:description "Continuous flexibility analysis of SARS-CoV-2 Spike prefusion structures" ; + schema1:image ; + schema1:keywords "covid-19, image processing, bioimaging" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Scipion workflow for Cryo electron microscopy of SARS-CoV-2 stabilized spike in prefusion state" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/73?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + ; + schema1:description "This workflow is used for the virtual screening of the SARS-CoV-2 main protease (de.NBI-cloud, STFC). It includes Charge enumeration, Generation of 3D conformations, Preparation of active site for docking using rDock, Docking, Scoring and Selection of compounds available. 
More info can be found at https://covid19.galaxyproject.org/cheminformatics/" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/18?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Cheminformatics - XChem combined" ; + schema1:sdDatePublished "2024-07-12 13:37:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/18/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1908 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 38448 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T15:02:39Z" ; + schema1:dateModified "2023-01-16T13:41:21Z" ; + schema1:description "This workflow is used for the virtual screening of the SARS-CoV-2 main protease (de.NBI-cloud, STFC). It includes Charge enumeration, Generation of 3D conformations, Preparation of active site for docking using rDock, Docking, Scoring and Selection of compounds available. More info can be found at https://covid19.galaxyproject.org/cheminformatics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Cheminformatics - XChem combined" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/18?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 5969 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/278?version=4" ; + schema1:isBasedOn "https://gitlab.bsc.es/lrodrig1/structuralvariants_poc/-/tree/1.1.3/structuralvariants/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNV_pipeline" ; + schema1:sdDatePublished "2024-07-12 13:35:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/278/ro_crate?version=4" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 8992 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8794 ; + schema1:creator , + ; + schema1:dateCreated "2022-04-25T07:30:13Z" ; + schema1:dateModified "2022-04-25T07:37:40Z" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/278?version=10" ; + schema1:keywords "cancer, CODEX2, ExomeDepth, manta, TransBioNet, variant calling, GRIDSS, structural variants" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CNV_pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/278?version=4" ; + schema1:version 4 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 65152 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# DNA-Seq pipeline\r +Here we provide the tools to perform paired end or single read DNA-Seq analysis including raw data quality control, read mapping, variant calling and variant filtering. \r +\r +\r +## Pipeline Workflow\r +All analysis steps are illustrated in the pipeline [flowchart](https://www.draw.io/?lightbox=1&highlight=0000ff&edit=_blank&layers=1&nav=1&title=NGSpipe2go_DNAseq_pipeline.html#R7R1bd5s489f4nObBPoDvj3ESO%2BnXZpuk3Wz3pUcG2WaDgQJ2Lr%2F%2B0wgJIxAY29jGabZ7TowAIc2MRnNXrXkxfxl5yJ19dQxs1TTFeKk1L2uaprY0rQb%2FK8Zr2NLtdsOGqWca7KFVw4P5hlmjwloXpoF94cHAcazAdMVG3bFtrAdCG%2FI851l8bOJY4lddNMWphgcdWenWR9MIZqxV7fRXN66xOZ2xT%2Fc0Nr8x0p%2BmnrOw2fdsx8bhnTni3bA5%2BjNkOM%2BxpuZVrXnhOU4Q%2Fpq%2FXGALwMohFr43zLgbDdnDdlDkhaV%2Be%2FfwuPi8uHz79%2FvN9%2BFPf%2FlvXeUYWCJrwYBR0zoW6XFgmEsAr2VObXqj83sBYx14FA7RJfk1ZX%2Fpa2Mv2ULGRPvirRQawSsH%2FiyYW%2BSXSu5ZaIytQQTTC8dyPPpQc0j%2FI4%2F4gec8RVgiUBxMHDtgJKV2YNzIn2GD9Uj7ia4mpmXFOr3qwL%2BoU36HIrE5mHrIMAlwE826Mzd1cqnAIxbyffY7wq8STTKOHIavJfYC%2FBJrYsgaYWeOA%2B%2BVPMLuaj32CltTmsJQ9byi0JbCCHQWI84OfxCxVTGN%2Bl6RB%2FnBKEROLf9i21w%2BXM8nj8355L63%2FPEjqNc7Kdxhg6wjdul4wcyZOjayrlatMQQAXP5bzF3%2B%2FBS5pGX11hfHcRme%2FsNB8MpwihaBQ5piVIJfzOAf6LDRZlc%2FWffw%2B%2FIlfvHKL2wCgNhLcPmT9wcXq9foFX8vTW4hDGDimQuRNfnOwtPZU%2F3P6rBl%2FfwyuhtMHr%2BPTHXx2Ku3GatD3hQHOXBvygnFwxYKzKU4jl2Qfq18nsxue4G1nN2Pb68fhr%2FerusfHGIvHMJzAoI8BwBX75fEMnrtrsAy6n0txTLUnpZmGS1F2516rkZfR%2BdB%2F9v8WRuN0C%2F%2F4n%2Bvn%2BvdnVlGaQyC35EziKyFvmZdS3fVZj%2B9sKUPthQpNznAUn97DC6%2FK%2F%2F8%2B3l4f%2BXNVeP76PUm5DOngKwsbr7i35tx862QXBDHzValcNxW3zuSsW2cg05ALnXgvsCLoXFoWnw4aToQtoQDEEXzWESRN2zJFg9bp0AtfFOHG3Wf4vucPKC23JecHT9quNBq54PL2%2FP6A%2F4NMDZdbJlk1yTtg7g4MC4kNISjE5tDoWTHAe827TrFrBmgMXk7NkmiIOOJaZt03ycosddMpRzg%2By6Cr%2BlMElk9qPt%2BPfAWto4CGFv8ss4oWNKpEht02HUhtFRyLmGnCKQhD0%2BEDmdBA
MaHc1hY2nBqBkTybHiW27Chq6E5H08c0i35eTt6AAxrU4dc1IF2LWcMEi1egplkyNHvk9%2BE8n0g%2FKFhI%2FKjwe81iDjrLF%2FTY8x8jsMaSeC%2FAVqSknel1pDlIMMHKwtMH2wqjrGwAI4Ksg1YY75POLSJrJBbI8IMsXfKE8YvWF8EdIYxpkEIDbo17SllJ598fEb%2B0EYXmR6moKAA%2BeTSWwb2QSXbBhDlLNPUJ7MVulIsJYqg9nQ6aUMJ12LjWk9k%2FCp9P21J9tMECIgy6MJPoiy6RG2EyYcyFJdKerwhMjy2pODK3dALw1BNgUyTQIy3FZZK2Pe%2BOSYlK%2FaxutYWENYVO3AmEx%2BYbAIJ0aC3x4vWO6bwy3%2F%2FFAThdcKvIPquJOEDaDicatdLswcSZn%2F42Ptr%2FB94AMjmAJYgUVh1pZxrToZPRC3KthTgWqG1RQnb6wFgEe51YvfIqgnqzLAF93QCPLq1rBNwv63YtmxHKibWusm2WeqpraZq4UkQ3uzJuXdcvJMJEceQlKJNvrFEnp8pKGU9toWclAL%2FfoAd9m3yBtdzlqaBC4g1Zh4RDSI56Za8KACDDibnu%2BFrdd%2FFujkhWrPwdeVT3giyGxw3aMjePCs6IRlatLWMXotxILYzxJg%2BN1mHOBo4ZJ%2BcWNRiMKFGgjj3fp6ZAX5wEWWVzx51WpQhtCS9O7IdWCa0KK2N%2BSa5jLHOTfZLmWHgoIIM%2F2BlJZmWgMYEbkqRZPqfvd7T01B%2Fsr4P6uhypGL1S11tv3dJRrDIlWq4LWyk61REromkhiHyg7sLysiLiBS%2FF4TRBTBCnWhjngNU5UxgwOiZDpuq1zBYsus%2BcdWsrN080tbhSdAIyeB%2F642wOdqg17LyZtz7mWblMvaTYO9km9eJ8vyFXl02OxlMnXoJWceKSPcl8Ps2d9Vzht9qphiV2pFwqs7mSmoRfi%2FlKmnu%2Fs58A6XykVZBPtKqHB8ZPJ4XZiKCiSxsuieMgwq6rkvWVUyoXO%2FZT%2FX1yceRMQufZb22fx4V6RvjZ7Q5j2odiEeVbDjj8QGcJ%2FW6R%2BVJubuwxDm1Xi%2BjVkqmnSlb6Pd%2FLQJ3AasJQgrJA6tFE1sxym89bykdT6EPyRiWVzQNM7SfhyZlGLfyiQkV3Kg%2BOv%2F%2BPwpuz0RU1MUAeRqrcpa7zhP6WYz%2BgSJNHVnnjOapySW5AtJKWDxsh4fxDNHctIBcr7G1xNDrQbS1bqsnrJRmP21jlqpr3BZdvsu2W7FloaRWhof9hRX41VwbS30SWw%2FPJsFiRPTQQsjVop6V8etqUVwj13KCVxdfwF1qFbm%2BOIuWzg%2FbnJjYGGGbPkTv%2Fxid5U12H1P7%2B%2B7hy1%2BX0LHueNj%2FWLRcOe92Gu1iy7YfPVn%2Bwu1VfuFazrSiq5aMrM7XrO6QNagHdJGG%2BxlGOqxivu1Vi8j3QNDtfkFy3kJeKxbeyT2o1TdDlan58bDNtZpf%2F1CaX0G3dHYod1VEaQ%2B7hCAqyn1EB9menWGQAAWKaQSRoT8z7ddfYUTQr7CZqIy%2F4EFyl8kuYXvjfm5IZB7JI5n%2BsaRTqdrTbtCnyN3GDFtE9PIb95nzlz%2Bb5yg8iNj2jYyIMKNoEcBS8Zx5TYwFgs2vUZP48b46hjkBXkLxSh9cCbb6wg9oV4jw2Vff9Gsx594qoCogiPVnZH36IfvRFBtjYNU5vrqq7K8HEiI73X6xPVfVSth0pVbajzD9TfbqTnqvlnvUDrRVFzbS3s8vF4ypFrHTfkXeE%2BWbc7KeSNfGwrVMiH81agd39Xhz8nV%2FYzNqiKuTN6M2O12Jnnl05456VL5x0i7jdlGBv3ou479DmeeKyPwbuHzawEDGD7ffahpYhqfMokW1blt3PAPZBGD0Hg70xiE4S%2BSgY
VIcmKU35jBtpXaKHCbpPG42laNzGEk%2B88lH1zY3FkzSCe07iCqFHcq9Y5kVpGjvf6C9bLTnlQs4PNalRQlaO2O91GIQIh1o2xGCtm14%2FablIHZb%2F2F%2B%2BQ6Ip6%2Beex56jT3gQmijH%2Bs5EevY4lsJtz4z1WpY8Pmm2k0QXTiCgnGRxYNbkI%2FvsU727bHHSiwUlHtWb4GkA%2F2QP3c0iM6kLgfI2FQG519rkefwgHKPF5%2FT5rrVqYaoiHHS7WY6vveQcs%2B4uzBGzu0%2FLp7X7%2B77QV8fdervPha3TF2qV5jLHWu7yx32Vr6Tvacd3eMJJqgAnUyB8IM5LqrpVcOnAgGJwF4N%2FELtVVNhDklXBIvGuMeM2DRlTBZTmBxLWGKDKq%2B%2B7TaW%2BuQs%2FPaT7TzbtXhYE2Pgm0DkwHlAzIRvOTpl%2BdEGtC7Vaat0moplx4gWtbYkq0PtS2qfqWp%2FX%2Bu%2FLVn%2FCcDtNxUm4kB%2FbiqMVBFR04j4KEu3ix7Sl7hMpJDfVQ3ZaUH2s%2BOCs6o5jJ0XKGEAMeuUIY8dz8BenTRHTJutFVriALx67ehOvNRnGDXVxC34Fz3hIsOI%2BtYK1cMRPfKUmPIjHyTB9LzMTHq%2B2ZNJVnPA8%2FhgidxhWUzcFzpUUk9Fc449pWVPPBg7xmuqMbU9BQZvCUEiBjDDNk%2BomqAujP0KneSS4Abu%2FX6yzcA7W11PTJsm10YBqmFWFKJEJ0uaCr3jMd%2B5H2DXj%2Be1rkacnkPuNpx%2BDBrTEMmB0Yb9%2FyEDg0YZtUF7uGLKjVnYv1DU7ybqnPRbaamoJ9nh%2ByVEIsiLyZ1MecdtNd%2Btisltv%2FWqqqQm7Alox9G4jxJaeGMXiSxEzwYK0Jr99UhaMM2NXSmlyqc303WxcfDUgZQ2aqN5mOJHNsMlaJ20ct0kXXxKsLGuCRI74Ph9NHctrDbu1QaFcWP6Vu1xaluMs5wN7EhB7alCGBKlVVoHY1%2FFu3j13spESacTNCDzVojKrBArG6N5jJFRwZrlZYDQDLwtyhuG8JEoTI3uVXMH4lTojRs7PGXiHlOwzYHC6A3mlxHcIR%2BZidH6KZoT0uvtvn7k9phjyoSiN6R7WHdIOdYXfgBKseLhRwwto2H09zkJGzFBLFqWkcled4xVcUvCWp6M0ELPg98P6Fnl%2BQAwkY0dq%2F34%2Bjwlx6poYpcqkwd0rMrPnsjciVMb3m6FS7%2FgKa2gGhFxRsXUceZOAhu8iCTpkRHx8yVYE6cGJhI0B8lNaW4aBuWSMqoQOWcpG4p4iIQmqaYq20%2FKSHeQEsExE93NULMcOgmxrJKO1KpLQCUQZ9IEpvLA2DjXakmos1VCHQYpdWZnc0dK201AwKnHt8iVPkfxufBprQBGbFHO1bvlMD0xmllt94ohcW8sJq3xXYapBFQEYaFfBldTZB4C6hB4zygTnbtqpy8rpSBFWgkeeWnU1e4BqO8z6koKrKJKxUcp50jEi5VyJut8Yk4XHo%2FDqaLd5aBZ2iFAoB0ea4SXmbWZ5Q8dPeH6OBBzPYzmY8jizgKX5IkKwCoh1IjAk9moj1EsnFYPyARs4m62uJwCc3Z9a3qiD820d7wnOLCEgtRGUyrSrbHhb1BHe32Dx8Ih%2FQ2%2Bm1vUe7MBEkCghQXs3GQjYND3rYUnhJeAEdcEu7DtxMVdxV%2F%2FEUwL0tFTUtgRjbxbiJK0BJQXmteqeJfsbrI2Omy%2BfHFGL4aFKIQwzU%2FUqPWEqb17BRifiBamPfXPpBXRs9W1XDEo3%2FxUgbCFhIOnyesvr%2FXwdDYXK4pYmqRQ1CRqXEK6LC26M09A%2BwjuFJ0JaTScmJDf3VzKz0ou3NS9IKWzXlFFoHccwT%2BSqFl5twHs81%2BgIF3RLC7dcV%2FpDP0nypahlh1h0rV
YFCFr5Ma8%2Fbsa5qYP0ZLIxs7Cp9IgnRwVjCOXgyZ1OmRj8eR8Dm1VtN51Osf1OUhL2h25us5GPIdcfMOeGR7RkuBDqsiHtk5t3craUDSKjTs7Dx%2FGJsX97gfcVwP3O%2Be3l4v6vI2oPMxvl9ScyC%2FtsOPRt0xSLiZsSoMls6TNhWe9DjykPwE01zFp8TD71BnzGyC6OFdv9UQQttqSKvKSXK0mP4m%2B9AJpR7UMV7hAWl7ds%2FXhJkc7x9xEw7vl893NTXPYt97uH2%2FsN62EnOs9lp84cPWJfaKdR0Mem08nikm0WJ5D1YpP0KjFeMhi8doToYSeqC%2BhIPgij4V0Hd9kNqb96yyxwhPRdPKio7LJ7OQUlWRwVIsXgKtStS21QoGWlXKA5hVPWs%2Fvugfa5jat4AdHTvwYFeYnF2E2Y%2BxEi4UfZnKkT6k4IC9h49HpZPK5SRYST46bJKv3taXBE8fnJ39mLf%2FCBtEqM4br4odCZjOG1Ok2x%2BEL1xebF7c6UXtogjG0%2Bq0jMwZpdFz6EGBqt66tvKJZtouN4F0CQLs9MRmmIxHbZF6y1ubQLAa6E6xRwZLjh2GlhZwgiT%2BtFEUuMP7YMguVHVjtePUfSuBk%2FY64NXTSWRjSEOlma3dOJj%2FpKTsXZ328WSeZhhFbuhywsVAeALFs3aI5YMIe%2B%2FDHjYdn8n0m8Uz6WsklLWno5gYjTFBSBYJvkjuiPGFCk2yJJSRMyAlJkxBSAQFiq1CbfFL%2Bc2NtpNVctHzXN7bGzvPRdb8SjedlFvEufBh4Re3pzbaSIKnd%2FJ5Sn83u7vU4ylimzOnR3fZUJjvM6KSorN%2BLU9m%2BvTbFQgx3r2H1DspHCkfnSGlnLcVKgdssSLDCwSwHP8tg94NLKudMPlQNi52wHtpUD29CZcUCJkgXdZZV4vL69DDeENaxkBYj2KR%2FZsx9gCO7a1D1Jn6Awdq6BMIgsizByiYHg4sfOrwl%2BBcd46%2FChx5k02HJZuFCOltKtZCso%2BJmYVU7slk4u%2Fpvjm0zrTZ7dKCbVS4QrIwJJh1DBV3kg8j8ybOpa1pzSP%2BTcrx4EYNOchMUQ%2BniKdqk06sO%2FKtl5HNPPWSYBNmJZkJnUD6Sst8pFJPkrHiGDGoVUGpCzJ5SDjklaybw0BmxbKjEBKB223vab3OMSVm0k2kUom8trGSLtcqLQ8%2Bkx7D2ozR3H7xe8aReK5WSFesMsww%2FVsKMkCNBlx%2BI9R3yevg7OoEAnF20k5z38ibGjkFI%2B%2FMLjYO9nXL65bxMGvPGQ3rQF1ZYIyG11bBCT5MFEYtoaiRZUQH2QiDSKnErj2QOOygwo%2FiJh%2FEDH9KYKvgF6bTXsKbdKz%2BwHWjnug%2B78Y5WU%2BQd%2FXRsr9QWpm7hUoNQM8cJ4todmdbsqwPBb82r%2FwM%3D). In case of paired end reads, corresponding fastq files should be named using *.R1.fastq.gz* and *.R2.fastq.gz* suffixes. Specify the desired analysis details for your data in the *essential.vars.groovy* file (see below) and run the pipeline *dnaseq.pipeline.groovy* as described [here](https://gitlab.rlp.net/imbforge/NGSpipe2go/-/blob/master/README.md). A markdown file *variantreport.Rmd* will be generated in the output reports folder after running the pipeline. 
Subsequently, the *variantreport.Rmd* file can be converted to a final html report using the *knitr* R-package.\r +GATK requires chromosomes in bam files to be karyotypically ordered. Best you use an ordered genome fasta file as reference for the pipeline (assigned in *essential.vars.groovy*, see below).\r +\r +\r +### The pipelines includes\r +- quality control of rawdata with FastQC\r +- Read mapping to the reference genome using BWA\r +- identify and remove duplicate reads with Picard MarkDuplicates\r +- Realign BAM files at Indel positions using GATK\r +- Recalibrate Base Qualities in BAM files using GATK\r +- Variant calling using GATK UnifiedGenotyper and GATK HaplotypeCaller\r +- Calculate VQSLOD scores for further filtering variants using GATK VariantRecalibrator and ApplyRecalibration\r +- Calculate the basic properties of variants as triplets for "all", "known" ,"novel" variants in comparison to dbSNP using GATK VariantEval\r +\r +\r +### Pipeline parameter settings\r +- essential.vars.groovy: essential parameter describing the experiment including: \r + - ESSENTIAL_PROJECT: your project folder name\r + - ESSENTIAL_BWA_REF: path to BWA indexed reference genome\r + - ESSENTIAL_CALL_REGION: bath to bed file containing region s to limit variant calling to (optional)\r + - ESSENTIAL_PAIRED: either paired end ("yes") or single end ("no") design\r + - ESSENTIAL_KNOWN_VARIANTS: dbSNP from GATK resource bundle (crucial for BaseQualityRecalibration step)\r + - ESSENTIAL_HAPMAP_VARIANTS: variants provided by the GATK bundle (essential for Variant Score Recalibration)\r + - ESSENTIAL_OMNI_VARIANTS: variants provided by the GATK bundle (essential for Variant Score Recalibration)\r + - ESSENTIAL_MILLS_VARIANTS: variants provided by the GATK bundle (essential for Variant Score Recalibration)\r + - ESSENTIAL_THOUSAND_GENOMES_VARIANTS: variants provided by the GATK bundle (essential for Variant Score Recalibration)\r + - ESSENTIAL_THREADS: number of threads for parallel 
tasks\r +- additional (more specialized) parameter can be given in the var.groovy-files of the individual pipeline modules \r +\r +\r +## Programs required\r +- Bedtools\r +- BWA\r +- FastQC\r +- GATK\r +- Picard\r +- Samtools\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/60?version=1" ; + schema1:isBasedOn "https://gitlab.rlp.net/imbforge/NGSpipe2go/-/tree/master/pipelines/DNAseq" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for DNA-seq" ; + schema1:sdDatePublished "2024-07-12 13:36:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/60/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1793 ; + schema1:creator ; + schema1:dateCreated "2020-10-07T07:43:50Z" ; + schema1:dateModified "2023-01-16T13:44:52Z" ; + schema1:description """# DNA-Seq pipeline\r +Here we provide the tools to perform paired end or single read DNA-Seq analysis including raw data quality control, read mapping, variant calling and variant filtering. 
\r +\r +\r +## Pipeline Workflow\r +All analysis steps are illustrated in the pipeline [flowchart](https://www.draw.io/?lightbox=1&highlight=0000ff&edit=_blank&layers=1&nav=1&title=NGSpipe2go_DNAseq_pipeline.html#R7R1bd5s489f4nObBPoDvj3ESO%2BnXZpuk3Wz3pUcG2WaDgQJ2Lr%2F%2B0wgJIxAY29jGabZ7TowAIc2MRnNXrXkxfxl5yJ19dQxs1TTFeKk1L2uaprY0rQb%2FK8Zr2NLtdsOGqWca7KFVw4P5hlmjwloXpoF94cHAcazAdMVG3bFtrAdCG%2FI851l8bOJY4lddNMWphgcdWenWR9MIZqxV7fRXN66xOZ2xT%2Fc0Nr8x0p%2BmnrOw2fdsx8bhnTni3bA5%2BjNkOM%2BxpuZVrXnhOU4Q%2Fpq%2FXGALwMohFr43zLgbDdnDdlDkhaV%2Be%2FfwuPi8uHz79%2FvN9%2BFPf%2FlvXeUYWCJrwYBR0zoW6XFgmEsAr2VObXqj83sBYx14FA7RJfk1ZX%2Fpa2Mv2ULGRPvirRQawSsH%2FiyYW%2BSXSu5ZaIytQQTTC8dyPPpQc0j%2FI4%2F4gec8RVgiUBxMHDtgJKV2YNzIn2GD9Uj7ia4mpmXFOr3qwL%2BoU36HIrE5mHrIMAlwE826Mzd1cqnAIxbyffY7wq8STTKOHIavJfYC%2FBJrYsgaYWeOA%2B%2BVPMLuaj32CltTmsJQ9byi0JbCCHQWI84OfxCxVTGN%2Bl6RB%2FnBKEROLf9i21w%2BXM8nj8355L63%2FPEjqNc7Kdxhg6wjdul4wcyZOjayrlatMQQAXP5bzF3%2B%2FBS5pGX11hfHcRme%2FsNB8MpwihaBQ5piVIJfzOAf6LDRZlc%2FWffw%2B%2FIlfvHKL2wCgNhLcPmT9wcXq9foFX8vTW4hDGDimQuRNfnOwtPZU%2F3P6rBl%2FfwyuhtMHr%2BPTHXx2Ku3GatD3hQHOXBvygnFwxYKzKU4jl2Qfq18nsxue4G1nN2Pb68fhr%2FerusfHGIvHMJzAoI8BwBX75fEMnrtrsAy6n0txTLUnpZmGS1F2516rkZfR%2BdB%2F9v8WRuN0C%2F%2F4n%2Bvn%2BvdnVlGaQyC35EziKyFvmZdS3fVZj%2B9sKUPthQpNznAUn97DC6%2FK%2F%2F8%2B3l4f%2BXNVeP76PUm5DOngKwsbr7i35tx862QXBDHzValcNxW3zuSsW2cg05ALnXgvsCLoXFoWnw4aToQtoQDEEXzWESRN2zJFg9bp0AtfFOHG3Wf4vucPKC23JecHT9quNBq54PL2%2FP6A%2F4NMDZdbJlk1yTtg7g4MC4kNISjE5tDoWTHAe827TrFrBmgMXk7NkmiIOOJaZt03ycosddMpRzg%2By6Cr%2BlMElk9qPt%2BPfAWto4CGFv8ss4oWNKpEht02HUhtFRyLmGnCKQhD0%2BEDmdBAMaHc1hY2nBqBkTybHiW27Chq6E5H08c0i35eTt6AAxrU4dc1IF2LWcMEi1egplkyNHvk9%2BE8n0g%2FKFhI%2FKjwe81iDjrLF%2FTY8x8jsMaSeC%2FAVqSknel1pDlIMMHKwtMH2wqjrGwAI4Ksg1YY75POLSJrJBbI8IMsXfKE8YvWF8EdIYxpkEIDbo17SllJ598fEb%2B0EYXmR6moKAA%2BeTSWwb2QSXbBhDlLNPUJ7MVulIsJYqg9nQ6aUMJ12LjWk9k%2FCp9P21J9tMECIgy6MJPoiy6RG2EyYcyFJdKerwhMjy2pODK3dALw1BNgUyTQIy3FZZK2Pe%2BOSYlK%2FaxutYWENYVO3AmEx%2BYbAIJ0aC3x4vWO6bwy3%2F%2FFAThdcKvIP
quJOEDaDicatdLswcSZn%2F42Ptr%2FB94AMjmAJYgUVh1pZxrToZPRC3KthTgWqG1RQnb6wFgEe51YvfIqgnqzLAF93QCPLq1rBNwv63YtmxHKibWusm2WeqpraZq4UkQ3uzJuXdcvJMJEceQlKJNvrFEnp8pKGU9toWclAL%2FfoAd9m3yBtdzlqaBC4g1Zh4RDSI56Za8KACDDibnu%2BFrdd%2FFujkhWrPwdeVT3giyGxw3aMjePCs6IRlatLWMXotxILYzxJg%2BN1mHOBo4ZJ%2BcWNRiMKFGgjj3fp6ZAX5wEWWVzx51WpQhtCS9O7IdWCa0KK2N%2BSa5jLHOTfZLmWHgoIIM%2F2BlJZmWgMYEbkqRZPqfvd7T01B%2Fsr4P6uhypGL1S11tv3dJRrDIlWq4LWyk61REromkhiHyg7sLysiLiBS%2FF4TRBTBCnWhjngNU5UxgwOiZDpuq1zBYsus%2BcdWsrN080tbhSdAIyeB%2F642wOdqg17LyZtz7mWblMvaTYO9km9eJ8vyFXl02OxlMnXoJWceKSPcl8Ps2d9Vzht9qphiV2pFwqs7mSmoRfi%2FlKmnu%2Fs58A6XykVZBPtKqHB8ZPJ4XZiKCiSxsuieMgwq6rkvWVUyoXO%2FZT%2FX1yceRMQufZb22fx4V6RvjZ7Q5j2odiEeVbDjj8QGcJ%2FW6R%2BVJubuwxDm1Xi%2BjVkqmnSlb6Pd%2FLQJ3AasJQgrJA6tFE1sxym89bykdT6EPyRiWVzQNM7SfhyZlGLfyiQkV3Kg%2BOv%2F%2BPwpuz0RU1MUAeRqrcpa7zhP6WYz%2BgSJNHVnnjOapySW5AtJKWDxsh4fxDNHctIBcr7G1xNDrQbS1bqsnrJRmP21jlqpr3BZdvsu2W7FloaRWhof9hRX41VwbS30SWw%2FPJsFiRPTQQsjVop6V8etqUVwj13KCVxdfwF1qFbm%2BOIuWzg%2FbnJjYGGGbPkTv%2Fxid5U12H1P7%2B%2B7hy1%2BX0LHueNj%2FWLRcOe92Gu1iy7YfPVn%2Bwu1VfuFazrSiq5aMrM7XrO6QNagHdJGG%2BxlGOqxivu1Vi8j3QNDtfkFy3kJeKxbeyT2o1TdDlan58bDNtZpf%2F1CaX0G3dHYod1VEaQ%2B7hCAqyn1EB9menWGQAAWKaQSRoT8z7ddfYUTQr7CZqIy%2F4EFyl8kuYXvjfm5IZB7JI5n%2BsaRTqdrTbtCnyN3GDFtE9PIb95nzlz%2Bb5yg8iNj2jYyIMKNoEcBS8Zx5TYwFgs2vUZP48b46hjkBXkLxSh9cCbb6wg9oV4jw2Vff9Gsx594qoCogiPVnZH36IfvRFBtjYNU5vrqq7K8HEiI73X6xPVfVSth0pVbajzD9TfbqTnqvlnvUDrRVFzbS3s8vF4ypFrHTfkXeE%2BWbc7KeSNfGwrVMiH81agd39Xhz8nV%2FYzNqiKuTN6M2O12Jnnl05456VL5x0i7jdlGBv3ou479DmeeKyPwbuHzawEDGD7ffahpYhqfMokW1blt3PAPZBGD0Hg70xiE4S%2BSgYVIcmKU35jBtpXaKHCbpPG42laNzGEk%2B88lH1zY3FkzSCe07iCqFHcq9Y5kVpGjvf6C9bLTnlQs4PNalRQlaO2O91GIQIh1o2xGCtm14%2FablIHZb%2F2F%2B%2BQ6Ip6%2Beex56jT3gQmijH%2Bs5EevY4lsJtz4z1WpY8Pmm2k0QXTiCgnGRxYNbkI%2FvsU727bHHSiwUlHtWb4GkA%2F2QP3c0iM6kLgfI2FQG519rkefwgHKPF5%2FT5rrVqYaoiHHS7WY6vveQcs%2B4uzBGzu0%2FLp7X7%2B77QV8fdervPha3TF2qV5jLHWu7yx32Vr6Tvacd3eMJJqgAnUyB8IM5LqrpVcOnAgGJwF4N%2FELtVVNhDklXBIvGuMeM2DRlTBZTmBxLWGKD
Kq%2B%2B7TaW%2BuQs%2FPaT7TzbtXhYE2Pgm0DkwHlAzIRvOTpl%2BdEGtC7Vaat0moplx4gWtbYkq0PtS2qfqWp%2FX%2Bu%2FLVn%2FCcDtNxUm4kB%2FbiqMVBFR04j4KEu3ix7Sl7hMpJDfVQ3ZaUH2s%2BOCs6o5jJ0XKGEAMeuUIY8dz8BenTRHTJutFVriALx67ehOvNRnGDXVxC34Fz3hIsOI%2BtYK1cMRPfKUmPIjHyTB9LzMTHq%2B2ZNJVnPA8%2FhgidxhWUzcFzpUUk9Fc449pWVPPBg7xmuqMbU9BQZvCUEiBjDDNk%2BomqAujP0KneSS4Abu%2FX6yzcA7W11PTJsm10YBqmFWFKJEJ0uaCr3jMd%2B5H2DXj%2Be1rkacnkPuNpx%2BDBrTEMmB0Yb9%2FyEDg0YZtUF7uGLKjVnYv1DU7ybqnPRbaamoJ9nh%2ByVEIsiLyZ1MecdtNd%2Btisltv%2FWqqqQm7Alox9G4jxJaeGMXiSxEzwYK0Jr99UhaMM2NXSmlyqc303WxcfDUgZQ2aqN5mOJHNsMlaJ20ct0kXXxKsLGuCRI74Ph9NHctrDbu1QaFcWP6Vu1xaluMs5wN7EhB7alCGBKlVVoHY1%2FFu3j13spESacTNCDzVojKrBArG6N5jJFRwZrlZYDQDLwtyhuG8JEoTI3uVXMH4lTojRs7PGXiHlOwzYHC6A3mlxHcIR%2BZidH6KZoT0uvtvn7k9phjyoSiN6R7WHdIOdYXfgBKseLhRwwto2H09zkJGzFBLFqWkcled4xVcUvCWp6M0ELPg98P6Fnl%2BQAwkY0dq%2F34%2Bjwlx6poYpcqkwd0rMrPnsjciVMb3m6FS7%2FgKa2gGhFxRsXUceZOAhu8iCTpkRHx8yVYE6cGJhI0B8lNaW4aBuWSMqoQOWcpG4p4iIQmqaYq20%2FKSHeQEsExE93NULMcOgmxrJKO1KpLQCUQZ9IEpvLA2DjXakmos1VCHQYpdWZnc0dK201AwKnHt8iVPkfxufBprQBGbFHO1bvlMD0xmllt94ohcW8sJq3xXYapBFQEYaFfBldTZB4C6hB4zygTnbtqpy8rpSBFWgkeeWnU1e4BqO8z6koKrKJKxUcp50jEi5VyJut8Yk4XHo%2FDqaLd5aBZ2iFAoB0ea4SXmbWZ5Q8dPeH6OBBzPYzmY8jizgKX5IkKwCoh1IjAk9moj1EsnFYPyARs4m62uJwCc3Z9a3qiD820d7wnOLCEgtRGUyrSrbHhb1BHe32Dx8Ih%2FQ2%2Bm1vUe7MBEkCghQXs3GQjYND3rYUnhJeAEdcEu7DtxMVdxV%2F%2FEUwL0tFTUtgRjbxbiJK0BJQXmteqeJfsbrI2Omy%2BfHFGL4aFKIQwzU%2FUqPWEqb17BRifiBamPfXPpBXRs9W1XDEo3%2FxUgbCFhIOnyesvr%2FXwdDYXK4pYmqRQ1CRqXEK6LC26M09A%2BwjuFJ0JaTScmJDf3VzKz0ou3NS9IKWzXlFFoHccwT%2BSqFl5twHs81%2BgIF3RLC7dcV%2FpDP0nypahlh1h0rVYFCFr5Ma8%2Fbsa5qYP0ZLIxs7Cp9IgnRwVjCOXgyZ1OmRj8eR8Dm1VtN51Osf1OUhL2h25us5GPIdcfMOeGR7RkuBDqsiHtk5t3craUDSKjTs7Dx%2FGJsX97gfcVwP3O%2Be3l4v6vI2oPMxvl9ScyC%2FtsOPRt0xSLiZsSoMls6TNhWe9DjykPwE01zFp8TD71BnzGyC6OFdv9UQQttqSKvKSXK0mP4m%2B9AJpR7UMV7hAWl7ds%2FXhJkc7x9xEw7vl893NTXPYt97uH2%2FsN62EnOs9lp84cPWJfaKdR0Mem08nikm0WJ5D1YpP0KjFeMhi8doToYSeqC%2BhIPgij4V0Hd9kNqb96yyxwhPRdPKio7LJ7OQUlWRwVIsXgKtStS21QoGWlXKA5h
VPWs%2Fvugfa5jat4AdHTvwYFeYnF2E2Y%2BxEi4UfZnKkT6k4IC9h49HpZPK5SRYST46bJKv3taXBE8fnJ39mLf%2FCBtEqM4br4odCZjOG1Ok2x%2BEL1xebF7c6UXtogjG0%2Bq0jMwZpdFz6EGBqt66tvKJZtouN4F0CQLs9MRmmIxHbZF6y1ubQLAa6E6xRwZLjh2GlhZwgiT%2BtFEUuMP7YMguVHVjtePUfSuBk%2FY64NXTSWRjSEOlma3dOJj%2FpKTsXZ328WSeZhhFbuhywsVAeALFs3aI5YMIe%2B%2FDHjYdn8n0m8Uz6WsklLWno5gYjTFBSBYJvkjuiPGFCk2yJJSRMyAlJkxBSAQFiq1CbfFL%2Bc2NtpNVctHzXN7bGzvPRdb8SjedlFvEufBh4Re3pzbaSIKnd%2FJ5Sn83u7vU4ylimzOnR3fZUJjvM6KSorN%2BLU9m%2BvTbFQgx3r2H1DspHCkfnSGlnLcVKgdssSLDCwSwHP8tg94NLKudMPlQNi52wHtpUD29CZcUCJkgXdZZV4vL69DDeENaxkBYj2KR%2FZsx9gCO7a1D1Jn6Awdq6BMIgsizByiYHg4sfOrwl%2BBcd46%2FChx5k02HJZuFCOltKtZCso%2BJmYVU7slk4u%2Fpvjm0zrTZ7dKCbVS4QrIwJJh1DBV3kg8j8ybOpa1pzSP%2BTcrx4EYNOchMUQ%2BniKdqk06sO%2FKtl5HNPPWSYBNmJZkJnUD6Sst8pFJPkrHiGDGoVUGpCzJ5SDjklaybw0BmxbKjEBKB223vab3OMSVm0k2kUom8trGSLtcqLQ8%2Bkx7D2ozR3H7xe8aReK5WSFesMsww%2FVsKMkCNBlx%2BI9R3yevg7OoEAnF20k5z38ibGjkFI%2B%2FMLjYO9nXL65bxMGvPGQ3rQF1ZYIyG11bBCT5MFEYtoaiRZUQH2QiDSKnErj2QOOygwo%2FiJh%2FEDH9KYKvgF6bTXsKbdKz%2BwHWjnug%2B78Y5WU%2BQd%2FXRsr9QWpm7hUoNQM8cJ4todmdbsqwPBb82r%2FwM%3D). In case of paired end reads, corresponding fastq files should be named using *.R1.fastq.gz* and *.R2.fastq.gz* suffixes. Specify the desired analysis details for your data in the *essential.vars.groovy* file (see below) and run the pipeline *dnaseq.pipeline.groovy* as described [here](https://gitlab.rlp.net/imbforge/NGSpipe2go/-/blob/master/README.md). A markdown file *variantreport.Rmd* will be generated in the output reports folder after running the pipeline. Subsequently, the *variantreport.Rmd* file can be converted to a final html report using the *knitr* R-package.\r +GATK requires chromosomes in bam files to be karyotypically ordered. 
Best you use an ordered genome fasta file as reference for the pipeline (assigned in *essential.vars.groovy*, see below).\r +\r +\r +### The pipelines includes\r +- quality control of rawdata with FastQC\r +- Read mapping to the reference genome using BWA\r +- identify and remove duplicate reads with Picard MarkDuplicates\r +- Realign BAM files at Indel positions using GATK\r +- Recalibrate Base Qualities in BAM files using GATK\r +- Variant calling using GATK UnifiedGenotyper and GATK HaplotypeCaller\r +- Calculate VQSLOD scores for further filtering variants using GATK VariantRecalibrator and ApplyRecalibration\r +- Calculate the basic properties of variants as triplets for "all", "known" ,"novel" variants in comparison to dbSNP using GATK VariantEval\r +\r +\r +### Pipeline parameter settings\r +- essential.vars.groovy: essential parameter describing the experiment including: \r + - ESSENTIAL_PROJECT: your project folder name\r + - ESSENTIAL_BWA_REF: path to BWA indexed reference genome\r + - ESSENTIAL_CALL_REGION: bath to bed file containing region s to limit variant calling to (optional)\r + - ESSENTIAL_PAIRED: either paired end ("yes") or single end ("no") design\r + - ESSENTIAL_KNOWN_VARIANTS: dbSNP from GATK resource bundle (crucial for BaseQualityRecalibration step)\r + - ESSENTIAL_HAPMAP_VARIANTS: variants provided by the GATK bundle (essential for Variant Score Recalibration)\r + - ESSENTIAL_OMNI_VARIANTS: variants provided by the GATK bundle (essential for Variant Score Recalibration)\r + - ESSENTIAL_MILLS_VARIANTS: variants provided by the GATK bundle (essential for Variant Score Recalibration)\r + - ESSENTIAL_THOUSAND_GENOMES_VARIANTS: variants provided by the GATK bundle (essential for Variant Score Recalibration)\r + - ESSENTIAL_THREADS: number of threads for parallel tasks\r +- additional (more specialized) parameter can be given in the var.groovy-files of the individual pipeline modules \r +\r +\r +## Programs required\r +- Bedtools\r +- BWA\r +- 
FastQC\r +- GATK\r +- Picard\r +- Samtools\r +""" ; + schema1:keywords "DNA-seq, GATK3, bpipe, groovy" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "DNA-seq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/60?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:description "Analysis of S-protein polymorphism. This workflow includes: obtaining coding sequences of S proteins from a diverse group of coronaviruses and generating amino acid alignments to assess conservation of the polymorphic location. More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/9?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genomics - Analysis of S-protein polymorphism" ; + schema1:sdDatePublished "2024-07-12 13:37:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/9/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1619 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6982 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T13:00:36Z" ; + schema1:dateModified "2023-01-16T13:40:19Z" ; + schema1:description "Analysis of S-protein polymorphism. This workflow includes: obtaining coding sequences of S proteins from a diverse group of coronaviruses and generating amino acid alignments to assess conservation of the polymorphic location. 
More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Genomics - Analysis of S-protein polymorphism" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/9?version=1" ; + schema1:version 1 ; + ns1:input . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 3531 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Analysis of variation within individual COVID-19 samples \r +using bowtie2, bwa, fastp, multiqc , picard ,samtools, snpEff \r +Workflow, tools and data are available on https://github.com/fjrmoreews/cwl-workflow-SARS-CoV-2/tree/master/Variation\r +This worklow was ported into CWL from a Galaxy Workflow \r + ( https://github.com/galaxyproject/SARS-CoV-2/tree/master/genomics/4-Variation migrated to CWL).\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/28?version=1" ; + schema1:isBasedOn "https://github.com/fjrmoreews/cwl-workflow-SARS-CoV-2/tree/master/Variation" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for var-PE" ; + schema1:sdDatePublished "2024-07-12 13:37:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/28/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15597 ; + schema1:creator ; + schema1:dateCreated "2020-06-08T14:57:04Z" ; + schema1:dateModified "2023-01-16T13:41:56Z" ; + schema1:description """Analysis of variation within individual COVID-19 samples \r +using bowtie2, bwa, fastp, multiqc , picard ,samtools, snpEff \r +Workflow, tools and data are available on https://github.com/fjrmoreews/cwl-workflow-SARS-CoV-2/tree/master/Variation\r +This worklow was ported into CWL from a Galaxy Workflow \r + ( https://github.com/galaxyproject/SARS-CoV-2/tree/master/genomics/4-Variation migrated to CWL).\r +\r +""" ; + schema1:image ; + schema1:keywords "variation, CWL, covid-19" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "var-PE" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/28?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 117071 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/963?version=13" ; + schema1:isBasedOn "https://github.com/nf-core/airrflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/airrflow" ; + schema1:sdDatePublished "2024-07-12 13:22:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/963/ro_crate?version=13" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14171 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:37Z" ; + schema1:dateModified "2024-06-11T12:54:37Z" ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/963?version=13" ; + schema1:keywords "airr, b-cell, immcantation, immunorepertoire, repseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/airrflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/963?version=13" ; + schema1:version 13 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "This pipeline analyses data for HiCAR data, a robust and sensitive multi-omic co-assay for simultaneous measurement of transcriptome, chromatin accessibility and cis-regulatory chromatin contacts." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/990?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/hicar" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hicar" ; + schema1:sdDatePublished "2024-07-12 13:21:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/990/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9925 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:55Z" ; + schema1:dateModified "2024-06-11T12:54:55Z" ; + schema1:description "This pipeline analyses data for HiCAR data, a robust and sensitive multi-omic co-assay for simultaneous measurement of transcriptome, chromatin accessibility and cis-regulatory chromatin contacts." ; + schema1:keywords "atac, ATAC-seq, HiC, hicar, Multi-omics, transcriptome" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hicar" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/990?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + ; + schema1:description """# ![IMPaCT program](impact_qc/docs/png/impact_data_logo_pink_horitzontal.png)\r +\r +[![IMPaCT](https://img.shields.io/badge/Web%20-IMPaCT-blue)](https://impact.isciii.es/)\r +[![IMPaCT-isciii](https://img.shields.io/badge/Web%20-IMPaCT--isciii-red)](https://www.isciii.es/QueHacemos/Financiacion/IMPaCT/Paginas/default.aspx)\r +[![IMPaCT-Data](https://img.shields.io/badge/Web%20-IMPaCT--Data-1d355c.svg?labelColor=000000)](https://impact-data.bsc.es/)\r +\r +## Introduction of the project\r +\r +IMPaCT-Data is the IMPaCT program that aims to support the development of a common, interoperable and integrated system for the collection and analysis of clinical and molecular data by providing the knowledge and resources available in the Spanish Science and Technology System. This development will make it possible to answer research questions based on the different clinical and molecular information systems available. 
Fundamentally, it aims to provide researchers with a population perspective based on individual data.\r +\r +The IMPaCT-Data project is divided into different work packages (WP). In the context of IMPaCT-Data WP3 (Genomics), a working group of experts worked on the generation of a specific quality control (QC) workflow for germline exome samples.\r +\r +To achieve this, a set of metrics related to human genomic data was decided upon, and the toolset or software to extract these metrics was implemented in an existing variant calling workflow called Sarek, part of the nf-core community. The final outcome is a Nextflow subworkflow, called IMPaCT-QC implemented in the Sarek pipeline.\r +\r +Below you can find the explanation of this workflow (raw pipeline), the link to the documentation of the IMPaCT QC subworkflow and a linked documentation associated to the QC metrics added in the mentioned workflow.\r +\r +- [IMPaCT-data subworkflow documentation](impact_qc/README.md)\r +\r +- [Metrics documentation](https://docs.google.com/document/d/12OWCcNKatkdJelYyiovyil-bIXDESO_K2zeIB3vncW4/edit#heading=h.cvdlfn10wodq)\r +\r +

\r + \r + \r + nf-core/sarek\r + \r +

\r +\r +[![GitHub Actions CI Status](https://github.com/nf-core/sarek/actions/workflows/ci.yml/badge.svg)](https://github.com/nf-core/sarek/actions/workflows/ci.yml)\r +[![GitHub Actions Linting Status](https://github.com/nf-core/sarek/actions/workflows/linting.yml/badge.svg)](https://github.com/nf-core/sarek/actions/workflows/linting.yml)\r +[![AWS CI](https://img.shields.io/badge/CI%20tests-full%20size-FF9900?labelColor=000000&logo=Amazon%20AWS)](https://nf-co.re/sarek/results)\r +[![nf-test](https://img.shields.io/badge/unit_tests-nf--test-337ab7.svg)](https://www.nf-test.com)\r +[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.3476425-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.3476425)\r +[![nf-test](https://img.shields.io/badge/unit_tests-nf--test-337ab7.svg)](https://www.nf-test.com)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A523.04.0-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![Launch on Seqera Platform](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Seqera%20Platform-%234256e7)](https://tower.nf/launch?pipeline=https://github.com/nf-core/sarek)\r +\r +[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23sarek-4A154B?labelColor=000000&logo=slack)](https://nfcore.slack.com/channels/sarek)\r +[![Follow on Twitter](http://img.shields.io/badge/twitter-%40nf__core-1DA1F2?labelColor=000000&logo=twitter)](https://twitter.com/nf_core)\r +[![Follow on 
Mastodon](https://img.shields.io/badge/mastodon-nf__core-6364ff?labelColor=FFFFFF&logo=mastodon)](https://mstdn.science/@nf_core)\r +[![Watch on YouTube](http://img.shields.io/badge/youtube-nf--core-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/c/nf-core)\r +\r +## Introduction\r +\r +**nf-core/sarek** is a workflow designed to detect variants on whole genome or targeted sequencing data. Initially designed for Human, and Mouse, it can work on any species with a reference genome. Sarek can also handle tumour / normal pairs and could include additional relapses.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +On release, automated continuous integration tests run the pipeline on a full-sized dataset on the AWS cloud infrastructure. This ensures that the pipeline runs on AWS, has sensible resource allocation defaults set to run on real-world datasets, and permits the persistent storage of results to benchmark between pipeline releases and other analysis sources. The results obtained from the full-sized test can be viewed on the [nf-core website](https://nf-co.re/sarek/results).\r +\r +It's listed on [Elixir - Tools and Data Services Registry](https://bio.tools/nf-core-sarek) and [Dockstore](https://dockstore.org/workflows/github.com/nf-core/sarek).\r +\r +

\r + \r +

\r +\r +## Pipeline summary\r +\r +Depending on the options and samples provided, the pipeline can currently perform the following:\r +\r +- Form consensus reads from UMI sequences (`fgbio`)\r +- Sequencing quality control and trimming (enabled by `--trim_fastq`) (`FastQC`, `fastp`)\r +- Map Reads to Reference (`BWA-mem`, `BWA-mem2`, `dragmap` or `Sentieon BWA-mem`)\r +- Process BAM file (`GATK MarkDuplicates`, `GATK BaseRecalibrator` and `GATK ApplyBQSR` or `Sentieon LocusCollector` and `Sentieon Dedup`)\r +- Summarise alignment statistics (`samtools stats`, `mosdepth`)\r +- Variant calling (enabled by `--tools`, see [compatibility](https://nf-co.re/sarek/latest/docs/usage#which-variant-calling-tool-is-implemented-for-which-data-type)):\r + - `ASCAT`\r + - `CNVkit`\r + - `Control-FREEC`\r + - `DeepVariant`\r + - `freebayes`\r + - `GATK HaplotypeCaller`\r + - `Manta`\r + - `mpileup`\r + - `MSIsensor-pro`\r + - `Mutect2`\r + - `Sentieon Haplotyper`\r + - `Strelka2`\r + - `TIDDIT`\r +- Variant filtering and annotation (`SnpEff`, `Ensembl VEP`, `BCFtools annotate`)\r +- Summarise and represent QC (`MultiQC`)\r +\r +

\r + \r +

\r +\r +## Usage\r +\r +> [!NOTE]\r +> If you are new to Nextflow and nf-core, please refer to [this page](https://nf-co.re/docs/usage/installation) on how to set-up Nextflow. Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline) with `-profile test` before running the workflow on actual data.\r +\r +First, prepare a samplesheet with your input data that looks as follows:\r +\r +`samplesheet.csv`:\r +\r +```csv\r +patient,sample,lane,fastq_1,fastq_2\r +ID1,S1,L002,ID1_S1_L002_R1_001.fastq.gz,ID1_S1_L002_R2_001.fastq.gz\r +```\r +\r +Each row represents a pair of fastq files (paired end).\r +\r +Now, you can run the pipeline using:\r +\r +```bash\r +nextflow run nf-core/sarek \\\r + -profile \\\r + --input samplesheet.csv \\\r + --outdir \r +```\r +\r +> [!WARNING]\r +> Please provide pipeline parameters via the CLI or Nextflow `-params-file` option. Custom config files including those provided by the `-c` Nextflow option can be used to provide any configuration _**except for parameters**_;\r +> see [docs](https://nf-co.re/usage/configuration#custom-configuration-files).\r +\r +For more details and further functionality, please refer to the [usage documentation](https://nf-co.re/sarek/usage) and the [parameter documentation](https://nf-co.re/sarek/parameters).\r +\r +## Pipeline output\r +\r +To see the results of an example test run with a full size dataset refer to the [results](https://nf-co.re/sarek/results) tab on the nf-core website pipeline page.\r +For more details about the output files and reports, please refer to the\r +[output documentation](https://nf-co.re/sarek/output).\r +\r +## Benchmarking\r +\r +On each release, the pipeline is run on 3 full size tests:\r +\r +- `test_full` runs tumor-normal data for one patient from the SEQ2C consortium\r +- `test_full_germline` runs a WGS 30X Genome-in-a-Bottle(NA12878) dataset\r +- `test_full_germline_ncbench_agilent` runs two WES samples with 75M and 200M reads (data 
available [here](https://github.com/ncbench/ncbench-workflow#contributing-callsets)). The results are uploaded to Zenodo, evaluated against a truth dataset, and results are made available via the [NCBench dashboard](https://ncbench.github.io/report/report.html#).\r +\r +## Credits\r +\r +Sarek was originally written by Maxime U Garcia and Szilveszter Juhos at the [National Genomics Infastructure](https://ngisweden.scilifelab.se) and [National Bioinformatics Infastructure Sweden](https://nbis.se) which are both platforms at [SciLifeLab](https://scilifelab.se), with the support of [The Swedish Childhood Tumor Biobank (Barntumörbanken)](https://ki.se/forskning/barntumorbanken).\r +Friederike Hanssen and Gisela Gabernet at [QBiC](https://www.qbic.uni-tuebingen.de/) later joined and helped with further development.\r +\r +The Nextflow DSL2 conversion of the pipeline was lead by Friederike Hanssen and Maxime U Garcia.\r +\r +Maintenance is now lead by Friederike Hanssen and Maxime U Garcia (now at [Seqera Labs](https://seqera/io))\r +\r +Main developers:\r +\r +- [Maxime U Garcia](https://github.com/maxulysse)\r +- [Friederike Hanssen](https://github.com/FriederikeHanssen)\r +\r +We thank the following people for their extensive assistance in the development of this pipeline:\r +\r +- [Abhinav Sharma](https://github.com/abhi18av)\r +- [Adam Talbot](https://github.com/adamrtalbot)\r +- [Adrian Lärkeryd](https://github.com/adrlar)\r +- [Alexander Peltzer](https://github.com/apeltzer)\r +- [Alison Meynert](https://github.com/ameynert)\r +- [Anders Sune Pedersen](https://github.com/asp8200)\r +- [arontommi](https://github.com/arontommi)\r +- [BarryDigby](https://github.com/BarryDigby)\r +- [Bekir Ergüner](https://github.com/berguner)\r +- [bjornnystedt](https://github.com/bjornnystedt)\r +- [cgpu](https://github.com/cgpu)\r +- [Chela James](https://github.com/chelauk)\r +- [David Mas-Ponte](https://github.com/davidmasp)\r +- [Edmund Miller](https://github.com/edmundmiller)\r 
+- [Francesco Lescai](https://github.com/lescai)\r +- [Gavin Mackenzie](https://github.com/GCJMackenzie)\r +- [Gisela Gabernet](https://github.com/ggabernet)\r +- [Grant Neilson](https://github.com/grantn5)\r +- [gulfshores](https://github.com/gulfshores)\r +- [Harshil Patel](https://github.com/drpatelh)\r +- [James A. Fellows Yates](https://github.com/jfy133)\r +- [Jesper Eisfeldt](https://github.com/J35P312)\r +- [Johannes Alneberg](https://github.com/alneberg)\r +- [José Fernández Navarro](https://github.com/jfnavarro)\r +- [Júlia Mir Pedrol](https://github.com/mirpedrol)\r +- [Ken Brewer](https://github.com/kenibrewer)\r +- [Lasse Westergaard Folkersen](https://github.com/lassefolkersen)\r +- [Lucia Conde](https://github.com/lconde-ucl)\r +- [Malin Larsson](https://github.com/malinlarsson)\r +- [Marcel Martin](https://github.com/marcelm)\r +- [Nick Smith](https://github.com/nickhsmith)\r +- [Nicolas Schcolnicov](https://github.com/nschcolnicov)\r +- [Nilesh Tawari](https://github.com/nilesh-tawari)\r +- [Nils Homer](https://github.com/nh13)\r +- [Olga Botvinnik](https://github.com/olgabot)\r +- [Oskar Wacker](https://github.com/WackerO)\r +- [pallolason](https://github.com/pallolason)\r +- [Paul Cantalupo](https://github.com/pcantalupo)\r +- [Phil Ewels](https://github.com/ewels)\r +- [Sabrina Krakau](https://github.com/skrakau)\r +- [Sam Minot](https://github.com/sminot)\r +- [Sebastian-D](https://github.com/Sebastian-D)\r +- [Silvia Morini](https://github.com/silviamorins)\r +- [Simon Pearce](https://github.com/SPPearce)\r +- [Solenne Correard](https://github.com/scorreard)\r +- [Susanne Jodoin](https://github.com/SusiJo)\r +- [Szilveszter Juhos](https://github.com/szilvajuhos)\r +- [Tobias Koch](https://github.com/KochTobi)\r +- [Winni Kretzschmar](https://github.com/winni2k)\r +\r +## Acknowledgements\r +\r +| [![Barntumörbanken](docs/images/BTB_logo.png)](https://ki.se/forskning/barntumorbanken) | 
[![SciLifeLab](docs/images/SciLifeLab_logo.png)](https://scilifelab.se) |\r +| :-----------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------: |\r +| [![National Genomics Infrastructure](docs/images/NGI_logo.png)](https://ngisweden.scilifelab.se/) | [![National Bioinformatics Infrastructure Sweden](docs/images/NBIS_logo.png)](https://nbis.se) |\r +| [![QBiC](docs/images/QBiC_logo.png)](https://www.qbic.uni-tuebingen.de) | [![GHGA](docs/images/GHGA_logo.png)](https://www.ghga.de/) |\r +| [![DNGC](docs/images/DNGC_logo.png)](https://eng.ngc.dk/) | |\r +\r +## Contributions & Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +For further information or help, don't hesitate to get in touch on the [Slack `#sarek` channel](https://nfcore.slack.com/channels/sarek) (you can join with [this invite](https://nf-co.re/join/slack)), or contact us: [Maxime U Garcia](mailto:maxime.garcia@seqera.io?subject=[GitHub]%20nf-core/sarek), [Friederike Hanssen](mailto:friederike.hanssen@qbic.uni-tuebingen.de?subject=[GitHub]%20nf-core/sarek)\r +\r +## Citations\r +\r +If you use `nf-core/sarek` for your analysis, please cite the `Sarek` article as follows:\r +\r +> Friederike Hanssen, Maxime U Garcia, Lasse Folkersen, Anders Sune Pedersen, Francesco Lescai, Susanne Jodoin, Edmund Miller, Oskar Wacker, Nicholas Smith, nf-core community, Gisela Gabernet, Sven Nahnsen **Scalable and efficient DNA sequencing analysis on different compute infrastructures aiding variant discovery** _NAR Genomics and Bioinformatics_ Volume 6, Issue 2, June 2024, lqae031, [doi: 10.1093/nargab/lqae031](https://doi.org/10.1093/nargab/lqae031).\r +\r +> Garcia M, Juhos S, Larsson M et al. 
**Sarek: A portable workflow for whole-genome sequencing analysis of germline and somatic variants [version 2; peer review: 2 approved]** _F1000Research_ 2020, 9:63 [doi: 10.12688/f1000research.16665.2](http://dx.doi.org/10.12688/f1000research.16665.2).\r +\r +You can cite the sarek zenodo record for a specific version using the following [doi: 10.5281/zenodo.3476425](https://doi.org/10.5281/zenodo.3476425)\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +You can cite the `nf-core` publication as follows:\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +\r +## CHANGELOG\r +\r +- [CHANGELOG](CHANGELOG.md)\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.1030.2" ; + schema1:isBasedOn "https://github.com/EGA-archive/sarek-IMPaCT-data-QC.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for IMPaCT-Data quality control workflow implementation in nf-core/Sarek" ; + schema1:sdDatePublished "2024-07-12 13:17:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1030/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 19394 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-12T13:11:24Z" ; + schema1:dateModified "2024-06-12T13:19:08Z" ; + schema1:description """# ![IMPaCT program](impact_qc/docs/png/impact_data_logo_pink_horitzontal.png)\r +\r +[![IMPaCT](https://img.shields.io/badge/Web%20-IMPaCT-blue)](https://impact.isciii.es/)\r +[![IMPaCT-isciii](https://img.shields.io/badge/Web%20-IMPaCT--isciii-red)](https://www.isciii.es/QueHacemos/Financiacion/IMPaCT/Paginas/default.aspx)\r +[![IMPaCT-Data](https://img.shields.io/badge/Web%20-IMPaCT--Data-1d355c.svg?labelColor=000000)](https://impact-data.bsc.es/)\r +\r +## Introduction of the project\r +\r +IMPaCT-Data is the IMPaCT program that aims to support the development of a common, interoperable and integrated system for the collection and analysis of clinical and molecular data by providing the knowledge and resources available in the Spanish Science and Technology System. This development will make it possible to answer research questions based on the different clinical and molecular information systems available. Fundamentally, it aims to provide researchers with a population perspective based on individual data.\r +\r +The IMPaCT-Data project is divided into different work packages (WP). In the context of IMPaCT-Data WP3 (Genomics), a working group of experts worked on the generation of a specific quality control (QC) workflow for germline exome samples.\r +\r +To achieve this, a set of metrics related to human genomic data was decided upon, and the toolset or software to extract these metrics was implemented in an existing variant calling workflow called Sarek, part of the nf-core community. 
The final outcome is a Nextflow subworkflow, called IMPaCT-QC implemented in the Sarek pipeline.\r +\r +Below you can find the explanation of this workflow (raw pipeline), the link to the documentation of the IMPaCT QC subworkflow and a linked documentation associated to the QC metrics added in the mentioned workflow.\r +\r +- [IMPaCT-data subworkflow documentation](impact_qc/README.md)\r +\r +- [Metrics documentation](https://docs.google.com/document/d/12OWCcNKatkdJelYyiovyil-bIXDESO_K2zeIB3vncW4/edit#heading=h.cvdlfn10wodq)\r +\r +

\r + \r + \r + nf-core/sarek\r + \r +

\r +\r +[![GitHub Actions CI Status](https://github.com/nf-core/sarek/actions/workflows/ci.yml/badge.svg)](https://github.com/nf-core/sarek/actions/workflows/ci.yml)\r +[![GitHub Actions Linting Status](https://github.com/nf-core/sarek/actions/workflows/linting.yml/badge.svg)](https://github.com/nf-core/sarek/actions/workflows/linting.yml)\r +[![AWS CI](https://img.shields.io/badge/CI%20tests-full%20size-FF9900?labelColor=000000&logo=Amazon%20AWS)](https://nf-co.re/sarek/results)\r +[![nf-test](https://img.shields.io/badge/unit_tests-nf--test-337ab7.svg)](https://www.nf-test.com)\r +[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.3476425-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.3476425)\r +[![nf-test](https://img.shields.io/badge/unit_tests-nf--test-337ab7.svg)](https://www.nf-test.com)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A523.04.0-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![Launch on Seqera Platform](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Seqera%20Platform-%234256e7)](https://tower.nf/launch?pipeline=https://github.com/nf-core/sarek)\r +\r +[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23sarek-4A154B?labelColor=000000&logo=slack)](https://nfcore.slack.com/channels/sarek)\r +[![Follow on Twitter](http://img.shields.io/badge/twitter-%40nf__core-1DA1F2?labelColor=000000&logo=twitter)](https://twitter.com/nf_core)\r +[![Follow on 
Mastodon](https://img.shields.io/badge/mastodon-nf__core-6364ff?labelColor=FFFFFF&logo=mastodon)](https://mstdn.science/@nf_core)\r +[![Watch on YouTube](http://img.shields.io/badge/youtube-nf--core-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/c/nf-core)\r +\r +## Introduction\r +\r +**nf-core/sarek** is a workflow designed to detect variants on whole genome or targeted sequencing data. Initially designed for Human, and Mouse, it can work on any species with a reference genome. Sarek can also handle tumour / normal pairs and could include additional relapses.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +On release, automated continuous integration tests run the pipeline on a full-sized dataset on the AWS cloud infrastructure. This ensures that the pipeline runs on AWS, has sensible resource allocation defaults set to run on real-world datasets, and permits the persistent storage of results to benchmark between pipeline releases and other analysis sources. The results obtained from the full-sized test can be viewed on the [nf-core website](https://nf-co.re/sarek/results).\r +\r +It's listed on [Elixir - Tools and Data Services Registry](https://bio.tools/nf-core-sarek) and [Dockstore](https://dockstore.org/workflows/github.com/nf-core/sarek).\r +\r +

\r + \r +

\r +\r +## Pipeline summary\r +\r +Depending on the options and samples provided, the pipeline can currently perform the following:\r +\r +- Form consensus reads from UMI sequences (`fgbio`)\r +- Sequencing quality control and trimming (enabled by `--trim_fastq`) (`FastQC`, `fastp`)\r +- Map Reads to Reference (`BWA-mem`, `BWA-mem2`, `dragmap` or `Sentieon BWA-mem`)\r +- Process BAM file (`GATK MarkDuplicates`, `GATK BaseRecalibrator` and `GATK ApplyBQSR` or `Sentieon LocusCollector` and `Sentieon Dedup`)\r +- Summarise alignment statistics (`samtools stats`, `mosdepth`)\r +- Variant calling (enabled by `--tools`, see [compatibility](https://nf-co.re/sarek/latest/docs/usage#which-variant-calling-tool-is-implemented-for-which-data-type)):\r + - `ASCAT`\r + - `CNVkit`\r + - `Control-FREEC`\r + - `DeepVariant`\r + - `freebayes`\r + - `GATK HaplotypeCaller`\r + - `Manta`\r + - `mpileup`\r + - `MSIsensor-pro`\r + - `Mutect2`\r + - `Sentieon Haplotyper`\r + - `Strelka2`\r + - `TIDDIT`\r +- Variant filtering and annotation (`SnpEff`, `Ensembl VEP`, `BCFtools annotate`)\r +- Summarise and represent QC (`MultiQC`)\r +\r +

\r + \r +

\r +\r +## Usage\r +\r +> [!NOTE]\r +> If you are new to Nextflow and nf-core, please refer to [this page](https://nf-co.re/docs/usage/installation) on how to set-up Nextflow. Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline) with `-profile test` before running the workflow on actual data.\r +\r +First, prepare a samplesheet with your input data that looks as follows:\r +\r +`samplesheet.csv`:\r +\r +```csv\r +patient,sample,lane,fastq_1,fastq_2\r +ID1,S1,L002,ID1_S1_L002_R1_001.fastq.gz,ID1_S1_L002_R2_001.fastq.gz\r +```\r +\r +Each row represents a pair of fastq files (paired end).\r +\r +Now, you can run the pipeline using:\r +\r +```bash\r +nextflow run nf-core/sarek \\\r + -profile \\\r + --input samplesheet.csv \\\r + --outdir \r +```\r +\r +> [!WARNING]\r +> Please provide pipeline parameters via the CLI or Nextflow `-params-file` option. Custom config files including those provided by the `-c` Nextflow option can be used to provide any configuration _**except for parameters**_;\r +> see [docs](https://nf-co.re/usage/configuration#custom-configuration-files).\r +\r +For more details and further functionality, please refer to the [usage documentation](https://nf-co.re/sarek/usage) and the [parameter documentation](https://nf-co.re/sarek/parameters).\r +\r +## Pipeline output\r +\r +To see the results of an example test run with a full size dataset refer to the [results](https://nf-co.re/sarek/results) tab on the nf-core website pipeline page.\r +For more details about the output files and reports, please refer to the\r +[output documentation](https://nf-co.re/sarek/output).\r +\r +## Benchmarking\r +\r +On each release, the pipeline is run on 3 full size tests:\r +\r +- `test_full` runs tumor-normal data for one patient from the SEQ2C consortium\r +- `test_full_germline` runs a WGS 30X Genome-in-a-Bottle(NA12878) dataset\r +- `test_full_germline_ncbench_agilent` runs two WES samples with 75M and 200M reads (data 
available [here](https://github.com/ncbench/ncbench-workflow#contributing-callsets)). The results are uploaded to Zenodo, evaluated against a truth dataset, and results are made available via the [NCBench dashboard](https://ncbench.github.io/report/report.html#).\r +\r +## Credits\r +\r +Sarek was originally written by Maxime U Garcia and Szilveszter Juhos at the [National Genomics Infastructure](https://ngisweden.scilifelab.se) and [National Bioinformatics Infastructure Sweden](https://nbis.se) which are both platforms at [SciLifeLab](https://scilifelab.se), with the support of [The Swedish Childhood Tumor Biobank (Barntumörbanken)](https://ki.se/forskning/barntumorbanken).\r +Friederike Hanssen and Gisela Gabernet at [QBiC](https://www.qbic.uni-tuebingen.de/) later joined and helped with further development.\r +\r +The Nextflow DSL2 conversion of the pipeline was lead by Friederike Hanssen and Maxime U Garcia.\r +\r +Maintenance is now lead by Friederike Hanssen and Maxime U Garcia (now at [Seqera Labs](https://seqera/io))\r +\r +Main developers:\r +\r +- [Maxime U Garcia](https://github.com/maxulysse)\r +- [Friederike Hanssen](https://github.com/FriederikeHanssen)\r +\r +We thank the following people for their extensive assistance in the development of this pipeline:\r +\r +- [Abhinav Sharma](https://github.com/abhi18av)\r +- [Adam Talbot](https://github.com/adamrtalbot)\r +- [Adrian Lärkeryd](https://github.com/adrlar)\r +- [Alexander Peltzer](https://github.com/apeltzer)\r +- [Alison Meynert](https://github.com/ameynert)\r +- [Anders Sune Pedersen](https://github.com/asp8200)\r +- [arontommi](https://github.com/arontommi)\r +- [BarryDigby](https://github.com/BarryDigby)\r +- [Bekir Ergüner](https://github.com/berguner)\r +- [bjornnystedt](https://github.com/bjornnystedt)\r +- [cgpu](https://github.com/cgpu)\r +- [Chela James](https://github.com/chelauk)\r +- [David Mas-Ponte](https://github.com/davidmasp)\r +- [Edmund Miller](https://github.com/edmundmiller)\r 
+- [Francesco Lescai](https://github.com/lescai)\r +- [Gavin Mackenzie](https://github.com/GCJMackenzie)\r +- [Gisela Gabernet](https://github.com/ggabernet)\r +- [Grant Neilson](https://github.com/grantn5)\r +- [gulfshores](https://github.com/gulfshores)\r +- [Harshil Patel](https://github.com/drpatelh)\r +- [James A. Fellows Yates](https://github.com/jfy133)\r +- [Jesper Eisfeldt](https://github.com/J35P312)\r +- [Johannes Alneberg](https://github.com/alneberg)\r +- [José Fernández Navarro](https://github.com/jfnavarro)\r +- [Júlia Mir Pedrol](https://github.com/mirpedrol)\r +- [Ken Brewer](https://github.com/kenibrewer)\r +- [Lasse Westergaard Folkersen](https://github.com/lassefolkersen)\r +- [Lucia Conde](https://github.com/lconde-ucl)\r +- [Malin Larsson](https://github.com/malinlarsson)\r +- [Marcel Martin](https://github.com/marcelm)\r +- [Nick Smith](https://github.com/nickhsmith)\r +- [Nicolas Schcolnicov](https://github.com/nschcolnicov)\r +- [Nilesh Tawari](https://github.com/nilesh-tawari)\r +- [Nils Homer](https://github.com/nh13)\r +- [Olga Botvinnik](https://github.com/olgabot)\r +- [Oskar Wacker](https://github.com/WackerO)\r +- [pallolason](https://github.com/pallolason)\r +- [Paul Cantalupo](https://github.com/pcantalupo)\r +- [Phil Ewels](https://github.com/ewels)\r +- [Sabrina Krakau](https://github.com/skrakau)\r +- [Sam Minot](https://github.com/sminot)\r +- [Sebastian-D](https://github.com/Sebastian-D)\r +- [Silvia Morini](https://github.com/silviamorins)\r +- [Simon Pearce](https://github.com/SPPearce)\r +- [Solenne Correard](https://github.com/scorreard)\r +- [Susanne Jodoin](https://github.com/SusiJo)\r +- [Szilveszter Juhos](https://github.com/szilvajuhos)\r +- [Tobias Koch](https://github.com/KochTobi)\r +- [Winni Kretzschmar](https://github.com/winni2k)\r +\r +## Acknowledgements\r +\r +| [![Barntumörbanken](docs/images/BTB_logo.png)](https://ki.se/forskning/barntumorbanken) | 
[![SciLifeLab](docs/images/SciLifeLab_logo.png)](https://scilifelab.se) |\r +| :-----------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------: |\r +| [![National Genomics Infrastructure](docs/images/NGI_logo.png)](https://ngisweden.scilifelab.se/) | [![National Bioinformatics Infrastructure Sweden](docs/images/NBIS_logo.png)](https://nbis.se) |\r +| [![QBiC](docs/images/QBiC_logo.png)](https://www.qbic.uni-tuebingen.de) | [![GHGA](docs/images/GHGA_logo.png)](https://www.ghga.de/) |\r +| [![DNGC](docs/images/DNGC_logo.png)](https://eng.ngc.dk/) | |\r +\r +## Contributions & Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +For further information or help, don't hesitate to get in touch on the [Slack `#sarek` channel](https://nfcore.slack.com/channels/sarek) (you can join with [this invite](https://nf-co.re/join/slack)), or contact us: [Maxime U Garcia](mailto:maxime.garcia@seqera.io?subject=[GitHub]%20nf-core/sarek), [Friederike Hanssen](mailto:friederike.hanssen@qbic.uni-tuebingen.de?subject=[GitHub]%20nf-core/sarek)\r +\r +## Citations\r +\r +If you use `nf-core/sarek` for your analysis, please cite the `Sarek` article as follows:\r +\r +> Friederike Hanssen, Maxime U Garcia, Lasse Folkersen, Anders Sune Pedersen, Francesco Lescai, Susanne Jodoin, Edmund Miller, Oskar Wacker, Nicholas Smith, nf-core community, Gisela Gabernet, Sven Nahnsen **Scalable and efficient DNA sequencing analysis on different compute infrastructures aiding variant discovery** _NAR Genomics and Bioinformatics_ Volume 6, Issue 2, June 2024, lqae031, [doi: 10.1093/nargab/lqae031](https://doi.org/10.1093/nargab/lqae031).\r +\r +> Garcia M, Juhos S, Larsson M et al. 
**Sarek: A portable workflow for whole-genome sequencing analysis of germline and somatic variants [version 2; peer review: 2 approved]** _F1000Research_ 2020, 9:63 [doi: 10.12688/f1000research.16665.2](http://dx.doi.org/10.12688/f1000research.16665.2).\r +\r +You can cite the sarek zenodo record for a specific version using the following [doi: 10.5281/zenodo.3476425](https://doi.org/10.5281/zenodo.3476425)\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +You can cite the `nf-core` publication as follows:\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +\r +## CHANGELOG\r +\r +- [CHANGELOG](CHANGELOG.md)\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1030?version=2" ; + schema1:keywords "Bioinformatics, Nextflow, variant calling, wes, WGS, NGS, EGA-archive, quality control" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "IMPaCT-Data quality control workflow implementation in nf-core/Sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1030?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# CMIP tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of computing **classical molecular interaction potentials** from **protein structures**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
Examples shown are **Molecular Interaction Potentials (MIPs) grids, protein-protein/ligand interaction potentials, and protein titration**. The particular structures used are the **Lysozyme** protein (PDB code [1AKI](https://www.rcsb.org/structure/1aki)), and a MD simulation of the complex formed by the **SARS-CoV-2 Receptor Binding Domain and the human Angiotensin Converting Enzyme 2** (PDB code [6VW1](https://www.rcsb.org/structure/6vw1)).\r +\r +The code wrapped is the ***Classical Molecular Interaction Potentials (CMIP)*** code:\r +\r +**Classical molecular interaction potentials: Improved setup procedure in molecular dynamics simulations of proteins.**\r +*Gelpí, J.L., Kalko, S.G., Barril, X., Cirera, J., de la Cruz, X., Luque, F.J. and Orozco, M. (2001)*\r +*Proteins, 45: 428-437. [https://doi.org/10.1002/prot.1159](https://doi.org/10.1002/prot.1159)*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.773.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_cmip" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Classical Molecular 
Interaction Potentials" ; + schema1:sdDatePublished "2024-07-12 13:24:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/773/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 78658 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-04T14:54:36Z" ; + schema1:dateModified "2024-03-04T15:08:10Z" ; + schema1:description """# CMIP tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of computing **classical molecular interaction potentials** from **protein structures**, step by step, using the **BioExcel Building Blocks library (biobb)**. Examples shown are **Molecular Interaction Potentials (MIPs) grids, protein-protein/ligand interaction potentials, and protein titration**. The particular structures used are the **Lysozyme** protein (PDB code [1AKI](https://www.rcsb.org/structure/1aki)), and a MD simulation of the complex formed by the **SARS-CoV-2 Receptor Binding Domain and the human Angiotensin Converting Enzyme 2** (PDB code [6VW1](https://www.rcsb.org/structure/6vw1)).\r +\r +The code wrapped is the ***Classical Molecular Interaction Potentials (CMIP)*** code:\r +\r +**Classical molecular interaction potentials: Improved setup procedure in molecular dynamics simulations of proteins.**\r +*Gelpí, J.L., Kalko, S.G., Barril, X., Cirera, J., de la Cruz, X., Luque, F.J. and Orozco, M. (2001)*\r +*Proteins, 45: 428-437. 
[https://doi.org/10.1002/prot.1159](https://doi.org/10.1002/prot.1159)*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/773?version=1" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Classical Molecular Interaction Potentials" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_cmip/blob/main/biobb_wf_cmip/notebooks/biobb_wf_cmip.ipynb" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/963?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/airrflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/airrflow" ; + schema1:sdDatePublished "2024-07-12 13:22:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/963/ro_crate?version=9" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10787 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:37Z" ; + schema1:dateModified "2024-06-11T12:54:37Z" ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/963?version=13" ; + schema1:keywords "airr, b-cell, immcantation, immunorepertoire, repseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/airrflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/963?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + schema1:datePublished "2023-10-03T13:42:23.090753" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "parallel-accession-download/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.12" . + + a schema1:Dataset ; + schema1:datePublished "2024-04-15T14:40:07.941038" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + , + ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-atac-cutandrun" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.7" . 
+ + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-pe" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.7" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 
[823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.128.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (PDBe REST API)" ; + schema1:sdDatePublished "2024-07-12 13:33:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/128/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 37193 ; + schema1:creator , + , + ; + schema1:dateCreated "2021-06-29T09:19:51Z" ; + schema1:dateModified "2022-09-15T11:17:49Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/128?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (PDBe REST API)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://raw.githubusercontent.com/bioexcel/biobb_wf_virtual-screening/master/biobb_wf_virtual-screening/notebooks/ebi_api/wf_vs_ebi_api.ipynb" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# Snakemake workflow: dna-seq-varlociraptor\r +\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥6.3.0-brightgreen.svg)](https://snakemake.github.io)\r +[![GitHub actions status](https://github.com/snakemake-workflows/dna-seq-varlociraptor/workflows/Tests/badge.svg?branch=master)](https://github.com/snakemake-workflows/dna-seq-varlociraptor/actions?query=branch%3Amaster+workflow%3ATests)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.4675661.svg)](https://doi.org/10.5281/zenodo.4675661)\r +\r +\r +A Snakemake workflow for calling small and structural variants under any kind of scenario (tumor/normal, tumor/normal/relapse, germline, pedigree, populations) via the unified statistical model of [Varlociraptor](https://varlociraptor.github.io).\r +\r +\r +## Usage\r +\r +The usage of this workflow is described in the [Snakemake Workflow Catalog](https://snakemake.github.io/snakemake-workflow-catalog/?usage=snakemake-workflows%2Fdna-seq-varlociraptor).\r +\r +If you use this workflow in a paper, don't forget to give credits to the authors by citing the URL of this (original) repository and its DOI (see above).\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/382?version=1" ; + schema1:isBasedOn "https://github.com/snakemake-workflows/dna-seq-varlociraptor.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for dna-seq-varlociraptor workflow" ; + schema1:sdDatePublished "2024-07-12 13:35:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/382/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1143 ; + schema1:creator ; + schema1:dateCreated "2022-09-02T10:48:32Z" ; + schema1:dateModified "2023-01-16T14:02:24Z" ; + schema1:description """# Snakemake workflow: dna-seq-varlociraptor\r +\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥6.3.0-brightgreen.svg)](https://snakemake.github.io)\r +[![GitHub actions status](https://github.com/snakemake-workflows/dna-seq-varlociraptor/workflows/Tests/badge.svg?branch=master)](https://github.com/snakemake-workflows/dna-seq-varlociraptor/actions?query=branch%3Amaster+workflow%3ATests)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.4675661.svg)](https://doi.org/10.5281/zenodo.4675661)\r +\r +\r +A Snakemake workflow for calling small and structural variants under any kind of scenario (tumor/normal, tumor/normal/relapse, germline, pedigree, populations) via the unified statistical model of [Varlociraptor](https://varlociraptor.github.io).\r +\r +\r +## Usage\r +\r +The usage of this workflow is described in the [Snakemake Workflow Catalog](https://snakemake.github.io/snakemake-workflow-catalog/?usage=snakemake-workflows%2Fdna-seq-varlociraptor).\r +\r +If you use this workflow in a paper, don't forget to give credits to the authors by citing the URL of this (original) repository and its DOI (see above).\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "dna-seq-varlociraptor workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/382?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A Nanostring nCounter analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1003?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/nanostring" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nanostring" ; + schema1:sdDatePublished "2024-07-12 13:20:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1003/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8462 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:05Z" ; + schema1:dateModified "2024-06-11T12:55:05Z" ; + schema1:description "A Nanostring nCounter analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1003?version=4" ; + schema1:keywords "nanostring, nanostringnorm" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nanostring" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1003?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=12" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-07-12 13:22:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=12" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8929 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:12Z" ; + schema1:dateModified "2024-06-11T12:55:12Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=12" ; + schema1:version 12 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.120.4" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:34:15 +0100" ; + schema1:url "https://workflowhub.eu/workflows/120/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 57824 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-13T15:35:54Z" ; + schema1:dateModified "2023-04-14T07:20:30Z" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/120?version=5" ; + schema1:isPartOf , + , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_wf_md_setup/blob/master/biobb_wf_md_setup/notebooks/biobb_MDsetup_tutorial.ipynb" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and modern ancient DNA pipeline in Nextflow and with cloud support." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-07-12 13:21:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3515 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:49Z" ; + schema1:dateModified "2024-06-11T12:54:49Z" ; + schema1:description "A fully reproducible and modern ancient DNA pipeline in Nextflow and with cloud support." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=6" ; + schema1:version 6 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Workflow converts one or multiple bam files back to the fastq format" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/968?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/bamtofastq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for qbic-pipelines/bamtofastq" ; + schema1:sdDatePublished "2024-07-12 13:22:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/968/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4042 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:43Z" ; + schema1:dateModified "2024-06-11T12:54:43Z" ; + schema1:description "Workflow converts one or multiple bam files back to the fastq format" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/968?version=5" ; + schema1:keywords "bamtofastq, Conversion, cramtofastq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "qbic-pipelines/bamtofastq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/968?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-07-12 13:19:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7941 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:15Z" ; + schema1:dateModified "2024-06-11T12:55:15Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/986?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/fetchngs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fetchngs" ; + schema1:sdDatePublished "2024-07-12 13:21:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/986/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6418 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:53Z" ; + schema1:dateModified "2024-06-11T12:54:53Z" ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/986?version=13" ; + schema1:keywords "ddbj, download, ena, FASTQ, geo, sra, synapse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fetchngs" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/986?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for analysis of Molecular Pixelation assays" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1010?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/pixelator" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/pixelator" ; + schema1:sdDatePublished "2024-07-12 13:20:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1010/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10580 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:07Z" ; + schema1:dateModified "2024-06-11T12:55:07Z" ; + schema1:description "Pipeline for analysis of Molecular Pixelation assays" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1010?version=5" ; + schema1:keywords "molecular-pixelation, pixelator, pixelgen-technologies, proteins, single-cell, single-cell-omics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/pixelator" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1010?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """This WF is based on the official Covid19-Galaxy assembly workflow as available from https://covid19.galaxyproject.org/genomics/2-assembly/ . It has been adapted to suit the needs of the analysis of metagenomics sequencing data. Prior to be submitted to INDSC databases, these data need to be cleaned from contaminant reads, including reads of possible human origin. \r +\r +The assembly of the SARS-CoV-2 genome is performed using both the Unicycler and the SPAdes assemblers, similar to the original WV.\r +\r +To facilitate the deposition of raw sequencing reads in INDSC databases, different fastq files are saved during the different steps of the WV. Which reflect different levels of stringency/filtration:\r +\r +(1) Initially fastq are filtered to remove human reads. \r +(2) Subsequently, a similarity search is performed against the reference assembly of the SARS-CoV-2 genome, to retain only SARS-CoV-2 like reads. 
\r +(3) Finally, SARS-CoV-2 reads are assembled, and the bowtie2 program is used to identify (and save in the corresponding fastq files) only reads that are completely identical to the final assembly of the genome.\r +\r +Any of the fastq files produced in (1), (2) or (3) are suitable for being submitted in raw reads repositories. While the files filtered according to (1) are richer and contain more data, including for example genomic sequences of different microbes living in the oral cavity; files filtered according to (3) contain only the reads that are completely identical to the final assembly. This should guarantee that any re-analysis/re-assembly of these always produce consistent and identical results. File obtained at (2) include all the reads in the sequencing reaction that had some degree of similarity with the reference SARS-CoV-2 genome, these may include subgenomic RNAs, but also polymorphic regions/variants in the case of a coinfection by multiple SARS-CoV-2 strains. Consequently, reanalysis of these data is not guarateed to produce identical and consistent results, depending on the parameters used during the assembly. However, these data contain more information.\r +\r +Please feel free to comment, ask questions and/or add suggestions\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/68?version=1" ; + schema1:isBasedOn "https://raw.githubusercontent.com/matteo14c/Galaxy_wfs/main/Galaxy-Workflow-MC_COVID19like_Assembly_Reads.ga" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for MC_COVID19like_Assembly_Reads" ; + schema1:sdDatePublished "2024-07-12 13:37:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/68/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 26705 ; + schema1:dateCreated "2020-11-04T18:35:15Z" ; + schema1:dateModified "2023-02-13T14:06:45Z" ; + schema1:description """This WF is based on the official Covid19-Galaxy assembly workflow as available from https://covid19.galaxyproject.org/genomics/2-assembly/ . It has been adapted to suit the needs of the analysis of metagenomics sequencing data. Prior to be submitted to INDSC databases, these data need to be cleaned from contaminant reads, including reads of possible human origin. \r +\r +The assembly of the SARS-CoV-2 genome is performed using both the Unicycler and the SPAdes assemblers, similar to the original WV.\r +\r +To facilitate the deposition of raw sequencing reads in INDSC databases, different fastq files are saved during the different steps of the WV. Which reflect different levels of stringency/filtration:\r +\r +(1) Initially fastq are filtered to remove human reads. \r +(2) Subsequently, a similarity search is performed against the reference assembly of the SARS-CoV-2 genome, to retain only SARS-CoV-2 like reads. \r +(3) Finally, SARS-CoV-2 reads are assembled, and the bowtie2 program is used to identify (and save in the corresponding fastq files) only reads that are completely identical to the final assembly of the genome.\r +\r +Any of the fastq files produced in (1), (2) or (3) are suitable for being submitted in raw reads repositories. While the files filtered according to (1) are richer and contain more data, including for example genomic sequences of different microbes living in the oral cavity; files filtered according to (3) contain only the reads that are completely identical to the final assembly. This should guarantee that any re-analysis/re-assembly of these always produce consistent and identical results. 
File obtained at (2) include all the reads in the sequencing reaction that had some degree of similarity with the reference SARS-CoV-2 genome, these may include subgenomic RNAs, but also polymorphic regions/variants in the case of a coinfection by multiple SARS-CoV-2 strains. Consequently, reanalysis of these data is not guarateed to produce identical and consistent results, depending on the parameters used during the assembly. However, these data contain more information.\r +\r +Please feel free to comment, ask questions and/or add suggestions\r +\r +""" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "MC_COVID19like_Assembly_Reads" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/68?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 369188 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-07T20:20:41.921844" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Purge-duplicate-contigs-VGP6/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicate-contigs-VGP6/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:version "0.2.0" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description "Finding potential muon stopping sites in crystalline copper" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/757?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Finding the Muon Stopping Site using PyMuonSuite" ; + schema1:sdDatePublished "2024-07-12 13:24:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/757/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8353 ; + schema1:creator , + ; + schema1:dateCreated "2024-02-15T11:52:27Z" ; + schema1:dateModified "2024-02-15T11:56:02Z" ; + schema1:description "Finding potential muon stopping sites in crystalline copper" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Finding the Muon Stopping Site using PyMuonSuite" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/757?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nascent Transcription Processing Pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1004?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/nascent" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nascent" ; + schema1:sdDatePublished "2024-07-12 13:20:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1004/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4457 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:06Z" ; + schema1:dateModified "2024-06-11T12:55:06Z" ; + schema1:description "Nascent Transcription Processing Pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1004?version=4" ; + schema1:keywords "gro-seq, nascent, pro-seq, rna, transcription, tss" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nascent" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1004?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "A workflow querying on an endpoint of a graph database by a file containing a SPARQL query." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/122?version=2" ; + schema1:isBasedOn "https://github.com/longmanplus/EOSC-Life_demos" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for SPARQL query (in a file) on graph database" ; + schema1:sdDatePublished "2024-07-12 13:37:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/122/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 282 ; + schema1:dateCreated "2021-05-26T10:51:51Z" ; + schema1:dateModified "2023-01-16T13:49:55Z" ; + schema1:description "A workflow querying on an endpoint of a graph database by a file containing a SPARQL query." 
; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/122?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "SPARQL query (in a file) on graph database" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/122?version=2" ; + schema1:version 2 ; + ns1:input , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 1556 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.276.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:34:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/276/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6791 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-14T10:06:01Z" ; + schema1:dateModified "2022-04-11T09:29:31Z" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/276?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_md_setup/python/workflow.py" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Mutations Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.289.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_md_setup_mutations/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Protein MD Setup tutorial with mutations" ; + schema1:sdDatePublished "2024-07-12 13:35:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/289/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 74473 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16985 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-17T13:22:24Z" ; + schema1:dateModified "2023-01-16T13:58:33Z" ; + schema1:description """# Mutations Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/289?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Protein MD Setup tutorial with mutations" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_md_setup_mutations/cwl/workflow.cwl" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Takes fastqs and reference data, to produce a single cell counts matrix into and save in annData format - adding a column called sample with the sample name. \r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/646?version=2" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq: Count and Load with Cell Ranger" ; + schema1:sdDatePublished "2024-07-12 13:22:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/646/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16848 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-05-30T05:49:49Z" ; + schema1:dateModified "2024-05-30T05:56:18Z" ; + schema1:description """Takes fastqs and reference data, to produce a single cell counts matrix into and save in annData format - adding a column called sample with the sample name. 
\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/646?version=1" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "scRNAseq: Count and Load with Cell Ranger" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/646?version=2" ; + schema1:version 2 ; + ns1:input , + , + ; + ns1:output . + + a schema1:Dataset ; + schema1:datePublished "2024-06-10T13:24:06.656544" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """From the R1 and R2 fastq files of a single samples, make a scRNAseq counts matrix, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. Produces a processed AnnData \r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/465?version=3" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq Single Sample Processing STARSolo" ; + schema1:sdDatePublished "2024-07-12 13:22:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/465/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 128461 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-11-09T01:58:04Z" ; + schema1:dateModified "2023-11-09T03:52:15Z" ; + schema1:description """From the R1 and R2 fastq files of a single samples, make a scRNAseq counts matrix, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. Produces a processed AnnData \r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/465?version=3" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "scRNAseq Single Sample Processing STARSolo" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/465?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# ![sanger-tol/readmapping](docs/images/sanger-tol-readmapping_logo.png)\r +\r +[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.6563577-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.6563577)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.10.1-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![Launch on Nextflow 
Tower](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Nextflow%20Tower-%234256e7)](https://tower.nf/launch?pipeline=https://github.com/sanger-tol/readmapping)\r +\r +## Introduction\r +\r +**sanger-tol/readmapping** is a bioinformatics best-practice analysis pipeline for mapping reads generated using Illumina, HiC, PacBio and Nanopore technologies against a genome assembly.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +On merge to `dev` and `main` branch, automated continuous integration tests run the pipeline on a full-sized dataset on the Wellcome Sanger Institute HPC farm using the Nextflow Tower infrastructure. This ensures that the pipeline runs on full sized datasets, has sensible resource allocation defaults set to run on real-world datasets, and permits the persistent storage of results to benchmark between pipeline releases and other analysis sources.\r +\r +## Pipeline summary\r +\r +\r +\r +## Quick Start\r +\r +1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=22.10.1`)\r +\r +2. 
Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) (you can follow [this tutorial](https://singularity-tutorial.github.io/01-installation/)), [`Podman`](https://podman.io/), [`Shifter`](https://nersc.gitlab.io/development/shifter/how-to-use/) or [`Charliecloud`](https://hpc.github.io/charliecloud/) for full pipeline reproducibility _(you can use [`Conda`](https://conda.io/miniconda.html) both to install Nextflow itself and also to manage software within pipelines. Please only use it within pipelines as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_.\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```bash\r + nextflow run sanger-tol/readmapping -profile test,YOURPROFILE --outdir \r + ```\r +\r + Note that some form of configuration will be needed so that Nextflow knows how to fetch the required software. This is usually done in the form of a config profile (`YOURPROFILE` in the example command above). You can chain multiple config profiles in a comma-separated string.\r +\r + > - The pipeline comes with config profiles called `docker`, `singularity`, `podman`, `shifter`, `charliecloud` and `conda` which instruct the pipeline to use the named tool for software management. For example, `-profile test,docker`.\r + > - Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment.\r + > - If you are using `singularity`, please use the [`nf-core download`](https://nf-co.re/tools/#downloading-pipelines-for-offline-use) command to download images first, before running the pipeline. 
Setting the [`NXF_SINGULARITY_CACHEDIR` or `singularity.cacheDir`](https://www.nextflow.io/docs/latest/singularity.html?#singularity-docker-hub) Nextflow options enables you to store and re-use the images from a central location for future pipeline runs.\r + > - If you are using `conda`, it is highly recommended to use the [`NXF_CONDA_CACHEDIR` or `conda.cacheDir`](https://www.nextflow.io/docs/latest/conda.html) settings to store the environments in a central location for future pipeline runs.\r +\r +4. Start running your own analysis!\r +\r + ```bash\r + nextflow run sanger-tol/readmapping --input samplesheet.csv --fasta genome.fa.gz --outdir -profile \r + ```\r +\r +## Credits\r +\r +sanger-tol/readmapping was originally written by [Priyanka Surana](https://github.com/priyanka-surana).\r +\r +We thank the following people for their extensive assistance in the development of this pipeline:\r +\r +- [Matthieu Muffato](https://github.com/muffato) for the text logo\r +- [Guoying Qi](https://github.com/gq1) for being able to run tests using Nf-Tower and the Sanger HPC farm\r +\r +## Contributions and Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +For further information or help, don't hesitate to get in touch on the [Slack `#pipelines` channel](https://sangertreeoflife.slack.com/channels/pipelines). 
Please [create an issue](https://github.com/sanger-tol/readmapping/issues/new/choose) on GitHub if you are not on the Sanger slack channel.\r +\r +## Citations\r +\r +If you use sanger-tol/readmapping for your analysis, please cite it using the following doi: [10.5281/zenodo.6563577](https://doi.org/10.5281/zenodo.6563577)\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +This pipeline uses code and infrastructure developed and maintained by the [nf-core](https://nf-co.re) community, reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/665?version=1" ; + schema1:isBasedOn "https://github.com/sanger-tol/readmapping" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for sanger-tol/readmapping v1.1.0 - Hebridean Black" ; + schema1:sdDatePublished "2024-07-12 13:26:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/665/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1641 ; + schema1:creator ; + schema1:dateCreated "2023-11-14T11:55:56Z" ; + schema1:dateModified "2023-11-14T12:00:50Z" ; + schema1:description """# ![sanger-tol/readmapping](docs/images/sanger-tol-readmapping_logo.png)\r +\r +[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.6563577-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.6563577)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.10.1-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![Launch on Nextflow Tower](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Nextflow%20Tower-%234256e7)](https://tower.nf/launch?pipeline=https://github.com/sanger-tol/readmapping)\r +\r +## Introduction\r +\r +**sanger-tol/readmapping** is a bioinformatics best-practice analysis pipeline for mapping reads generated using Illumina, HiC, PacBio and Nanopore technologies against a genome assembly.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. 
Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +On merge to `dev` and `main` branch, automated continuous integration tests run the pipeline on a full-sized dataset on the Wellcome Sanger Institute HPC farm using the Nextflow Tower infrastructure. This ensures that the pipeline runs on full sized datasets, has sensible resource allocation defaults set to run on real-world datasets, and permits the persistent storage of results to benchmark between pipeline releases and other analysis sources.\r +\r +## Pipeline summary\r +\r +\r +\r +## Quick Start\r +\r +1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=22.10.1`)\r +\r +2. Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) (you can follow [this tutorial](https://singularity-tutorial.github.io/01-installation/)), [`Podman`](https://podman.io/), [`Shifter`](https://nersc.gitlab.io/development/shifter/how-to-use/) or [`Charliecloud`](https://hpc.github.io/charliecloud/) for full pipeline reproducibility _(you can use [`Conda`](https://conda.io/miniconda.html) both to install Nextflow itself and also to manage software within pipelines. Please only use it within pipelines as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_.\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```bash\r + nextflow run sanger-tol/readmapping -profile test,YOURPROFILE --outdir \r + ```\r +\r + Note that some form of configuration will be needed so that Nextflow knows how to fetch the required software. This is usually done in the form of a config profile (`YOURPROFILE` in the example command above). 
You can chain multiple config profiles in a comma-separated string.\r +\r + > - The pipeline comes with config profiles called `docker`, `singularity`, `podman`, `shifter`, `charliecloud` and `conda` which instruct the pipeline to use the named tool for software management. For example, `-profile test,docker`.\r + > - Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment.\r + > - If you are using `singularity`, please use the [`nf-core download`](https://nf-co.re/tools/#downloading-pipelines-for-offline-use) command to download images first, before running the pipeline. Setting the [`NXF_SINGULARITY_CACHEDIR` or `singularity.cacheDir`](https://www.nextflow.io/docs/latest/singularity.html?#singularity-docker-hub) Nextflow options enables you to store and re-use the images from a central location for future pipeline runs.\r + > - If you are using `conda`, it is highly recommended to use the [`NXF_CONDA_CACHEDIR` or `conda.cacheDir`](https://www.nextflow.io/docs/latest/conda.html) settings to store the environments in a central location for future pipeline runs.\r +\r +4. 
Start running your own analysis!\r +\r + ```bash\r + nextflow run sanger-tol/readmapping --input samplesheet.csv --fasta genome.fa.gz --outdir -profile \r + ```\r +\r +## Credits\r +\r +sanger-tol/readmapping was originally written by [Priyanka Surana](https://github.com/priyanka-surana).\r +\r +We thank the following people for their extensive assistance in the development of this pipeline:\r +\r +- [Matthieu Muffato](https://github.com/muffato) for the text logo\r +- [Guoying Qi](https://github.com/gq1) for being able to run tests using Nf-Tower and the Sanger HPC farm\r +\r +## Contributions and Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +For further information or help, don't hesitate to get in touch on the [Slack `#pipelines` channel](https://sangertreeoflife.slack.com/channels/pipelines). Please [create an issue](https://github.com/sanger-tol/readmapping/issues/new/choose) on GitHub if you are not on the Sanger slack channel.\r +\r +## Citations\r +\r +If you use sanger-tol/readmapping for your analysis, please cite it using the following doi: [10.5281/zenodo.6563577](https://doi.org/10.5281/zenodo.6563577)\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +This pipeline uses code and infrastructure developed and maintained by the [nf-core](https://nf-co.re) community, reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. 
doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "sanger-tol/readmapping v1.1.0 - Hebridean Black" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/665?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description "Genome assembly: Unicycler-based WF for Klebsiella pneumoniae [Wick et al. Microbial genomics 2017]" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/52?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ONT - Workflow-Wick-et.al." ; + schema1:sdDatePublished "2024-07-12 13:37:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/52/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 22259 ; + schema1:creator ; + schema1:dateCreated "2020-08-05T13:03:51Z" ; + schema1:dateModified "2023-02-13T14:06:46Z" ; + schema1:description "Genome assembly: Unicycler-based WF for Klebsiella pneumoniae [Wick et al. Microbial genomics 2017]" ; + schema1:keywords "ONT" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ONT - Workflow-Wick-et.al." ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/52?version=1" ; + schema1:version 1 ; + ns1:input <#ont___workflow_wick_et_al_-inputs-https://ndownloader.figshare.com/files/8811145>, + <#ont___workflow_wick_et_al_-inputs-https://ndownloader.figshare.com/files/8811148>, + <#ont___workflow_wick_et_al_-inputs-https://ndownloader.figshare.com/files/8812159> . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description "Metagenomics: taxa classification" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/53?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ONT -- Metagenomics-Kraken2-Krona" ; + schema1:sdDatePublished "2024-07-12 13:37:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/53/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7756 ; + schema1:creator ; + schema1:dateCreated "2020-08-05T13:05:34Z" ; + schema1:dateModified "2023-02-13T14:06:46Z" ; + schema1:description "Metagenomics: taxa classification" ; + schema1:keywords "ONT" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ONT -- Metagenomics-Kraken2-Krona" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/53?version=1" ; + schema1:version 1 ; + ns1:input . + + a schema1:Dataset ; + schema1:datePublished "2023-02-16T09:30:08.832459" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/fragment-based-docking-scoring" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "fragment-based-docking-scoring/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.5" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Galaxy Workflow Documentation: In-Silico Mass Spectra Prediction Using Semi-Empirical Quantum Chemistry\r +\r +\r +## Overview\r +\r +This workflow predicts in-silico mass spectra using a semi-empirical quantum chemistry method. 
It involves generating and optimizing molecular conformers and simulating their mass spectra with computational chemistry tools. The workflow receives an SDF file as input and outputs the mass spectrum in MSP file format.\r +\r +\r +\r +\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.897.1" ; + schema1:isBasedOn "https://github.com/RECETOX/workflow-testing/tree/main/QCxMS" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for End-to-end spectra predictions: multi atoms dataset" ; + schema1:sdDatePublished "2024-07-12 13:18:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/897/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 19226 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-03T13:52:34Z" ; + schema1:dateModified "2024-06-06T10:42:41Z" ; + schema1:description """# Galaxy Workflow Documentation: In-Silico Mass Spectra Prediction Using Semi-Empirical Quantum Chemistry\r +\r +\r +## Overview\r +\r +This workflow predicts in-silico mass spectra using a semi-empirical quantum chemistry method. It involves generating and optimizing molecular conformers and simulating their mass spectra with computational chemistry tools. The workflow receives an SDF file as input and outputs the mass spectrum in MSP file format.\r +\r +\r +\r +\r +\r +""" ; + schema1:image ; + schema1:keywords "Exposomics, QCxMS, GC-MS, Metabolomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "End-to-end spectra predictions: multi atoms dataset" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/897?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + ; + ns1:output , + , + , + , + , + , + , + . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 634182 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "This workflow correspond to the Genome-wide alternative splicing analysis training. It allows to analyze isoform switching by making use of IsoformSwitchAnalyzeR." ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/472?version=4" ; + schema1:license "CC-BY-NC-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genome-wide alternative splicing analysis" ; + schema1:sdDatePublished "2024-07-12 13:33:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/472/ro_crate?version=4" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 21107 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 81080 ; + schema1:creator ; + schema1:dateCreated "2023-05-25T21:57:53Z" ; + schema1:dateModified "2023-05-25T21:58:58Z" ; + schema1:description "This workflow correspond to the Genome-wide alternative splicing analysis training. It allows to analyze isoform switching by making use of IsoformSwitchAnalyzeR." ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/472?version=3" ; + schema1:keywords "Transcriptomics, alternative-splicing, GTN" ; + schema1:license "https://spdx.org/licenses/CC-BY-NC-4.0" ; + schema1:name "Genome-wide alternative splicing analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/472?version=4" ; + schema1:version 4 ; + ns1:input , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 46064 . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 54463 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-07-12 13:22:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5189 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:11Z" ; + schema1:dateModified "2024-06-11T12:55:11Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=3" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Exome SAMtools Workflow" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/240?version=1" ; + schema1:isBasedOn "https://github.com/inab/ipc_workflows/tree/main/exome/samtools" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for exome-samtools" ; + schema1:sdDatePublished "2024-07-12 13:36:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/240/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1464 ; + schema1:creator ; + schema1:dateCreated "2021-11-19T10:11:36Z" ; + schema1:dateModified "2023-01-16T13:55:08Z" ; + schema1:description "Exome SAMtools Workflow" ; + schema1:keywords "cancer, pediatric, SAMTools" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "exome-samtools" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/240?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-03-13T17:18:04.078159" ; + schema1:hasPart , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/velocyto" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + ; + schema1:name "velocyto/Velocyto-on10X-from-bundled" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "velocyto/Velocyto-on10X-filtered-barcodes" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/velocyto" ; + schema1:version "0.2" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Conformational Transitions calculations tutorial using BioExcel Building Blocks (biobb) and GOdMD\r +\r +This tutorial aims to illustrate the process of computing a conformational transition between two known structural conformations of a protein, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.548.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_godmd" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein Conformational Transitions calculations tutorial" ; + 
schema1:sdDatePublished "2024-07-12 13:32:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/548/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 19968 ; + schema1:creator , + ; + schema1:dateCreated "2023-08-02T09:35:32Z" ; + schema1:dateModified "2023-08-02T09:43:19Z" ; + schema1:description """# Protein Conformational Transitions calculations tutorial using BioExcel Building Blocks (biobb) and GOdMD\r +\r +This tutorial aims to illustrate the process of computing a conformational transition between two known structural conformations of a protein, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/548?version=1" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein Conformational Transitions calculations tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_wf_godmd/blob/a5e8e564a4fb8d90abc84ecac0a5322f17b2b562/biobb_wf_godmd/notebooks/biobb_wf_godmd.ipynb" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 6311 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "A.0.0" ; + schema1:sdDatePublished "2024-04-30T10:49:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "A.0.1" ; + schema1:sdDatePublished "2024-04-30T10:49:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "A.1.0" ; + schema1:sdDatePublished "2024-04-30T10:49:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "A.1.1" ; + schema1:sdDatePublished "2024-04-30T10:49:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "B.0.0" ; + schema1:sdDatePublished "2024-04-30T10:49:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "B.0.1" ; + schema1:sdDatePublished "2024-04-30T10:49:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "B.1.0" ; + schema1:sdDatePublished "2024-04-30T10:49:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "B.1.1" ; + schema1:sdDatePublished "2024-04-30T10:49:33+00:00" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/976?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/cutandrun" ; + schema1:sdDatePublished "2024-07-12 13:21:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/976/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10767 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:46Z" ; + schema1:dateModified "2024-06-11T12:54:46Z" ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/976?version=7" ; + schema1:keywords "cutandrun, cutandrun-seq, cutandtag, cutandtag-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/cutandrun" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/976?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.286.4" ; + 
schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_dna_helparms/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Structural DNA helical parameters tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/286/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 22900 ; + schema1:creator , + ; + schema1:dateCreated "2023-07-26T15:48:55Z" ; + schema1:dateModified "2023-07-26T15:53:47Z" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/286?version=3" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Structural DNA helical parameters tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_dna_helparms/python/workflow.py" ; + schema1:version 4 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """![Perl CI](https://github.com/FabianDeister/Library_curation_BOLD/actions/workflows/ci.yml/badge.svg)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.10975576.svg)](https://doi.org/10.5281/zenodo.10975576)\r +\r +# Library curation BOLD\r +\r +![alt text](https://github.com/FabianDeister/Library_curation_BOLD/blob/main/doc/IBOL_LOGO_TRANSPARENT.png?raw=true)\r +\r +This repository contains scripts and synonymy data for pipelining the \r +automated curation of [BOLD](https://boldsystems.org) data dumps in \r +BCDM TSV format. The goal is to implement the classification of barcode \r +reference sequences as is being developed by the \r +[BGE](https://biodiversitygenomics.eu) consortium. A living document\r +in which these criteria are being developed is located\r +[here](https://docs.google.com/document/d/18m-7UnoJTG49TbvTsq_VncKMYZbYVbau98LE_q4rQvA/edit).\r +\r +A further goal of this project is to develop the code in this repository\r +according to the standards developed by the community in terms of automation,\r +reproducibility, and provenance. In practice, this means including the\r +scripts in a pipeline system such as [snakemake](https://snakemake.readthedocs.io/),\r +adopting an environment configuration system such as\r +[conda](https://docs.conda.io/), and organizing the folder structure\r +in compliance with the requirements of\r +[WorkFlowHub](https://workflowhub.eu/). 
The latter will provide it with \r +a DOI and will help generate [RO-crate](https://www.researchobject.org/ro-crate/)\r +documents, which means the entire tool chain is FAIR compliant according\r +to the current state of the art.\r +\r +## Install\r +Clone the repo:\r +```{shell}\r +git clone https://github.com/FabianDeister/Library_curation_BOLD.git\r +```\r +Change directory: \r +```{shell}\r +cd Library_curation_BOLD\r +```\r +The code in this repo depends on various tools. These are managed using\r +the `mamba` program (a drop-in replacement of `conda`). The following\r +sets up an environment in which all needed tools are installed:\r +\r +```{shell}\r +mamba env create -f environment.yml\r +```\r +\r +Once set up, this is activated like so:\r +\r +```{shell}\r +mamba activate bold-curation\r +```\r +\r +## How to run\r +### Bash\r +Although the aim of this project is to integrate all steps of the process\r +in a simple snakemake pipeline, at present this is not implemented. Instead,\r +the steps are executed individually on the command line as perl scripts\r +within the conda/mamba environment. Because the current project has its own\r +perl modules in the `lib` folder, every script needs to be run with the \r +additional include flag to add the module folder to the search path. 
Hence,\r +the invocation looks like the following inside the scripts folder:\r +\r +```{shell}\r +perl -I../../lib scriptname.pl -arg1 val1 -arg2 val2\r +```\r +### snakemake\r +\r +Follow the installation instructions above.\r +\r +Update config/config.yml to define your input data.\r +\r +Navigate to the directory "workflow" and type:\r +```{shell}\r +snakemake -p -c {number of cores} target\r +```\r +\r +If running on an HPC cluster with a SLURM scheduler you could use a bash script like this one:\r +```{shell}\r +#!/bin/bash\r +#SBATCH --partition=hour\r +#SBATCH --output=job_curate_bold_%j.out\r +#SBATCH --error=job_curate_bold_%j.err\r +#SBATCH --mem=24G\r +#SBATCH --cpus-per-task=2\r +\r +source activate bold-curation\r +\r +snakemake -p -c 2 target\r +\r +echo Complete!\r +```\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.833.1" ; + schema1:isBasedOn "https://github.com/FabianDeister/Library_curation_BOLD" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Library curation BOLD" ; + schema1:sdDatePublished "2024-07-12 13:23:04 +0100" ; + schema1:url "https://workflowhub.eu/workflows/833/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 27275 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13728 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-04-24T08:51:29Z" ; + schema1:dateModified "2024-04-24T09:11:55Z" ; + schema1:description """![Perl CI](https://github.com/FabianDeister/Library_curation_BOLD/actions/workflows/ci.yml/badge.svg)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.10975576.svg)](https://doi.org/10.5281/zenodo.10975576)\r +\r +# Library curation BOLD\r +\r +![alt text](https://github.com/FabianDeister/Library_curation_BOLD/blob/main/doc/IBOL_LOGO_TRANSPARENT.png?raw=true)\r +\r +This repository contains scripts and synonymy data for pipelining the \r +automated curation of [BOLD](https://boldsystems.org) data dumps in \r +BCDM TSV format. The goal is to implement the classification of barcode \r +reference sequences as is being developed by the \r +[BGE](https://biodiversitygenomics.eu) consortium. A living document\r +in which these criteria are being developed is located\r +[here](https://docs.google.com/document/d/18m-7UnoJTG49TbvTsq_VncKMYZbYVbau98LE_q4rQvA/edit).\r +\r +A further goal of this project is to develop the code in this repository\r +according to the standards developed by the community in terms of automation,\r +reproducibility, and provenance. In practice, this means including the\r +scripts in a pipeline system such as [snakemake](https://snakemake.readthedocs.io/),\r +adopting an environment configuration system such as\r +[conda](https://docs.conda.io/), and organizing the folder structure\r +in compliance with the requirements of\r +[WorkFlowHub](https://workflowhub.eu/). 
The latter will provide it with \r +a DOI and will help generate [RO-crate](https://www.researchobject.org/ro-crate/)\r +documents, which means the entire tool chain is FAIR compliant according\r +to the current state of the art.\r +\r +## Install\r +Clone the repo:\r +```{shell}\r +git clone https://github.com/FabianDeister/Library_curation_BOLD.git\r +```\r +Change directory: \r +```{shell}\r +cd Library_curation_BOLD\r +```\r +The code in this repo depends on various tools. These are managed using\r +the `mamba` program (a drop-in replacement of `conda`). The following\r +sets up an environment in which all needed tools are installed:\r +\r +```{shell}\r +mamba env create -f environment.yml\r +```\r +\r +Once set up, this is activated like so:\r +\r +```{shell}\r +mamba activate bold-curation\r +```\r +\r +## How to run\r +### Bash\r +Although the aim of this project is to integrate all steps of the process\r +in a simple snakemake pipeline, at present this is not implemented. Instead,\r +the steps are executed individually on the command line as perl scripts\r +within the conda/mamba environment. Because the current project has its own\r +perl modules in the `lib` folder, every script needs to be run with the \r +additional include flag to add the module folder to the search path. 
Hence,\r +the invocation looks like the following inside the scripts folder:\r +\r +```{shell}\r +perl -I../../lib scriptname.pl -arg1 val1 -arg2 val2\r +```\r +### snakemake\r +\r +Follow the installation instructions above.\r +\r +Update config/config.yml to define your input data.\r +\r +Navigate to the directory "workflow" and type:\r +```{shell}\r +snakemake -p -c {number of cores} target\r +```\r +\r +If running on an HPC cluster with a SLURM scheduler you could use a bash script like this one:\r +```{shell}\r +#!/bin/bash\r +#SBATCH --partition=hour\r +#SBATCH --output=job_curate_bold_%j.out\r +#SBATCH --error=job_curate_bold_%j.err\r +#SBATCH --mem=24G\r +#SBATCH --cpus-per-task=2\r +\r +source activate bold-curation\r +\r +snakemake -p -c 2 target\r +\r +echo Complete!\r +```\r +""" ; + schema1:image ; + schema1:keywords "dna barcoding" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Library curation BOLD" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/833?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Automated quantitative analysis of DIA proteomics mass spectrometry measurements." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/980?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/diaproteomics" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/diaproteomics" ; + schema1:sdDatePublished "2024-07-12 13:21:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/980/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6507 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "Automated quantitative analysis of DIA proteomics mass spectrometry measurements." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/980?version=6" ; + schema1:keywords "data-independent-proteomics, dia-proteomics, openms, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/diaproteomics" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/980?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/991?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/hlatyping" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hlatyping" ; + schema1:sdDatePublished "2024-07-12 13:18:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/991/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3272 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:55Z" ; + schema1:dateModified "2024-06-11T12:54:55Z" ; + schema1:description "Precision HLA typing from next-generation sequencing data." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/991?version=9" ; + schema1:keywords "DNA, hla, hla-typing, immunology, optitype, personalized-medicine, rna" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hlatyping" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/991?version=2" ; + schema1:version 2 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 13430 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 2868 ; + schema1:dateModified "2020-09-21T10:34:27" ; + schema1:name "file0.txt" ; + schema1:sdDatePublished "2023-11-02T10:55:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2811 ; + schema1:dateModified "2020-09-21T10:34:27" ; + schema1:name "file1.txt" ; + schema1:sdDatePublished "2023-11-02T10:55:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2961 ; + schema1:dateModified "2020-09-21T10:34:27" ; + schema1:name "file2.txt" ; + schema1:sdDatePublished "2023-11-02T10:55:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2808 ; + schema1:dateModified "2020-09-21T10:34:27" ; + schema1:name "file3.txt" ; + schema1:sdDatePublished "2023-11-02T10:55:02+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=18" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-07-12 13:18:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=18" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12290 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:40Z" ; + schema1:dateModified "2024-06-11T12:54:40Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=18" ; + schema1:version 18 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "extract 1 Id from SRA and assume it is PE as input to viralRNASpades." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/434?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for extract SRA + viralRNAspades (PE)" ; + schema1:sdDatePublished "2024-07-12 13:34:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/434/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6165 ; + schema1:dateCreated "2023-02-10T10:05:10Z" ; + schema1:dateModified "2023-02-10T10:05:10Z" ; + schema1:description "extract 1 Id from SRA and assume it is PE as input to viralRNASpades." 
; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "extract SRA + viralRNAspades (PE)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/434?version=1" ; + schema1:version 1 ; + ns1:output , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "The workflow takes a trimmed HiFi reads collection, Forward/Reverse HiC reads, and the max coverage depth (calculated from WF1) to run Hifiasm in HiC phasing mode. It produces both Pri/Alt and Hap1/Hap2 assemblies, and runs all the QC analysis (gfastats, BUSCO, and Merqury). The default Hifiasm purge level is Light (l1)." ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.605.1" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/galaxy" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ERGA HiFi+HiC Assembly+QC Hifiasm v2309 (WF2)" ; + schema1:sdDatePublished "2024-07-12 13:27:15 +0100" ; + schema1:url "https://workflowhub.eu/workflows/605/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 284296 ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/2.Contigging/pics/Cont_hifiasm_hic_2309.png" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 70426 ; + schema1:creator , + ; + schema1:dateCreated "2023-10-09T12:47:49Z" ; + schema1:dateModified "2024-03-13T09:52:13Z" ; + schema1:description "The workflow takes a trimmed HiFi reads collection, Forward/Reverse HiC reads, and the max coverage depth (calculated from WF1) to run Hifiasm in HiC phasing mode. 
It produces both Pri/Alt and Hap1/Hap2 assemblies, and runs all the QC analysis (gfastats, BUSCO, and Merqury). The default Hifiasm purge level is Light (l1)." ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "ERGA, Assembly+QC, Hi-C, HiFi" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ERGA HiFi+HiC Assembly+QC Hifiasm v2309 (WF2)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/2.Contigging/Galaxy-Workflow-ERGA_HiFi_HiC_Assembly_QC_Hifiasm_v2309_(WF2).ga" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """CWL version of the md_list.cwl workflow for HPC.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/98?version=2" ; + schema1:isBasedOn "https://github.com/douglowe/biobb_hpc_cwl_md_list" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Example of setting up a simulation system" ; + schema1:sdDatePublished "2024-07-12 13:37:18 +0100" ; + schema1:url "https://workflowhub.eu/workflows/98/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 33936 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6609 ; + schema1:dateCreated "2021-01-29T16:56:33Z" ; + schema1:dateModified "2023-01-16T13:46:36Z" ; + schema1:description """CWL version of the md_list.cwl workflow for HPC.\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/98?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Example of setting up a simulation system" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/98?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + ; + ns1:output . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Summary\r +This notebook shows how to integrate genomic and image data resources. 
This notebook looks at the question **Which diabetes related genes are expressed in the pancreas?** \r +\r +Steps:\r +\r +* Query humanmine.org, an integrated database of Homo sapiens genomic data using the intermine API to find the genes.\r +* Using the list of found genes, search in the Image Data Resource (IDR) for images linked to the genes, tissue and disease.\r +* \r +We use the intermine API and the IDR API\r +\r +The notebook can be launched in [My Binder](https://mybinder.org/v2/gh/IDR/idr-notebooks/master?urlpath=notebooks%2Fhumanmine.ipynb)\r +\r +# Inputs\r +Parameters needed to configure the workflow:\r +\r +* TISSUE = "Pancreas" \r +* DISEASE = "diabetes"\r +\r +# Ouputs\r +* List of genes found using HumanMine\r +* List of images from IDR for one of the gene found""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.242.1" ; + schema1:isBasedOn "https://github.com/IDR/idr-notebooks" ; + schema1:license "BSD-2-Clause" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Diabetes related genes expressed in pancreas" ; + schema1:sdDatePublished "2024-07-12 13:36:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/242/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 1135634 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 0 ; + schema1:creator ; + schema1:dateCreated "2021-11-23T11:28:07Z" ; + schema1:dateModified "2023-01-16T13:55:09Z" ; + schema1:description """# Summary\r +This notebook shows how to integrate genomic and image data resources. 
This notebook looks at the question **Which diabetes related genes are expressed in the pancreas?** \r +\r +Steps:\r +\r +* Query humanmine.org, an integrated database of Homo sapiens genomic data using the intermine API to find the genes.\r +* Using the list of found genes, search in the Image Data Resource (IDR) for images linked to the genes, tissue and disease.\r +* \r +We use the intermine API and the IDR API\r +\r +The notebook can be launched in [My Binder](https://mybinder.org/v2/gh/IDR/idr-notebooks/master?urlpath=notebooks%2Fhumanmine.ipynb)\r +\r +# Inputs\r +Parameters needed to configure the workflow:\r +\r +* TISSUE = "Pancreas" \r +* DISEASE = "diabetes"\r +\r +# Ouputs\r +* List of genes found using HumanMine\r +* List of images from IDR for one of the gene found""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/BSD-2-Clause" ; + schema1:name "Diabetes related genes expressed in pancreas" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/IDR/idr-notebooks/blob/master/humanmine.ipynb" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """From the R1 and R2 fastq files of a single samples, make a scRNAseq counts matrix, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. Produces a processed AnnData \r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/465?version=1" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq_SingleSampleProcessing_STARSolo" ; + schema1:sdDatePublished "2024-07-12 13:22:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/465/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 121325 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-05-05T06:29:11Z" ; + schema1:dateModified "2023-05-05T06:32:22Z" ; + schema1:description """From the R1 and R2 fastq files of a single samples, make a scRNAseq counts matrix, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. Produces a processed AnnData \r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/465?version=3" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "scRNAseq_SingleSampleProcessing_STARSolo" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/465?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/989?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/hic" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hic" ; + schema1:sdDatePublished "2024-07-12 13:21:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/989/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4842 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:54Z" ; + schema1:dateModified "2024-06-11T12:54:54Z" ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/989?version=7" ; + schema1:keywords "chromosome-conformation-capture, Hi-C" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hic" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/989?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and modern ancient DNA pipeline in Nextflow and with cloud support." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-07-12 13:21:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3643 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:49Z" ; + schema1:dateModified "2024-06-11T12:54:49Z" ; + schema1:description "A fully reproducible and modern ancient DNA pipeline in Nextflow and with cloud support." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-wgs-variant-calling" ; + schema1:mainEntity ; + schema1:name "COVID-19-PE-WGS-ILLUMINA (v0.1)" ; + schema1:sdDatePublished "2021-03-12 13:41:30 +0000" ; + schema1:softwareVersion "v0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 34917 ; + schema1:name "COVID-19-PE-WGS-ILLUMINA" ; + schema1:programmingLanguage . + + a schema1:Dataset ; + schema1:datePublished "2023-09-01T09:38:56.153751" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "cutandrun/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.10" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """\r +\r +![Logo](https://cbg-ethz.github.io/V-pipe/img/logo.svg)\r +\r +[![bio.tools](https://img.shields.io/badge/bio-tools-blue.svg)](https://bio.tools/V-Pipe)\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥7.11.0-blue.svg)](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe)\r +[![Deploy Docker image](https://github.com/cbg-ethz/V-pipe/actions/workflows/deploy-docker.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe)\r +[![Tests](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml)\r +[![Mega-Linter](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml)\r +[![License: Apache-2.0](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\r +\r +V-pipe is a workflow designed for the analysis of next generation sequencing (NGS) data from viral pathogens. It produces a number of results in a curated format (e.g., consensus sequences, SNV calls, local/global haplotypes).\r +V-pipe is written using the Snakemake workflow management system.\r +\r +## Usage\r +\r +Different ways of initializing V-pipe are presented below. We strongly encourage you to deploy it [using the quick install script](#using-quick-install-script), as this is our preferred method.\r +\r +To configure V-pipe refer to the documentation present in [config/README.md](config/README.md).\r +\r +V-pipe expects the input samples to be organized in a [two-level](config/README.md#samples) directory hierarchy,\r +and the sequencing reads must be provided in a sub-folder named `raw_data`. 
Further details can be found on the [website](https://cbg-ethz.github.io/V-pipe/usage/).\r +Check the utils subdirectory for [mass-importers tools](utils/README.md#samples-mass-importers) that can assist you in generating this hierarchy.\r +\r +We provide [virus-specific base configuration files](config/README.md#virus-base-config) which contain handy defaults for, e.g., HIV and SARS-CoV-2. Set the virus in the general section of the configuration file:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +```\r +\r +Also see [snakemake's documentation](https://snakemake.readthedocs.io/en/stable/executing/cli.html) to learn more about the command-line options available when executing the workflow.\r +\r +\r +### Tutorials\r +\r +Tutorials for your first steps with V-pipe for different scenarios are available in the [docs/](docs/README.md) subdirectory.\r +\r +\r +### Using quick install script\r +\r +To deploy V-pipe, use the [installation script](utils/README.md#quick-installer) with the following parameters:\r +\r +```bash\r +curl -O 'https://raw.githubusercontent.com/cbg-ethz/V-pipe/master/utils/quick_install.sh'\r +./quick_install.sh -w work\r +```\r +\r +This script will download and install miniconda, checkout the V-pipe git repository (use `-b` to specify which branch/tag) and setup a work directory (specified with `-w`) with an executable script that will execute the workflow:\r +\r +```bash\r +cd work\r +# edit config.yaml and provide samples/ directory\r +./vpipe --jobs 4 --printshellcmds --dry-run\r +```\r +\r +Test data to test your installation is available with the tutorials provided in the [docs/](docs/README.md) subdirectory.\r +\r +### Using Docker\r +\r +Note: the [docker image](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe) is only setup with components to run the workflow for HIV and SARS-CoV-2 virus base configurations.\r +Using V-pipe with other viruses or configurations might require internet connectivity for additional software 
components.\r +\r +Create `config.yaml` or `vpipe.config` and then populate the `samples/` directory.\r +For example, the following config file could be used:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +\r +output:\r + snv: true\r + local: true\r + global: false\r + visualization: true\r + QA: true\r +```\r +\r +Then execute:\r +\r +```bash\r +docker run --rm -it -v $PWD:/work ghcr.io/cbg-ethz/v-pipe:master --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Snakedeploy\r +\r +First install [mamba](https://github.com/conda-forge/miniforge#mambaforge), then create and activate an environment with Snakemake and Snakedeploy:\r +\r +```bash\r +mamba create -c conda-forge -c bioconda --name snakemake snakemake snakedeploy\r +conda activate snakemake\r +```\r +\r +Snakemake's [official workflow installer Snakedeploy](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe) can now be used:\r +\r +```bash\r +snakedeploy deploy-workflow https://github.com/cbg-ethz/V-pipe --tag master .\r +# edit config/config.yaml and provide samples/ directory\r +snakemake --use-conda --jobs 4 --printshellcmds --dry-run\r +```\r +\r +## Dependencies\r +\r +- **[Conda](https://conda.io/docs/index.html)**\r +\r + Conda is a cross-platform package management system and an environment manager application. Snakemake uses mamba as a package manager.\r +\r +- **[Snakemake](https://snakemake.readthedocs.io/)**\r +\r + Snakemake is the central workflow and dependency manager of V-pipe. It determines the order in which individual tools are invoked and checks that programs do not exit unexpectedly.\r +\r +- **[VICUNA](https://www.broadinstitute.org/viral-genomics/vicuna)**\r +\r + VICUNA is a _de novo_ assembly software designed for populations with high mutation rates. It is used to build an initial reference for mapping reads with ngshmmalign aligner when a `references/cohort_consensus.fasta` file is not provided. 
Further details can be found in the [wiki](https://github.com/cbg-ethz/V-pipe/wiki/getting-started#input-files) pages.\r
+\r
+### Computational tools\r
+\r
+Other dependencies are managed by using isolated conda environments per rule, and below we list some of the computational tools integrated in V-pipe:\r
+\r
+- **[FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)**\r
+\r
+ FastQC gives an overview of the raw sequencing data. Flowcells that have been overloaded or otherwise fail during sequencing can easily be determined with FastQC.\r
+\r
+- **[PRINSEQ](http://prinseq.sourceforge.net/)**\r
+\r
+ Trimming and clipping of reads is performed by PRINSEQ. It is currently the most versatile raw read processor with many customization options.\r
+\r
+- **[ngshmmalign](https://github.com/cbg-ethz/ngshmmalign)**\r
+\r
+ We perform the alignment of the curated NGS data using our custom ngshmmalign that takes structural variants into account. It produces multiple consensus sequences that include either majority bases or ambiguous bases.\r
+\r
+- **[bwa](https://github.com/lh3/bwa)**\r
+\r
+ In order to detect specific cross-contaminations with other probes, the Burrows-Wheeler aligner is used. It quickly yields estimates for foreign genomic material in an experiment.\r
+ Additionally, it can be used as an alternative aligner to ngshmmalign.\r
+\r
+- **[MAFFT](http://mafft.cbrc.jp/alignment/software/)**\r
+\r
+ To standardise multiple samples to the same reference genome (say HXB2 for HIV-1), the multiple sequence aligner MAFFT is employed. The multiple sequence alignment helps in determining regions of low conservation and thus makes standardisation of alignments more robust.\r
+\r
+- **[Samtools and bcftools](https://www.htslib.org/)**\r
+\r
+ The Swiss Army knife of alignment postprocessing and diagnostics. 
bcftools is also used to generate consensus sequence with indels.\r
+\r
+- **[SmallGenomeUtilities](https://github.com/cbg-ethz/smallgenomeutilities)**\r
+\r
+ We perform genomic liftovers to standardised reference genomes using our in-house developed python library of utilities for rewriting alignments.\r
+\r
+- **[ShoRAH](https://github.com/cbg-ethz/shorah)**\r
+\r
+ ShoRAH performs SNV calling and local haplotype reconstruction by using Bayesian clustering.\r
+\r
+- **[LoFreq](https://csb5.github.io/lofreq/)**\r
+\r
+ LoFreq (version 2) is an SNV and indel caller from next-generation sequencing data, and can be used as an alternative engine for SNV calling.\r
+\r
+- **[SAVAGE](https://bitbucket.org/jbaaijens/savage) and [Haploclique](https://github.com/cbg-ethz/haploclique)**\r
+\r
+ We use HaploClique or SAVAGE to perform global haplotype reconstruction for heterogeneous viral populations by using an overlap graph.\r
+\r
+## Citation\r
+\r
+If you use this software in your research, please cite:\r
+\r
+Fuhrmann, L., Jablonski, K. P., Topolsky, I., Batavia, A. A., Borgsmueller, N., Icer Baykal, P., Carrara, M. ... 
& Beerenwinkel, (2023).\r +"V-Pipe 3.0: A Sustainable Pipeline for Within-Sample Viral Genetic Diversity Estimation."\r +_bioRxiv_, doi:[10.1101/2023.10.16.562462](https://doi.org/10.1101/2023.10.16.562462).\r +\r +## Contributions\r +\r +- [Ivan Topolsky\\* ![orcid]](https://orcid.org/0000-0002-7561-0810), [![github]](https://github.com/dryak)\r +- [Pelin Icer Baykal ![orcid]](https://orcid.org/0000-0002-9542-5292), [![github]](https://github.com/picerbaykal)\r +- [Kim Philipp Jablonski ![orcid]](https://orcid.org/0000-0002-4166-4343), [![github]](https://github.com/kpj)\r +- [Lara Fuhrmann ![orcid]](https://orcid.org/0000-0001-6405-0654), [![github]](https://github.com/LaraFuhrmann)\r +- [Uwe Schmitt ![orcid]](https://orcid.org/0000-0002-4658-0616), [![github]](https://github.com/uweschmitt)\r +- [Michal Okoniewski ![orcid]](https://orcid.org/0000-0003-4722-4506), [![github]](https://github.com/michalogit)\r +- [Monica Dragan ![orcid]](https://orcid.org/0000-0002-7719-5892), [![github]](https://github.com/monicadragan)\r +- [Susana Posada Céspedes ![orcid]](https://orcid.org/0000-0002-7459-8186), [![github]](https://github.com/sposadac)\r +- [David Seifert ![orcid]](https://orcid.org/0000-0003-4739-5110), [![github]](https://github.com/SoapZA)\r +- Tobias Marschall\r +- [Niko Beerenwinkel\\*\\* ![orcid]](https://orcid.org/0000-0002-0573-6119)\r +\r +\\* software maintainer ;\r +\\** group leader\r +\r +[github]: https://cbg-ethz.github.io/V-pipe/img/mark-github.svg\r +[orcid]: https://cbg-ethz.github.io/V-pipe/img/ORCIDiD_iconvector.svg\r +\r +## Contact\r +\r +We encourage users to use the [issue tracker](https://github.com/cbg-ethz/V-pipe/issues). 
For further enquiries, you can also contact the V-pipe Dev Team .\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.301.5" ; + schema1:isBasedOn "https://github.com/cbg-ethz/V-pipe.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for V-pipe (main multi-virus version)" ; + schema1:sdDatePublished "2024-07-12 13:18:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/301/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1634 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-10T18:38:49Z" ; + schema1:dateModified "2024-06-10T18:39:57Z" ; + schema1:description """\r +\r +![Logo](https://cbg-ethz.github.io/V-pipe/img/logo.svg)\r +\r +[![bio.tools](https://img.shields.io/badge/bio-tools-blue.svg)](https://bio.tools/V-Pipe)\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥7.11.0-blue.svg)](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe)\r +[![Deploy Docker image](https://github.com/cbg-ethz/V-pipe/actions/workflows/deploy-docker.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe)\r +[![Tests](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml)\r +[![Mega-Linter](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml)\r +[![License: Apache-2.0](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\r +\r +V-pipe is a workflow designed for the analysis of next generation sequencing (NGS) data from viral pathogens. 
It produces a number of results in a curated format (e.g., consensus sequences, SNV calls, local/global haplotypes).\r +V-pipe is written using the Snakemake workflow management system.\r +\r +## Usage\r +\r +Different ways of initializing V-pipe are presented below. We strongly encourage you to deploy it [using the quick install script](#using-quick-install-script), as this is our preferred method.\r +\r +To configure V-pipe refer to the documentation present in [config/README.md](config/README.md).\r +\r +V-pipe expects the input samples to be organized in a [two-level](config/README.md#samples) directory hierarchy,\r +and the sequencing reads must be provided in a sub-folder named `raw_data`. Further details can be found on the [website](https://cbg-ethz.github.io/V-pipe/usage/).\r +Check the utils subdirectory for [mass-importers tools](utils/README.md#samples-mass-importers) that can assist you in generating this hierarchy.\r +\r +We provide [virus-specific base configuration files](config/README.md#virus-base-config) which contain handy defaults for, e.g., HIV and SARS-CoV-2. 
Set the virus in the general section of the configuration file:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +```\r +\r +Also see [snakemake's documentation](https://snakemake.readthedocs.io/en/stable/executing/cli.html) to learn more about the command-line options available when executing the workflow.\r +\r +\r +### Tutorials\r +\r +Tutorials for your first steps with V-pipe for different scenarios are available in the [docs/](docs/README.md) subdirectory.\r +\r +\r +### Using quick install script\r +\r +To deploy V-pipe, use the [installation script](utils/README.md#quick-installer) with the following parameters:\r +\r +```bash\r +curl -O 'https://raw.githubusercontent.com/cbg-ethz/V-pipe/master/utils/quick_install.sh'\r +./quick_install.sh -w work\r +```\r +\r +This script will download and install miniconda, checkout the V-pipe git repository (use `-b` to specify which branch/tag) and setup a work directory (specified with `-w`) with an executable script that will execute the workflow:\r +\r +```bash\r +cd work\r +# edit config.yaml and provide samples/ directory\r +./vpipe --jobs 4 --printshellcmds --dry-run\r +```\r +\r +Test data to test your installation is available with the tutorials provided in the [docs/](docs/README.md) subdirectory.\r +\r +### Using Docker\r +\r +Note: the [docker image](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe) is only setup with components to run the workflow for HIV and SARS-CoV-2 virus base configurations.\r +Using V-pipe with other viruses or configurations might require internet connectivity for additional software components.\r +\r +Create `config.yaml` or `vpipe.config` and then populate the `samples/` directory.\r +For example, the following config file could be used:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +\r +output:\r + snv: true\r + local: true\r + global: false\r + visualization: true\r + QA: true\r +```\r +\r +Then execute:\r +\r +```bash\r +docker run --rm -it -v $PWD:/work 
ghcr.io/cbg-ethz/v-pipe:master --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Snakedeploy\r +\r +First install [mamba](https://github.com/conda-forge/miniforge#mambaforge), then create and activate an environment with Snakemake and Snakedeploy:\r +\r +```bash\r +mamba create -c conda-forge -c bioconda --name snakemake snakemake snakedeploy\r +conda activate snakemake\r +```\r +\r +Snakemake's [official workflow installer Snakedeploy](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe) can now be used:\r +\r +```bash\r +snakedeploy deploy-workflow https://github.com/cbg-ethz/V-pipe --tag master .\r +# edit config/config.yaml and provide samples/ directory\r +snakemake --use-conda --jobs 4 --printshellcmds --dry-run\r +```\r +\r +## Dependencies\r +\r +- **[Conda](https://conda.io/docs/index.html)**\r +\r + Conda is a cross-platform package management system and an environment manager application. Snakemake uses mamba as a package manager.\r +\r +- **[Snakemake](https://snakemake.readthedocs.io/)**\r +\r + Snakemake is the central workflow and dependency manager of V-pipe. It determines the order in which individual tools are invoked and checks that programs do not exit unexpectedly.\r +\r +- **[VICUNA](https://www.broadinstitute.org/viral-genomics/vicuna)**\r +\r + VICUNA is a _de novo_ assembly software designed for populations with high mutation rates. It is used to build an initial reference for mapping reads with ngshmmalign aligner when a `references/cohort_consensus.fasta` file is not provided. 
Further details can be found in the [wiki](https://github.com/cbg-ethz/V-pipe/wiki/getting-started#input-files) pages.\r
+\r
+### Computational tools\r
+\r
+Other dependencies are managed by using isolated conda environments per rule, and below we list some of the computational tools integrated in V-pipe:\r
+\r
+- **[FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)**\r
+\r
+ FastQC gives an overview of the raw sequencing data. Flowcells that have been overloaded or otherwise fail during sequencing can easily be determined with FastQC.\r
+\r
+- **[PRINSEQ](http://prinseq.sourceforge.net/)**\r
+\r
+ Trimming and clipping of reads is performed by PRINSEQ. It is currently the most versatile raw read processor with many customization options.\r
+\r
+- **[ngshmmalign](https://github.com/cbg-ethz/ngshmmalign)**\r
+\r
+ We perform the alignment of the curated NGS data using our custom ngshmmalign that takes structural variants into account. It produces multiple consensus sequences that include either majority bases or ambiguous bases.\r
+\r
+- **[bwa](https://github.com/lh3/bwa)**\r
+\r
+ In order to detect specific cross-contaminations with other probes, the Burrows-Wheeler aligner is used. It quickly yields estimates for foreign genomic material in an experiment.\r
+ Additionally, it can be used as an alternative aligner to ngshmmalign.\r
+\r
+- **[MAFFT](http://mafft.cbrc.jp/alignment/software/)**\r
+\r
+ To standardise multiple samples to the same reference genome (say HXB2 for HIV-1), the multiple sequence aligner MAFFT is employed. The multiple sequence alignment helps in determining regions of low conservation and thus makes standardisation of alignments more robust.\r
+\r
+- **[Samtools and bcftools](https://www.htslib.org/)**\r
+\r
+ The Swiss Army knife of alignment postprocessing and diagnostics. 
bcftools is also used to generate consensus sequence with indels.\r
+\r
+- **[SmallGenomeUtilities](https://github.com/cbg-ethz/smallgenomeutilities)**\r
+\r
+ We perform genomic liftovers to standardised reference genomes using our in-house developed python library of utilities for rewriting alignments.\r
+\r
+- **[ShoRAH](https://github.com/cbg-ethz/shorah)**\r
+\r
+ ShoRAH performs SNV calling and local haplotype reconstruction by using Bayesian clustering.\r
+\r
+- **[LoFreq](https://csb5.github.io/lofreq/)**\r
+\r
+ LoFreq (version 2) is an SNV and indel caller from next-generation sequencing data, and can be used as an alternative engine for SNV calling.\r
+\r
+- **[SAVAGE](https://bitbucket.org/jbaaijens/savage) and [Haploclique](https://github.com/cbg-ethz/haploclique)**\r
+\r
+ We use HaploClique or SAVAGE to perform global haplotype reconstruction for heterogeneous viral populations by using an overlap graph.\r
+\r
+## Citation\r
+\r
+If you use this software in your research, please cite:\r
+\r
+Fuhrmann, L., Jablonski, K. P., Topolsky, I., Batavia, A. A., Borgsmueller, N., Icer Baykal, P., Carrara, M. ... 
& Beerenwinkel, (2023).\r +"V-Pipe 3.0: A Sustainable Pipeline for Within-Sample Viral Genetic Diversity Estimation."\r +_bioRxiv_, doi:[10.1101/2023.10.16.562462](https://doi.org/10.1101/2023.10.16.562462).\r +\r +## Contributions\r +\r +- [Ivan Topolsky\\* ![orcid]](https://orcid.org/0000-0002-7561-0810), [![github]](https://github.com/dryak)\r +- [Pelin Icer Baykal ![orcid]](https://orcid.org/0000-0002-9542-5292), [![github]](https://github.com/picerbaykal)\r +- [Kim Philipp Jablonski ![orcid]](https://orcid.org/0000-0002-4166-4343), [![github]](https://github.com/kpj)\r +- [Lara Fuhrmann ![orcid]](https://orcid.org/0000-0001-6405-0654), [![github]](https://github.com/LaraFuhrmann)\r +- [Uwe Schmitt ![orcid]](https://orcid.org/0000-0002-4658-0616), [![github]](https://github.com/uweschmitt)\r +- [Michal Okoniewski ![orcid]](https://orcid.org/0000-0003-4722-4506), [![github]](https://github.com/michalogit)\r +- [Monica Dragan ![orcid]](https://orcid.org/0000-0002-7719-5892), [![github]](https://github.com/monicadragan)\r +- [Susana Posada Céspedes ![orcid]](https://orcid.org/0000-0002-7459-8186), [![github]](https://github.com/sposadac)\r +- [David Seifert ![orcid]](https://orcid.org/0000-0003-4739-5110), [![github]](https://github.com/SoapZA)\r +- Tobias Marschall\r +- [Niko Beerenwinkel\\*\\* ![orcid]](https://orcid.org/0000-0002-0573-6119)\r +\r +\\* software maintainer ;\r +\\** group leader\r +\r +[github]: https://cbg-ethz.github.io/V-pipe/img/mark-github.svg\r +[orcid]: https://cbg-ethz.github.io/V-pipe/img/ORCIDiD_iconvector.svg\r +\r +## Contact\r +\r +We encourage users to use the [issue tracker](https://github.com/cbg-ethz/V-pipe/issues). 
For further enquiries, you can also contact the V-pipe Dev Team .\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/301?version=4" ; + schema1:keywords "Alignment, Assembly, covid-19, Genomics, INDELs, rna, SNPs, variant_calling, workflow" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "V-pipe (main multi-virus version)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/301?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). 
\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.56.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_protein-complex_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:34:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/56/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 68545 ; + schema1:creator , + ; + schema1:dateCreated "2021-07-02T08:43:37Z" ; + schema1:dateModified "2022-06-10T09:41:40Z" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/56?version=6" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + 
schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/56?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/972?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/circdna" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/circdna" ; + schema1:sdDatePublished "2024-07-12 13:22:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/972/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9220 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:45Z" ; + schema1:dateModified "2024-06-11T12:54:45Z" ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/972?version=7" ; + schema1:keywords "ampliconarchitect, ampliconsuite, circle-seq, circular, DNA, eccdna, ecdna, extrachromosomal-circular-dna, Genomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/circdna" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/972?version=3" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Flashlite-Juicer is a PBS implementation of [Juicer](https://github.com/aidenlab/juicer) for University of Queensland's Flashlite HPC.\r +\r +Infrastructure\\_deployment\\_metadata: FlashLite (QRISCloud)""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.150.1" ; + schema1:isBasedOn "https://github.com/natbutter/juicer" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Flashlite-Juicer" ; + schema1:sdDatePublished "2024-07-12 13:36:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/150/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6741 ; + schema1:creator , + , + ; + schema1:dateCreated "2021-08-18T23:19:40Z" ; + schema1:dateModified "2023-01-16T13:51:45Z" ; + schema1:description """Flashlite-Juicer is a PBS implementation of [Juicer](https://github.com/aidenlab/juicer) for University of Queensland's Flashlite HPC.\r +\r +Infrastructure\\_deployment\\_metadata: FlashLite (QRISCloud)""" ; + schema1:isPartOf ; + schema1:keywords "Juicer, Flashlite, Hi-C, PBS, TAD, scalable, map, FASTQ, BWA, topologically associating domains" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Flashlite-Juicer" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/150?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Proteomics label-free quantification (LFQ) analysis pipeline using OpenMS and MSstats, with feature quantification, feature summarization, quality control and group-based statistical analysis." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1012?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/proteomicslfq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/proteomicslfq" ; + schema1:sdDatePublished "2024-07-12 13:19:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1012/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7027 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:08Z" ; + schema1:dateModified "2024-06-11T12:55:08Z" ; + schema1:description "Proteomics label-free quantification (LFQ) analysis pipeline using OpenMS and MSstats, with feature quantification, feature summarization, quality control and group-based statistical analysis." ; + schema1:keywords "label-free-quantification, openms, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/proteomicslfq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1012?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=13" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-07-12 13:18:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=13" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9496 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:39Z" ; + schema1:dateModified "2024-06-11T12:54:39Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=13" ; + schema1:version 13 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/987?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/funcscan" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/funcscan" ; + schema1:sdDatePublished "2024-07-12 13:21:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/987/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16629 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:54Z" ; + schema1:dateModified "2024-06-11T12:54:54Z" ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/987?version=8" ; + schema1:keywords "amp, AMR, antibiotic-resistance, antimicrobial-peptides, antimicrobial-resistance-genes, arg, Assembly, bgc, biosynthetic-gene-clusters, contigs, function, Metagenomics, natural-products, screening, secondary-metabolites" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/funcscan" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/987?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r 
+* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.261.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_complex_md_setup/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:36:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/261/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 166381 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 27124 ; + schema1:creator , + ; + schema1:dateCreated "2022-01-10T15:27:53Z" ; + schema1:dateModified "2023-06-09T06:44:35Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 
[823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/261?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/261?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "This workflow performs the process of protein-ligand docking, step by step, using the BioExcel Building Blocks library (biobb)." 
; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.257.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Protein-ligand docking (fpocket)" ; + schema1:sdDatePublished "2024-07-12 13:36:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/257/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 29165 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5518 ; + schema1:creator , + ; + schema1:dateCreated "2022-01-10T11:48:32Z" ; + schema1:dateModified "2023-01-16T13:56:53Z" ; + schema1:description "This workflow performs the process of protein-ligand docking, step by step, using the BioExcel Building Blocks library (biobb)." ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Protein-ligand docking (fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/257?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Amplicon analysis workflow using NG-Tax\r +\r +**Steps:**\r +\r +* Quality control on the reads\r +* Execute NGTax for ASV detection and classification\r +\r +For more information about NG-Tax 2.0 have a look at https://doi.org/10.3389/fgene.2019.01366""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/45?version=7" ; + schema1:isBasedOn "https://gitlab.com/m-unlock/cwl" ; + schema1:license "CC0-1.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for NGTax" ; + schema1:sdDatePublished "2024-07-12 13:36:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/45/ro_crate?version=7" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 15649 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3837 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2020-10-28T11:07:26Z" ; + schema1:dateModified "2023-01-16T13:43:45Z" ; + schema1:description """Amplicon analysis workflow using NG-Tax\r +\r +**Steps:**\r +\r +* Quality control on the reads\r +* Execute NGTax for ASV detection and classification\r +\r +For more information about NG-Tax 2.0 have a look at https://doi.org/10.3389/fgene.2019.01366""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/45?version=6" ; + schema1:keywords "Amplicon, 16S, ITS" ; + schema1:license "https://spdx.org/licenses/CC0-1.0" ; + schema1:name "NGTax" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/45?version=7" ; + schema1:version 7 ; + ns1:input , + , + , + , + , + , + , + ; + ns1:output , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Protein 3D structure prediction pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1011?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/proteinfold" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/proteinfold" ; + schema1:sdDatePublished "2024-07-12 13:19:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1011/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11104 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:07Z" ; + schema1:dateModified "2024-06-11T12:55:07Z" ; + schema1:description "Protein 3D structure prediction pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1011?version=1" ; + schema1:keywords "alphafold2, protein-fold-prediction, protein-folding, protein-sequences, protein-structure" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/proteinfold" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1011?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/986?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/fetchngs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fetchngs" ; + schema1:sdDatePublished "2024-07-12 13:21:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/986/ro_crate?version=9" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7847 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:53Z" ; + schema1:dateModified "2024-06-11T12:54:53Z" ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/986?version=13" ; + schema1:keywords "ddbj, download, ena, FASTQ, geo, sra, synapse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fetchngs" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/986?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """### Workflow for LongRead Quality Control and Filtering\r +\r +- NanoPlot (read quality control) before and after filtering\r +- Filtlong (read trimming)\r +- Kraken2 taxonomic read classification before and after filtering\r +- Minimap2 read filtering based on given references

\r +\r +Other UNLOCK workflows on WorkflowHub: https://workflowhub.eu/projects/16/workflows?view=default

\r +\r +**All tool CWL files and other workflows can be found here:**
\r +https://gitlab.com/m-unlock/cwl/workflows\r +\r +**How to setup and use an UNLOCK workflow:**
\r +https://m-unlock.gitlab.io/docs/setup/setup.html
\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/337?version=1" ; + schema1:isBasedOn "https://gitlab.com/m-unlock/cwl/-/blob/master/cwl/workflows/workflow_nanopore_quality.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for LongRead Quality Control and Filtering" ; + schema1:sdDatePublished "2024-07-12 13:34:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/337/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 95508 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 18185 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-04-21T16:19:59Z" ; + schema1:dateModified "2023-04-07T14:07:55Z" ; + schema1:description """### Workflow for LongRead Quality Control and Filtering\r +\r +- NanoPlot (read quality control) before and after filtering\r +- Filtlong (read trimming)\r +- Kraken2 taxonomic read classification before and after filtering\r +- Minimap2 read filtering based on given references

\r +\r +Other UNLOCK workflows on WorkflowHub: https://workflowhub.eu/projects/16/workflows?view=default

\r +\r +**All tool CWL files and other workflows can be found here:**
\r +https://gitlab.com/m-unlock/cwl/workflows\r +\r +**How to setup and use an UNLOCK workflow:**
\r +https://m-unlock.gitlab.io/docs/setup/setup.html
\r +""" ; + schema1:image ; + schema1:keywords "Genomics, nanopore, CWL, Assembly" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "LongRead Quality Control and Filtering" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/337?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + ; + ns1:output , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """From the R1 and R2 fastq files of a single samples, make a scRNAseq counts matrix, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. Produces a processed AnnData \r +Depreciated: use individual workflows insead for multiple samples""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/647?version=1" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "other-closed" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq Single Sample Processing Cell Ranger" ; + schema1:sdDatePublished "2024-07-12 13:22:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/647/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 108091 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-11-09T01:30:26Z" ; + schema1:dateModified "2024-05-30T05:56:52Z" ; + schema1:description """From the R1 and R2 fastq files of a single samples, make a scRNAseq counts matrix, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. 
Produces a processed AnnData \r +Depreciated: use individual workflows insead for multiple samples""" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "" ; + schema1:name "scRNAseq Single Sample Processing Cell Ranger" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/647?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Automated quantitative analysis of DIA proteomics mass spectrometry measurements." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/980?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/diaproteomics" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/diaproteomics" ; + schema1:sdDatePublished "2024-07-12 13:21:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/980/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6375 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "Automated quantitative analysis of DIA proteomics mass spectrometry measurements." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/980?version=6" ; + schema1:keywords "data-independent-proteomics, dia-proteomics, openms, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/diaproteomics" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/980?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-07-12 13:18:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3868 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:01Z" ; + schema1:dateModified "2024-06-11T12:55:01Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=3" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# Drug Synergies Screening Workflow\r +\r +## Table of Contents\r +\r +- [Drug Synergies Screening Workflow](#drug-synergies-screening-workflow)\r + - [Table of Contents](#table-of-contents)\r + - [Description](#description)\r + - [Contents](#contents)\r + - [Building Blocks](#building-blocks)\r + - [Workflows](#workflows)\r + - [Resources](#resources)\r + - [Tests](#tests)\r + - [Instructions](#instructions)\r + - [Local machine](#local-machine)\r + - [Requirements](#requirements)\r + - [Usage steps](#usage-steps)\r + - [MareNostrum 4](#marenostrum-4)\r + - [Requirements in MN4](#requirements-in-mn4)\r + - [Usage steps in MN4](#usage-steps-in-mn4)\r + - [License](#license)\r + - [Contact](#contact)\r +\r +## Description\r +\r +This pipeline simulates a drug screening on personalised cell line models. It automatically builds Boolean models of interest, then uses cell lines data (expression, mutations, copy number variations) to personalise them as MaBoSS models. Finally, this pipeline simulates multiple drug intervention on these MaBoSS models, and lists drug synergies of interest.\r +\r +The workflow uses the following building blocks, described in order of execution:\r +\r +1. Build model from species\r +2. Personalise patient\r +3. MaBoSS\r +4. 
Print drug results\r +\r +For details on individual workflow steps, see the user documentation for each building block.\r +\r +[`GitHub repository`](https://github.com/PerMedCoE/drug-synergies-workflow>)\r +\r +## Contents\r +\r +### Building Blocks\r +\r +The ``BuildingBlocks`` folder contains the script to install the\r +Building Blocks used in the Drug Synergies Workflow.\r +\r +### Workflows\r +\r +The ``Workflow`` folder contains the workflows implementations.\r +\r +Currently contains the implementation using PyCOMPSs.\r +\r +### Resources\r +\r +The ``Resources`` folder contains a small dataset for testing purposes.\r +\r +### Tests\r +\r +The ``Tests`` folder contains the scripts that run each Building Block\r +used in the workflow for a small dataset.\r +They can be executed individually *without PyCOMPSs installed* for testing\r +purposes.\r +\r +## Instructions\r +\r +### Local machine\r +\r +This section explains the requirements and usage for the Drug Synergies Workflow in a laptop or desktop computer.\r +\r +#### Requirements\r +\r +- [`permedcoe`](https://github.com/PerMedCoE/permedcoe) package\r +- [PyCOMPSs](https://pycompss.readthedocs.io/en/stable/Sections/00_Quickstart.html)\r +- [Singularity](https://sylabs.io/guides/3.0/user-guide/installation.html)\r +\r +#### Usage steps\r +\r +1. Clone this repository:\r +\r + ```bash\r + git clone https://github.com/PerMedCoE/drug-synergies-workflow.git\r + ```\r +\r +2. Install the Building Blocks required for the COVID19 Workflow:\r +\r + ```bash\r + drug-synergies-workflow/BuildingBlocks/./install_BBs.sh\r + ```\r +\r +3. 
Get the required Building Block images from the project [B2DROP](https://b2drop.bsc.es/index.php/f/444350):\r +\r + - Required images:\r + - PhysiCell-COVID19.singularity\r + - printResults.singularity\r + - MaBoSS_sensitivity.singularity\r + - FromSpeciesToMaBoSSModel.singularity\r +\r + The path where these files are stored **MUST be exported in the `PERMEDCOE_IMAGES`** environment variable.\r +\r + > :warning: **TIP**: These containers can be built manually as follows (be patient since some of them may take some time):\r + 1. Clone the `BuildingBlocks` repository\r + ```bash\r + git clone https://github.com/PerMedCoE/BuildingBlocks.git\r + ```\r + 2. Build the required Building Block images\r + ```bash\r + cd BuildingBlocks/Resources/images\r + sudo singularity build PhysiCell-COVID19.sif PhysiCell-COVID19.singularity\r + sudo singularity build printResults.sif printResults.singularity\r + sudo singularity build MaBoSS_sensitivity.sif MaBoSS_sensitivity.singularity\r + sudo singularity build FromSpeciesToMaBoSSModel.sif FromSpeciesToMaBoSSModel.singularity\r + cd ../../..\r + ```\r +\r +**If using PyCOMPSs in local PC** (make sure that PyCOMPSs in installed):\r +\r +4. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflows/PyCOMPSs\r + ```\r +\r +5. Execute `./run.sh`\r + > **TIP**: If you want to run the workflow with a different dataset, please update the `run.sh` script setting the `dataset` variable to the new dataset folder and their file names.\r +\r +### MareNostrum 4\r +\r +This section explains the requirements and usage for the Drug Synergies Workflow in the MareNostrum 4 supercomputer.\r +\r +#### Requirements in MN4\r +\r +- Access to MN4\r +\r +All Building Blocks are already installed in MN4, and the Drug Synergies Workflow available.\r +\r +#### Usage steps in MN4\r +\r +1. 
Load the `COMPSs`, `Singularity` and `permedcoe` modules\r +\r + ```bash\r + export COMPSS_PYTHON_VERSION=3\r + module load COMPSs/3.1\r + module load singularity/3.5.2\r + module use /apps/modules/modulefiles/tools/COMPSs/libraries\r + module load permedcoe\r + ```\r +\r + > **TIP**: Include the loading into your `${HOME}/.bashrc` file to load it automatically on the session start.\r +\r + This commands will load COMPSs and the permedcoe package which provides all necessary dependencies, as well as the path to the singularity container images (`PERMEDCOE_IMAGES` environment variable) and testing dataset (`DRUG_SYNERGIES_WORKFLOW_DATASET` environment variable).\r +\r +2. Get a copy of the pilot workflow into your desired folder\r +\r + ```bash\r + mkdir desired_folder\r + cd desired_folder\r + get_drug_synergies_workflow\r + ```\r +\r +3. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflow/PyCOMPSs\r + ```\r +\r +4. Execute `./launch.sh`\r +\r + This command will launch a job into the job queuing system (SLURM) requesting 2 nodes (one node acting half master and half worker, and other full worker node) for 20 minutes, and is prepared to use the singularity images that are already deployed in MN4 (located into the `PERMEDCOE_IMAGES` environment variable). 
It uses the dataset located into `../../Resources/data` folder.\r +\r + > :warning: **TIP**: If you want to run the workflow with a different dataset, please edit the `launch.sh` script and define the appropriate dataset path.\r +\r + After the execution, a `results` folder will be available with with Drug Synergies Workflow results.\r +\r +## License\r +\r +[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\r +\r +## Contact\r +\r +\r +\r +This software has been developed for the [PerMedCoE project](https://permedcoe.eu/), funded by the European Commission (EU H2020 [951773](https://cordis.europa.eu/project/id/951773)).\r +\r +![](https://permedcoe.eu/wp-content/uploads/2020/11/logo_1.png "PerMedCoE")\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/479?version=1" ; + schema1:isBasedOn "https://github.com/PerMedCoE/drug-synergies-workflow" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PerMedCoE Drug Synergy" ; + schema1:sdDatePublished "2024-07-12 13:33:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/479/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1083 ; + schema1:dateCreated "2023-05-23T12:36:45Z" ; + schema1:dateModified "2023-05-23T12:36:45Z" ; + schema1:description """# Drug Synergies Screening Workflow\r +\r +## Table of Contents\r +\r +- [Drug Synergies Screening Workflow](#drug-synergies-screening-workflow)\r + - [Table of Contents](#table-of-contents)\r + - [Description](#description)\r + - [Contents](#contents)\r + - [Building Blocks](#building-blocks)\r + - [Workflows](#workflows)\r + - [Resources](#resources)\r + - [Tests](#tests)\r + - [Instructions](#instructions)\r + - [Local machine](#local-machine)\r + - [Requirements](#requirements)\r + - [Usage steps](#usage-steps)\r + - [MareNostrum 4](#marenostrum-4)\r + - [Requirements in MN4](#requirements-in-mn4)\r + - [Usage steps in MN4](#usage-steps-in-mn4)\r + - [License](#license)\r + - [Contact](#contact)\r +\r +## Description\r +\r +This pipeline simulates a drug screening on personalised cell line models. It automatically builds Boolean models of interest, then uses cell lines data (expression, mutations, copy number variations) to personalise them as MaBoSS models. Finally, this pipeline simulates multiple drug intervention on these MaBoSS models, and lists drug synergies of interest.\r +\r +The workflow uses the following building blocks, described in order of execution:\r +\r +1. Build model from species\r +2. Personalise patient\r +3. MaBoSS\r +4. 
Print drug results\r +\r +For details on individual workflow steps, see the user documentation for each building block.\r +\r +[`GitHub repository`](https://github.com/PerMedCoE/drug-synergies-workflow>)\r +\r +## Contents\r +\r +### Building Blocks\r +\r +The ``BuildingBlocks`` folder contains the script to install the\r +Building Blocks used in the Drug Synergies Workflow.\r +\r +### Workflows\r +\r +The ``Workflow`` folder contains the workflows implementations.\r +\r +Currently contains the implementation using PyCOMPSs.\r +\r +### Resources\r +\r +The ``Resources`` folder contains a small dataset for testing purposes.\r +\r +### Tests\r +\r +The ``Tests`` folder contains the scripts that run each Building Block\r +used in the workflow for a small dataset.\r +They can be executed individually *without PyCOMPSs installed* for testing\r +purposes.\r +\r +## Instructions\r +\r +### Local machine\r +\r +This section explains the requirements and usage for the Drug Synergies Workflow in a laptop or desktop computer.\r +\r +#### Requirements\r +\r +- [`permedcoe`](https://github.com/PerMedCoE/permedcoe) package\r +- [PyCOMPSs](https://pycompss.readthedocs.io/en/stable/Sections/00_Quickstart.html)\r +- [Singularity](https://sylabs.io/guides/3.0/user-guide/installation.html)\r +\r +#### Usage steps\r +\r +1. Clone this repository:\r +\r + ```bash\r + git clone https://github.com/PerMedCoE/drug-synergies-workflow.git\r + ```\r +\r +2. Install the Building Blocks required for the COVID19 Workflow:\r +\r + ```bash\r + drug-synergies-workflow/BuildingBlocks/./install_BBs.sh\r + ```\r +\r +3. 
Get the required Building Block images from the project [B2DROP](https://b2drop.bsc.es/index.php/f/444350):\r +\r + - Required images:\r + - PhysiCell-COVID19.singularity\r + - printResults.singularity\r + - MaBoSS_sensitivity.singularity\r + - FromSpeciesToMaBoSSModel.singularity\r +\r + The path where these files are stored **MUST be exported in the `PERMEDCOE_IMAGES`** environment variable.\r +\r + > :warning: **TIP**: These containers can be built manually as follows (be patient since some of them may take some time):\r + 1. Clone the `BuildingBlocks` repository\r + ```bash\r + git clone https://github.com/PerMedCoE/BuildingBlocks.git\r + ```\r + 2. Build the required Building Block images\r + ```bash\r + cd BuildingBlocks/Resources/images\r + sudo singularity build PhysiCell-COVID19.sif PhysiCell-COVID19.singularity\r + sudo singularity build printResults.sif printResults.singularity\r + sudo singularity build MaBoSS_sensitivity.sif MaBoSS_sensitivity.singularity\r + sudo singularity build FromSpeciesToMaBoSSModel.sif FromSpeciesToMaBoSSModel.singularity\r + cd ../../..\r + ```\r +\r +**If using PyCOMPSs in local PC** (make sure that PyCOMPSs in installed):\r +\r +4. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflows/PyCOMPSs\r + ```\r +\r +5. Execute `./run.sh`\r + > **TIP**: If you want to run the workflow with a different dataset, please update the `run.sh` script setting the `dataset` variable to the new dataset folder and their file names.\r +\r +### MareNostrum 4\r +\r +This section explains the requirements and usage for the Drug Synergies Workflow in the MareNostrum 4 supercomputer.\r +\r +#### Requirements in MN4\r +\r +- Access to MN4\r +\r +All Building Blocks are already installed in MN4, and the Drug Synergies Workflow available.\r +\r +#### Usage steps in MN4\r +\r +1. 
Load the `COMPSs`, `Singularity` and `permedcoe` modules\r +\r + ```bash\r + export COMPSS_PYTHON_VERSION=3\r + module load COMPSs/3.1\r + module load singularity/3.5.2\r + module use /apps/modules/modulefiles/tools/COMPSs/libraries\r + module load permedcoe\r + ```\r +\r + > **TIP**: Include the loading into your `${HOME}/.bashrc` file to load it automatically on the session start.\r +\r + This commands will load COMPSs and the permedcoe package which provides all necessary dependencies, as well as the path to the singularity container images (`PERMEDCOE_IMAGES` environment variable) and testing dataset (`DRUG_SYNERGIES_WORKFLOW_DATASET` environment variable).\r +\r +2. Get a copy of the pilot workflow into your desired folder\r +\r + ```bash\r + mkdir desired_folder\r + cd desired_folder\r + get_drug_synergies_workflow\r + ```\r +\r +3. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflow/PyCOMPSs\r + ```\r +\r +4. Execute `./launch.sh`\r +\r + This command will launch a job into the job queuing system (SLURM) requesting 2 nodes (one node acting half master and half worker, and other full worker node) for 20 minutes, and is prepared to use the singularity images that are already deployed in MN4 (located into the `PERMEDCOE_IMAGES` environment variable). 
It uses the dataset located into `../../Resources/data` folder.\r +\r + > :warning: **TIP**: If you want to run the workflow with a different dataset, please edit the `launch.sh` script and define the appropriate dataset path.\r +\r + After the execution, a `results` folder will be available with with Drug Synergies Workflow results.\r +\r +## License\r +\r +[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\r +\r +## Contact\r +\r +\r +\r +This software has been developed for the [PerMedCoE project](https://permedcoe.eu/), funded by the European Commission (EU H2020 [951773](https://cordis.europa.eu/project/id/951773)).\r +\r +![](https://permedcoe.eu/wp-content/uploads/2020/11/logo_1.png "PerMedCoE")\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "PerMedCoE Drug Synergy" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/479?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. 
Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/amber-protein-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.297.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Amber Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/297/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 83828 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-23T13:28:34Z" ; + schema1:dateModified "2023-01-16T13:58:48Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/amber-protein-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/297?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Amber Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + 
schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_md_setup/galaxy/biobb_wf_amber_md_setup.ga" ; + schema1:version 2 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """The workflows in this collection are from the '16S Microbial Analysis with mothur' tutorial for analysis of 16S data (Saskia Hiltemann, Bérénice Batut, Dave Clements), adapted for pipeline use on galaxy australia (Ahmed Mehdi). The workflows developed in galaxy use mothur software package developed by Schloss et al https://pubmed.ncbi.nlm.nih.gov/19801464/. \r +\r +Please also refer to the 16S tutorials available at Galaxy https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html and [https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop/tutorial.html\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/653?version=1" ; + schema1:isBasedOn "https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Workflow 7 : Beta Diversity [16S Microbial Analysis With Mothur]" ; + schema1:sdDatePublished "2024-07-12 13:26:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/653/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9429 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2023-11-09T05:26:53Z" ; + schema1:dateModified "2023-11-09T05:26:53Z" ; + schema1:description """The workflows in this collection are from the '16S Microbial Analysis with mothur' tutorial for analysis of 16S data (Saskia Hiltemann, Bérénice Batut, Dave Clements), adapted for pipeline use on galaxy australia (Ahmed Mehdi). The workflows developed in galaxy use mothur software package developed by Schloss et al https://pubmed.ncbi.nlm.nih.gov/19801464/. \r +\r +Please also refer to the 16S tutorials available at Galaxy https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html and [https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop/tutorial.html\r +\r +""" ; + schema1:isPartOf ; + schema1:keywords "Metagenomics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Workflow 7 : Beta Diversity [16S Microbial Analysis With Mothur]" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/653?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/972?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/circdna" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/circdna" ; + schema1:sdDatePublished "2024-07-12 13:22:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/972/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9212 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:45Z" ; + schema1:dateModified "2024-06-11T12:54:45Z" ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/972?version=7" ; + schema1:keywords "ampliconarchitect, ampliconsuite, circle-seq, circular, DNA, eccdna, ecdna, extrachromosomal-circular-dna, Genomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/circdna" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/972?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Global Run-On sequencing analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1004?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/nascent" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nascent" ; + schema1:sdDatePublished "2024-07-12 13:20:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1004/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8446 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:06Z" ; + schema1:dateModified "2024-06-11T12:55:06Z" ; + schema1:description "Global Run-On sequencing analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1004?version=4" ; + schema1:keywords "gro-seq, nascent, pro-seq, rna, transcription, tss" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nascent" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1004?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/521?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for SARS-CoV-2 ONT Amplicon Sequencing SANBI 1.0" ; + schema1:sdDatePublished "2024-07-12 13:32:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/521/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 19807 ; + schema1:dateCreated "2023-06-29T12:07:19Z" ; + schema1:dateModified "2023-06-29T12:07:19Z" ; + schema1:description "" ; + schema1:keywords "SARS-CoV-2, SANBI, nanopore" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "SARS-CoV-2 ONT Amplicon Sequencing SANBI 1.0" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/521?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# GERONIMO\r +\r +## Introduction\r +GERONIMO is a bioinformatics pipeline designed to conduct high-throughput homology searches of structural genes using covariance models. These models are based on the alignment of sequences and the consensus of secondary structures. The pipeline is built using Snakemake, a workflow management tool that allows for the reproducible execution of analyses on various computational platforms. \r +\r +The idea for developing GERONIMO emerged from a comprehensive search for [telomerase RNA in lower plants] and was subsequently refined through an [expanded search of telomerase RNA across Insecta]. GERONIMO can test hundreds of genomes and ensures the stability and reproducibility of the analyses performed.\r +\r +\r +[telomerase RNA in lower plants]: https://doi.org/10.1093/nar/gkab545\r +[expanded search of telomerase RNA across Insecta]: https://doi.org/10.1093/nar/gkac1202\r +\r +## Scope\r +The GERONIMO tool utilises covariance models (CMs) to conduct homology searches of RNA sequences across a wide range of gene families in a broad evolutionary context. Specifically, it can be utilised to:\r +\r +* Detect RNA sequences that share a common evolutionary ancestor\r +* Identify and align orthologous RNA sequences among closely related species, as well as paralogous sequences within a single species\r +* Identify conserved non-coding RNAs in a genome, and extract upstream genomic regions to characterise potential promoter regions. \r +It is important to note that GERONIMO is a computational tool, and as such, it is intended to be run on a computer with a small amount of data. 
Appropriate computational infrastructure is necessary for analysing hundreds of genomes.\r +\r +Although GERONIMO was primarily designed for Telomerase RNA identification, its functionality extends to include the detection and alignment of other RNA gene families, including **rRNA**, **tRNA**, **snRNA**, **miRNA**, and **lncRNA**. This can aid in identifying paralogs and orthologs across different species that may carry specific functions, making it useful for phylogenetic analyses. \r +\r +It is crucial to remember that some gene families may exhibit similar characteristics but different functions. Therefore, analysing the data and functional annotation after conducting the search is essential to characterise the sequences properly.\r +\r +## Pipeline overview\r +\r +\r +By default, the GERONIMO pipeline conducts high-throughput searches of homology sequences in downloaded genomes utilizing covariance models. If a significant similarity is detected between the model and genome sequence, the pipeline extracts the upstream region, making it convenient to identify the promoter of the discovered gene. In brief, the pipeline:\r +- Compiles a list of genomes using the NCBI's [Entrez] database based on a specified query, *e.g. 
"Rhodophyta"[Organism]*\r +- Downloads and decompresses the requested genomes using *rsync* and *gunzip*, respectively\r +- *Optionally*, generates a covariance model based on a provided alignment using [Infernal]\r +- Conducts searches among the genomes using the covariance model [Infernal]\r +- Supplements genome information with taxonomy data using [rentrez]\r +- Expands the significant hits sequence by extracting upstream genomic regions using [*blastcmd*]\r +- Compiles the results, organizes them into a tabular format, and generates a visual summary of the performed analysis.\r +\r +[Entrez]: https://www.ncbi.nlm.nih.gov/books/NBK179288/\r +[Infernal]: http://eddylab.org/infernal/\r +[rentrez]: https://github.com/ropensci/rentrez\r +[*blastcmd*]: https://www.ncbi.nlm.nih.gov/books/NBK569853/\r +\r +## Quick start\r +The GERONIMO is available as a `snakemake pipeline` running on Linux and Windows operating systems.\r +\r +### Windows 10\r +Instal Linux on Windows 10 (WSL) according to [instructions], which bottling down to opening PowerShell or Windows Command Prompt in *administrator mode* and pasting the following:\r +```shell\r +wsl --install\r +wsl.exe --install UBUNTU\r +```\r +Then restart the machine and follow the instructions for setting up the Linux environment.\r +\r +[instructions]: https://learn.microsoft.com/en-us/windows/wsl/install\r +\r +### Linux:\r +#### Check whether the conda is installed:\r +```shell\r +conda -V\r +```\r +> GERONIMO was tested on conda 23.3.1\r +#### 1) If you do not have installed `conda`, please install `miniconda`\r +Please follow the instructions for installing [miniconda]\r +\r +[miniconda]: https://conda.io/projects/conda/en/stable/user-guide/install/linux.html\r +\r +#### 2) Continue with installing `mamba` (recommended but optional)\r +```shell\r +conda install -n base -c conda-forge mamba\r +```\r +#### 3) Install `snakemake`\r +```shell\r +conda activate base\r +mamba create -p env_snakemake -c conda-forge -c 
bioconda snakemake\r +mamba activate env_snakemake\r +snakemake --help\r +```\r +In case of complications, please check the section `Questions & Answers` below or follow the [official documentation] for troubleshooting.\r +\r +[official documentation]: https://snakemake.readthedocs.io/en/stable/getting_started/installation.html\r +\r +### Clone the GERONIMO repository\r +Go to the path in which you want to run the analysis and clone the repository:\r +```shell\r +cd \r +git clone https://github.com/amkilar/GERONIMO.git\r +```\r +\r +### Run sample analysis to ensure GERONIMO installation was successful\r +All files are prepared for the sample analysis as a default. Please execute the line below:\r +```shell\r +snakemake -s GERONIMO.sm --cores 1 --use-conda results/summary_table.xlsx\r +```\r +\r +This will prompt GERONIMO to quickly scan all modules, verifying the correct setup of the pipeline without executing any analysis.\r +You should see the message `Building DAG of jobs...`, followed by `Nothing to be done (all requested files are present and up to date).`, when successfully completed.\r +\r +If you want to run the sample analysis fully, please remove the folder `results` from the GERONIMO directory and execute GERONIMO again with:\r +\r +`snakemake -s GERONIMO.sm --cores 1 --use-conda results/summary_table.xlsx`\r +\r +> You might consider allowing more cores to speed up the analysis, which might take up to several hours.\r +\r +#### You might want to clean `GERONIMO/` directory from the files produced by the example analysis. 
You can safely remove the following:\r +- `GERONIMO/results`\r +- `GERONIMO/database`\r +- `GERONIMO/taxonomy`\r +- `GERONIMO/temp`\r +- `.create_genome_list.touch`\r +- `list_of_genomes.txt`\r +\r +## Setup the inputs\r +\r +### 1) Prepare the `covariance models`:\r +\r +#### Browse the collection of available `covariance models` at [Rfam] (*You can find the covariance model in the tab `Curation`.*) \r +Paste the covariance model to the folder `GERONIMO/models` and ensure its name follows the convention: `cov_model_`\r +\r +[Rfam]: https://rfam.org/\r +\r +#### **OR**\r +\r +#### Prepare your own `covariance model` using [LocARNA]\r +1. Paste or upload your sequences to the web server and download the `.stk` file with the alignment result. \r + \r + > *Please note that the `.stk` file format is crucial for the analysis, containing sequence alignment and secondary structure consensus.*\r + \r + > The LocARNA web service allows you to align 30 sequences at once - if you need to align more sequences, please use the standalone version available [here] \r + > After installation run: \r + ```shell\r + mlocarna my_fasta_sequences.fasta\r + ```\r + \r +2. 
Paste the `.stk` alignment file to the folder `GERONIMO/model_to_build` and ensure its name follows the convention: `.stk`\r +\r + > Please check the example `heterotrichea.stk` format in `GERONIMO/models_to_built` for reference\r + \r +\r +[LocARNA]: http://rna.informatik.uni-freiburg.de/LocARNA/Input.jsp\r +[here]: http://www.bioinf.uni-freiburg.de/Software/LocARNA/\r +\r +\r +### 2) Adjust the `config.yaml` file\r +Please adjust the analysis specifications, as in the following example:\r +\r +> - database: ' [Organism]' (in case of difficulties with defining the database query, please follow the instructions below)\r +> - extract_genomic_region-length: (here you can determine how long the upstream genomic region should be extracted; tested for 200)\r +> - models: ["", ""] (here specify the names of models that should be used to perform analysis)\r +> \r +> *Here you can also insert the name of the covariance model you want to build with GERONIMO - just be sure you placed `.stk` file in `GERONIMO/models_to_build` before starting analysis*\r +> - CPU_for_model_building: (specify the number of available CPUs devoted to the process of building model (cannot exceed the CPU number allowed to snakemake with `--cores`)\r +>\r +> *You might ignore this parameter when you do not need to create a new covariance model*\r +\r +\r +Keep in mind that the covariance models and alignments must be present in the respective GERONIMO folders.\r + \r +### 3) Remove folder `results`, which contains example analysis output\r +### 4) **Please ensure you have enough storage capacity to download all the requested genomes (in the `GERONIMO/` directory)**\r +\r +## Run GERONIMO\r +```shell\r +mamba activate env_snakemake\r +cd ~/GERONIMO\r +snakemake -s GERONIMO.sm --cores --use-conda results/summary_table.xlsx\r +```\r + \r +## Example results\r +\r +### Outputs characterisation\r +\r +#### A) Summary table\r +The Excel table contains the results arranged by taxonomy information and hit 
significance. The specific columns include:\r +* family, organism_name, class, order, phylum (taxonomy context)\r +* GCA_id - corresponds to the genome assembly in the *NCBI database*\r +* model - describes which covariance model identified the result\r +* label - follows the *Infernal* convention of categorizing hits\r +* number - the counter of the result\r +* e_value - indicates the significance level of the hit\r +* HIT_sequence - the exact HIT sequence found by *Infernal*, which corresponds to the covariance model\r +* HIT_ID - describes in which part of the genome assembly the hit was found, which may help publish novel sequences\r +* extended_genomic_region - upstream sequence, which may contain a possible promoter sequence\r +* secondary_structure - the secondary structure consensus of the covariance model\r +\r +\r +#### B) Significant Hits Distribution Across Taxonomy Families\r +The plot provides an overview of the number of genomes in which at least one significant hit was identified, grouped by family. The bold black line corresponds to the number of genomes present in each family, helping to minimize bias regarding unequal data representation across the taxonomy.\r +\r +\r +#### C) Hits Distribution in Genomes Across Families\r +The heatmap provides information about the most significant hits from the genome, identified by a specific covariance model. Genomes are grouped by families (on the right). Hits are classified into three categories based on their e-values. Generally, these categories correspond to hit classifications ("HIT," "MAYBE," "NO HIT"). The "HIT" category is further divided to distinguish between highly significant hits and moderately significant ones.\r +\r +\r +\r +### GERONIMO directory structure\r +\r +The GERONIMO directory structure is designed to produce files in a highly structured manner, ensuring clear insight and facilitating the analysis of results. 
During a successful run, GERONIMO produces the following folders:\r +* `/database` - which contains genome assemblies that were downloaded from the *NCBI database* and grouped in subfolders\r +* `/taxonomy` - where taxonomy information is gathered and stored in the form of tables\r +* `/results` - the main folder containing all produced results:\r + * `/infernal_raw` - contains the raw results produced by *Infernal*\r + * `/infernal` - contains restructured results of *Infernal* in table format\r + * `/cmdBLAST` - contains results of *cmdblast*, which extracts the extended genomic region\r + * `/summary` - contains summary files that join results from *Infernal*, *cmdblast*, and attach taxonomy context\r + * `/plots` - contains two types of summary plots\r +* `/temp` - folder contains the information necessary to download genome assemblies from *NCBI database*\r +\r +* `/env` - stores instructions for dependency installation\r +* `/models` - where calibrated covariance models can be pasted, *for example, from the Rfam database*\r +* `/modes_to_built` - where multiple alignments in *.stk* format can be pasted\r +* `/scripts` - contains developed scripts that perform results structurization\r +\r +#### The example GERONIMO directory structure:\r +\r +```shell\r +GERONIMO\r +├── database\r +│   ├── GCA_000091205.1_ASM9120v1_genomic\r +│   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   └── ...\r +├── env\r +├── models\r +├── model_to_build\r +├── results\r +│   ├── cmdBLAST\r +│   │   ├── MRP\r +│   │   │   ├── GCA_000091205.1_ASM9120v1_genomic\r +│   │   │   │   ├── extended\r +│   │   │   │   └── filtered\r +│   │   │   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   │   │   │   ├── extended\r +│   │   │   │   └── filtered\r +│   │   │   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   │   │   │   ├── extended\r +│   │   │   │   └── filtered\r +│   │   │   └── ...\r +│   │   ├── SRP\r +│   │   │   ├── 
GCA_000091205.1_ASM9120v1_genomic\r +│   │   │   │   ├── extended\r +│   │   │   │   └── filtered\r +│   │   │   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   │   │   │   ├── extended\r +│   │   │   │   └── filtered\r +│   │   │   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   │   │   │   ├── extended\r +│   │   │   │   └── filtered\r +│   │   │   └── ...\r +│   │   ├── ...\r +│   ├── infernal\r +│   │   ├── MRP\r +│   │   │   ├── GCA_000091205.1_ASM9120v1_genomic\r +│   │   │   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   │   │   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   │   │   ├── ...\r +│   │   ├── SRP\r +│   │   │   ├── GCA_000091205.1_ASM9120v1_genomic\r +│   │   │   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   │   │   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   │   │   ├── ...\r +│   ├── plots\r +│   ├── raw_infernal\r +│   │   ├── MRP\r +│   │   │   ├── GCA_000091205.1_ASM9120v1_genomic\r +│   │   │   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   │   │   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   │   │   ├── ...\r +│   │   ├── SRP\r +│   │   │   ├── GCA_000091205.1_ASM9120v1_genomic\r +│   │   │   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   │   │   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   │   │   ├── ...\r +│   └── summary\r +│   ├── GCA_000091205.1_ASM9120v1_genomic\r +│   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   ├── ...\r +├── scripts\r +├── taxonomy\r +└── temp\r +```\r +\r +## GERONIMO applicability\r +\r +### Expanding the evolutionary context\r +To add new genomes or database queries to an existing analysis, please follow the instructions:\r +1) Rename the `list_of_genomes.txt` file to `previous_list_of_genomes.txt` or any other preferred name.\r +2) Modify the `config.yaml` file by replacing the previous database query with the new one.\r +3) Delete:\r + - `summary_table.xlsx`, `part_summary_table.csv`, `summary_table_models.xlsx` files located in the `GERONIMO\\results` directory\r 
+ - `.create_genome_list.touch` file\r +5) Run GERONIMO to calculate new results using the command:\r + ```shell\r + snakemake -s GERONIMO.sm --cores --use-conda results/summary_table.xlsx\r + ```\r +7) Once the new results are generated, reviewing them before merging them with the original results is recommended.\r +8) Copy the contents of the `previous_list_of_genomes.txt` file and paste them into the current `list_of_genomes.txt`.\r +9) Delete:\r + - `summary_table.xlsx` located in the `GERONIMO\\results` directory\r + - `.create_genome_list.touch` file\r +10) Run GERONIMO to merge the results from both analyses using the command:\r + ```shell\r + snakemake -s GERONIMO.sm --cores 1 --use-conda results/summary_table.xlsx\r + ```\r +\r +### Incorporating new covariance models into existing analysis\r +1) Copy the new covariance model to `GERONIMO/models`\r +2) Modify the `config.yaml` file by adding the name of the new model to the line `models: [...]`\r +3) Run GERONIMO to see the updated analysis outcome\r +\r +### Building a new covariance model\r +With GERONIMO, building a new covariance model from multiple sequence alignment in the `.stk` format is possible. \r +\r +To do so, simply paste `.stk` file to `GERONIMO/models_to_build` and paste the name of the new covariance model to `config.yaml` file to the line `models: [""]`\r +\r +and run GERONIMO.\r +\r +\r +## Questions & Answers\r +\r +### How to specify the database query?\r +- Visit the [NCBI Assemblies] website. \r +- Follow the instruction on the graphic below:\r +\r +[NCBI Assemblies]: https://www.ncbi.nlm.nih.gov/assembly/?term=\r +\r +### WSL: problem with creating `snakemake_env`\r +In the case of an error similar to the one below:\r +> CondaError: Unable to create prefix directory '/mnt/c/Windows/system32/env_snakemake'.\r +> Check that you have sufficient permissions. 
\r + \r +You might try to delete the cache with: `rm -r ~/.cache/` and try again.\r +\r +### When `snakemake` does not seem to be installed properly\r +In the case of the following error:\r +> Command 'snakemake' not found ...\r +\r +Check whether the `env_snakemake` is activated.\r +> It should result in a change from (base) to (env_snakemake) before your login name in the command line window.\r +\r +If you still see `(base)` before your login name, please try to activate the environment with conda:\r +`conda activate env_snakemake`\r +\r +\r +Please note that you might need to specify the full path to the `env_snakemake`, like /home/your user name/env_snakemake\r +\r +### How to browse GERONIMO results obtained in WSL?\r +You can easily access the results obtained on WSL from your Windows environment by opening `File Explorer` and pasting the following line into the search bar: `\\\\wsl.localhost\\Ubuntu\\home\\`. This will reveal a folder with your username, as specified during the configuration of your Ubuntu system. To locate the GERONIMO results, simply navigate to the folder with your username and then to the `home` folder. (`\\\\wsl.localhost\\Ubuntu\\home\\\\home\\GERONIMO`)\r +\r +### GERONIMO occupies a lot of storage space\r +Through genome downloads, GERONIMO can potentially consume storage space, rapidly leading to a shortage. Currently, downloading genomes is an essential step for optimal GERONIMO performance.\r +\r +Regrettably, if the analysis is rerun without the `/database` folder, it will result in the need to redownload genomes, which is a highly time-consuming process.\r +\r +Nevertheless, if you do not intend to repeat the analysis and have no requirement for additional genomes or models, you are welcome to retain your results tables and plots while removing the remaining files.\r +\r +It is strongly advised against using local machines for extensive analyses. 
If you lack access to external storage space, it is recommended to divide the analysis into smaller segments, which can be later merged, as explained in the section titled `Expanding the evolutionary context`.\r +\r +Considering this limitation, I am currently working on implementing a solution that will help circumvent the need for redundant genome downloads without compromising GERONIMO performance in the future.\r +\r +You might consider deleting the `.snakemake` folder to free up storage space. However, please note that deleting this folder will require the reinstallation of GERONIMO dependencies when the analysis is rerun.\r +\r +## License\r +Copyright (c) 2023 Agata M. Kilar\r +\r +Permission is hereby granted, free of charge, to any person obtaining a copy\r +of this software and associated documentation files (the "Software"), to deal\r +in the Software without restriction, including without limitation the rights\r +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r +copies of the Software, and to permit persons to whom the Software is\r +furnished to do so, subject to the following conditions:\r +\r +The above copyright notice and this permission notice shall be included in all\r +copies or substantial portions of the Software.\r +\r +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r +SOFTWARE.\r +\r +## Contact\r +mgr inż. 
Agata Magdalena Kilar, PhD (agata.kilar@ceitec.muni.cz)\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.547.1" ; + schema1:isBasedOn "https://github.com/amkilar/GERONIMO.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for GERONIMO" ; + schema1:sdDatePublished "2024-07-12 13:32:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/547/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6740 ; + schema1:creator ; + schema1:dateCreated "2023-08-01T01:34:42Z" ; + schema1:dateModified "2023-08-03T18:15:31Z" ; + schema1:description """# GERONIMO\r +\r +## Introduction\r +GERONIMO is a bioinformatics pipeline designed to conduct high-throughput homology searches of structural genes using covariance models. These models are based on the alignment of sequences and the consensus of secondary structures. The pipeline is built using Snakemake, a workflow management tool that allows for the reproducible execution of analyses on various computational platforms. \r +\r +The idea for developing GERONIMO emerged from a comprehensive search for [telomerase RNA in lower plants] and was subsequently refined through an [expanded search of telomerase RNA across Insecta]. GERONIMO can test hundreds of genomes and ensures the stability and reproducibility of the analyses performed.\r +\r +\r +[telomerase RNA in lower plants]: https://doi.org/10.1093/nar/gkab545\r +[expanded search of telomerase RNA across Insecta]: https://doi.org/10.1093/nar/gkac1202\r +\r +## Scope\r +The GERONIMO tool utilises covariance models (CMs) to conduct homology searches of RNA sequences across a wide range of gene families in a broad evolutionary context. 
Specifically, it can be utilised to:\r +\r +* Detect RNA sequences that share a common evolutionary ancestor\r +* Identify and align orthologous RNA sequences among closely related species, as well as paralogous sequences within a single species\r +* Identify conserved non-coding RNAs in a genome, and extract upstream genomic regions to characterise potential promoter regions. \r +It is important to note that GERONIMO is a computational tool, and as such, it is intended to be run on a computer with a small amount of data. Appropriate computational infrastructure is necessary for analysing hundreds of genomes.\r +\r +Although GERONIMO was primarily designed for Telomerase RNA identification, its functionality extends to include the detection and alignment of other RNA gene families, including **rRNA**, **tRNA**, **snRNA**, **miRNA**, and **lncRNA**. This can aid in identifying paralogs and orthologs across different species that may carry specific functions, making it useful for phylogenetic analyses. \r +\r +It is crucial to remember that some gene families may exhibit similar characteristics but different functions. Therefore, analysing the data and functional annotation after conducting the search is essential to characterise the sequences properly.\r +\r +## Pipeline overview\r +\r +\r +By default, the GERONIMO pipeline conducts high-throughput searches of homology sequences in downloaded genomes utilizing covariance models. If a significant similarity is detected between the model and genome sequence, the pipeline extracts the upstream region, making it convenient to identify the promoter of the discovered gene. In brief, the pipeline:\r +- Compiles a list of genomes using the NCBI's [Entrez] database based on a specified query, *e.g. 
"Rhodophyta"[Organism]*\r +- Downloads and decompresses the requested genomes using *rsync* and *gunzip*, respectively\r +- *Optionally*, generates a covariance model based on a provided alignment using [Infernal]\r +- Conducts searches among the genomes using the covariance model [Infernal]\r +- Supplements genome information with taxonomy data using [rentrez]\r +- Expands the significant hits sequence by extracting upstream genomic regions using [*blastcmd*]\r +- Compiles the results, organizes them into a tabular format, and generates a visual summary of the performed analysis.\r +\r +[Entrez]: https://www.ncbi.nlm.nih.gov/books/NBK179288/\r +[Infernal]: http://eddylab.org/infernal/\r +[rentrez]: https://github.com/ropensci/rentrez\r +[*blastcmd*]: https://www.ncbi.nlm.nih.gov/books/NBK569853/\r +\r +## Quick start\r +The GERONIMO is available as a `snakemake pipeline` running on Linux and Windows operating systems.\r +\r +### Windows 10\r +Install Linux on Windows 10 (WSL) according to [instructions], which boils down to opening PowerShell or Windows Command Prompt in *administrator mode* and pasting the following:\r +```shell\r +wsl --install\r +wsl.exe --install UBUNTU\r +```\r +Then restart the machine and follow the instructions for setting up the Linux environment.\r +\r +[instructions]: https://learn.microsoft.com/en-us/windows/wsl/install\r +\r +### Linux:\r +#### Check whether the conda is installed:\r +```shell\r +conda -V\r +```\r +> GERONIMO was tested on conda 23.3.1\r +#### 1) If you do not have `conda` installed, please install `miniconda`\r +Please follow the instructions for installing [miniconda]\r +\r +[miniconda]: https://conda.io/projects/conda/en/stable/user-guide/install/linux.html\r +\r +#### 2) Continue with installing `mamba` (recommended but optional)\r +```shell\r +conda install -n base -c conda-forge mamba\r +```\r +#### 3) Install `snakemake`\r +```shell\r +conda activate base\r +mamba create -p env_snakemake -c conda-forge -c 
bioconda snakemake\r +mamba activate env_snakemake\r +snakemake --help\r +```\r +In case of complications, please check the section `Questions & Answers` below or follow the [official documentation] for troubleshooting.\r +\r +[official documentation]: https://snakemake.readthedocs.io/en/stable/getting_started/installation.html\r +\r +### Clone the GERONIMO repository\r +Go to the path in which you want to run the analysis and clone the repository:\r +```shell\r +cd \r +git clone https://github.com/amkilar/GERONIMO.git\r +```\r +\r +### Run sample analysis to ensure GERONIMO installation was successful\r +All files are prepared for the sample analysis as a default. Please execute the line below:\r +```shell\r +snakemake -s GERONIMO.sm --cores 1 --use-conda results/summary_table.xlsx\r +```\r +\r +This will prompt GERONIMO to quickly scan all modules, verifying the correct setup of the pipeline without executing any analysis.\r +You should see the message `Building DAG of jobs...`, followed by `Nothing to be done (all requested files are present and up to date).`, when successfully completed.\r +\r +If you want to run the sample analysis fully, please remove the folder `results` from the GERONIMO directory and execute GERONIMO again with:\r +\r +`snakemake -s GERONIMO.sm --cores 1 --use-conda results/summary_table.xlsx`\r +\r +> You might consider allowing more cores to speed up the analysis, which might take up to several hours.\r +\r +#### You might want to clean `GERONIMO/` directory from the files produced by the example analysis. 
You can safely remove the following:\r +- `GERONIMO/results`\r +- `GERONIMO/database`\r +- `GERONIMO/taxonomy`\r +- `GERONIMO/temp`\r +- `.create_genome_list.touch`\r +- `list_of_genomes.txt`\r +\r +## Setup the inputs\r +\r +### 1) Prepare the `covariance models`:\r +\r +#### Browse the collection of available `covariance models` at [Rfam] (*You can find the covariance model in the tab `Curation`.*) \r +Paste the covariance model to the folder `GERONIMO/models` and ensure its name follows the convention: `cov_model_`\r +\r +[Rfam]: https://rfam.org/\r +\r +#### **OR**\r +\r +#### Prepare your own `covariance model` using [LocARNA]\r +1. Paste or upload your sequences to the web server and download the `.stk` file with the alignment result. \r + \r + > *Please note that the `.stk` file format is crucial for the analysis, containing sequence alignment and secondary structure consensus.*\r + \r + > The LocARNA web service allows you to align 30 sequences at once - if you need to align more sequences, please use the standalone version available [here] \r + > After installation run: \r + ```shell\r + mlocarna my_fasta_sequences.fasta\r + ```\r + \r +2. 
Paste the `.stk` alignment file to the folder `GERONIMO/model_to_build` and ensure its name follows the convention: `.stk`\r +\r + > Please check the example `heterotrichea.stk` format in `GERONIMO/models_to_built` for reference\r + \r +\r +[LocARNA]: http://rna.informatik.uni-freiburg.de/LocARNA/Input.jsp\r +[here]: http://www.bioinf.uni-freiburg.de/Software/LocARNA/\r +\r +\r +### 2) Adjust the `config.yaml` file\r +Please adjust the analysis specifications, as in the following example:\r +\r +> - database: ' [Organism]' (in case of difficulties with defining the database query, please follow the instructions below)\r +> - extract_genomic_region-length: (here you can determine how long the upstream genomic region should be extracted; tested for 200)\r +> - models: ["", ""] (here specify the names of models that should be used to perform analysis)\r +> \r +> *Here you can also insert the name of the covariance model you want to build with GERONIMO - just be sure you placed `.stk` file in `GERONIMO/models_to_build` before starting analysis*\r +> - CPU_for_model_building: (specify the number of available CPUs devoted to the process of building model (cannot exceed the CPU number allowed to snakemake with `--cores`)\r +>\r +> *You might ignore this parameter when you do not need to create a new covariance model*\r +\r +\r +Keep in mind that the covariance models and alignments must be present in the respective GERONIMO folders.\r + \r +### 3) Remove folder `results`, which contains example analysis output\r +### 4) **Please ensure you have enough storage capacity to download all the requested genomes (in the `GERONIMO/` directory)**\r +\r +## Run GERONIMO\r +```shell\r +mamba activate env_snakemake\r +cd ~/GERONIMO\r +snakemake -s GERONIMO.sm --cores --use-conda results/summary_table.xlsx\r +```\r + \r +## Example results\r +\r +### Outputs characterisation\r +\r +#### A) Summary table\r +The Excel table contains the results arranged by taxonomy information and hit 
significance. The specific columns include:\r +* family, organism_name, class, order, phylum (taxonomy context)\r +* GCA_id - corresponds to the genome assembly in the *NCBI database*\r +* model - describes which covariance model identified the result\r +* label - follows the *Infernal* convention of categorizing hits\r +* number - the counter of the result\r +* e_value - indicates the significance level of the hit\r +* HIT_sequence - the exact HIT sequence found by *Infernal*, which corresponds to the covariance model\r +* HIT_ID - describes in which part of the genome assembly the hit was found, which may help publish novel sequences\r +* extended_genomic_region - upstream sequence, which may contain a possible promoter sequence\r +* secondary_structure - the secondary structure consensus of the covariance model\r +\r +\r +#### B) Significant Hits Distribution Across Taxonomy Families\r +The plot provides an overview of the number of genomes in which at least one significant hit was identified, grouped by family. The bold black line corresponds to the number of genomes present in each family, helping to minimize bias regarding unequal data representation across the taxonomy.\r +\r +\r +#### C) Hits Distribution in Genomes Across Families\r +The heatmap provides information about the most significant hits from the genome, identified by a specific covariance model. Genomes are grouped by families (on the right). Hits are classified into three categories based on their e-values. Generally, these categories correspond to hit classifications ("HIT," "MAYBE," "NO HIT"). The "HIT" category is further divided to distinguish between highly significant hits and moderately significant ones.\r +\r +\r +\r +### GERONIMO directory structure\r +\r +The GERONIMO directory structure is designed to produce files in a highly structured manner, ensuring clear insight and facilitating the analysis of results. 
During a successful run, GERONIMO produces the following folders:\r +* `/database` - which contains genome assemblies that were downloaded from the *NCBI database* and grouped in subfolders\r +* `/taxonomy` - where taxonomy information is gathered and stored in the form of tables\r +* `/results` - the main folder containing all produced results:\r + * `/infernal_raw` - contains the raw results produced by *Infernal*\r + * `/infernal` - contains restructured results of *Infernal* in table format\r + * `/cmdBLAST` - contains results of *cmdblast*, which extracts the extended genomic region\r + * `/summary` - contains summary files that join results from *Infernal*, *cmdblast*, and attach taxonomy context\r + * `/plots` - contains two types of summary plots\r +* `/temp` - folder contains the information necessary to download genome assemblies from *NCBI database*\r +\r +* `/env` - stores instructions for dependency installation\r +* `/models` - where calibrated covariance models can be pasted, *for example, from the Rfam database*\r +* `/modes_to_built` - where multiple alignments in *.stk* format can be pasted\r +* `/scripts` - contains developed scripts that perform results structurization\r +\r +#### The example GERONIMO directory structure:\r +\r +```shell\r +GERONIMO\r +├── database\r +│   ├── GCA_000091205.1_ASM9120v1_genomic\r +│   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   └── ...\r +├── env\r +├── models\r +├── model_to_build\r +├── results\r +│   ├── cmdBLAST\r +│   │   ├── MRP\r +│   │   │   ├── GCA_000091205.1_ASM9120v1_genomic\r +│   │   │   │   ├── extended\r +│   │   │   │   └── filtered\r +│   │   │   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   │   │   │   ├── extended\r +│   │   │   │   └── filtered\r +│   │   │   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   │   │   │   ├── extended\r +│   │   │   │   └── filtered\r +│   │   │   └── ...\r +│   │   ├── SRP\r +│   │   │   ├── 
GCA_000091205.1_ASM9120v1_genomic\r +│   │   │   │   ├── extended\r +│   │   │   │   └── filtered\r +│   │   │   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   │   │   │   ├── extended\r +│   │   │   │   └── filtered\r +│   │   │   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   │   │   │   ├── extended\r +│   │   │   │   └── filtered\r +│   │   │   └── ...\r +│   │   ├── ...\r +│   ├── infernal\r +│   │   ├── MRP\r +│   │   │   ├── GCA_000091205.1_ASM9120v1_genomic\r +│   │   │   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   │   │   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   │   │   ├── ...\r +│   │   ├── SRP\r +│   │   │   ├── GCA_000091205.1_ASM9120v1_genomic\r +│   │   │   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   │   │   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   │   │   ├── ...\r +│   ├── plots\r +│   ├── raw_infernal\r +│   │   ├── MRP\r +│   │   │   ├── GCA_000091205.1_ASM9120v1_genomic\r +│   │   │   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   │   │   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   │   │   ├── ...\r +│   │   ├── SRP\r +│   │   │   ├── GCA_000091205.1_ASM9120v1_genomic\r +│   │   │   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   │   │   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   │   │   ├── ...\r +│   └── summary\r +│   ├── GCA_000091205.1_ASM9120v1_genomic\r +│   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   ├── ...\r +├── scripts\r +├── taxonomy\r +└── temp\r +```\r +\r +## GERONIMO applicability\r +\r +### Expanding the evolutionary context\r +To add new genomes or database queries to an existing analysis, please follow the instructions:\r +1) Rename the `list_of_genomes.txt` file to `previous_list_of_genomes.txt` or any other preferred name.\r +2) Modify the `config.yaml` file by replacing the previous database query with the new one.\r +3) Delete:\r + - `summary_table.xlsx`, `part_summary_table.csv`, `summary_table_models.xlsx` files located in the `GERONIMO\\results` directory\r 
+ - `.create_genome_list.touch` file\r +4) Run GERONIMO to calculate new results using the command:\r + ```shell\r + snakemake -s GERONIMO.sm --cores --use-conda results/summary_table.xlsx\r + ```\r +5) Once the new results are generated, reviewing them before merging them with the original results is recommended.\r +6) Copy the contents of the `previous_list_of_genomes.txt` file and paste them into the current `list_of_genomes.txt`.\r +7) Delete:\r + - `summary_table.xlsx` located in the `GERONIMO\\results` directory\r + - `.create_genome_list.touch` file\r +8) Run GERONIMO to merge the results from both analyses using the command:\r + ```shell\r + snakemake -s GERONIMO.sm --cores 1 --use-conda results/summary_table.xlsx\r + ```\r +\r +### Incorporating new covariance models into existing analysis\r +1) Copy the new covariance model to `GERONIMO/models`\r +2) Modify the `config.yaml` file by adding the name of the new model to the line `models: [...]`\r +3) Run GERONIMO to see the updated analysis outcome\r +\r +### Building a new covariance model\r +With GERONIMO, building a new covariance model from multiple sequence alignment in the `.stk` format is possible. \r +\r +To do so, simply paste `.stk` file to `GERONIMO/models_to_build` and paste the name of the new covariance model to `config.yaml` file to the line `models: [""]`\r +\r +and run GERONIMO.\r +\r +\r +## Questions & Answers\r +\r +### How to specify the database query?\r +- Visit the [NCBI Assemblies] website. \r +- Follow the instruction on the graphic below:\r +\r +[NCBI Assemblies]: https://www.ncbi.nlm.nih.gov/assembly/?term=\r +\r +### WSL: problem with creating `snakemake_env`\r +In the case of an error similar to the one below:\r +> CondaError: Unable to create prefix directory '/mnt/c/Windows/system32/env_snakemake'.\r +> Check that you have sufficient permissions. 
\r + \r +You might try to delete the cache with: `rm -r ~/.cache/` and try again.\r +\r +### When `snakemake` does not seem to be installed properly\r +In the case of the following error:\r +> Command 'snakemake' not found ...\r +\r +Check whether the `env_snakemake` is activated.\r +> It should result in a change from (base) to (env_snakemake) before your login name in the command line window.\r +\r +If you still see `(base)` before your login name, please try to activate the environment with conda:\r +`conda activate env_snakemake`\r +\r +\r +Please note that you might need to specify the full path to the `env_snakemake`, like /home/your user name/env_snakemake\r +\r +### How to browse GERONIMO results obtained in WSL?\r +You can easily access the results obtained on WSL from your Windows environment by opening `File Explorer` and pasting the following line into the search bar: `\\\\wsl.localhost\\Ubuntu\\home\\`. This will reveal a folder with your username, as specified during the configuration of your Ubuntu system. To locate the GERONIMO results, simply navigate to the folder with your username and then to the `home` folder. (`\\\\wsl.localhost\\Ubuntu\\home\\\\home\\GERONIMO`)\r +\r +### GERONIMO occupies a lot of storage space\r +Through genome downloads, GERONIMO can potentially consume storage space, rapidly leading to a shortage. Currently, downloading genomes is an essential step for optimal GERONIMO performance.\r +\r +Regrettably, if the analysis is rerun without the `/database` folder, it will result in the need to redownload genomes, which is a highly time-consuming process.\r +\r +Nevertheless, if you do not intend to repeat the analysis and have no requirement for additional genomes or models, you are welcome to retain your results tables and plots while removing the remaining files.\r +\r +It is strongly advised against using local machines for extensive analyses. 
If you lack access to external storage space, it is recommended to divide the analysis into smaller segments, which can be later merged, as explained in the section titled `Expanding the evolutionary context`.\r +\r +Considering this limitation, I am currently working on implementing a solution that will help circumvent the need for redundant genome downloads without compromising GERONIMO performance in the future.\r +\r +You might consider deleting the `.snakemake` folder to free up storage space. However, please note that deleting this folder will require the reinstallation of GERONIMO dependencies when the analysis is rerun.\r +\r +## License\r +Copyright (c) 2023 Agata M. Kilar\r +\r +Permission is hereby granted, free of charge, to any person obtaining a copy\r +of this software and associated documentation files (the "Software"), to deal\r +in the Software without restriction, including without limitation the rights\r +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r +copies of the Software, and to permit persons to whom the Software is\r +furnished to do so, subject to the following conditions:\r +\r +The above copyright notice and this permission notice shall be included in all\r +copies or substantial portions of the Software.\r +\r +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r +SOFTWARE.\r +\r +## Contact\r +mgr inż. 
Agata Magdalena Kilar, PhD (agata.kilar@ceitec.muni.cz)\r +\r +""" ; + schema1:image ; + schema1:keywords "Bioinformatics, Snakemake, rna" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "GERONIMO" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/547?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-06-19T11:29:55.009783" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/bacterial_genome_annotation" ; + schema1:license "GPL-3.0-or-later" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "bacterial_genome_annotation/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis of ribosome profiling, or Ribo-seq (also named ribosome footprinting)" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1016?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/riboseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/riboseq" ; + schema1:sdDatePublished "2024-07-12 13:19:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1016/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12214 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:09Z" ; + schema1:dateModified "2024-06-11T12:55:09Z" ; + schema1:description "Analysis of ribosome profiling, or Ribo-seq (also named ribosome footprinting)" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1016?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/riboseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1016?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "16S rRNA amplicon sequencing analysis workflow using QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-07-12 13:18:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5201 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:38Z" ; + schema1:dateModified "2024-06-11T12:54:38Z" ; + schema1:description "16S rRNA amplicon sequencing analysis workflow using QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/978?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/demultiplex" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/demultiplex" ; + schema1:sdDatePublished "2024-07-12 13:21:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/978/ro_crate?version=8" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10420 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/978?version=7" ; + schema1:keywords "bases2fastq, bcl2fastq, demultiplexing, elementbiosciences, illumina" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/demultiplex" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/978?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "The workflow takes raw ONT reads and trimmed Illumina WGS paired reads collections, and the estimated genome size and Max depth (both calculated from WF1) to run Flye and subsequently polish the assembly with HyPo. It produces collapsed assemblies (unpolished and polished) and runs all the QC analyses (gfastats, BUSCO, and Merqury)." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/788?version=1" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/galaxy" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ERGA ONT+Illumina Assembly+QC Flye+HyPo v2403 (WF2)" ; + schema1:sdDatePublished "2024-07-12 13:23:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/788/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 283740 ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/2.Contigging/pics/Cont_ONTflye_2403.png" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 55231 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-11T12:41:58Z" ; + schema1:dateModified "2024-03-11T12:41:58Z" ; + schema1:description "The workflow takes raw ONT reads and trimmed Illumina WGS paired reads collections, and the estimated genome size and Max depth (both calculated from WF1) to run Flye and subsequently polish the assembly with HyPo. It produces collapsed assemblies (unpolished and polished) and runs all the QC analyses (gfastats, BUSCO, and Merqury)." ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "name:ERGA, name:ASSEMBLY+QC, name:ONT, name:ILLUMINA" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ERGA ONT+Illumina Assembly+QC Flye+HyPo v2403 (WF2)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/2.Contigging/Galaxy-Workflow-ERGA_ONT_Illumina_Assembly_QC_Flye_HyPo_v2403_(WF2).ga" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=14" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-07-12 13:18:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=14" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11981 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:02Z" ; + schema1:dateModified "2024-06-11T12:55:02Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=14" ; + schema1:version 14 . + + a schema1:Dataset ; + schema1:datePublished "2021-07-26T10:22:23.315184" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-variation-reporting" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:sdDatePublished "2021-10-27 15:45:07 +0100" ; + schema1:softwareVersion "v0.1.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.6" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, related to the Galaxy training tutorial "[Champs blocs](https://training.galaxyproject.org/training-material/topics/ecology/tutorials/champs-blocs/tutorial.html)" .\r +\r +This workflow allows to produce Visual Rollover Indicator and dissimilarity as diversity indices on boulder fields.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/661?version=1" ; + schema1:isBasedOn "https://ecology.usegalaxy.eu/u/ylebras/w/workflow-constructed-from-history-champs-bloc-1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Boulder fields indicators" ; + schema1:sdDatePublished "2024-07-12 13:26:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/661/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8647 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-10T08:55:14Z" ; + schema1:dateModified "2023-11-10T08:55:14Z" ; + schema1:description """Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, related to the Galaxy training tutorial "[Champs blocs](https://training.galaxyproject.org/training-material/topics/ecology/tutorials/champs-blocs/tutorial.html)" .\r +\r +This workflow allows to produce Visual Rollover Indicator and dissimilarity as diversity indices on boulder fields.\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Boulder fields indicators" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/661?version=1" ; + schema1:version 1 ; + ns1:input , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# ROIforMSI\r +Source codes for manuscript "Delineating Regions-of-interest for Mass Spectrometry Imaging by Multimodally Corroborated Spatial Segmentation"\r +\r +\r +"ExampleWorkflow.ipynb" is a methods document to demonstrate the workflow of our multimodal fusion-based spatial segmentation.\r +\r +\r +"Utilities.py" contains all the tools to implement our method.\r +\r +\r +"gui.py" and "registration_gui.py" are files to implement linear and nonlinear registration.\r +\r +(Licence: GPL-3)""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.437.1" ; + schema1:isBasedOn "https://github.com/guoang4github/ROIforMSI/" ; + schema1:license "AGPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Delineating Regions-of-interest for Mass Spectrometry Imaging by Multimodally Corroborated Spatial Segmentation" ; + schema1:sdDatePublished "2024-07-12 13:34:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/437/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2007984 ; + schema1:creator , + ; + schema1:dateCreated "2023-02-16T00:34:18Z" ; + schema1:dateModified "2023-03-08T23:57:45Z" ; + schema1:description """# ROIforMSI\r +Source codes for manuscript "Delineating Regions-of-interest for Mass Spectrometry Imaging by Multimodally Corroborated Spatial Segmentation"\r +\r +\r +"ExampleWorkflow.ipynb" is a methods document to demonstrate the workflow of our multimodal fusion-based spatial segmentation.\r +\r +\r +"Utilities.py" contains all the tools to implement our method.\r +\r +\r +"gui.py" and "registration_gui.py" are files to implement linear and nonlinear registration.\r +\r +(Licence: GPL-3)""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/AGPL-3.0" ; + schema1:name "Delineating Regions-of-interest for Mass Spectrometry Imaging by Multimodally Corroborated Spatial Segmentation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/437?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 277163 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "CAGE-seq pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/969?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/cageseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/cageseq" ; + schema1:sdDatePublished "2024-07-12 13:22:13 +0100" ; + schema1:url "https://workflowhub.eu/workflows/969/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5423 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:44Z" ; + schema1:dateModified "2024-06-11T12:54:44Z" ; + schema1:description "CAGE-seq pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/969?version=2" ; + schema1:keywords "cage, cage-seq, cageseq-data, gene-expression, rna" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/cageseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/969?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:description "Preprocessing of raw SARS-CoV-2 reads. More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/2?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genomics - Read pre-processing" ; + schema1:sdDatePublished "2024-07-12 13:37:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/2/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 7753 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 41191 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T10:20:13Z" ; + schema1:dateModified "2023-01-16T13:39:40Z" ; + schema1:description "Preprocessing of raw SARS-CoV-2 reads. 
More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Genomics - Read pre-processing" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/2?version=1" ; + schema1:version 1 ; + ns1:input , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 12854 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "This workflow is created as part of a tutorial listed on GTN. The workflow shows the steps in human copy number variance detection using the Contrl_FREEC tool. " ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.676.1" ; + schema1:isBasedOn "https://usegalaxy.eu/u/khaled_jumah/w/somatic-variant-discovery-from-wes-data-using-control-freec" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Somatic-Variant-Discovery-from-WES-Data-Using-Control-FREEC" ; + schema1:sdDatePublished "2024-07-12 13:26:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/676/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 70117 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2023-11-23T17:19:43Z" ; + schema1:dateModified "2024-04-03T15:20:03Z" ; + schema1:description "This workflow is created as part of a tutorial listed on GTN. The workflow shows the steps in human copy number variance detection using the Contrl_FREEC tool. 
" ; + schema1:keywords "hCNV, variant-analysis, MIRACUM" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Somatic-Variant-Discovery-from-WES-Data-Using-Control-FREEC" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/676?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + ; + ns1:output , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=11" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-07-12 13:18:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=11" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9874 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:02Z" ; + schema1:dateModified "2024-06-11T12:55:02Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=11" ; + schema1:version 11 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=20" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-07-12 13:19:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=20" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 18609 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:16Z" ; + schema1:dateModified "2024-06-11T12:55:16Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=20" ; + schema1:version 20 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-03T16:25:31.059727" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.17" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Calculating and visualizing marine biodiversity indicators" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/758?version=1" ; + schema1:license "CC-BY-SA-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Calculating and visualizing OBIS marine biodiversity indicators" ; + schema1:sdDatePublished "2024-07-12 13:24:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/758/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5916 ; + schema1:creator ; + schema1:dateCreated "2024-02-15T11:58:49Z" ; + schema1:dateModified "2024-02-15T11:58:49Z" ; + schema1:description "Calculating and visualizing marine biodiversity indicators" ; + schema1:isPartOf ; + schema1:keywords "Ecology" ; + schema1:license "https://spdx.org/licenses/CC-BY-SA-4.0" ; + schema1:name "Calculating and visualizing OBIS marine biodiversity indicators" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/758?version=1" ; + schema1:version 1 ; + ns1:input . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for processing of 10xGenomics single cell rnaseq data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1021?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/scrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/scrnaseq" ; + schema1:sdDatePublished "2024-07-12 13:18:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1021/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5942 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:17Z" ; + schema1:dateModified "2024-06-11T12:55:17Z" ; + schema1:description "Pipeline for processing of 10xGenomics single cell rnaseq data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:keywords "10x-genomics, 10xgenomics, alevin, bustools, Cellranger, kallisto, rna-seq, single-cell, star-solo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/scrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1021?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Genome-wide alternative splicing analysis v.2" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/482?version=5" ; + schema1:license "CC-BY-NC-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genome-wide alternative splicing analysis v.2" ; + schema1:sdDatePublished "2024-07-12 13:33:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/482/ro_crate?version=5" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 214419 . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 30115 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 119677 ; + schema1:creator ; + schema1:dateCreated "2023-06-11T12:01:08Z" ; + schema1:dateModified "2023-06-11T12:01:24Z" ; + schema1:description "Genome-wide alternative splicing analysis v.2" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/482?version=6" ; + schema1:keywords "Transcriptomics, Alternative splicing, isoform switching" ; + schema1:license "https://spdx.org/licenses/CC-BY-NC-4.0" ; + schema1:name "Genome-wide alternative splicing analysis v.2" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/482?version=5" ; + schema1:version 5 ; + ns1:input , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1025?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/taxprofiler" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/taxprofiler" ; + schema1:sdDatePublished "2024-07-12 13:18:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1025/ro_crate?version=10" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15939 ; + schema1:creator , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:20Z" ; + schema1:dateModified "2024-06-11T12:55:20Z" ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1025?version=10" ; + schema1:keywords "Classification, illumina, long-reads, Metagenomics, microbiome, nanopore, pathogen, Profiling, shotgun, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/taxprofiler" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1025?version=10" ; + schema1:version 10 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-09T09:28:52+00:00" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:mainEntity . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:name "main" ; + schema1:programmingLanguage . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=18" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-07-12 13:21:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=18" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12737 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:50Z" ; + schema1:dateModified "2024-06-11T12:54:50Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=18" ; + schema1:version 18 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """This is a Galaxy workflow for performing molecular dynamics simulations and analysis with flavivirus helicases bound to a ligand/drug molecule. 
\r +The associated input files can be found at:\r +https://zenodo.org/records/7493015\r +The associated output files can be found at:\r +https://zenodo.org/records/7850935""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.761.1" ; + schema1:isBasedOn "https://zenodo.org/records/7493015" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for flavivirushelicase_proteindrugcomplex" ; + schema1:sdDatePublished "2024-07-12 13:24:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/761/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 72937 ; + schema1:creator ; + schema1:dateCreated "2024-02-16T17:23:53Z" ; + schema1:dateModified "2024-03-02T16:44:04Z" ; + schema1:description """This is a Galaxy workflow for performing molecular dynamics simulations and analysis with flavivirus helicases bound to a ligand/drug molecule. \r +The associated input files can be found at:\r +https://zenodo.org/records/7493015\r +The associated output files can be found at:\r +https://zenodo.org/records/7850935""" ; + schema1:keywords "helicase, rna virus, zika, dengue, west nile, NS3, molecular dynamics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "flavivirushelicase_proteindrugcomplex" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/761?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-11-16T14:32:18.203562" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A Nanostring nCounter analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1003?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/nanostring" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nanostring" ; + schema1:sdDatePublished "2024-07-12 13:20:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1003/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10131 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:06Z" ; + schema1:dateModified "2024-06-11T12:55:06Z" ; + schema1:description "A Nanostring nCounter analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1003?version=4" ; + schema1:keywords "nanostring, nanostringnorm" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nanostring" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1003?version=5" ; + schema1:version 5 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + 
schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.129.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (Fpocket)" ; + schema1:sdDatePublished "2024-07-12 13:33:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/129/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 42751 ; + schema1:creator , + , + ; + schema1:dateCreated "2021-06-29T09:24:27Z" ; + schema1:dateModified "2022-09-15T11:20:56Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for 
the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/129?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (Fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_virtual-screening/0ab8d1d3410c67db6a5a25d3dde6f3e0303af08f/biobb_wf_virtual-screening/notebooks/fpocket/wf_vs_fpocket.ipynb" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.131.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/131/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 77840 ; + schema1:creator , + ; + schema1:dateCreated "2022-09-15T12:31:30Z" ; + schema1:dateModified "2023-01-16T13:50:20Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/131?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://raw.githubusercontent.com/bioexcel/biobb_wf_amber_md_setup/master/biobb_wf_amber_md_setup/notebooks/mdsetup_lig/biobb_amber_complex_setup_notebook.ipynb" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 
2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.129.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (Fpocket)" ; + schema1:sdDatePublished "2024-07-12 13:33:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/129/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 43287 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-09-15T11:21:17Z" ; + schema1:dateModified "2023-01-16T13:50:19Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/129?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (Fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://raw.githubusercontent.com/bioexcel/biobb_wf_virtual-screening/master/biobb_wf_virtual-screening/notebooks/fpocket/wf_vs_fpocket.ipynb" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/303?version=2" ; + schema1:isBasedOn "https://github.com/DimitraPanou/scRNAseq-cwl.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for seurat scRNA-seq" ; + schema1:sdDatePublished "2024-07-12 13:35:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/303/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6555 ; + schema1:dateCreated "2022-04-14T13:28:34Z" ; + schema1:dateModified "2023-01-16T13:59:13Z" ; + schema1:description "" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/303?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "seurat scRNA-seq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/303?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 56489 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Galaxy-E (ecology.usegalaxy.eu) workflow to calculate species presence / absence, community metrics and compute generalized linear models to identify effects and significativity of these effects on biodiversity." ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/49?version=2" ; + schema1:isBasedOn "https://ecology.usegalaxy.eu/u/ylebras/w/workflow-constructed-from-history-population-and-community-metrics-calculation-from-biodiversity-data-1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Population and community metrics calculation from Biodiversity data" ; + schema1:sdDatePublished "2024-07-12 13:37:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/49/ro_crate?version=2" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 2434 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9837 ; + schema1:creator , + ; + schema1:dateCreated "2020-07-24T13:00:50Z" ; + schema1:dateModified "2023-01-16T13:44:17Z" ; + schema1:description "Galaxy-E (ecology.usegalaxy.eu) workflow to calculate species presence / absence, community metrics and compute generalized linear models to identify effects and significativity of these effects on biodiversity." 
; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/49?version=1" ; + schema1:keywords "Community_metrics, Presence_absence, GLM, Ecology, Biodiversity, Species abundance, Modeling, Statistics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Population and community metrics calculation from Biodiversity data" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/49?version=2" ; + schema1:version 2 ; + ns1:input , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 6739 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Coprolite Identification" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/974?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/coproid" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/coproid" ; + schema1:sdDatePublished "2024-07-12 13:22:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/974/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4839 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:45Z" ; + schema1:dateModified "2024-06-11T12:54:45Z" ; + schema1:description "Coprolite Identification" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/974?version=2" ; + schema1:keywords "adna, ancient-dna, coprolite, microbiome" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/coproid" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/974?version=3" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=12" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-07-12 13:18:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=12" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10195 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:02Z" ; + schema1:dateModified "2024-06-11T12:55:02Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=12" ; + schema1:version 12 . + + a schema1:Dataset ; + schema1:datePublished "2021-12-06T14:50:19.556018" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/gromacs-mmgbsa" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "gromacs-mmgbsa/main" ; + schema1:sdDatePublished "2021-12-07 03:00:58 +0000" ; + schema1:softwareVersion "v0.1.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Simple bacterial assembly and annotation" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/966?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/bacass" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bacass" ; + schema1:sdDatePublished "2024-07-12 13:22:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/966/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11118 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:43Z" ; + schema1:dateModified "2024-06-11T12:54:43Z" ; + schema1:description "Simple bacterial assembly and annotation" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/966?version=7" ; + schema1:keywords "Assembly, bacterial-genomes, denovo, denovo-assembly, genome-assembly, hybrid-assembly, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bacass" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/966?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """We present an R script that describes the workflow for analysing honey bee (_Apis mellifera_) wing shape. It is based on a dataset of wing images and landmark coordinates available at Zenodo: https://doi.org/10.5281/zenodo.8128010. \r +The dataset can be used as a reference for the identification of local bees from southern Kazakhstan, which most probably belong to the subspecies _Apis mellifera pomonella_. It was compared with data from Nawrocka et al. (2018), available at Zenodo: https://doi.org/10.5281/zenodo.7567336. 
""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.559.1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Apis-mellifera-wings-KZ: A workflow for morphometric identification of honey bees from Kazakhstan" ; + schema1:sdDatePublished "2024-07-12 13:27:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/559/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 26894 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1083489 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2023-08-30T06:57:48Z" ; + schema1:dateModified "2023-08-30T07:06:42Z" ; + schema1:description """We present an R script that describes the workflow for analysing honey bee (_Apis mellifera_) wing shape. It is based on a dataset of wing images and landmark coordinates available at Zenodo: https://doi.org/10.5281/zenodo.8128010. \r +The dataset can be used as a reference for the identification of local bees from southern Kazakhstan, which most probably belong to the subspecies _Apis mellifera pomonella_. It was compared with data from Nawrocka et al. (2018), available at Zenodo: https://doi.org/10.5281/zenodo.7567336. """ ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Apis-mellifera-wings-KZ: A workflow for morphometric identification of honey bees from Kazakhstan" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/559?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "CAGE-seq pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/969?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/cageseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/cageseq" ; + schema1:sdDatePublished "2024-07-12 13:22:12 +0100" ; + schema1:url "https://workflowhub.eu/workflows/969/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5423 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:44Z" ; + schema1:dateModified "2024-06-11T12:54:44Z" ; + schema1:description "CAGE-seq pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/969?version=2" ; + schema1:keywords "cage, cage-seq, cageseq-data, gene-expression, rna" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/cageseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/969?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/520?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for SARS-CoV-2 PostProcessing" ; + schema1:sdDatePublished "2024-07-12 13:32:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/520/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11463 ; + schema1:dateCreated "2023-06-28T12:12:55Z" ; + schema1:dateModified "2023-06-28T12:12:55Z" ; + schema1:description "" ; + schema1:keywords "SARS-CoV-2, SANBI" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "SARS-CoV-2 PostProcessing" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/520?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=11" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-07-12 13:22:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=11" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8901 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:12Z" ; + schema1:dateModified "2024-06-11T12:55:12Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=11" ; + schema1:version 11 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "RNA-RNA interactome analysis using ChiRA tools suite. The aligner used is BWA-MEM." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/66?version=1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for RNA-RNA interactome analysis using BWA-MEM" ; + schema1:sdDatePublished "2024-07-12 13:37:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/66/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 29083 ; + schema1:creator ; + schema1:dateCreated "2020-11-03T19:46:07Z" ; + schema1:dateModified "2023-02-13T14:06:45Z" ; + schema1:description "RNA-RNA interactome analysis using ChiRA tools suite. The aligner used is BWA-MEM." ; + schema1:keywords "rna, Transcriptomics" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "RNA-RNA interactome analysis using BWA-MEM" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/66?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """SAMBA is a FAIR scalable workflow integrating, into a unique tool, state-of-the-art bioinformatics and statistical methods to conduct reproducible eDNA analyses using Nextflow. SAMBA starts processing by verifying integrity of raw reads and metadata. Then all bioinformatics processing is done using commonly used procedure (QIIME 2 and DADA2) but adds new steps relying on dbOTU3 and microDecon to build high quality ASV count tables. 
Extended statistical analyses are also performed. Finally, SAMBA produces a full dynamic HTML report including resources used, commands executed, intermediate results, statistical analyses and figures.\r +\r +The SAMBA pipeline can run tasks across multiple compute infrastructures in a very portable manner. It comes with singularity containers making installation trivial and results highly reproducible.""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.156.1" ; + schema1:isBasedOn "https://github.com/ifremer-bioinformatics/samba" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for SAMBA: Standardized and Automated MetaBarcoding Analyses workflow" ; + schema1:sdDatePublished "2024-07-12 13:36:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/156/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 82198 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2021-09-10T13:40:28Z" ; + schema1:dateModified "2023-01-16T13:52:16Z" ; + schema1:description """SAMBA is a FAIR scalable workflow integrating, into a unique tool, state-of-the-art bioinformatics and statistical methods to conduct reproducible eDNA analyses using Nextflow. SAMBA starts processing by verifying integrity of raw reads and metadata. Then all bioinformatics processing is done using commonly used procedure (QIIME 2 and DADA2) but adds new steps relying on dbOTU3 and microDecon to build high quality ASV count tables. Extended statistical analyses are also performed. Finally, SAMBA produces a full dynamic HTML report including resources used, commands executed, intermediate results, statistical analyses and figures.\r +\r +The SAMBA pipeline can run tasks across multiple compute infrastructures in a very portable manner. 
It comes with singularity containers making installation trivial and results highly reproducible.""" ; + schema1:image ; + schema1:keywords "Metabarcoding, Nextflow, 16S, 18S, eDNA" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "SAMBA: Standardized and Automated MetaBarcoding Analyses workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/156?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 398906 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Metagenomic dataset taxonomic classification using kraken2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/101?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for 1: Plant virus detection with kraken2 (PE)" ; + schema1:sdDatePublished "2024-07-12 13:36:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/101/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12810 ; + schema1:dateCreated "2021-02-04T09:07:38Z" ; + schema1:dateModified "2023-02-13T14:06:45Z" ; + schema1:description "Metagenomic dataset taxonomic classification using kraken2" ; + schema1:keywords "Virology, kraken" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "1: Plant virus detection with kraken2 (PE)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/101?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + , + , + . 
+ + a schema1:Dataset ; + schema1:datePublished "2021-12-20T17:04:47.620664" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/protein-ligand-complex-parameterization" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "protein-ligand-complex-parameterization/main" ; + schema1:sdDatePublished "2021-12-21 03:01:01 +0000" ; + schema1:softwareVersion "v0.1.2" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "An example workflow for the Specimen Data Refinery tool, allowing an individual tool to be used" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.374.1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for DLA-Collections-test" ; + schema1:sdDatePublished "2024-07-12 13:35:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/374/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13871 ; + schema1:creator , + ; + schema1:dateCreated "2022-07-08T13:04:19Z" ; + schema1:dateModified "2023-01-16T14:01:59Z" ; + schema1:description "An example workflow for the Specimen Data Refinery tool, allowing an individual tool to be used" ; + schema1:keywords "Default-SDR, multi-specimen-input, collections, validated-2022-06-29" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "DLA-Collections-test" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/374?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """This repository contains the workflow used to find and characterize the HI sources in the data cube of the SKA Data Challenge 2. It was developed to process a simulated [SKA data cube](https://sdc2.astronomers.skatelescope.org/sdc2-challenge/data) data cube, but can be adapted for clean HI data cubes from other radio observatories.\r +\r +The workflow is managed and executed using snakemake workflow management system. It uses [https://spectral-cube.readthedocs.io/en/latest/](http://) based on [https://dask.org/](http://) parallelization tool and [https://www.astropy.org/](http://) suite to divide the large cube in smaller pieces. On each of the subcubes, we execute [https://github.com/SoFiA-Admin/SoFiA-2](http://) for masking the subcubes, find sources and characterize their properties. Finally, the individual catalogs are cleaned, concatenated into a single catalog, and duplicates from the overlapping regions are eliminated. Some diagnostic plots are produced using Jupyter notebook.\r +\r +The documentation can be found in the [Documentation page](https://hi-friends-sdc2.readthedocs.io/en/latest/index.html). The workflow and the results can be cited in the [Zenodo record](https://doi.org/10.5281/zenodo.5167659).""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/141?version=1" ; + schema1:isBasedOn "https://github.com/HI-FRIENDS-SDC2/hi-friends" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for HI-FRIENDS HI data cube source finding and characterization" ; + schema1:sdDatePublished "2024-07-12 13:36:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/141/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 590 ; + schema1:dateCreated "2021-08-09T20:25:49Z" ; + schema1:dateModified "2023-01-16T13:51:30Z" ; + schema1:description """This repository contains the workflow used to find and characterize the HI sources in the data cube of the SKA Data Challenge 2. It was developed to process a simulated [SKA data cube](https://sdc2.astronomers.skatelescope.org/sdc2-challenge/data) data cube, but can be adapted for clean HI data cubes from other radio observatories.\r +\r +The workflow is managed and executed using snakemake workflow management system. It uses [https://spectral-cube.readthedocs.io/en/latest/](http://) based on [https://dask.org/](http://) parallelization tool and [https://www.astropy.org/](http://) suite to divide the large cube in smaller pieces. On each of the subcubes, we execute [https://github.com/SoFiA-Admin/SoFiA-2](http://) for masking the subcubes, find sources and characterize their properties. Finally, the individual catalogs are cleaned, concatenated into a single catalog, and duplicates from the overlapping regions are eliminated. Some diagnostic plots are produced using Jupyter notebook.\r +\r +The documentation can be found in the [Documentation page](https://hi-friends-sdc2.readthedocs.io/en/latest/index.html). The workflow and the results can be cited in the [Zenodo record](https://doi.org/10.5281/zenodo.5167659).""" ; + schema1:image ; + schema1:keywords "SKA, radio interferometry" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "HI-FRIENDS HI data cube source finding and characterization" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/141?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 8546 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Compare DNA/RNA/protein sequences on k-mer content" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/994?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/kmermaid" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/kmermaid" ; + schema1:sdDatePublished "2024-07-12 13:21:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/994/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6459 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:58Z" ; + schema1:dateModified "2024-06-11T12:54:58Z" ; + schema1:description "Compare DNA/RNA/protein sequences on k-mer content" ; + schema1:keywords "k-mer, kmer, kmer-counting, kmer-frequency-count" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/kmermaid" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/994?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. 
Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. 
\r +- At the **biological level**, it is important to link observed **conformational ensembles**, to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDKe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.488.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_flexdyn/cwl" ; + schema1:license "other-open" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Protein conformational ensembles generation" ; + schema1:sdDatePublished "2024-07-12 13:33:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/488/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 30813 ; + schema1:creator , + ; + schema1:dateCreated "2023-05-31T13:51:17Z" ; + schema1:dateModified "2023-05-31T14:04:20Z" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. 
Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. 
\r +- At the **biological level**, it is important to link observed **conformational ensembles**, to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDKe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/488?version=1" ; + schema1:keywords "" ; + schema1:license "" ; + schema1:name "CWL Protein conformational ensembles generation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_flexdyn/cwl/workflow.cwl" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.132.4" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Amber Constant pH MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/132/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 54708 ; + schema1:creator , + ; + schema1:dateCreated "2023-07-26T09:38:15Z" ; + schema1:dateModified "2023-07-26T09:38:42Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/132?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Amber Constant pH MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://raw.githubusercontent.com/bioexcel/biobb_wf_amber_md_setup/master/biobb_wf_amber_md_setup/notebooks/mdsetup_ph/biobb_amber_CpHMD_notebook.ipynb" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + ; + schema1:description "This workflow generates binding scores that correlate well with binding affinities using an additional tool SuCOS Max, developed at Oxford University. More info can be found at https://covid19.galaxyproject.org/cheminformatics/" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/15?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Cheminformatics - SuCOS scoring" ; + schema1:sdDatePublished "2024-07-12 13:37:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/15/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 2468 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9469 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T14:50:31Z" ; + schema1:dateModified "2023-01-16T13:40:57Z" ; + schema1:description "This workflow generates binding scores that correlate well with binding affinities using an additional tool SuCOS Max, developed at Oxford University. More info can be found at https://covid19.galaxyproject.org/cheminformatics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Cheminformatics - SuCOS scoring" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/15?version=1" ; + schema1:version 1 ; + ns1:input , + . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 5544 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """

\r +

+VIRify

\r +

Sankey plot

\r +

VIRify is a recently developed pipeline for the detection, annotation, and taxonomic classification of viral contigs in metagenomic and metatranscriptomic assemblies. The pipeline is part of the repertoire of analysis services offered by MGnify. VIRify’s taxonomic classification relies on the detection of taxon-specific profile hidden Markov models (HMMs), built upon a set of 22,014 orthologous protein domains and referred to as ViPhOGs.

\r +

VIRify was implemented in CWL.

\r +

+What do I need?

\r +

The current implementation uses CWL version 1.2 dev+2. It was tested using Toil version 4.10 as the workflow engine and conda to manage the software dependencies.

\r +

+Docker - Singularity support

\r +

Soon…

\r +

+Setup environment

\r +
conda env create -f cwl/requirements/conda_env.yml\r
+conda activate viral_pipeline\r
+
\r +

+Basic execution

\r +
cd cwl/\r
+virify.sh -h\r
+
\r +

+A note about metatranscriptomes

\r +

Although VIRify has been benchmarked and validated with metagenomic data in mind, it is also possible to use this tool to detect RNA viruses in metatranscriptome assemblies (e.g. SARS-CoV-2). However, some additional considerations for this purpose are outlined below:
\r +1. Quality control: As for metagenomic data, a thorough quality control of the FASTQ sequence reads to remove low-quality bases, adapters and host contamination (if appropriate) is required prior to assembly. This is especially important for metatranscriptomes as small errors can further decrease the quality and contiguity of the assembly obtained. We have used TrimGalore for this purpose.

\r +

2. Assembly: There are many assemblers available that are appropriate for either metagenomic or single-species transcriptomic data. However, to our knowledge, there is no assembler currently available specifically for metatranscriptomic data. From our preliminary investigations, we have found that transcriptome-specific assemblers (e.g. rnaSPAdes) generate more contiguous and complete metatranscriptome assemblies compared to metagenomic alternatives (e.g. MEGAHIT and metaSPAdes).

\r +

3. Post-processing: Metatranscriptomes generate highly fragmented assemblies. Therefore, filtering contigs based on a set minimum length has a substantial impact in the number of contigs processed in VIRify. It has also been observed that the number of false-positive detections of VirFinder (one of the tools included in VIRify) is lower among larger contigs. The choice of a length threshold will depend on the complexity of the sample and the sequencing technology used, but in our experience any contigs <2 kb should be analysed with caution.

\r +

4. Classification: The classification module of VIRify depends on the presence of a minimum number and proportion of phylogenetically-informative genes within each contig in order to confidently assign a taxonomic lineage. Therefore, short contigs typically obtained from metatranscriptome assemblies remain generally unclassified. For targeted classification of RNA viruses (for instance, to search for Coronavirus-related sequences), alternative DNA- or protein-based classification methods can be used. Two of the possible options are: (i) using MashMap to screen the VIRify contigs against a database of RNA viruses (e.g. Coronaviridae) or (ii) using hmmsearch to screen the proteins obtained in the VIRify contigs against marker genes of the taxon of interest.

\r +

Contact us

\r +MGnify helpdesk""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/27?version=1" ; + schema1:isBasedOn "https://github.com/EBI-Metagenomics/emg-viral-pipeline/blob/master/virify.nf" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for VIRify" ; + schema1:sdDatePublished "2024-07-12 13:37:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/27/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 24762 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2020-06-08T10:29:47Z" ; + schema1:dateModified "2023-03-24T16:47:02Z" ; + schema1:description """

\r +

+VIRify

\r +

Sankey plot

\r +

VIRify is a recently developed pipeline for the detection, annotation, and taxonomic classification of viral contigs in metagenomic and metatranscriptomic assemblies. The pipeline is part of the repertoire of analysis services offered by MGnify. VIRify’s taxonomic classification relies on the detection of taxon-specific profile hidden Markov models (HMMs), built upon a set of 22,014 orthologous protein domains and referred to as ViPhOGs.

\r +

VIRify was implemented in CWL.

\r +

+What do I need?

\r +

The current implementation uses CWL version 1.2 dev+2. It was tested using Toil version 4.10 as the workflow engine and conda to manage the software dependencies.

\r +

+Docker - Singularity support

\r +

Soon…

\r +

+Setup environment

\r +
conda env create -f cwl/requirements/conda_env.yml\r
+conda activate viral_pipeline\r
+
\r +

+Basic execution

\r +
cd cwl/\r
+virify.sh -h\r
+
\r +

+A note about metatranscriptomes

\r +

Although VIRify has been benchmarked and validated with metagenomic data in mind, it is also possible to use this tool to detect RNA viruses in metatranscriptome assemblies (e.g. SARS-CoV-2). However, some additional considerations for this purpose are outlined below:
\r +1. Quality control: As for metagenomic data, a thorough quality control of the FASTQ sequence reads to remove low-quality bases, adapters and host contamination (if appropriate) is required prior to assembly. This is especially important for metatranscriptomes as small errors can further decrease the quality and contiguity of the assembly obtained. We have used TrimGalore for this purpose.

\r +

2. Assembly: There are many assemblers available that are appropriate for either metagenomic or single-species transcriptomic data. However, to our knowledge, there is no assembler currently available specifically for metatranscriptomic data. From our preliminary investigations, we have found that transcriptome-specific assemblers (e.g. rnaSPAdes) generate more contiguous and complete metatranscriptome assemblies compared to metagenomic alternatives (e.g. MEGAHIT and metaSPAdes).

\r +

3. Post-processing: Metatranscriptomes generate highly fragmented assemblies. Therefore, filtering contigs based on a set minimum length has a substantial impact in the number of contigs processed in VIRify. It has also been observed that the number of false-positive detections of VirFinder (one of the tools included in VIRify) is lower among larger contigs. The choice of a length threshold will depend on the complexity of the sample and the sequencing technology used, but in our experience any contigs <2 kb should be analysed with caution.

\r +

4. Classification: The classification module of VIRify depends on the presence of a minimum number and proportion of phylogenetically-informative genes within each contig in order to confidently assign a taxonomic lineage. Therefore, short contigs typically obtained from metatranscriptome assemblies remain generally unclassified. For targeted classification of RNA viruses (for instance, to search for Coronavirus-related sequences), alternative DNA- or protein-based classification methods can be used. Two of the possible options are: (i) using MashMap to screen the VIRify contigs against a database of RNA viruses (e.g. Coronaviridae) or (ii) using hmmsearch to screen the proteins obtained in the VIRify contigs against marker genes of the taxon of interest.

\r +

Contact us

\r +MGnify helpdesk""" ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "VIRify" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/27?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Workflow for gene set enrichment analsysis (GSEA) and co-expression analysis (WGCNA) on transcriptomics data to analyze pathways affected in Porto-Sinusoidal Vascular Disease." ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.1040.1" ; + schema1:isBasedOn "https://github.com/aish181095/PSVD-transcriptomics-workflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Porto-Sinusoidal Vascular Disease transcriptomics analysis workflow" ; + schema1:sdDatePublished "2024-07-12 13:18:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1040/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 14252 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4381 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T14:13:25Z" ; + schema1:dateModified "2024-06-28T11:33:39Z" ; + schema1:description "Workflow for gene set enrichment analsysis (GSEA) and co-expression analysis (WGCNA) on transcriptomics data to analyze pathways affected in Porto-Sinusoidal Vascular Disease." 
; + schema1:image ; + schema1:keywords "Bioinformatics, CWL, Transcriptomics, Workflows" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Porto-Sinusoidal Vascular Disease transcriptomics analysis workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1040?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """The tool provides a calculaiton of the power spectrum of Stochastic Gravitational Wave Backgorund (SGWB) from a first-order cosmological phase transition based on the parameterisations of Roper Pol et al. (2023). The power spectrum includes two components: from the sound waves excited by collisions of bubbles of the new phase and from the turbulence that is induced by these collisions.\r +\r +The cosmological epoch of the phase transition is described by the temperature, T_star and by the number(s) of relativistic degrees of freedom, g_star that should be specified as parameters.\r +\r +The phase transition itself is characterised by phenomenological parameters, alpha, beta_H and epsilon_turb, the latent heat, the ratio of the Hubble radius to the bubble size at percolation and the fraction of the energy otuput of the phase transition that goes into turbulence.\r +\r +The product Model spectrum outputs the power spectrum for fixed values of these parameters. 
The product Phase transition parameters reproduces the constraints on the phase transition parameters from the Pulsar Timing Array gravitational wave detectors, reported by Boyer & Neronov (2024), including the estimate of the cosmological magnetic field induced by turbulence.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.815.1" ; + schema1:isBasedOn "https://gitlab.renkulab.io/astronomy/mmoda/sgwb" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Stochastic Gravitational Wave Backgorund (SGWB) tool" ; + schema1:sdDatePublished "2024-07-12 13:23:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/815/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3955 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-04-18T12:44:15Z" ; + schema1:dateModified "2024-04-18T12:54:52Z" ; + schema1:description """The tool provides a calculaiton of the power spectrum of Stochastic Gravitational Wave Backgorund (SGWB) from a first-order cosmological phase transition based on the parameterisations of Roper Pol et al. (2023). 
The power spectrum includes two components: from the sound waves excited by collisions of bubbles of the new phase and from the turbulence that is induced by these collisions.\r +\r +The cosmological epoch of the phase transition is described by the temperature, T_star and by the number(s) of relativistic degrees of freedom, g_star that should be specified as parameters.\r +\r +The phase transition itself is characterised by phenomenological parameters, alpha, beta_H and epsilon_turb, the latent heat, the ratio of the Hubble radius to the bubble size at percolation and the fraction of the energy otuput of the phase transition that goes into turbulence.\r +\r +The product Model spectrum outputs the power spectrum for fixed values of these parameters. The product Phase transition parameters reproduces the constraints on the phase transition parameters from the Pulsar Timing Array gravitational wave detectors, reported by Boyer & Neronov (2024), including the estimate of the cosmological magnetic field induced by turbulence.\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/815?version=1" ; + schema1:keywords "astronomy" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Stochastic Gravitational Wave Backgorund (SGWB) tool" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/815?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.282.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_virtual_screening/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein-ligand Docking tutorial (Fpocket)" ; + 
schema1:sdDatePublished "2024-07-12 13:33:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/282/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2666 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T08:24:40Z" ; + schema1:dateModified "2023-04-14T08:26:20Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for 
Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/282?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein-ligand Docking tutorial (Fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_virtual_screening/python/workflow.py" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# Cancer Invasion Workflow\r +\r +## Table of Contents\r +\r +- [Cancer Invasion Workflow](#cancer-invasion-workflow)\r + - [Table of Contents](#table-of-contents)\r + - [Description](#description)\r + - [Contents](#contents)\r + - [Building Blocks](#building-blocks)\r + - [Workflows](#workflows)\r + - [Resources](#resources)\r + - [Tests](#tests)\r + - [Instructions](#instructions)\r + - [Local machine](#local-machine)\r + - [Requirements](#requirements)\r + - [Usage steps](#usage-steps)\r + - [MareNostrum 4](#marenostrum-4)\r + - [Requirements in MN4](#requirements-in-mn4)\r + - [Usage steps in MN4](#usage-steps-in-mn4)\r + - [Mahti or Puhti](#mahti-or-puhti)\r + - [Requirements](#requirements)\r + - [Steps](#steps)\r + - [License](#license)\r + - [Contact](#contact)\r +\r +## Description\r +\r +Uses multiscale simulations to describe cancer progression into invasion.\r +\r +The workflow uses the following building blocks, described in order of execution:\r +\r +1. 
PhysiBoSS-Invasion\r +\r +For details on individual workflow steps, see the user documentation for each building block.\r +\r +[`GitHub repository`]()\r +\r +\r +## Contents\r +\r +### Building Blocks\r +\r +The ``BuildingBlocks`` folder contains the script to install the\r +Building Blocks used in the Cancer Invasion Workflow.\r +\r +### Workflows\r +\r +The ``Workflow`` folder contains the workflows implementations.\r +\r +Currently contains the implementation using PyCOMPSs and Snakemake (in progress).\r +\r +### Resources\r +\r +The ``Resources`` folder contains dataset files.\r +\r +### Tests\r +\r +The ``Tests`` folder contains the scripts that run each Building Block\r +used in the workflow for the given small dataset.\r +They can be executed individually for testing purposes.\r +\r +## Instructions\r +\r +### Local machine\r +\r +This section explains the requirements and usage for the Cancer Invasion Workflow in a laptop or desktop computer.\r +\r +#### Requirements\r +\r +- [`permedcoe`](https://github.com/PerMedCoE/permedcoe) package\r +- [PyCOMPSs](https://pycompss.readthedocs.io/en/stable/Sections/00_Quickstart.html) / [Snakemake](https://snakemake.readthedocs.io/en/stable/)\r +- [Singularity](https://sylabs.io/guides/3.0/user-guide/installation.html)\r +\r +#### Usage steps\r +\r +1. Clone this repository:\r +\r + ```bash\r + git clone https://github.com/PerMedCoE/cancer-invasion-workflow\r + ```\r +\r +2. Install the Building Blocks required for the Cancer Invasion Workflow:\r +\r + ```bash\r + cancer-invasion-workflow/BuildingBlocks/./install_BBs.sh\r + ```\r +\r +3. 
Get the required Building Block images from the project [B2DROP](https://b2drop.bsc.es/index.php/f/444350):\r +\r + - Required images:\r + - PhysiCell-Invasion.singularity\r +\r + The path where these files are stored **MUST be exported in the `PERMEDCOE_IMAGES`** environment variable.\r +\r + > :warning: **TIP**: These containers can be built manually as follows (be patient since some of them may take some time):\r + 1. Clone the `BuildingBlocks` repository\r + ```bash\r + git clone https://github.com/PerMedCoE/BuildingBlocks.git\r + ```\r + 2. Build the required Building Block images\r + ```bash\r + cd BuildingBlocks/Resources/images\r + sudo singularity build PhysiCell-Invasion.sif PhysiCell-Invasion.singularity\r + cd ../../..\r + ```\r +\r +**If using PyCOMPSs in local PC** (make sure that PyCOMPSs in installed):\r +\r +4. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflows/PyCOMPSs\r + ```\r +\r +5. Execute `./run.sh`\r +\r +**If using Snakemake in local PC** (make sure that SnakeMake is installed):\r +\r +4. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflows/SnakeMake\r + ```\r +\r +5. Execute `./run.sh`\r + > **TIP**: If you want to run the workflow with a different dataset, please update the `run.sh` script setting the `dataset` variable to the new dataset folder and their file names.\r +\r +\r +### MareNostrum 4\r +\r +This section explains the requirements and usage for the Cancer Invasion Workflow in the MareNostrum 4 supercomputer.\r +\r +#### Requirements in MN4\r +\r +- Access to MN4\r +\r +All Building Blocks are already installed in MN4, and the Cancer Invasion Workflow available.\r +\r +#### Usage steps in MN4\r +\r +1. 
Load the `COMPSs`, `Singularity` and `permedcoe` modules\r +\r + ```bash\r + export COMPSS_PYTHON_VERSION=3\r + module load COMPSs/3.1\r + module load singularity/3.5.2\r + module use /apps/modules/modulefiles/tools/COMPSs/libraries\r + module load permedcoe\r + ```\r +\r + > **TIP**: Include the loading into your `${HOME}/.bashrc` file to load it automatically on the session start.\r +\r + This commands will load COMPSs and the permedcoe package which provides all necessary dependencies, as well as the path to the singularity container images (`PERMEDCOE_IMAGES` environment variable) and testing dataset (`CANCERINVASIONWORKFLOW_DATASET` environment variable).\r +\r +2. Get a copy of the pilot workflow into your desired folder\r +\r + ```bash\r + mkdir desired_folder\r + cd desired_folder\r + get_cancerinvasionworkflow\r + ```\r +\r +3. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflow/PyCOMPSs\r + ```\r +\r +4. Execute `./launch.sh`\r +\r + This command will launch a job into the job queuing system (SLURM) requesting 2 nodes (one node acting half master and half worker, and other full worker node) for 20 minutes, and is prepared to use the singularity images that are already deployed in MN4 (located into the `PERMEDCOE_IMAGES` environment variable). It uses the dataset located into `../../Resources/data` folder.\r +\r + > :warning: **TIP**: If you want to run the workflow with a different dataset, please edit the `launch.sh` script and define the appropriate dataset path.\r +\r + After the execution, a `results` folder will be available with with Cancer Invasion Workflow results.\r +\r +### Mahti or Puhti\r +\r +This section explains how to run the Cancer Invasion workflow on CSC supercomputers using SnakeMake.\r +\r +#### Requirements\r +\r +- Install snakemake (or check if there is a version installed using `module spider snakemake`)\r +- Install workflow, using the same steps as for the local machine. 
With the exception that containers have to be built elsewhere.\r +\r +#### Steps\r +\r +\r +1. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflow/SnakeMake\r + ```\r +\r +2. Edit `launch.sh` with the correct partition, account, and resource specifications. \r +\r +3. Execute `./launch.sh`\r +\r + > :warning: Snakemake provides a `--cluster` flag, but this functionality should be avoided as it's really not suited for HPC systems.\r +\r +## License\r +\r +[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\r +\r +## Contact\r +\r +\r +\r +This software has been developed for the [PerMedCoE project](https://permedcoe.eu/), funded by the European Commission (EU H2020 [951773](https://cordis.europa.eu/project/id/951773)).\r +\r +![](https://permedcoe.eu/wp-content/uploads/2020/11/logo_1.png "PerMedCoE")\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/477?version=1" ; + schema1:isBasedOn "https://github.com/PerMedCoE/cancer-invasion-workflow" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PerMedCoE Cancer Invasion" ; + schema1:sdDatePublished "2024-07-12 13:33:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/477/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 927 ; + schema1:dateCreated "2023-05-23T12:10:39Z" ; + schema1:dateModified "2023-05-23T12:34:23Z" ; + schema1:description """# Cancer Invasion Workflow\r +\r +## Table of Contents\r +\r +- [Cancer Invasion Workflow](#cancer-invasion-workflow)\r + - [Table of Contents](#table-of-contents)\r + - [Description](#description)\r + - [Contents](#contents)\r + - [Building Blocks](#building-blocks)\r + - [Workflows](#workflows)\r + - [Resources](#resources)\r + - [Tests](#tests)\r + - [Instructions](#instructions)\r + - [Local machine](#local-machine)\r + - [Requirements](#requirements)\r + - [Usage steps](#usage-steps)\r + - [MareNostrum 4](#marenostrum-4)\r + - [Requirements in MN4](#requirements-in-mn4)\r + - [Usage steps in MN4](#usage-steps-in-mn4)\r + - [Mahti or Puhti](#mahti-or-puhti)\r + - [Requirements](#requirements)\r + - [Steps](#steps)\r + - [License](#license)\r + - [Contact](#contact)\r +\r +## Description\r +\r +Uses multiscale simulations to describe cancer progression into invasion.\r +\r +The workflow uses the following building blocks, described in order of execution:\r +\r +1. 
PhysiBoSS-Invasion\r +\r +For details on individual workflow steps, see the user documentation for each building block.\r +\r +[`GitHub repository`]()\r +\r +\r +## Contents\r +\r +### Building Blocks\r +\r +The ``BuildingBlocks`` folder contains the script to install the\r +Building Blocks used in the Cancer Invasion Workflow.\r +\r +### Workflows\r +\r +The ``Workflow`` folder contains the workflows implementations.\r +\r +Currently contains the implementation using PyCOMPSs and Snakemake (in progress).\r +\r +### Resources\r +\r +The ``Resources`` folder contains dataset files.\r +\r +### Tests\r +\r +The ``Tests`` folder contains the scripts that run each Building Block\r +used in the workflow for the given small dataset.\r +They can be executed individually for testing purposes.\r +\r +## Instructions\r +\r +### Local machine\r +\r +This section explains the requirements and usage for the Cancer Invasion Workflow in a laptop or desktop computer.\r +\r +#### Requirements\r +\r +- [`permedcoe`](https://github.com/PerMedCoE/permedcoe) package\r +- [PyCOMPSs](https://pycompss.readthedocs.io/en/stable/Sections/00_Quickstart.html) / [Snakemake](https://snakemake.readthedocs.io/en/stable/)\r +- [Singularity](https://sylabs.io/guides/3.0/user-guide/installation.html)\r +\r +#### Usage steps\r +\r +1. Clone this repository:\r +\r + ```bash\r + git clone https://github.com/PerMedCoE/cancer-invasion-workflow\r + ```\r +\r +2. Install the Building Blocks required for the Cancer Invasion Workflow:\r +\r + ```bash\r + cancer-invasion-workflow/BuildingBlocks/./install_BBs.sh\r + ```\r +\r +3. 
Get the required Building Block images from the project [B2DROP](https://b2drop.bsc.es/index.php/f/444350):\r +\r + - Required images:\r + - PhysiCell-Invasion.singularity\r +\r + The path where these files are stored **MUST be exported in the `PERMEDCOE_IMAGES`** environment variable.\r +\r + > :warning: **TIP**: These containers can be built manually as follows (be patient since some of them may take some time):\r + 1. Clone the `BuildingBlocks` repository\r + ```bash\r + git clone https://github.com/PerMedCoE/BuildingBlocks.git\r + ```\r + 2. Build the required Building Block images\r + ```bash\r + cd BuildingBlocks/Resources/images\r + sudo singularity build PhysiCell-Invasion.sif PhysiCell-Invasion.singularity\r + cd ../../..\r + ```\r +\r +**If using PyCOMPSs in local PC** (make sure that PyCOMPSs in installed):\r +\r +4. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflows/PyCOMPSs\r + ```\r +\r +5. Execute `./run.sh`\r +\r +**If using Snakemake in local PC** (make sure that SnakeMake is installed):\r +\r +4. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflows/SnakeMake\r + ```\r +\r +5. Execute `./run.sh`\r + > **TIP**: If you want to run the workflow with a different dataset, please update the `run.sh` script setting the `dataset` variable to the new dataset folder and their file names.\r +\r +\r +### MareNostrum 4\r +\r +This section explains the requirements and usage for the Cancer Invasion Workflow in the MareNostrum 4 supercomputer.\r +\r +#### Requirements in MN4\r +\r +- Access to MN4\r +\r +All Building Blocks are already installed in MN4, and the Cancer Invasion Workflow available.\r +\r +#### Usage steps in MN4\r +\r +1. 
Load the `COMPSs`, `Singularity` and `permedcoe` modules\r +\r + ```bash\r + export COMPSS_PYTHON_VERSION=3\r + module load COMPSs/3.1\r + module load singularity/3.5.2\r + module use /apps/modules/modulefiles/tools/COMPSs/libraries\r + module load permedcoe\r + ```\r +\r + > **TIP**: Include the loading into your `${HOME}/.bashrc` file to load it automatically on the session start.\r +\r + This commands will load COMPSs and the permedcoe package which provides all necessary dependencies, as well as the path to the singularity container images (`PERMEDCOE_IMAGES` environment variable) and testing dataset (`CANCERINVASIONWORKFLOW_DATASET` environment variable).\r +\r +2. Get a copy of the pilot workflow into your desired folder\r +\r + ```bash\r + mkdir desired_folder\r + cd desired_folder\r + get_cancerinvasionworkflow\r + ```\r +\r +3. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflow/PyCOMPSs\r + ```\r +\r +4. Execute `./launch.sh`\r +\r + This command will launch a job into the job queuing system (SLURM) requesting 2 nodes (one node acting half master and half worker, and other full worker node) for 20 minutes, and is prepared to use the singularity images that are already deployed in MN4 (located into the `PERMEDCOE_IMAGES` environment variable). It uses the dataset located into `../../Resources/data` folder.\r +\r + > :warning: **TIP**: If you want to run the workflow with a different dataset, please edit the `launch.sh` script and define the appropriate dataset path.\r +\r + After the execution, a `results` folder will be available with with Cancer Invasion Workflow results.\r +\r +### Mahti or Puhti\r +\r +This section explains how to run the Cancer Invasion workflow on CSC supercomputers using SnakeMake.\r +\r +#### Requirements\r +\r +- Install snakemake (or check if there is a version installed using `module spider snakemake`)\r +- Install workflow, using the same steps as for the local machine. 
With the exception that containers have to be built elsewhere.\r +\r +#### Steps\r +\r +\r +1. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflow/SnakeMake\r + ```\r +\r +2. Edit `launch.sh` with the correct partition, account, and resource specifications. \r +\r +3. Execute `./launch.sh`\r +\r + > :warning: Snakemake provides a `--cluster` flag, but this functionality should be avoided as it's really not suited for HPC systems.\r +\r +## License\r +\r +[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\r +\r +## Contact\r +\r +\r +\r +This software has been developed for the [PerMedCoE project](https://permedcoe.eu/), funded by the European Commission (EU H2020 [951773](https://cordis.europa.eu/project/id/951773)).\r +\r +![](https://permedcoe.eu/wp-content/uploads/2020/11/logo_1.png "PerMedCoE")\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "PerMedCoE Cancer Invasion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/477?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """`atavide` is a complete workflow for metagenomics data analysis, including QC/QA, optional host removal, assembly and cross-assembly, and individual read based annotations. We have also built in some advanced analytics including tools to assign annotations from reads to contigs, and to generate metagenome-assembled genomes in several different ways, giving you the power to explore your data!\r +\r +`atavide` is 100% snakemake and conda, so you only need to install the snakemake workflow, and then everything else will be installed with conda.\r +\r +Steps:\r +1. QC/QA with [prinseq++](https://github.com/Adrian-Cantu/PRINSEQ-plus-plus)\r +2. 
optional host removal using bowtie2 and samtools, [as described previously](https://edwards.flinders.edu.au/command-line-deconseq/). To enable this, you need to provide a path to the host db and a host db.\r +\r +Metagenome assembly\r +1. pairwise assembly of each sample using [megahit](https://github.com/voutcn/megahit)\r +2. extraction of all reads that do not assemble using samtools flags\r +3. assembly of all unassembled reads using [megahit](https://github.com/voutcn/megahit)\r +4. compilation of _all_ contigs into a single unified set using [Flye](https://github.com/fenderglass/Flye)\r +5. comparison of reads -> contigs to generate coverage\r +\r +MAG creation\r +1. [metabat](https://bitbucket.org/berkeleylab/metabat/src/master/)\r +2. [concoct](https://github.com/BinPro/CONCOCT)\r +3. Pairwise comparisons using [turbocor](https://github.com/dcjones/turbocor) followed by clustering\r +\r +Read-based annotations\r +1. [Kraken2](https://ccb.jhu.edu/software/kraken2/)\r +2. [singlem](https://github.com/wwood/singlem)\r +3. [SUPER-focus](https://github.com/metageni/SUPER-FOCUS)\r +4. [FOCUS](https://github.com/metageni/FOCUS)\r +\r +Want something else added to the suite? File an issue on github and we'll add it ASAP!\r +\r +### Installation\r +\r +You will need to install\r +1. The NCBI taxonomy database somewhere\r +2. The superfocus databases somewhere, and set the SUPERFOCUS_DB environmental variable\r +\r +Everything else should install automatically.""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.241.1" ; + schema1:isBasedOn "https://github.com/linsalrob/atavide" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for atavide" ; + schema1:sdDatePublished "2024-07-12 13:36:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/241/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5948 ; + schema1:dateCreated "2021-11-21T05:26:02Z" ; + schema1:dateModified "2023-01-16T13:55:08Z" ; + schema1:description """`atavide` is a complete workflow for metagenomics data analysis, including QC/QA, optional host removal, assembly and cross-assembly, and individual read based annotations. We have also built in some advanced analytics including tools to assign annotations from reads to contigs, and to generate metagenome-assembled genomes in several different ways, giving you the power to explore your data!\r +\r +`atavide` is 100% snakemake and conda, so you only need to install the snakemake workflow, and then everything else will be installed with conda.\r +\r +Steps:\r +1. QC/QA with [prinseq++](https://github.com/Adrian-Cantu/PRINSEQ-plus-plus)\r +2. optional host removal using bowtie2 and samtools, [as described previously](https://edwards.flinders.edu.au/command-line-deconseq/). To enable this, you need to provide a path to the host db and a host db.\r +\r +Metagenome assembly\r +1. pairwise assembly of each sample using [megahit](https://github.com/voutcn/megahit)\r +2. extraction of all reads that do not assemble using samtools flags\r +3. assembly of all unassembled reads using [megahit](https://github.com/voutcn/megahit)\r +4. compilation of _all_ contigs into a single unified set using [Flye](https://github.com/fenderglass/Flye)\r +5. comparison of reads -> contigs to generate coverage\r +\r +MAG creation\r +1. [metabat](https://bitbucket.org/berkeleylab/metabat/src/master/)\r +2. [concoct](https://github.com/BinPro/CONCOCT)\r +3. Pairwise comparisons using [turbocor](https://github.com/dcjones/turbocor) followed by clustering\r +\r +Read-based annotations\r +1. [Kraken2](https://ccb.jhu.edu/software/kraken2/)\r +2. [singlem](https://github.com/wwood/singlem)\r +3. 
[SUPER-focus](https://github.com/metageni/SUPER-FOCUS)\r +4. [FOCUS](https://github.com/metageni/FOCUS)\r +\r +Want something else added to the suite? File an issue on github and we'll add it ASAP!\r +\r +### Installation\r +\r +You will need to install\r +1. The NCBI taxonomy database somewhere\r +2. The superfocus databases somewhere, and set the SUPERFOCUS_DB environmental variable\r +\r +Everything else should install automatically.""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "atavide" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/241?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=13" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-07-12 13:21:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=13" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11175 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:50Z" ; + schema1:dateModified "2024-06-11T12:54:50Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=13" ; + schema1:version 13 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """### Workflow for Metagenomics from bins to metabolic models (GEMs)\r +\r +**Summary**\r + - Prodigal gene prediction\r + - CarveMe genome scale metabolic model reconstruction\r + - MEMOTE for metabolic model testing\r + - SMETANA Species METabolic interaction ANAlysis\r +\r +Other UNLOCK workflows on WorkflowHub: https://workflowhub.eu/projects/16/workflows?view=default
\r +\r +**All tool CWL files and other workflows can be found here:**
\r +Tools: https://gitlab.com/m-unlock/cwl
\r +Workflows: https://gitlab.com/m-unlock/cwl/workflows\r +\r +**How to setup and use an UNLOCK workflow:**
\r +https://m-unlock.gitlab.io/docs/setup/setup.html
\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/372?version=1" ; + schema1:isBasedOn "https://gitlab.com/m-unlock/cwl/-/blob/master/cwl/workflows/workflow_metagenomics_GEM.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Metagenomic GEMs from Assembly" ; + schema1:sdDatePublished "2024-07-12 13:34:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/372/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 29095 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7276 ; + schema1:creator , + ; + schema1:dateCreated "2022-07-07T08:23:15Z" ; + schema1:dateModified "2023-01-16T14:01:55Z" ; + schema1:description """### Workflow for Metagenomics from bins to metabolic models (GEMs)\r +\r +**Summary**\r + - Prodigal gene prediction\r + - CarveMe genome scale metabolic model reconstruction\r + - MEMOTE for metabolic model testing\r + - SMETANA Species METabolic interaction ANAlysis\r +\r +Other UNLOCK workflows on WorkflowHub: https://workflowhub.eu/projects/16/workflows?view=default
\r +\r +**All tool CWL files and other workflows can be found here:**
\r +Tools: https://gitlab.com/m-unlock/cwl
\r +Workflows: https://gitlab.com/m-unlock/cwl/workflows\r +\r +**How to setup and use an UNLOCK workflow:**
\r +https://m-unlock.gitlab.io/docs/setup/setup.html
\r +""" ; + schema1:image ; + schema1:keywords "Metagenomics, Genomics, GEM, carveme, memote" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Metagenomic GEMs from Assembly" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/372?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + ; + ns1:output , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2024-07-10T16:00:36.610549" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/bacterial-genome-assembly" ; + schema1:license "GPL-3.0-or-later" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "bacterial-genome-assembly/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . + + a schema1:Dataset ; + schema1:datePublished "2022-10-14T16:18:18.328560" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/ChIPseq_PE" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "ChIPseq_PE/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.11" . + + a schema1:Dataset ; + schema1:datePublished "2024-03-06T13:54:12.142581" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-pe-illumina-artic-variant-calling/COVID-19-PE-ARTIC-ILLUMINA" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Loads a single cell counts matrix into an annData format - adding a column called sample with the sample name. 
(Input format - matrix.mtx, features.tsv and barcodes.tsv)" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/512?version=2" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq: Load counts matrix" ; + schema1:sdDatePublished "2024-07-12 13:22:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/512/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13569 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-05-30T05:44:52Z" ; + schema1:dateModified "2024-05-30T05:44:52Z" ; + schema1:description "Loads a single cell counts matrix into an annData format - adding a column called sample with the sample name. (Input format - matrix.mtx, features.tsv and barcodes.tsv)" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/512?version=1" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "scRNAseq: Load counts matrix" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/512?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + ; + ns1:output . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Quantitative Mass Spectrometry nf-core workflow" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1013?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/quantms" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/quantms" ; + schema1:sdDatePublished "2024-07-12 13:19:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1013/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14195 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:08Z" ; + schema1:dateModified "2024-06-11T12:55:08Z" ; + schema1:description "Quantitative Mass Spectrometry nf-core workflow" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1013?version=3" ; + schema1:keywords "dia, lfq, mass-spec, mass-spectrometry, openms, proteogenomics, Proteomics, tmt" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/quantms" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1013?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Mutations Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.290.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_md_setup_mutations/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein MD Setup tutorial with mutations" ; + schema1:sdDatePublished "2024-07-12 13:33:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/290/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7377 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-17T13:25:49Z" ; + schema1:dateModified "2022-04-11T09:29:46Z" ; + schema1:description """# Mutations Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/290?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein MD Setup tutorial with mutations" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_md_setup_mutations/python/workflow.py" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/278?version=5" ; + schema1:isBasedOn "https://gitlab.bsc.es/lrodrig1/structuralvariants_poc/-/tree/1.1.3/structuralvariants/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNV_pipeline" ; + schema1:sdDatePublished "2024-07-12 13:35:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/278/ro_crate?version=5" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9858 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9607 ; + schema1:creator , + ; + schema1:dateCreated "2022-05-10T09:04:10Z" ; + schema1:dateModified "2022-05-10T09:04:10Z" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/278?version=10" ; + schema1:keywords "cancer, CODEX2, ExomeDepth, manta, TransBioNet, variant calling, GRIDSS, structural variants" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CNV_pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/278?version=5" ; + schema1:version 5 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 74039 . + + a schema1:Dataset ; + schema1:datePublished "2024-05-02T14:42:36.593454" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-Trio-phasing-VGP5/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/972?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/circdna" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/circdna" ; + schema1:sdDatePublished "2024-07-12 13:22:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/972/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8577 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:45Z" ; + schema1:dateModified "2024-06-11T12:54:45Z" ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/972?version=7" ; + schema1:keywords "ampliconarchitect, ampliconsuite, circle-seq, circular, DNA, eccdna, ecdna, extrachromosomal-circular-dna, Genomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/circdna" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/972?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Molecular Structure Checking using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **checking** a **molecular structure** before using it as an input for a **Molecular Dynamics** simulation. The workflow uses the **BioExcel Building Blocks library (biobb)**. The particular structure used is the crystal structure of **human Adenylate Kinase 1A (AK1A)**, in complex with the **AP5A inhibitor** (PDB code [1Z83](https://www.rcsb.org/structure/1z83)). 
\r +\r +**Structure checking** is a key step before setting up a protein system for **simulations**. A number of **common issues** found in structures at **Protein Data Bank** may compromise the success of the **simulation**, or may suggest that longer **equilibration** procedures are necessary.\r +\r +The **workflow** shows how to:\r +\r +- Run **basic manipulations on structures** (selection of models, chains, alternative locations\r +- Detect and fix **amide assignments** and **wrong chiralities**\r +- Detect and fix **protein backbone** issues (missing fragments, and atoms, capping)\r +- Detect and fix **missing side-chain atoms**\r +- **Add hydrogen atoms** according to several criteria\r +- Detect and classify **atomic clashes**\r +- Detect possible **disulfide bonds (SS)**\r +\r +An implementation of this workflow in a **web-based Graphical User Interface (GUI)** can be found in the [https://mmb.irbbarcelona.org/biobb-wfs/](https://mmb.irbbarcelona.org/biobb-wfs/) server (see [https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check](https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check)).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier 
"https://doi.org/10.48546/workflowhub.workflow.775.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_structure_checking" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Molecular Structure Checking" ; + schema1:sdDatePublished "2024-07-12 13:24:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/775/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 47048 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-05T08:36:14Z" ; + schema1:dateModified "2024-03-05T08:38:46Z" ; + schema1:description """# Molecular Structure Checking using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **checking** a **molecular structure** before using it as an input for a **Molecular Dynamics** simulation. The workflow uses the **BioExcel Building Blocks library (biobb)**. The particular structure used is the crystal structure of **human Adenylate Kinase 1A (AK1A)**, in complex with the **AP5A inhibitor** (PDB code [1Z83](https://www.rcsb.org/structure/1z83)). \r +\r +**Structure checking** is a key step before setting up a protein system for **simulations**. 
A number of **common issues** found in structures at **Protein Data Bank** may compromise the success of the **simulation**, or may suggest that longer **equilibration** procedures are necessary.\r +\r +The **workflow** shows how to:\r +\r +- Run **basic manipulations on structures** (selection of models, chains, alternative locations\r +- Detect and fix **amide assignments** and **wrong chiralities**\r +- Detect and fix **protein backbone** issues (missing fragments, and atoms, capping)\r +- Detect and fix **missing side-chain atoms**\r +- **Add hydrogen atoms** according to several criteria\r +- Detect and classify **atomic clashes**\r +- Detect possible **disulfide bonds (SS)**\r +\r +An implementation of this workflow in a **web-based Graphical User Interface (GUI)** can be found in the [https://mmb.irbbarcelona.org/biobb-wfs/](https://mmb.irbbarcelona.org/biobb-wfs/) server (see [https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check](https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check)).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Molecular 
Structure Checking" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_structure_checking/blob/main/biobb_wf_structure_checking/notebooks/biobb_wf_structure_checking.ipynb" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1025?version=11" ; + schema1:isBasedOn "https://github.com/nf-core/taxprofiler" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/taxprofiler" ; + schema1:sdDatePublished "2024-07-12 13:18:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1025/ro_crate?version=11" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16232 ; + schema1:creator , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-21T03:02:43Z" ; + schema1:dateModified "2024-06-21T03:02:43Z" ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1025?version=10" ; + schema1:keywords "Classification, illumina, long-reads, Metagenomics, microbiome, nanopore, pathogen, Profiling, shotgun, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/taxprofiler" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1025?version=11" ; + schema1:version 11 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-07-12 13:19:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=23" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 21521 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:16Z" ; + schema1:dateModified "2024-06-11T12:55:16Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:version 23 . + + a schema1:Dataset ; + schema1:datePublished "2022-06-03T10:07:46.335781" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/generic-variant-calling-wgs-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "generic-variant-calling-wgs-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.10" . 
+ + a schema1:Dataset ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling" ; + schema1:mainEntity ; + schema1:name "COVID-19-PE-ARTIC-ILLUMINA (v0.4)" ; + schema1:sdDatePublished "2021-06-19 03:00:40 +0100" ; + schema1:softwareVersion "v0.4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 64620 ; + schema1:name "COVID-19-PE-ARTIC-ILLUMINA" ; + schema1:programmingLanguage . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.131.1" ; + schema1:isBasedOn 
"https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/131/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 76543 ; + schema1:creator , + ; + schema1:dateCreated "2021-06-30T12:18:15Z" ; + schema1:dateModified "2022-09-15T12:31:15Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/131?version=4" ; + schema1:isPartOf , + ; + 
schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_amber_md_setup/8bcda75405183a84476acd7ba733e4cb666ce397/biobb_wf_amber_md_setup/notebooks/mdsetup_lig/biobb_amber_complex_setup_notebook.ipynb" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + ; + schema1:description "This workflow generates binding scores that correlate well with binding affinities using an additional tool TransFS, developed at Oxford University. More info can be found at https://covid19.galaxyproject.org/cheminformatics/" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/16?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Cheminformatics - TransFS scoring" ; + schema1:sdDatePublished "2024-07-12 13:37:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/16/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1909 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7581 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T14:51:59Z" ; + schema1:dateModified "2023-01-16T13:41:04Z" ; + schema1:description "This workflow generates binding scores that correlate well with binding affinities using an additional tool TransFS, developed at Oxford University. 
More info can be found at https://covid19.galaxyproject.org/cheminformatics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Cheminformatics - TransFS scoring" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/16?version=1" ; + schema1:version 1 ; + ns1:input , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 4174 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 21106 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/987?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/funcscan" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/funcscan" ; + schema1:sdDatePublished "2024-07-12 13:21:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/987/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15247 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:53Z" ; + schema1:dateModified "2024-06-11T12:54:53Z" ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/987?version=8" ; + schema1:keywords "amp, AMR, antibiotic-resistance, antimicrobial-peptides, antimicrobial-resistance-genes, arg, Assembly, bgc, biosynthetic-gene-clusters, contigs, function, Metagenomics, natural-products, screening, secondary-metabolites" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/funcscan" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/987?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Protein 3D structure prediction pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1011?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/proteinfold" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/proteinfold" ; + schema1:sdDatePublished "2024-07-12 13:20:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1011/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14542 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-26T03:02:45Z" ; + schema1:dateModified "2024-06-26T03:02:45Z" ; + schema1:description "Protein 3D structure prediction pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1011?version=1" ; + schema1:keywords "alphafold2, protein-fold-prediction, protein-folding, protein-sequences, protein-structure" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/proteinfold" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1011?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 
2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.262.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_abc_md_setup/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL ABC MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:35:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/262/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 229479 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 36638 ; + schema1:creator , + ; + schema1:dateCreated "2022-01-11T08:14:55Z" ; + schema1:dateModified "2023-06-12T08:27:38Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 
2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/262?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL ABC MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/262?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# ![nf-core/ampliseq](docs/images/nf-core-ampliseq_logo.png) + +**16S rRNA amplicon sequencing analysis workflow using QIIME2**. 
+ +[![nf-core](https://img.shields.io/badge/nf--core-pipeline-brightgreen.svg)](https://nf-co.re/) +[![DOI](https://zenodo.org/badge/150448201.svg)](https://zenodo.org/badge/latestdoi/150448201) +[![Cite Preprint](https://img.shields.io/badge/Cite%20Us!-Cite%20Publication-important)](https://doi.org/10.3389/fmicb.2020.550420) + +[![GitHub Actions CI Status](https://github.com/nf-core/ampliseq/workflows/nf-core%20CI/badge.svg)](https://github.com/nf-core/ampliseq/actions) +[![GitHub Actions Linting Status](https://github.com/nf-core/ampliseq/workflows/nf-core%20linting/badge.svg)](https://github.com/nf-core/ampliseq/actions) +[![Nextflow](https://img.shields.io/badge/nextflow-%E2%89%A520.04.0-brightgreen.svg)](https://www.nextflow.io/) + +[![install with bioconda](https://img.shields.io/badge/install%20with-bioconda-brightgreen.svg)](https://bioconda.github.io/) +[![Docker](https://img.shields.io/docker/automated/nfcore/ampliseq.svg)](https://hub.docker.com/r/nfcore/ampliseq) +[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23ampliseq-4A154B?logo=slack)](https://nfcore.slack.com/channels/ampliseq) + +## Introduction + +**nfcore/ampliseq** is a bioinformatics analysis pipeline used for 16S rRNA or ITS amplicon sequencing data (currently supported is Illumina paired end or PacBio). + +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with docker containers making installation trivial and results highly reproducible. + +## Quick Start + +1. Install [`nextflow`](https://nf-co.re/usage/installation) + +2. 
Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) or [`Podman`](https://podman.io/) for full pipeline reproducibility _(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_ + +3. Download the pipeline and test it on a minimal dataset with a single command: + + ```bash + nextflow run nf-core/ampliseq -profile test, + ``` + + > Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment. + +4. Start running your own analysis! + + ```bash + nextflow run nf-core/ampliseq -profile --input "data" --FW_primer GTGYCAGCMGCCGCGGTAA --RV_primer GGACTACNVGGGTWTCTAAT --metadata "data/Metadata.tsv" + ``` + +See [usage docs](https://nf-co.re/ampliseq/usage) and [parameter docs](https://nf-co.re/ampliseq/parameters) for all of the available options when running the pipeline. 
+ +## Pipeline Summary + +By default, the pipeline currently performs the following: + +* Sequencing quality control ([FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)) +* Trimming of reads ([Cutadapt](https://journal.embnet.org/index.php/embnetjournal/article/view/200)) +* Illumina read processing with [QIIME2](https://www.nature.com/articles/s41587-019-0209-9) +* Infer Amplicon Sequence Variants (ASVs) ([DADA2](https://doi.org/10.1038/nmeth.3869)) +* Taxonomical classification based on [SILVA](https://www.arb-silva.de/) [v132](https://www.arb-silva.de/documentation/release-132/) or [UNITE](https://unite.ut.ee/) database +* excludes unwanted taxa, produces absolute and relative feature/taxa count tables and plots, plots alpha rarefaction curves, computes alpha and beta diversity indices and plots thereof ([QIIME2](https://www.nature.com/articles/s41587-019-0209-9)) +* Calls differentially abundant taxa ([ANCOM](https://www.ncbi.nlm.nih.gov/pubmed/26028277)) +* Overall pipeline run summaries ([MultiQC](https://multiqc.info/)) + +## Documentation + +The nf-core/ampliseq pipeline comes with documentation about the pipeline: [usage](https://nf-co.re/ampliseq/usage) and [output](https://nf-co.re/ampliseq/output). + +## Credits + +nf-core/ampliseq was originally written by Daniel Straub ([@d4straub](https://github.com/d4straub)) and Alexander Peltzer ([@apeltzer](https://github.com/apeltzer)) for use at the [Quantitative Biology Center (QBiC)](http://www.qbic.life) and [Microbial Ecology, Center for Applied Geosciences](http://www.uni-tuebingen.de/de/104325), part of Eberhard Karls Universität Tübingen (Germany). 
+ +We thank the following people for their extensive assistance in the development of this pipeline (in alphabetical order): + +* [Daniel Lundin](https://github.com/erikrikarddaniel) +* [Diego Brambilla](https://github.com/DiegoBrambilla) +* [Emelie Nilsson](https://github.com/emnilsson) +* [Jeanette Tångrot](https://github.com/jtangrot) +* [Sabrina Krakau](https://github.com/skrakau) + +## Contributions and Support + +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md). + +For further information or help, don't hesitate to get in touch on the [Slack `#ampliseq` channel](https://nfcore.slack.com/channels/ampliseq) (you can join with [this invite](https://nf-co.re/join/slack)). + +## Citations + +If you use `nf-core/ampliseq` for your analysis, please cite the `ampliseq` article as follows: +> Daniel Straub, Nia Blackwell, Adrian Langarica-Fuentes, Alexander Peltzer, Sven Nahnsen, Sara Kleindienst **Interpretations of Environmental Microbial Community Studies Are Biased by the Selected 16S rRNA (Gene) Amplicon Sequencing Pipeline** *Frontiers in Microbiology* 2020, 11:2652 [doi: 10.3389/fmicb.2020.550420](https://doi.org/10.3389/fmicb.2020.550420). + +You can cite the `nf-core/ampliseq` zenodo record for a specific version using the following [doi: 10.5281/zenodo.1493841](https://zenodo.org/badge/latestdoi/150448201) + +You can cite the `nf-core` publication as follows: + +> **The nf-core framework for community-curated bioinformatics pipelines.** +> +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen. +> +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x). 
+> ReadCube: [Full Access Link](https://rdcu.be/b1GjZ) + + + + +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-07-12 13:18:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6195 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:38Z" ; + schema1:dateModified "2024-06-11T12:54:39Z" ; + schema1:description """# ![nf-core/ampliseq](docs/images/nf-core-ampliseq_logo.png) + +**16S rRNA amplicon sequencing analysis workflow using QIIME2**. + +[![nf-core](https://img.shields.io/badge/nf--core-pipeline-brightgreen.svg)](https://nf-co.re/) +[![DOI](https://zenodo.org/badge/150448201.svg)](https://zenodo.org/badge/latestdoi/150448201) +[![Cite Preprint](https://img.shields.io/badge/Cite%20Us!-Cite%20Publication-important)](https://doi.org/10.3389/fmicb.2020.550420) + +[![GitHub Actions CI Status](https://github.com/nf-core/ampliseq/workflows/nf-core%20CI/badge.svg)](https://github.com/nf-core/ampliseq/actions) +[![GitHub Actions Linting Status](https://github.com/nf-core/ampliseq/workflows/nf-core%20linting/badge.svg)](https://github.com/nf-core/ampliseq/actions) +[![Nextflow](https://img.shields.io/badge/nextflow-%E2%89%A520.04.0-brightgreen.svg)](https://www.nextflow.io/) + +[![install with bioconda](https://img.shields.io/badge/install%20with-bioconda-brightgreen.svg)](https://bioconda.github.io/) +[![Docker](https://img.shields.io/docker/automated/nfcore/ampliseq.svg)](https://hub.docker.com/r/nfcore/ampliseq) +[![Get help on 
Slack](http://img.shields.io/badge/slack-nf--core%20%23ampliseq-4A154B?logo=slack)](https://nfcore.slack.com/channels/ampliseq) + +## Introduction + +**nfcore/ampliseq** is a bioinformatics analysis pipeline used for 16S rRNA or ITS amplicon sequencing data (currently supported is Illumina paired end or PacBio). + +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with docker containers making installation trivial and results highly reproducible. + +## Quick Start + +1. Install [`nextflow`](https://nf-co.re/usage/installation) + +2. Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) or [`Podman`](https://podman.io/) for full pipeline reproducibility _(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_ + +3. Download the pipeline and test it on a minimal dataset with a single command: + + ```bash + nextflow run nf-core/ampliseq -profile test, + ``` + + > Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment. + +4. Start running your own analysis! + + ```bash + nextflow run nf-core/ampliseq -profile --input "data" --FW_primer GTGYCAGCMGCCGCGGTAA --RV_primer GGACTACNVGGGTWTCTAAT --metadata "data/Metadata.tsv" + ``` + +See [usage docs](https://nf-co.re/ampliseq/usage) and [parameter docs](https://nf-co.re/ampliseq/parameters) for all of the available options when running the pipeline. 
+ +## Pipeline Summary + +By default, the pipeline currently performs the following: + +* Sequencing quality control ([FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)) +* Trimming of reads ([Cutadapt](https://journal.embnet.org/index.php/embnetjournal/article/view/200)) +* Illumina read processing with [QIIME2](https://www.nature.com/articles/s41587-019-0209-9) +* Infer Amplicon Sequence Variants (ASVs) ([DADA2](https://doi.org/10.1038/nmeth.3869)) +* Taxonomical classification based on [SILVA](https://www.arb-silva.de/) [v132](https://www.arb-silva.de/documentation/release-132/) or [UNITE](https://unite.ut.ee/) database +* excludes unwanted taxa, produces absolute and relative feature/taxa count tables and plots, plots alpha rarefaction curves, computes alpha and beta diversity indices and plots thereof ([QIIME2](https://www.nature.com/articles/s41587-019-0209-9)) +* Calls differentially abundant taxa ([ANCOM](https://www.ncbi.nlm.nih.gov/pubmed/26028277)) +* Overall pipeline run summaries ([MultiQC](https://multiqc.info/)) + +## Documentation + +The nf-core/ampliseq pipeline comes with documentation about the pipeline: [usage](https://nf-co.re/ampliseq/usage) and [output](https://nf-co.re/ampliseq/output). + +## Credits + +nf-core/ampliseq was originally written by Daniel Straub ([@d4straub](https://github.com/d4straub)) and Alexander Peltzer ([@apeltzer](https://github.com/apeltzer)) for use at the [Quantitative Biology Center (QBiC)](http://www.qbic.life) and [Microbial Ecology, Center for Applied Geosciences](http://www.uni-tuebingen.de/de/104325), part of Eberhard Karls Universität Tübingen (Germany). 
+ +We thank the following people for their extensive assistance in the development of this pipeline (in alphabetical order): + +* [Daniel Lundin](https://github.com/erikrikarddaniel) +* [Diego Brambilla](https://github.com/DiegoBrambilla) +* [Emelie Nilsson](https://github.com/emnilsson) +* [Jeanette Tångrot](https://github.com/jtangrot) +* [Sabrina Krakau](https://github.com/skrakau) + +## Contributions and Support + +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md). + +For further information or help, don't hesitate to get in touch on the [Slack `#ampliseq` channel](https://nfcore.slack.com/channels/ampliseq) (you can join with [this invite](https://nf-co.re/join/slack)). + +## Citations + +If you use `nf-core/ampliseq` for your analysis, please cite the `ampliseq` article as follows: +> Daniel Straub, Nia Blackwell, Adrian Langarica-Fuentes, Alexander Peltzer, Sven Nahnsen, Sara Kleindienst **Interpretations of Environmental Microbial Community Studies Are Biased by the Selected 16S rRNA (Gene) Amplicon Sequencing Pipeline** *Frontiers in Microbiology* 2020, 11:2652 [doi: 10.3389/fmicb.2020.550420](https://doi.org/10.3389/fmicb.2020.550420). + +You can cite the `nf-core/ampliseq` zenodo record for a specific version using the following [doi: 10.5281/zenodo.1493841](https://zenodo.org/badge/latestdoi/150448201) + +You can cite the `nf-core` publication as follows: + +> **The nf-core framework for community-curated bioinformatics pipelines.** +> +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen. +> +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x). 
+> ReadCube: [Full Access Link](https://rdcu.be/b1GjZ) + + + + +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Genome-wide alternative splicing analysis v.2" ; + schema1:hasPart , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.482.3" ; + schema1:license "CC-BY-NC-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genome-wide alternative splicing analysis v.2" ; + schema1:sdDatePublished "2024-07-12 13:33:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/482/ro_crate?version=3" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 214419 . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 30115 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 119329 ; + schema1:creator ; + schema1:dateCreated "2023-06-11T11:53:05Z" ; + schema1:dateModified "2023-06-11T11:53:41Z" ; + schema1:description "Genome-wide alternative splicing analysis v.2" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/482?version=6" ; + schema1:keywords "Transcriptomics, Alternative splicing, isoform switching" ; + schema1:license "https://spdx.org/licenses/CC-BY-NC-4.0" ; + schema1:name "Genome-wide alternative splicing analysis v.2" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/482?version=3" ; + schema1:version 3 ; + ns1:input , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). 
\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.56.4" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_protein-complex_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:34:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/56/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 156554 ; + schema1:creator , + ; + schema1:dateCreated "2022-09-15T11:04:55Z" ; + schema1:dateModified "2023-01-16T13:44:51Z" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/56?version=6" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + 
schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_protein-complex_md_setup/master/biobb_wf_protein-complex_md_setup/notebooks/biobb_Protein-Complex_MDsetup_tutorial.ipynb" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + ; + schema1:description "This workflow is used form the preparation of protein and ligands for docking. More info can be found at https://covid19.galaxyproject.org/cheminformatics/" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/12?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Cheminformatics - Enumerate ligands for docking" ; + schema1:sdDatePublished "2024-07-12 13:37:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/12/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1801 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7160 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T14:17:44Z" ; + schema1:dateModified "2023-01-16T13:40:34Z" ; + schema1:description "This workflow is used form the preparation of protein and ligands for docking. More info can be found at https://covid19.galaxyproject.org/cheminformatics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Cheminformatics - Enumerate ligands for docking" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/12?version=1" ; + schema1:version 1 ; + ns1:input . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 3960 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +****\r +\r +About this workflow:\r +\r +* Repeat this workflow separately for datasets from different tissues. \r +* Inputs = collections of R1 files, and R2 files (all from a single tissue type). \r +* Runs FastQC with default settings, separately for raw reads R1 and R2 collections; all output to MultiQC. \r +* Runs Trimmomatic with initial ILLUMINACLIP step (using standard adapter sequence for TruSeq3 paired-ended), uses settings SLIDINGWINDOW:4:5 LEADING:5 TRAILING:5 MINLEN:25, retain paired (not unpaired) outputs. User can modify at runtime. \r +* Runs FastQC with default settings, separately for trimmed R1 and R2 collections; all output to MultiQC. \r +* From Trimmomatic output: concatenate all R1 reads; concatenate all R2 reads. \r +* Outputs = trimmed merged R1 file, trimmed merged R2 file. \r +* Log files from Trimmomatic to MultiQC, to summarise trimming results. \r +* Note: a known bug with MultiQC html output is that plot is labelled as "R1" reads, when it actually contains information from both R1 and R2 read sets - this is under investigation (and is due to a Trimmomatic output file labelling issue). \r +* MultiQC results table formatted to show % of reads retained after trimming, table included in workflow report. \r +* Note: a known bug is that sometimes the workflow report text resets to default text. 
To restore, look for an earlier workflow version with correct workflow report text, and copy and paste report text into current version. """ ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.876.1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for QC and trimming of RNAseq reads - TSI" ; + schema1:sdDatePublished "2024-07-12 13:22:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/876/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 40452 ; + schema1:creator , + ; + schema1:dateCreated "2024-05-08T06:39:28Z" ; + schema1:dateModified "2024-05-09T04:03:15Z" ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +****\r +\r +About this workflow:\r +\r +* Repeat this workflow separately for datasets from different tissues. \r +* Inputs = collections of R1 files, and R2 files (all from a single tissue type). \r +* Runs FastQC with default settings, separately for raw reads R1 and R2 collections; all output to MultiQC. \r +* Runs Trimmomatic with initial ILLUMINACLIP step (using standard adapter sequence for TruSeq3 paired-ended), uses settings SLIDINGWINDOW:4:5 LEADING:5 TRAILING:5 MINLEN:25, retain paired (not unpaired) outputs. User can modify at runtime. \r +* Runs FastQC with default settings, separately for trimmed R1 and R2 collections; all output to MultiQC. 
\r +* From Trimmomatic output: concatenate all R1 reads; concatenate all R2 reads. \r +* Outputs = trimmed merged R1 file, trimmed merged R2 file. \r +* Log files from Trimmomatic to MultiQC, to summarise trimming results. \r +* Note: a known bug with MultiQC html output is that plot is labelled as "R1" reads, when it actually contains information from both R1 and R2 read sets - this is under investigation (and is due to a Trimmomatic output file labelling issue). \r +* MultiQC results table formatted to show % of reads retained after trimming, table included in workflow report. \r +* Note: a known bug is that sometimes the workflow report text resets to default text. To restore, look for an earlier workflow version with correct workflow report text, and copy and paste report text into current version. """ ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "TSI-annotation" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "QC and trimming of RNAseq reads - TSI" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/876?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 2139822 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-03T15:31:41.978689" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-Trio-phasing-VGP5/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.17" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-07-12 13:18:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=16" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11863 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:11Z" ; + schema1:dateModified "2024-06-11T12:55:11Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:version 16 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# CWL-assembly\r +[![Codacy Badge](https://api.codacy.com/project/badge/Grade/684724bbc0134960ab41748f4a4b732f)](https://www.codacy.com/app/mb1069/CWL-assembly?utm_source=github.com&utm_medium=referral&utm_content=EBI-Metagenomics/CWL-assembly&utm_campaign=Badge_Grade)\r +[![Build Status](https://travis-ci.org/EBI-Metagenomics/CWL-assembly.svg?branch=develop)](https://travis-ci.org/EBI-Metagenomics/CWL-assembly)\r +\r +## Description\r +\r +This repository contains two workflows for metagenome and metatranscriptome assembly of short read data. MetaSPAdes is used as default for paired end data, and MEGAHIT for single end data. 
MEGAHIT can be specified as the default assembler in the yaml file if preferred. Steps include:\r +\r +QC - removal of short reads, low quality regions, adapters and host decontamination\r +\r +Assembly - with metaSPADES or MEGAHIT\r +\r +Post-assembly - Host and PhiX decontamination, contig length filter (500bp), stats generation.\r +\r +Multiple input read files can also be specified for co-assembly.\r +\r +## Requirements\r +\r +This pipeline requires and environment with cwltool, blastn, metaspades and megahit.\r +\r +## Databases\r +\r +Predownload fasta files for host decontamination and generate:\r + - bwa index folder\r + - blast index folder\r + \r +Specify the locations in the yaml file when running the pipeline.\r +\r +\r +## Main pipeline executables\r +\r +src/workflows/metagenome_pipeline.cwl\r +src/workflows/metatranscriptome_pipeline.cwl\r +\r +# Example output directory structure\r +```\r +SRP0741\r + └── SRP074153 Project directory containing all assemblies under that project\r + ├── downloads.yml Raw data download caching logfile, to avoid duplicate downloads of raw data\r + ├── SRR6257\r + │   └── SRR6257420 Run directory\r + │   └── megahit\r + │   ├── 001 Assembly directory\r + │   │ ├── SRR6257420.fasta Trimmed assembly\r + │   │ ├── SRR6257420.fasta.gz Archive trimmed assembly\r + │   │ ├── SRR6257420.fasta.gz.md5 MD5 hash of above archive\r + │   │ ├── coverage.tab Coverage file\r + │   │ ├── final.contigs.fa Raw assembly\r + │   │ ├── job_config.yml CWL job configuration\r + │   │ ├── megahit.log Assembler output log\r + │   │ ├── output.json Human-readable Assembly stats file\r + │   │ ├── sorted.bam BAM file of assembly\r + │   │ ├── sorted.bam.bai Secondary BAM file\r + │   │ └── toil.log cwlToil output log\r + │  └── metaspades Assembly of equivalent data using another assembler (eg metaspades, spades...)\r + │  └── ... 
\r + │ \r + ├── raw Raw data directory\r + │   └── SRR6257420.fastq.gz Raw data files\r + │\r + └── tmp Temporary directory for assemblies\r + └── SRR6257\r + └── SRR6257420\r + └── megahit\r + └── 001\r +```\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/474?version=1" ; + schema1:isBasedOn "https://github.com/EBI-Metagenomics/CWL-assembly.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Metagenome and metatranscriptome assembly in CWL" ; + schema1:sdDatePublished "2024-07-12 13:32:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/474/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6441 ; + schema1:dateCreated "2023-05-19T13:59:30Z" ; + schema1:dateModified "2023-05-19T13:59:30Z" ; + schema1:description """# CWL-assembly\r +[![Codacy Badge](https://api.codacy.com/project/badge/Grade/684724bbc0134960ab41748f4a4b732f)](https://www.codacy.com/app/mb1069/CWL-assembly?utm_source=github.com&utm_medium=referral&utm_content=EBI-Metagenomics/CWL-assembly&utm_campaign=Badge_Grade)\r +[![Build Status](https://travis-ci.org/EBI-Metagenomics/CWL-assembly.svg?branch=develop)](https://travis-ci.org/EBI-Metagenomics/CWL-assembly)\r +\r +## Description\r +\r +This repository contains two workflows for metagenome and metatranscriptome assembly of short read data. MetaSPAdes is used as default for paired end data, and MEGAHIT for single end data. MEGAHIT can be specified as the default assembler in the yaml file if preferred. 
Steps include:\r +\r +QC - removal of short reads, low quality regions, adapters and host decontamination\r +\r +Assembly - with metaSPADES or MEGAHIT\r +\r +Post-assembly - Host and PhiX decontamination, contig length filter (500bp), stats generation.\r +\r +Multiple input read files can also be specified for co-assembly.\r +\r +## Requirements\r +\r +This pipeline requires and environment with cwltool, blastn, metaspades and megahit.\r +\r +## Databases\r +\r +Predownload fasta files for host decontamination and generate:\r + - bwa index folder\r + - blast index folder\r + \r +Specify the locations in the yaml file when running the pipeline.\r +\r +\r +## Main pipeline executables\r +\r +src/workflows/metagenome_pipeline.cwl\r +src/workflows/metatranscriptome_pipeline.cwl\r +\r +# Example output directory structure\r +```\r +SRP0741\r + └── SRP074153 Project directory containing all assemblies under that project\r + ├── downloads.yml Raw data download caching logfile, to avoid duplicate downloads of raw data\r + ├── SRR6257\r + │   └── SRR6257420 Run directory\r + │   └── megahit\r + │   ├── 001 Assembly directory\r + │   │ ├── SRR6257420.fasta Trimmed assembly\r + │   │ ├── SRR6257420.fasta.gz Archive trimmed assembly\r + │   │ ├── SRR6257420.fasta.gz.md5 MD5 hash of above archive\r + │   │ ├── coverage.tab Coverage file\r + │   │ ├── final.contigs.fa Raw assembly\r + │   │ ├── job_config.yml CWL job configuration\r + │   │ ├── megahit.log Assembler output log\r + │   │ ├── output.json Human-readable Assembly stats file\r + │   │ ├── sorted.bam BAM file of assembly\r + │   │ ├── sorted.bam.bai Secondary BAM file\r + │   │ └── toil.log cwlToil output log\r + │  └── metaspades Assembly of equivalent data using another assembler (eg metaspades, spades...)\r + │  └── ... 
\r + │ \r + ├── raw Raw data directory\r + │   └── SRR6257420.fastq.gz Raw data files\r + │\r + └── tmp Temporary directory for assemblies\r + └── SRR6257\r + └── SRR6257420\r + └── megahit\r + └── 001\r +```\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/474?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Metagenome and metatranscriptome assembly in CWL" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/474?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1023?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/smrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/smrnaseq" ; + schema1:sdDatePublished "2024-07-12 13:18:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1023/ro_crate?version=7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11092 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:18Z" ; + schema1:dateModified "2024-06-11T12:55:18Z" ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1023?version=10" ; + schema1:keywords "small-rna, smrna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/smrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1023?version=7" ; + schema1:version 7 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# GSC (Genotype Sparse Compression)\r +Genotype Sparse Compression (GSC) is an advanced tool for lossless compression of VCF files, designed to efficiently store and manage VCF files in a compressed format. It accepts VCF/BCF files as input and utilizes advanced compression techniques to significantly reduce storage requirements while ensuring fast query capabilities. In our study, we successfully compressed the VCF files from the 1000 Genomes Project (1000Gpip3), consisting of 2504 samples and 80 million variants, from an uncompressed VCF file of 803.70GB to approximately 1GB.\r +\r +## Requirements \r +### GSC requires:\r +\r +- **Compiler Compatibility**: GSC requires a modern C++14-ready compiler, such as:\r + - g++ version 10.1.0 or higher\r +\r +- **Build System**: Make build system is necessary for compiling GSC.\r +\r +- **Operating System**: GSC supports 64-bit operating systems, including:\r + - Linux (Ubuntu 18.04)\r + \r +## Installation\r +To download, build and install GSC use the following commands.\r +```bash\r +git clone https://github.com/luo-xiaolong/GSC.git\r +cd GSC\r +make\r +```\r +To clean the GSC build use:\r +```bash\r +make clean\r +```\r +## Usage\r +```bash\r +Usage: gsc [option] [arguments] \r +Available options: \r + compress - compress VCF/BCF file\r + decompress - query and decompress to VCF/BCF file\r +```\r +- Compress the input VCF/BCF file\r +```bash\r +Usage of gsc compress:\r +\r + gsc compress [options] [--in [in_file]] [--out [out_file]]\r +\r +Where:\r +\r + [options] Optional flags and parameters for compression.\r + -i, --in [in_file] Specify the input file (default: VCF or VCF.GZ). If omitted, input is taken from standard input (stdin).\r + -o, --out [out_file] Specify the output file. 
If omitted, output is sent to standard output (stdout).\r +\r +Options:\r +\r + -M, --mode_lossly Choose lossy compression mode (lossless by default).\r + -b, --bcf Input is a BCF file (default: VCF or VCF.GZ).\r + -p, --ploidy [X] Set ploidy of samples in input VCF to [X] (default: 2).\r + -t, --threads [X] Set number of threads to [X] (default: 1).\r + -d, --depth [X] Set maximum replication depth to [X] (default: 100, 0 means no matches).\r + -m, --merge [X] Specify files to merge, separated by commas (e.g., -m chr1.vcf,chr2.vcf), or '@' followed by a file containing a list of VCF files (e.g., -m @file_with_IDs.txt). By default, all VCF files are compressed.\r +```\r +- Decompress / Query\r +```bash\r +Usage of gsc decompress and query:\r +\r + gsc decompress [options] --in [in_file] --out [out_file]\r +\r +Where:\r + [options] Optional flags and parameters for compression.\r + -i, --in [in_file] Specify the input file . If omitted, input is taken from standard input (stdin).\r + -o, --out [out_file] Specify the output file (default: VCF). If omitted, output is sent to standard output (stdout).\r +\r +Options:\r +\r + General Options:\r +\r + -M, --mode_lossly Choose lossy compression mode (default: lossless).\r + -b, --bcf Output a BCF file (default: VCF).\r +\r + Filter options (applicable in lossy compression mode only): \r +\r + -r, --range [X] Specify range in format [start],[end] (e.g., -r 4999756,4999852).\r + -s, --samples [X] Samples separated by comms (e.g., -s HG03861,NA18639) OR '@' sign followed by the name of a file with sample name(s) separated by whitespaces (for exaple: -s @file_with_IDs.txt). By default all samples/individuals are decompressed. 
\r + --header-only Output only the header of the VCF/BCF.\r + --no-header Output without the VCF/BCF header (only genotypes).\r + -G, --no-genotype Don't output sample genotypes (only #CHROM, POS, ID, REF, ALT, QUAL, FILTER, and INFO columns).\r + -C, --out-ac-an Write AC/AN to the INFO field.\r + -S, --split Split output into multiple files (one per chromosome).\r + -I, [ID=^] Include only sites with specified ID (e.g., -I "ID=rs6040355").\r + --minAC [X] Include only sites with AC <= X.\r + --maxAC [X] Include only sites with AC >= X.\r + --minAF [X] Include only sites with AF >= X (X: 0 to 1).\r + --maxAF [X] Include only sites with AF <= X (X: 0 to 1).\r + --min-qual [X] Include only sites with QUAL >= X.\r + --max-qual [X] Include only sites with QUAL <= X.\r +```\r +## Example\r +There is an example VCF/VCF.gz/BCF file, `toy.vcf`/`toy.vcf.gz`/`toy.bcf`, in the toy folder, which can be used to test GSC\r +### compress\r +\r +#### lossless compression:\r +The input file format is VCF. You can compress a VCF file in lossless mode using one of the following methods:\r +1. **Explicit input and output file parameters**:\r + \r + Use the `--in` option to specify the input VCF file and the `--out` option for the output compressed file.\r + ```bash\r + ./gsc compress --in toy/toy.vcf --out toy/toy_lossless.gsc\r + ```\r +2. **Input file parameter and output redirection**:\r + \r + Use the `--out` option for the output compressed file and redirect the input VCF file into the command.\r + ```bash\r + ./gsc compress --out toy/toy_lossless.gsc < toy/toy.vcf\r + ```\r +3. **Output file redirection and input file parameter**:\r + \r + Specify the input VCF file with the `--in` option and redirect the output to create the compressed file.\r + ```bash\r + ./gsc compress --in toy/toy.vcf > toy/toy_lossless.gsc\r + ```\r +4. **Input and output redirection**:\r + \r + Use shell redirection for both input and output. 
This method does not use the `--in` and `--out` options.\r + ```bash\r + ./gsc compress < toy/toy.vcf > toy/toy_lossless.gsc\r + ```\r +This will create a file:\r +* `toy_lossless.gsc` - The compressed archive of the entire VCF file.\r +\r +#### lossy compression:\r +\r +The input file format is VCF. The commands are similar to those used for lossless compression, with the addition of the `-M` parameter to enable lossy compression.\r +\r + For example, to compress a VCF file in lossy mode:\r +\r + ```bash\r + ./gsc compress -M --in toy/toy.vcf --out toy/toy_lossy.gsc\r + ```\r + Or using redirection:\r + ```bash\r + ./gsc compress -M --out toy/toy_lossy.gsc < toy/toy.vcf\r + ``` \r + This will create a file:\r + * `toy_lossy.gsc` - The compressed archive of the entire VCF file is implemented with lossy compression. It only retains the 'GT' subfield within the INFO and FORMAT fields, and excludes all other subfields..\r + \r +### Decompress (The commands are similar to those used for compression)\r +lossless decompression:\r +\r +To decompress the compressed toy_lossless.gsc into a VCF file named toy_lossless.vcf:\r +```bash\r +./gsc decompress --in toy/toy_lossless.gsc --out toy/toy_lossless.vcf\r +```\r +lossy decompression:\r +\r +To decompress the compressed toy_lossy.gsc into a VCF file named toy_lossy.vcf:\r +```bash\r +./gsc decompress -M --in toy/toy_lossy.gsc --out toy/toy_lossy.vcf\r +```\r +## Dockerfile\r +Dockerfile can be used to build a Docker image with all necessary dependencies and GSC compressor. The image is based on Ubuntu 18.04. To build a Docker image and run a Docker container, you need Docker Desktop (https://www.docker.com). 
Example commands (run it within a directory with Dockerfile):\r +```bash\r +docker build -t gsc_project .\r +docker run -it gsc_project\r +```\r +## Citations\r +- **bio.tools ID**: `gsc_genotype_sparse_compression`\r +- **Research Resource Identifier (RRID)**: `SCR_025071`\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.887.1" ; + schema1:isBasedOn "https://github.com/luo-xiaolong/GSC.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for GSC (Genotype Sparse Compression)" ; + schema1:sdDatePublished "2024-07-12 13:22:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/887/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 838 ; + schema1:creator ; + schema1:dateCreated "2024-05-18T13:18:00Z" ; + schema1:dateModified "2024-05-18T13:18:31Z" ; + schema1:description """# GSC (Genotype Sparse Compression)\r +Genotype Sparse Compression (GSC) is an advanced tool for lossless compression of VCF files, designed to efficiently store and manage VCF files in a compressed format. It accepts VCF/BCF files as input and utilizes advanced compression techniques to significantly reduce storage requirements while ensuring fast query capabilities. 
In our study, we successfully compressed the VCF files from the 1000 Genomes Project (1000Gpip3), consisting of 2504 samples and 80 million variants, from an uncompressed VCF file of 803.70GB to approximately 1GB.\r +\r +## Requirements \r +### GSC requires:\r +\r +- **Compiler Compatibility**: GSC requires a modern C++14-ready compiler, such as:\r + - g++ version 10.1.0 or higher\r +\r +- **Build System**: Make build system is necessary for compiling GSC.\r +\r +- **Operating System**: GSC supports 64-bit operating systems, including:\r + - Linux (Ubuntu 18.04)\r + \r +## Installation\r +To download, build and install GSC use the following commands.\r +```bash\r +git clone https://github.com/luo-xiaolong/GSC.git\r +cd GSC\r +make\r +```\r +To clean the GSC build use:\r +```bash\r +make clean\r +```\r +## Usage\r +```bash\r +Usage: gsc [option] [arguments] \r +Available options: \r + compress - compress VCF/BCF file\r + decompress - query and decompress to VCF/BCF file\r +```\r +- Compress the input VCF/BCF file\r +```bash\r +Usage of gsc compress:\r +\r + gsc compress [options] [--in [in_file]] [--out [out_file]]\r +\r +Where:\r +\r + [options] Optional flags and parameters for compression.\r + -i, --in [in_file] Specify the input file (default: VCF or VCF.GZ). If omitted, input is taken from standard input (stdin).\r + -o, --out [out_file] Specify the output file. 
If omitted, output is sent to standard output (stdout).\r +\r +Options:\r +\r + -M, --mode_lossly Choose lossy compression mode (lossless by default).\r + -b, --bcf Input is a BCF file (default: VCF or VCF.GZ).\r + -p, --ploidy [X] Set ploidy of samples in input VCF to [X] (default: 2).\r + -t, --threads [X] Set number of threads to [X] (default: 1).\r + -d, --depth [X] Set maximum replication depth to [X] (default: 100, 0 means no matches).\r + -m, --merge [X] Specify files to merge, separated by commas (e.g., -m chr1.vcf,chr2.vcf), or '@' followed by a file containing a list of VCF files (e.g., -m @file_with_IDs.txt). By default, all VCF files are compressed.\r +```\r +- Decompress / Query\r +```bash\r +Usage of gsc decompress and query:\r +\r + gsc decompress [options] --in [in_file] --out [out_file]\r +\r +Where:\r + [options] Optional flags and parameters for compression.\r + -i, --in [in_file] Specify the input file . If omitted, input is taken from standard input (stdin).\r + -o, --out [out_file] Specify the output file (default: VCF). If omitted, output is sent to standard output (stdout).\r +\r +Options:\r +\r + General Options:\r +\r + -M, --mode_lossly Choose lossy compression mode (default: lossless).\r + -b, --bcf Output a BCF file (default: VCF).\r +\r + Filter options (applicable in lossy compression mode only): \r +\r + -r, --range [X] Specify range in format [start],[end] (e.g., -r 4999756,4999852).\r + -s, --samples [X] Samples separated by comms (e.g., -s HG03861,NA18639) OR '@' sign followed by the name of a file with sample name(s) separated by whitespaces (for exaple: -s @file_with_IDs.txt). By default all samples/individuals are decompressed. 
\r + --header-only Output only the header of the VCF/BCF.\r + --no-header Output without the VCF/BCF header (only genotypes).\r + -G, --no-genotype Don't output sample genotypes (only #CHROM, POS, ID, REF, ALT, QUAL, FILTER, and INFO columns).\r + -C, --out-ac-an Write AC/AN to the INFO field.\r + -S, --split Split output into multiple files (one per chromosome).\r + -I, [ID=^] Include only sites with specified ID (e.g., -I "ID=rs6040355").\r + --minAC [X] Include only sites with AC <= X.\r + --maxAC [X] Include only sites with AC >= X.\r + --minAF [X] Include only sites with AF >= X (X: 0 to 1).\r + --maxAF [X] Include only sites with AF <= X (X: 0 to 1).\r + --min-qual [X] Include only sites with QUAL >= X.\r + --max-qual [X] Include only sites with QUAL <= X.\r +```\r +## Example\r +There is an example VCF/VCF.gz/BCF file, `toy.vcf`/`toy.vcf.gz`/`toy.bcf`, in the toy folder, which can be used to test GSC\r +### compress\r +\r +#### lossless compression:\r +The input file format is VCF. You can compress a VCF file in lossless mode using one of the following methods:\r +1. **Explicit input and output file parameters**:\r + \r + Use the `--in` option to specify the input VCF file and the `--out` option for the output compressed file.\r + ```bash\r + ./gsc compress --in toy/toy.vcf --out toy/toy_lossless.gsc\r + ```\r +2. **Input file parameter and output redirection**:\r + \r + Use the `--out` option for the output compressed file and redirect the input VCF file into the command.\r + ```bash\r + ./gsc compress --out toy/toy_lossless.gsc < toy/toy.vcf\r + ```\r +3. **Output file redirection and input file parameter**:\r + \r + Specify the input VCF file with the `--in` option and redirect the output to create the compressed file.\r + ```bash\r + ./gsc compress --in toy/toy.vcf > toy/toy_lossless.gsc\r + ```\r +4. **Input and output redirection**:\r + \r + Use shell redirection for both input and output. 
This method does not use the `--in` and `--out` options.\r + ```bash\r + ./gsc compress < toy/toy.vcf > toy/toy_lossless.gsc\r + ```\r +This will create a file:\r +* `toy_lossless.gsc` - The compressed archive of the entire VCF file.\r +\r +#### lossy compression:\r +\r +The input file format is VCF. The commands are similar to those used for lossless compression, with the addition of the `-M` parameter to enable lossy compression.\r +\r + For example, to compress a VCF file in lossy mode:\r +\r + ```bash\r + ./gsc compress -M --in toy/toy.vcf --out toy/toy_lossy.gsc\r + ```\r + Or using redirection:\r + ```bash\r + ./gsc compress -M --out toy/toy_lossy.gsc < toy/toy.vcf\r + ``` \r + This will create a file:\r + * `toy_lossy.gsc` - The compressed archive of the entire VCF file is implemented with lossy compression. It only retains the 'GT' subfield within the INFO and FORMAT fields, and excludes all other subfields..\r + \r +### Decompress (The commands are similar to those used for compression)\r +lossless decompression:\r +\r +To decompress the compressed toy_lossless.gsc into a VCF file named toy_lossless.vcf:\r +```bash\r +./gsc decompress --in toy/toy_lossless.gsc --out toy/toy_lossless.vcf\r +```\r +lossy decompression:\r +\r +To decompress the compressed toy_lossy.gsc into a VCF file named toy_lossy.vcf:\r +```bash\r +./gsc decompress -M --in toy/toy_lossy.gsc --out toy/toy_lossy.vcf\r +```\r +## Dockerfile\r +Dockerfile can be used to build a Docker image with all necessary dependencies and GSC compressor. The image is based on Ubuntu 18.04. To build a Docker image and run a Docker container, you need Docker Desktop (https://www.docker.com). 
Example commands (run it within a directory with Dockerfile):\r +```bash\r +docker build -t gsc_project .\r +docker run -it gsc_project\r +```\r +## Citations\r +- **bio.tools ID**: `gsc_genotype_sparse_compression`\r +- **Research Resource Identifier (RRID)**: `SCR_025071`\r +""" ; + schema1:keywords "Bioinformatics, Genomics, C++" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "GSC (Genotype Sparse Compression)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/887?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:description "Analysis of variation within individual COVID-19 samples using Illumina Paired End data. More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/7?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genomics - PE Variation" ; + schema1:sdDatePublished "2024-07-12 13:37:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/7/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 7268 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 37131 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T12:52:22Z" ; + schema1:dateModified "2023-05-30T12:07:57Z" ; + schema1:description "Analysis of variation within individual COVID-19 samples using Illumina Paired End data. 
More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Genomics - PE Variation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/7?version=1" ; + schema1:version 1 ; + ns1:input , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 13428 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.54.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_ligand_parameterization" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter GMX Notebook Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-07-12 13:24:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/54/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14629 ; + schema1:creator , + , + ; + schema1:dateCreated "2021-06-29T07:40:34Z" ; + schema1:dateModified "2022-09-15T10:58:18Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/54?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter GMX Notebook Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_ligand_parameterization/28ef9a099ddff3096ed47477ff72b4b08f8eb355/biobb_wf_ligand_parameterization/notebooks/biobb_ligand_parameterization_tutorial.ipynb" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# GermlineShortV_biovalidation\r +\r + - [Description](#description)\r + - [Diagram](#diagram)\r + - [User guide](#user-guide)\r + - [Quick start guide](#quick-start-guide)\r + - [Benchmarking](#benchmarking)\r + - [Workflow summaries](#workflow-summaries)\r + - [Metadata](#metadata)\r + - [Component tools](#component-tools)\r + - [Required (minimum)\r + inputs/parameters](#required-minimum-inputsparameters) \r + [Preparing your own input files](#preparing-input-files)\r + - [Additional notes](#additional-notes)\r + - [Understanding your outputs](#understanding-your-outputs) \r + - [Performance metrics explained](#performance-metrics-explained) \r + - [Help/FAQ/Troubleshooting](#helpfaqtroubleshooting)\r + - [Acknowledgements/citations/credits](#acknowledgementscitationscredits)\r +\r +## Description \r +Population-scale WGS cohorts are essential resources for genetic analyses including heritable diseases, evolutionary genomics, conservation biology, and population genomics. Processing raw reads into analysis-ready variants remains challenging. Various mapping and variant calling pipelines have been made publicly available in recent decades. Designing a mapping and variant calling pipeline to meet your needs is dependent on the compute infrastructure you’re working on, the types of variants you’re primarily interested in, and the sequencing technology you use to generate raw sequencing data. Keep in mind that the tools you use to build your pipeline can affect variant calling accuracy. Further, optimisation and customisation of these tools’ commands can also affect their performance. 
Best-practice recommendations for variant calling pipelines vary dramatically between species and research questions, depending on the availability of genomic resources for the population of interest, genome structure, and clinical relevance of the resulting variant dataset. It is important to not only design a robust variant calling pipeline but also fine-tune it to achieve optimal performance for your dataset and research question. \r +\r +There are various measurements that you can apply to evaluate the biological accuracy of your germline variant calling pipeline. Currently, no best practice methods for interrogating joint-called variant sets exist in the literature. A number of publicly available, human ‘gold standard’ truth datasets including Platinum Genomes and Genome in a Bottle (GIAB) are useful for benchmarking across high confidence regions of the genome and evaluating the recall and precision of the pipeline. We recommend individuals working with human datasets benchmark their germline variant calling pipelines using one of these datasets. Unfortunately, these resources are not typically available for non-human organisms. \r +\r +Here, we present protocols for benchmarking and validating germline short variant (SNVs and indels) datasets using a combination of methods that can capture the quality of your variant sets for human, non-human model, and non-model organisms. The process you can apply will depend on the organism you’re working with and the genomic resources available to that organism. \r +\r +## Diagram \r +\r +

\r + \r +

\r +\r +## User guide \r +### Quick start guide \r +\r +These bash scripts were written for the University of Sydney’s high performance computer, Artemis. They can be run on the command line or submitted as PBS jobs. These scripts assume your input is a gzipped multi-sample (cohort) VCF file. Before running, edit the PBS project directive and define the variables at the top of the script. All software used in this protocol is installed on Artemis- to use alternate versions or run on a different compute infrastructure, edit the modules according to your needs. \r +\r +#### Human datasets \r +For human datasets, we recommend you benchmark your germline variant calling pipeline using a gold standard dataset such as Platinum Genomes. Raw sequence data in FASTQ format for these datasets can be downloaded along with their high confidence variant calls and regions from public repositories. See [Preparing input files]() for more information on how to download and prepare these files. \r +\r +##### 1. Collect vcf summary metrics \r +Edit the PBS -P directive and variables for your dataset in `vcfstat.sh`. Then run script with: \r +\r +```\r +qsub vcfstat.sh (or bash vcfstat.sh)\r +```\r +This will produce summary and quality metrics reports and plots for your cohort. It will also produce summary and detail files for known variant representation. BCFtools stats plots will be housed in a directory labelled `${cohort}_vcfplots`. \r +\r +##### 2. Biological benchmarking using a truth set \r +\r +Edit the PBS -P directive and variables for your files. Then run script with: \r +\r +```\r +qsub run_happy.sh\r +```\r +This script will subset your multi-sample VCF into individual samples, prepare them for hap.py, and output a number of files including summary metrics (including recall, precision and F1-score) and ROC count files that can be used to produce ROC curves, separately for SNVs and indels. 
See the [hap.py user guide](https://github.com/Illumina/hap.py/blob/master/doc/happy.md) for more information on how to interpret hap.py output. ROC curves of Hap.py runs can be plotted using the script [rocplot.Rscript](https://github.com/Illumina/hap.py/blob/master/src/R/rocplot.Rscript). \r +\r +#### Non-human model organism datasets\r +\r +##### 1. Collect vcf summary metrics \r +Edit the PBS -P directive and variables for your dataset in `vcfstat.sh`. We recommend you use the set of known variants used for base quality score recalibration to validate population level variants. If you used trio data, unhash the Mendelian error command within the script. Then run script with: \r +\r +```\r +qsub vcfstat.sh (or bash vcfstat.sh)\r +```\r +This will produce summary and quality metrics reports and plots for your cohort. It will also produce summary and detail files for known variant representation. BCFtools stats plots will be housed in a directory labelled `${cohort}_vcfplots`. \r +#### Non-model organism datasets \r +\r +##### 1. Collect vcf summary metrics \r +\r +Edit the PBS -P directive and variables for your dataset in `vcfstat_nonmodel.sh`. Then run script with: \r +\r +```\r +qsub vcfstat_nonmodel.sh (or bash vcfstat_nonmodel.sh)\r +```\r +\r +This will produce summary and quality metrics reports and plots for your cohort. It will also produce summary and detail files for known variant representation. BCFtools stats plots will be housed in a directory labelled `${cohort}_vcfplots`. \r +\r +## Benchmarking \r +Coming soon! 
\r +\r +## Workflow summaries \r +### Metadata \r +|metadata field | workflow_name / workflow_version |\r +|-------------------|:---------------------------------:|\r +|Version | 1.0 |\r +|Maturity | stable |\r +|Creators | Georgie Samaha, Tracy Chew, Cali Willet |\r +|Source | NA |\r +|License | NA |\r +|Workflow manager | NA |\r +|Container | None |\r +|Install method | Manual |\r +|GitHub | NA |\r +|bio.tools | NA |\r +|BioContainers | NA | \r +|bioconda | NA |\r +\r +### Component tools \r +\r +bcftools/1.14 \r +htslib/1.14 \r +python/3.8.2 \r +R/4.1.1 \r +hap.py/0.3.14 \r +\r +### Required (minimum) inputs/parameters \r +\r +- Multi-sample or single sample VCF file (VCF.gz format)\r +- List of sample IDs that match the VCF (.txt format)\r +- Known variant dataset (VCF format. Human and non-human model organisms only)\r +- Pedigree file (format: mother,father,offspring. Trios or Platinum Genomes only)\r +- Truth set variant calls (VCF.gz format. Human, Platinum Genomes only)\r +- High confidence call regions (BED format. Human, Platinum Genomes only)\r +\r +### Preparing input files \r +\r +#### Gold standard variant truth sets \r +\r +The benchmarking protocol for human datasets assumes you have performed mapping and germline variant calling on a gold standard truth set. These datasets contain millions of variants that have been confirmed using orthologous technologies [Eberle et al. 2017](https://doi.org/10.1101/gr.210500.116). \r +\r +We recommend you use the Platinum Genomes dataset for benchmarking germline variant calling pipelines that include joint genotyping of multiple samples. Six members, comprising two trios, of the Platinum Genomes dataset can be downloaded from the Illumina BaseSpace Sequence Hub, the ENA, or dbGaP. 
The Platinum Genomes dataset contains multiple files including the following files you will need for running `run_happy.sh`: \r +- Paired-end FASTQ files for each sample\r +- High-confidence germline variant VCF files for each sample\r +- High-confidence genomic regions (BED format)\r +\r +Currently, these files are available for Hg19 (GRCh37) and Hg38 (GRCh38) . Links to raw data are [here](https://github.com/Illumina/PlatinumGenomes). BaseSpace offers a command line tool for downloading files, see [here](https://developer.basespace.illumina.com/docs/content/documentation/cli/cli-examples) for instructions. \r +\r +#### Providing your own ‘truth set’ \r +*A word of caution*- testing the performance of your pipeline using a truth set is only intended to estimate the overall quality of your pipeline and detect any potential sources of error in your method. It is not intended to test the truthfulness of your variant set. See [here](https://gatk.broadinstitute.org/hc/en-us/articles/360035531572-Evaluating-the-quality-of-a-germline-short-variant-callset) for further discussion of the assumptions we make about truth sets. Most non-human organisms do not have access to gold standard truth set resources like the Platinum Genomes dataset. However there are a few alternative options you could try: \r + - Genotyping arrays: if you have genotyping data for the same samples you tested your germline variant calling pipeline with, you can reformat these to VCF using a tool like [PLINK’s recode](https://www.cog-genomics.org/plink/1.9/data#recode) and use it as a truth set. \r + - Known variant datasets: if your organism of interest has a set of known population-level variants you can use these as a truth-set. Just remember that these variants might not always be validated (i.e. dbSNP). \r +\r +Using this method you will need to also provide your own high-confidence regions file in BED format. 
The location and size of these regions will depend on your dataset, organism, reference assembly and sequencing method. Typically these regions would exclude centromeres, telomeres and repetitive parts of the genome that are likely to complicate variant calling. \r +\r +\r +## Additional notes \r +\r +Test data for Hap.py can be found [here](https://github.com/Illumina/hap.py/blob/master/doc/microbench.md) \r +\r +Instructions on how to install Hap.py can be found [here](https://github.com/Illumina/hap.py#installation) \r +\r +This warning may be thrown by Hap.py and can be ignored: `WARNING No reference file found at default locations. You can set the environment variable 'HGREF' or 'HG19' to point to a suitable Fasta file.` \r +\r +\r +### Understanding your outputs \r +The following files will be produced and stored in your designated working directory. They will all be labelled with your specified cohort name. \r +\r +#### Variant based metrics \r +Produced by BCFtools stats command. Output file:\r +- ${cohort}.bcftools.metrics \r +- ${cohort}_bcftools.metrics_vcfstatplots (directory and files) \r +\r +#### Sample based metrics \r +Produced by BCFtools smplstats and mendelian commands. Output files:\r +- ${cohort}.smplstats\r +- ${cohort}.smplstats.pdf\r +- ${cohort}.Mendelianerr\r +\r +#### Known variant concordance \r +Produced by GATK CollectVariantCallingMetrics command. Output files:\r +- ${cohort}.known.variant_calling_summary_metrics\r +- ${cohort}.known.variant_calling_detail_metrics\r +\r +#### Biological validation using a truth set \r +Produced by Hap.py. 
Output files:\r +- ${sample}.happy.metrics.json.gz\r +- ${sample}.happy.roc.all.csv.gz\r +- ${sample}.happy.roc.Locations.INDEL.csv.gz\r +- ${sample}.happy.roc.Locations.INDEL.PASS.csv.gz\r +- ${sample}.happy.roc.Locations.SNP.csv.gz\r +- ${sample}.happy.roc.Locations.SNP.PASS.csv.gz\r +- ${sample}.happy.roc.tsv\r +- ${sample}.happy.runinfo.json\r +- ${sample}.happy.summary.csv\r +\r +### Performance metrics explained \r +\r +|Metric |Expected/ideal value |Tool |Relevance |\r +|--------------------------------------|----------------------------------------------------|---------------|---------------------------------------------------------------------------------------------------------------|\r +|Number of SNVs and indels (per sample)|Human WGS: ~4.4M, Human WES: ~41k, Species dependent|bcftools stats |Population, sequencing approach, and genomic region dependent. Alone, this metric cannot indicate data quality.|\r +|Indel length distribution |Indel length range is 1-10,000bp. |bcftools stats |Increased length is conflated with reduced mapping quality. Distribution is dataset dependent. Recommend filtering for high quality.|\r +|Depth of coverage |Depends on the sequencing coverage of samples. |bcftools stats |Dramatic deviation from expected distribution can indicate artifactual bias. |\r +|Substitution type counts |See TiTv ratio. |bcftools stats |Twice as many possible transversions as transitions. See [here](https://dx.doi.org/10.1093%2Fbioinformatics%2Fbtu668) |\r +|TiTv ratio (genome wide) |For mammals: WGS: 2.0-2.1, WES: 3.0-3.3 |bcftools stats |Dramatic deviation from expected ratio can indicate artifactual bias. Typically elevated in coding regions where transversions are more likely to occur. |\r +|Base quality distribution |Dataset dependent. |bcftools stats |This will reflect the quality based filtering you performed. 
Dramatic deviation from expected ratio can indicate artifactual bias.|\r +|Indel ratio |Common: ~1.0, Rare: 0.2-0.5 |GATK CollectVariantCallingMetrics|This should be evaluated after custom filtering variants for your needs. Dramatic deviation from expected ratio can indicate artifactual bias.|\r +|Het/hom(non-ref) |~2.0 assuming Hardy-Weinberg equilibrium. |GATK CollectVariantCallingMetrics|Ancestry dependent, can vary dramatically. See [Wang et al. 2015](https://dx.doi.org/10.1093%2Fbioinformatics%2Fbtu668)|\r +|Mendelian error |0 |BCFtools +mendelian|Mendelian inheritance errors are likely erroneous genotype calls. See [Pilipenko et al. 2014](https://dx.doi.org/10.1186%2F1753-6561-8-S1-S21)|\r +|True positives |Dataset dependent. |Hap.py |Number of query variants that are present in the truth set. |\r +|False negatives |Dataset dependent. |Hap.py |Number of variants in truth set, not present in query VCF. |\r +|False positives |Dataset dependent. |Hap.py |Number of variants in query VCF, not present in truth set. |\r +|Recall |1 |Hap.py |Absence of false negatives. See [Krusche et al. 2019](https://doi.org/10.1038/s41587-019-0054-x) |\r +|Precision |1 |Hap.py |Absence of false positives. See [Krusche et al. 2019](https://doi.org/10.1038/s41587-019-0054-x) |\r +|F1-score |1 |Hap.py |Harmonic mean of recall and precision. See [Krusche et al. 2019](https://doi.org/10.1038/s41587-019-0054-x) |\r +|Genotype errors (FP.GT) |Dataset dependent. |Hap.py |Number of query variants with incorrect genotype |\r +\r +### Resources and references \r +\r +Eberle, M. A., Fritzilas, E., Krusche, P., Källberg, M., Moore, B. L., Bekritsky, M. A., Iqbal, Z., Chuang, H. Y., Humphray, S. J., Halpern, A. L., Kruglyak, S., Margulies, E. H., McVean, G., & Bentley, D. R. (2017). A reference data set of 5.4 million phased human variants validated by genetic inheritance from sequencing a three-generation 17-member pedigree. Genome research, 27(1), 157–164. 
https://doi.org/10.1101/gr.210500.116 \r +\r +Koboldt, D.C. Best practises for variant calling in clinical sequencing. Genome Med 12, 91 (2020). https://doi.org/10.1186/s13073-020-00791-w \r +\r +Krusche, P., Trigg, L., Boutros, P.C. et al. Best practices for benchmarking germline small-variant calls in human genomes. Nat Biotechnol 37, 555–560 (2019). https://doi.org/10.1038/s41587-019-0054-x \r +\r +Marshall, C.R., Chowdhury, S., Taft, R.J. et al. Best practices for the analytical validation of clinical whole-genome sequencing intended for the diagnosis of germline disease. npj Genom. Med. 5, 47 (2020). https://doi.org/10.1038/s41525-020-00154-9 \r +\r +Pilipenko, V.V., He, H., Kurowski, B.G. et al. Using Mendelian inheritance errors as quality control criteria in whole genome sequencing data set. BMC Proc 8, S21 (2014). https://doi.org/10.1186/1753-6561-8-S1-S21 \r +\r +Wang, J., Raskin, J., Samuels, D., Shyr, Y., Guo, Y., Genome measures used for quality control are dependent on gene function and ancestry, Bioinformatics 31, 318–323 (2015) https://doi.org/10.1093/bioinformatics/btu668 \r +\r +\r +## Help/FAQ/Troubleshooting\r +\r +If Hap.py throws an error, search the [issues at Hap.py GitHub repository](https://github.com/Illumina/hap.py/issues) and attempt to resolve it before submitting an issue here. \r +\r +## Acknowledgements/citations/credits \r +\r +### Authors \r +- Georgie Samaha (Sydney Informatics Hub, University of Sydney) \r +- Tracy Chew (Sydney Informatics Hub, University of Sydney) \r +- Cali Willet (Sydney Informatics Hub, University of Sydney) \r +- Nandan Deshpande (Sydney Informatics Hub, University of Sydney)\r +\r +Acknowledgements (and co-authorship, where appropriate) are an important way for us to demonstrate the value we bring to your research. Your research outcomes are vital for ongoing funding of the Sydney Informatics Hub and national compute facilities. 
We suggest including the following acknowledgement in any publications that follow from this work: \r +\r +The authors acknowledge the technical assistance provided by the Sydney Informatics Hub, a Core Research Facility of the University of Sydney and the Australian BioCommons which is enabled by NCRIS via Bioplatforms Australia. \r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.339.1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/GermlineShortV_biovalidation.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for GermlineShortV_biovalidation" ; + schema1:sdDatePublished "2024-07-12 13:35:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/339/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 61599 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 19380 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2022-05-05T05:02:08Z" ; + schema1:dateModified "2023-01-16T13:59:52Z" ; + schema1:description """# GermlineShortV_biovalidation\r +\r + - [Description](#description)\r + - [Diagram](#diagram)\r + - [User guide](#user-guide)\r + - [Quick start guide](#quick-start-guide)\r + - [Benchmarking](#benchmarking)\r + - [Workflow summaries](#workflow-summaries)\r + - [Metadata](#metadata)\r + - [Component tools](#component-tools)\r + - [Required (minimum)\r + inputs/parameters](#required-minimum-inputsparameters) \r + [Preparing your own input files](#preparing-input-files)\r + - [Additional notes](#additional-notes)\r + - [Understanding your outputs](#understanding-your-outputs) \r + - [Performance metrics explained](#performance-metrics-explained) \r + - [Help/FAQ/Troubleshooting](#helpfaqtroubleshooting)\r + - 
[Acknowledgements/citations/credits](#acknowledgementscitationscredits)\r +\r +## Description \r +Population-scale WGS cohorts are essential resources for genetic analyses including heritable diseases, evolutionary genomics, conservation biology, and population genomics. Processing raw reads into analysis-ready variants remains challenging. Various mapping and variant calling pipelines have been made publicly available in recent decades. Designing a mapping and variant calling pipeline to meet your needs is dependent on the compute infrastructure you’re working on, the types of variants you’re primarily interested in, and the sequencing technology you use to generate raw sequencing data. Keep in mind that the tools you use to build your pipeline can affect variant calling accuracy. Further, optimisation and customisation of these tools’ commands can also affect their performance. Best-practice recommendations for variant calling pipelines vary dramatically between species and research questions, depending on the availability of genomic resources for the population of interest, genome structure, and clinical relevance of the resulting variant dataset. It is important to not only design a robust variant calling pipeline but also fine-tune it to achieve optimal performance for your dataset and research question. \r +\r +There are various measurements that you can apply to evaluate the biological accuracy of your germline variant calling pipeline. Currently, no best practice methods for interrogating joint-called variant sets exist in the literature. A number of publicly available, human ‘gold standard’ truth datasets including Platinum Genomes and Genome in a Bottle (GIAB) are useful for benchmarking across high confidence regions of the genome and evaluating the recall and precision of the pipeline. We recommend individuals working with human datasets benchmark their germline variant calling pipelines using one of these datasets. 
Unfortunately, these resources are not typically available for non-human organisms. \r +\r +Here, we present protocols for benchmarking and validating germline short variant (SNVs and indels) datasets using a combination of methods that can capture the quality of your variant sets for human, non-human model, and non-model organisms. The process you can apply will depend on the organism you’re working with and the genomic resources available to that organism. \r +\r +## Diagram \r +\r +

\r + \r +

\r +\r +## User guide \r +### Quick start guide \r +\r +These bash scripts were written for the University of Sydney’s high performance computer, Artemis. They can be run on the command line or submitted as PBS jobs. These scripts assume your input is a gzipped multi-sample (cohort) VCF file. Before running, edit the PBS project directive and define the variables at the top of the script. All software used in this protocol is installed on Artemis- to use alternate versions or run on a different compute infrastructure, edit the modules according to your needs. \r +\r +#### Human datasets \r +For human datasets, we recommend you benchmark your germline variant calling pipeline using a gold standard dataset such as Platinum Genomes. Raw sequence data in FASTQ format for these datasets can be downloaded along with their high confidence variant calls and regions from public repositories. See [Preparing input files]() for more information on how to download and prepare these files. \r +\r +##### 1. Collect vcf summary metrics \r +Edit the PBS -P directive and variables for your dataset in `vcfstat.sh`. Then run script with: \r +\r +```\r +qsub vcfstat.sh (or bash vcfstat.sh)\r +```\r +This will produce summary and quality metrics reports and plots for your cohort. It will also produce summary and detail files for known variant representation. BCFtools stats plots will be housed in a directory labelled `${cohort}_vcfplots`. \r +\r +##### 2. Biological benchmarking using a truth set \r +\r +Edit the PBS -P directive and variables for your files. Then run script with: \r +\r +```\r +qsub run_happy.sh\r +```\r +This script will subset your multi-sample VCF into individual samples, prepare them for hap.py, and output a number of files including summary metrics (including recall, precision and F1-score) and ROC count files that can be used to produce ROC curves, separately for SNVs and indels. 
See the [hap.py user guide](https://github.com/Illumina/hap.py/blob/master/doc/happy.md) for more information on how to interpret hap.py output. ROC curves of Hap.py runs can be plotted using the script [rocplot.Rscript](https://github.com/Illumina/hap.py/blob/master/src/R/rocplot.Rscript). \r +\r +#### Non-human model organism datasets\r +\r +##### 1. Collect vcf summary metrics \r +Edit the PBS -P directive and variables for your dataset in `vcfstat.sh`. We recommend you use the set of known variants used for base quality score recalibration to validate population level variants. If you used trio data, unhash the Mendelian error command within the script. Then run script with: \r +\r +```\r +qsub vcfstat.sh (or bash vcfstat.sh)\r +```\r +This will produce summary and quality metrics reports and plots for your cohort. It will also produce summary and detail files for known variant representation. BCFtools stats plots will be housed in a directory labelled `${cohort}_vcfplots`. \r +#### Non-model organism datasets \r +\r +##### 1. Collect vcf summary metrics \r +\r +Edit the PBS -P directive and variables for your dataset in `vcfstat_nonmodel.sh`. Then run script with: \r +\r +```\r +qsub vcfstat_nonmodel.sh (or bash vcfstat_nonmodel.sh)\r +```\r +\r +This will produce summary and quality metrics reports and plots for your cohort. It will also produce summary and detail files for known variant representation. BCFtools stats plots will be housed in a directory labelled `${cohort}_vcfplots`. \r +\r +## Benchmarking \r +Coming soon! 
\r +\r +## Workflow summaries \r +### Metadata \r +|metadata field | workflow_name / workflow_version |\r +|-------------------|:---------------------------------:|\r +|Version | 1.0 |\r +|Maturity | stable |\r +|Creators | Georgie Samaha, Tracy Chew, Cali Willet |\r +|Source | NA |\r +|License | NA |\r +|Workflow manager | NA |\r +|Container | None |\r +|Install method | Manual |\r +|GitHub | NA |\r +|bio.tools | NA |\r +|BioContainers | NA | \r +|bioconda | NA |\r +\r +### Component tools \r +\r +bcftools/1.14 \r +htslib/1.14 \r +python/3.8.2 \r +R/4.1.1 \r +hap.py/0.3.14 \r +\r +### Required (minimum) inputs/parameters \r +\r +- Multi-sample or single sample VCF file (VCF.gz format)\r +- List of sample IDs that match the VCF (.txt format)\r +- Known variant dataset (VCF format. Human and non-human model organisms only)\r +- Pedigree file (format: mother,father,offspring. Trios or Platinum Genomes only)\r +- Truth set variant calls (VCF.gz format. Human, Platinum Genomes only)\r +- High confidence call regions (BED format. Human, Platinum Genomes only)\r +\r +### Preparing input files \r +\r +#### Gold standard variant truth sets \r +\r +The benchmarking protocol for human datasets assumes you have performed mapping and germline variant calling on a gold standard truth set. These datasets contain millions of variants that have been confirmed using orthologous technologies [Eberle et al. 2017](https://doi.org/10.1101/gr.210500.116). \r +\r +We recommend you use the Platinum Genomes dataset for benchmarking germline variant calling pipelines that include joint genotyping of multiple samples. Six members, comprising two trios, of the Platinum Genomes dataset can be downloaded from the Illumina BaseSpace Sequence Hub, the ENA, or dbGaP. 
The Platinum Genomes dataset contains multiple files including the following files you will need for running `run_happy.sh`: \r +- Paired-end FASTQ files for each sample\r +- High-confidence germline variant VCF files for each sample\r +- High-confidence genomic regions (BED format)\r +\r +Currently, these files are available for Hg19 (GRCh37) and Hg38 (GRCh38) . Links to raw data are [here](https://github.com/Illumina/PlatinumGenomes). BaseSpace offers a command line tool for downloading files, see [here](https://developer.basespace.illumina.com/docs/content/documentation/cli/cli-examples) for instructions. \r +\r +#### Providing your own ‘truth set’ \r +*A word of caution*- testing the performance of your pipeline using a truth set is only intended to estimate the overall quality of your pipeline and detect any potential sources of error in your method. It is not intended to test the truthfulness of your variant set. See [here](https://gatk.broadinstitute.org/hc/en-us/articles/360035531572-Evaluating-the-quality-of-a-germline-short-variant-callset) for further discussion of the assumptions we make about truth sets. Most non-human organisms do not have access to gold standard truth set resources like the Platinum Genomes dataset. However there are a few alternative options you could try: \r + - Genotyping arrays: if you have genotyping data for the same samples you tested your germline variant calling pipeline with, you can reformat these to VCF using a tool like [PLINK’s recode](https://www.cog-genomics.org/plink/1.9/data#recode) and use it as a truth set. \r + - Known variant datasets: if your organism of interest has a set of known population-level variants you can use these as a truth-set. Just remember that these variants might not always be validated (i.e. dbSNP). \r +\r +Using this method you will need to also provide your own high-confidence regions file in BED format. 
The location and size of these regions will depend on your dataset, organism, reference assembly and sequencing method. Typically these regions would exclude centromeres, telomeres and repetitive parts of the genome that are likely to complicate variant calling. \r +\r +\r +## Additional notes \r +\r +Test data for Hap.py can be found [here](https://github.com/Illumina/hap.py/blob/master/doc/microbench.md) \r +\r +Instructions on how to install Hap.py can be found [here](https://github.com/Illumina/hap.py#installation) \r +\r +This warning may be thrown by Hap.py and can be ignored: `WARNING No reference file found at default locations. You can set the environment variable 'HGREF' or 'HG19' to point to a suitable Fasta file.` \r +\r +\r +### Understanding your outputs \r +The following files will be produced and stored in your designated working directory. They will all be labelled with your specified cohort name. \r +\r +#### Variant based metrics \r +Produced by BCFtools stats command. Output file:\r +- ${cohort}.bcftools.metrics \r +- ${cohort}_bcftools.metrics_vcfstatplots (directory and files) \r +\r +#### Sample based metrics \r +Produced by BCFtools smplstats and mendelian commands. Output files:\r +- ${cohort}.smplstats\r +- ${cohort}.smplstats.pdf\r +- ${cohort}.Mendelianerr\r +\r +#### Known variant concordance \r +Produced by GATK CollectVariantCallingMetrics command. Output files:\r +- ${cohort}.known.variant_calling_summary_metrics\r +- ${cohort}.known.variant_calling_detail_metrics\r +\r +#### Biological validation using a truth set \r +Produced by Hap.py. 
Output files:\r +- ${sample}.happy.metrics.json.gz\r +- ${sample}.happy.roc.all.csv.gz\r +- ${sample}.happy.roc.Locations.INDEL.csv.gz\r +- ${sample}.happy.roc.Locations.INDEL.PASS.csv.gz\r +- ${sample}.happy.roc.Locations.SNP.csv.gz\r +- ${sample}.happy.roc.Locations.SNP.PASS.csv.gz\r +- ${sample}.happy.roc.tsv\r +- ${sample}.happy.runinfo.json\r +- ${sample}.happy.summary.csv\r +\r +### Performance metrics explained \r +\r +|Metric |Expected/ideal value |Tool |Relevance |\r +|--------------------------------------|----------------------------------------------------|---------------|---------------------------------------------------------------------------------------------------------------|\r +|Number of SNVs and indels (per sample)|Human WGS: ~4.4M, Human WES: ~41k, Species dependent|bcftools stats |Population, sequencing approach, and genomic region dependent. Alone, this metric cannot indicate data quality.|\r +|Indel length distribution |Indel length range is 1-10,000bp. |bcftools stats |Increased length is conflated with reduced mapping quality. Distribution is dataset dependent. Recommend filtering for high quality.|\r +|Depth of coverage |Depends on the sequencing coverage of samples. |bcftools stats |Dramatic deviation from expected distribution can indicate artifactual bias. |\r +|Substitution type counts |See TiTv ratio. |bcftools stats |Twice as many possible transversions as transitions. See [here](https://dx.doi.org/10.1093%2Fbioinformatics%2Fbtu668) |\r +|TiTv ratio (genome wide) |For mammals: WGS: 2.0-2.1, WES: 3.0-3.3 |bcftools stats |Dramatic deviation from expected ratio can indicate artifactual bias. Typically elevated in coding regions where transversions are more likely to occur. |\r +|Base quality distribution |Dataset dependent. |bcftools stats |This will reflect the quality based filtering you performed. 
Dramatic deviation from expected ratio can indicate artifactual bias.|\r +|Indel ratio |Common: ~1.0, Rare: 0.2-0.5 |GATK CollectVariantCallingMetrics|This should be evaluated after custom filtering variants for your needs. Dramatic deviation from expected ratio can indicate artifactual bias.|\r +|Het/hom(non-ref) |~2.0 assuming Hardy-Weinberg equilibrium. |GATK CollectVariantCallingMetrics|Ancestry dependent, can vary dramatically. See [Wang et al. 2015](https://dx.doi.org/10.1093%2Fbioinformatics%2Fbtu668)|\r +|Mendelian error |0 |BCFtools +mendelian|Mendelian inheritance errors are likely erroneous genotype calls. See [Pilipenko et al. 2014](https://dx.doi.org/10.1186%2F1753-6561-8-S1-S21)|\r +|True positives |Dataset dependent. |Hap.py |Number of query variants that are present in the truth set. |\r +|False negatives |Dataset dependent. |Hap.py |Number of variants in truth set, not present in query VCF. |\r +|False positives |Dataset dependent. |Hap.py |Number of variants in query VCF, not present in truth set. |\r +|Recall |1 |Hap.py |Absence of false negatives. See [Krusche et al. 2019](https://doi.org/10.1038/s41587-019-0054-x) |\r +|Precision |1 |Hap.py |Absence of false positives. See [Krusche et al. 2019](https://doi.org/10.1038/s41587-019-0054-x) |\r +|F1-score |1 |Hap.py |Harmonic mean of recall and precision. See [Krusche et al. 2019](https://doi.org/10.1038/s41587-019-0054-x) |\r +|Genotype errors (FP.GT) |Dataset dependent. |Hap.py |Number of query variants with incorrect genotype |\r +\r +### Resources and references \r +\r +Eberle, M. A., Fritzilas, E., Krusche, P., Källberg, M., Moore, B. L., Bekritsky, M. A., Iqbal, Z., Chuang, H. Y., Humphray, S. J., Halpern, A. L., Kruglyak, S., Margulies, E. H., McVean, G., & Bentley, D. R. (2017). A reference data set of 5.4 million phased human variants validated by genetic inheritance from sequencing a three-generation 17-member pedigree. Genome research, 27(1), 157–164. 
https://doi.org/10.1101/gr.210500.116 \r +\r +Koboldt, D.C. Best practises for variant calling in clinical sequencing. Genome Med 12, 91 (2020). https://doi.org/10.1186/s13073-020-00791-w \r +\r +Krusche, P., Trigg, L., Boutros, P.C. et al. Best practices for benchmarking germline small-variant calls in human genomes. Nat Biotechnol 37, 555–560 (2019). https://doi.org/10.1038/s41587-019-0054-x \r +\r +Marshall, C.R., Chowdhury, S., Taft, R.J. et al. Best practices for the analytical validation of clinical whole-genome sequencing intended for the diagnosis of germline disease. npj Genom. Med. 5, 47 (2020). https://doi.org/10.1038/s41525-020-00154-9 \r +\r +Pilipenko, V.V., He, H., Kurowski, B.G. et al. Using Mendelian inheritance errors as quality control criteria in whole genome sequencing data set. BMC Proc 8, S21 (2014). https://doi.org/10.1186/1753-6561-8-S1-S21 \r +\r +Wang, J., Raskin, J., Samuels, D., Shyr, Y., Guo, Y., Genome measures used for quality control are dependent on gene function and ancestry, Bioinformatics 31, 318–323 (2015) https://doi.org/10.1093/bioinformatics/btu668 \r +\r +\r +## Help/FAQ/Troubleshooting\r +\r +If Hap.py throws an error, search the [issues at Hap.py GitHub repository](https://github.com/Illumina/hap.py/issues) and attempt to resolve it before submitting an issue here. \r +\r +## Acknowledgements/citations/credits \r +\r +### Authors \r +- Georgie Samaha (Sydney Informatics Hub, University of Sydney) \r +- Tracy Chew (Sydney Informatics Hub, University of Sydney) \r +- Cali Willet (Sydney Informatics Hub, University of Sydney) \r +- Nandan Deshpande (Sydney Informatics Hub, University of Sydney)\r +\r +Acknowledgements (and co-authorship, where appropriate) are an important way for us to demonstrate the value we bring to your research. Your research outcomes are vital for ongoing funding of the Sydney Informatics Hub and national compute facilities. 
We suggest including the following acknowledgement in any publications that follow from this work: \r +\r +The authors acknowledge the technical assistance provided by the Sydney Informatics Hub, a Core Research Facility of the University of Sydney and the Australian BioCommons which is enabled by NCRIS via Bioplatforms Australia. \r +""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "GermlineShortV_biovalidation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/339?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=13" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-07-12 13:18:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=13" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10551 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:10Z" ; + schema1:dateModified "2024-06-11T12:55:10Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=13" ; + schema1:version 13 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "SARS-CoV-2 variant prediction using Read It And Keep, fastp, bbmap and iVar" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/519?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for SARS-CoV-2 Illumina Amplicon pipeline - SANBI - v1.2" ; + schema1:sdDatePublished "2024-07-12 13:32:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/519/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 55051 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-28T11:42:46Z" ; + schema1:dateModified "2023-06-30T05:14:33Z" ; + schema1:description "SARS-CoV-2 variant prediction using Read It And Keep, fastp, bbmap and iVar" ; + schema1:keywords "covid-19, ARTIC, SARS-CoV-2, SANBI" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "SARS-CoV-2 Illumina Amplicon pipeline - SANBI - v1.2" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/519?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=13" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-07-12 13:20:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=13" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5840 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:04Z" ; + schema1:dateModified "2024-06-11T12:55:04Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=13" ; + schema1:version 13 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """This workflow represents the Default ML Pipeline for AutoML feature from MLme. Machine Learning Made Easy (MLme) is a novel tool that simplifies machine learning (ML) for researchers. By integrating four essential functionalities, namely data exploration, AutoML, CustomML, and visualization, MLme fulfills the diverse requirements of researchers while eliminating the need for extensive coding efforts. MLme serves as a valuable resource that empowers researchers of all technical levels to leverage ML for insightful data analysis and enhance research outcomes. 
By simplifying and automating various stages of the ML workflow, it enables researchers to allocate more time to their core research tasks, thereby enhancing efficiency and productivity.\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.571.1" ; + schema1:license "CC0-1.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for MLme: Machine Learning Made Easy" ; + schema1:sdDatePublished "2024-07-12 13:27:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/571/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 208026 ; + schema1:creator ; + schema1:dateCreated "2023-09-15T14:36:32Z" ; + schema1:dateModified "2023-09-15T15:36:33Z" ; + schema1:description """This workflow represents the Default ML Pipeline for AutoML feature from MLme. Machine Learning Made Easy (MLme) is a novel tool that simplifies machine learning (ML) for researchers. By integrating four essential functionalities, namely data exploration, AutoML, CustomML, and visualization, MLme fulfills the diverse requirements of researchers while eliminating the need for extensive coding efforts. MLme serves as a valuable resource that empowers researchers of all technical levels to leverage ML for insightful data analysis and enhance research outcomes. By simplifying and automating various stages of the ML workflow, it enables researchers to allocate more time to their core research tasks, thereby enhancing efficiency and productivity.\r +\r +""" ; + schema1:keywords "Bioinformatics, Machine Learning, automated workflows, GUI" ; + schema1:license "https://spdx.org/licenses/CC0-1.0" ; + schema1:name "MLme: Machine Learning Made Easy" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/571?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A pipeline to demultiplex, QC and map Nanopore data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1002?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/nanoseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nanoseq" ; + schema1:sdDatePublished "2024-07-12 13:20:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1002/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8657 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:05Z" ; + schema1:dateModified "2024-06-11T12:55:05Z" ; + schema1:description "A pipeline to demultiplex, QC and map Nanopore data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1002?version=5" ; + schema1:keywords "Alignment, demultiplexing, nanopore, QC" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nanoseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1002?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Rare disease researchers workflow is that they submit their raw data (fastq), run the mapping and variant calling RD-Connect pipeline and obtain unannotated gvcf files to further submit to the RD-Connect GPAP or analyse on their own.\r +\r +This demonstrator focuses on the variant calling pipeline. 
The raw genomic data is processed using the RD-Connect pipeline ([Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/27604516)) running on the standards (GA4GH) compliant, interoperable container orchestration platform.\r +\r +This demonstrator will be aligned with the current implementation study on [Development of Architecture for Software Containers at ELIXIR and its use by EXCELERATE use-case communities](docs/Appendix%201%20-%20Project%20Plan%202018-biocontainers%2020171117.pdf) \r +\r +For this implementation, different steps are required:\r +\r +1. Adapt the pipeline to CWL and dockerise elements \r +2. Align with IS efforts on software containers to package the different components (Nextflow) \r +3. Submit trio of Illumina NA12878 Platinum Genome or Exome to the GA4GH platform cloud (by Aspera or ftp server)\r +4. Run the RD-Connect pipeline on the container platform\r +5. Return corresponding gvcf files\r +6. OPTIONAL: annotate and update to RD-Connect playground instance\r +\r +N.B: The demonstrator might have some manual steps, which will not be in production. \r +\r +## RD-Connect pipeline\r +\r +Detailed information about the RD-Connect pipeline can be found in [Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/?term=27604516)\r +\r +![alt text](https://raw.githubusercontent.com/inab/Wetlab2Variations/eosc-life/docs/RD-Connect_pipeline.jpg)\r +\r +## The applications\r +\r +**1\\. Name of the application: Adaptor removal**\r +Function: remove sequencing adaptors \r +Container (readiness status, location, version): [cutadapt (v.1.18)](https://hub.docker.com/r/cnag/cutadapt) \r +Required resources in cores and RAM: current container size 169MB \r +Input data (amount, format, directory..): raw fastq \r +Output data: paired fastq without adaptors \r +\r +**2\\. Name of the application: Mapping and bam sorting** \r +Function: align data to reference genome \r +Container : [bwa-mem (v.0.7.17)](https://hub.docker.com/r/cnag/bwa) / [Sambamba (v. 
0.6.8 )](https://hub.docker.com/r/cnag/sambamba)(or samtools) \r +Resources :current container size 111MB / 32MB \r +Input data: paired fastq without adaptors \r +Output data: sorted bam \r +\r +**3\\. Name of the application: MarkDuplicates** \r +Function: Mark (and remove) duplicates \r +Container: [Picard (v.2.18.25)](https://hub.docker.com/r/cnag/picard)\r +Resources: current container size 261MB \r +Input data:sorted bam \r +Output data: Sorted bam with marked (or removed) duplicates \r +\r +**4\\. Name of the application: Base quality recalibration (BQSR)** \r +Function: Base quality recalibration \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data: Sorted bam with marked (or removed) duplicates \r +Output data: Sorted bam with marked duplicates & base quality recalculated \r +\r +**5\\. Name of the application: Variant calling** \r +Function: variant calling \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data:Sorted bam with marked duplicates & base quality recalculated \r +Output data: unannotated gvcf per sample \r +\r +**6\\. (OPTIONAL)Name of the application: Quality of the fastq** \r +Function: report on the sequencing quality \r +Container: [fastqc 0.11.8](https://hub.docker.com/r/cnag/fastqc)\r +Resources: current container size 173MB \r +Input data: raw fastq \r +Output data: QC report \r +\r +## Licensing\r +\r +GATK declares that archived packages are made available for free to academic researchers under a limited license for non-commercial use. If you need to use one of these packages for commercial use. 
https://software.broadinstitute.org/gatk/download/archive """ ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.107.1" ; + schema1:isBasedOn "https://github.com/inab/Wetlab2Variations/blob/eosc-life/cwl-workflows/workflows/workflow.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for VariantCaller_GATK3.6" ; + schema1:sdDatePublished "2024-07-12 13:37:13 +0100" ; + schema1:url "https://workflowhub.eu/workflows/107/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 30352 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7205 ; + schema1:creator , + ; + schema1:dateCreated "2021-02-18T15:01:31Z" ; + schema1:dateModified "2023-04-21T12:35:06Z" ; + schema1:description """Rare disease researchers workflow is that they submit their raw data (fastq), run the mapping and variant calling RD-Connect pipeline and obtain unannotated gvcf files to further submit to the RD-Connect GPAP or analyse on their own.\r +\r +This demonstrator focuses on the variant calling pipeline. The raw genomic data is processed using the RD-Connect pipeline ([Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/27604516)) running on the standards (GA4GH) compliant, interoperable container orchestration platform.\r +\r +This demonstrator will be aligned with the current implementation study on [Development of Architecture for Software Containers at ELIXIR and its use by EXCELERATE use-case communities](docs/Appendix%201%20-%20Project%20Plan%202018-biocontainers%2020171117.pdf) \r +\r +For this implementation, different steps are required:\r +\r +1. Adapt the pipeline to CWL and dockerise elements \r +2. Align with IS efforts on software containers to package the different components (Nextflow) \r +3. 
Submit trio of Illumina NA12878 Platinum Genome or Exome to the GA4GH platform cloud (by Aspera or ftp server)\r +4. Run the RD-Connect pipeline on the container platform\r +5. Return corresponding gvcf files\r +6. OPTIONAL: annotate and update to RD-Connect playground instance\r +\r +N.B: The demonstrator might have some manual steps, which will not be in production. \r +\r +## RD-Connect pipeline\r +\r +Detailed information about the RD-Connect pipeline can be found in [Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/?term=27604516)\r +\r +![alt text](https://raw.githubusercontent.com/inab/Wetlab2Variations/eosc-life/docs/RD-Connect_pipeline.jpg)\r +\r +## The applications\r +\r +**1\\. Name of the application: Adaptor removal**\r +Function: remove sequencing adaptors \r +Container (readiness status, location, version): [cutadapt (v.1.18)](https://hub.docker.com/r/cnag/cutadapt) \r +Required resources in cores and RAM: current container size 169MB \r +Input data (amount, format, directory..): raw fastq \r +Output data: paired fastq without adaptors \r +\r +**2\\. Name of the application: Mapping and bam sorting** \r +Function: align data to reference genome \r +Container : [bwa-mem (v.0.7.17)](https://hub.docker.com/r/cnag/bwa) / [Sambamba (v. 0.6.8 )](https://hub.docker.com/r/cnag/sambamba)(or samtools) \r +Resources :current container size 111MB / 32MB \r +Input data: paired fastq without adaptors \r +Output data: sorted bam \r +\r +**3\\. Name of the application: MarkDuplicates** \r +Function: Mark (and remove) duplicates \r +Container: [Picard (v.2.18.25)](https://hub.docker.com/r/cnag/picard)\r +Resources: current container size 261MB \r +Input data:sorted bam \r +Output data: Sorted bam with marked (or removed) duplicates \r +\r +**4\\. 
Name of the application: Base quality recalibration (BQSR)** \r +Function: Base quality recalibration \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data: Sorted bam with marked (or removed) duplicates \r +Output data: Sorted bam with marked duplicates & base quality recalculated \r +\r +**5\\. Name of the application: Variant calling** \r +Function: variant calling \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data:Sorted bam with marked duplicates & base quality recalculated \r +Output data: unannotated gvcf per sample \r +\r +**6\\. (OPTIONAL)Name of the application: Quality of the fastq** \r +Function: report on the sequencing quality \r +Container: [fastqc 0.11.8](https://hub.docker.com/r/cnag/fastqc)\r +Resources: current container size 173MB \r +Input data: raw fastq \r +Output data: QC report \r +\r +## Licensing\r +\r +GATK declares that archived packages are made available for free to academic researchers under a limited license for non-commercial use. If you need to use one of these packages for commercial use. https://software.broadinstitute.org/gatk/download/archive """ ; + schema1:image ; + schema1:keywords "CWL, variant_calling" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "VariantCaller_GATK3.6" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/107?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + ; + ns1:output , + . + + a schema1:Dataset ; + schema1:datePublished "2023-01-13T10:20:05.455448" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-sr/main" . 
+ + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.3" . + + a schema1:Dataset ; + schema1:datePublished "2024-06-19T07:13:32.852335" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/quality-and-contamination-control" ; + schema1:license "GPL-3.0-or-later" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "quality-and-contamination-control/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + schema1:datePublished "2023-10-19T15:11:18.072068" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "atacseq/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.14" . + + a schema1:Dataset ; + schema1:datePublished "2021-10-14T14:17:22.150269" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-ivar-analysis" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:sdDatePublished "2021-10-27 15:45:06 +0100" ; + schema1:softwareVersion "v0.2" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.8" . + + a schema1:Dataset ; + schema1:datePublished "2021-09-23T09:39:44.360747" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-ont-artic-variant-calling" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:sdDatePublished "2021-10-27 15:45:07 +0100" ; + schema1:softwareVersion "v0.3" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.7" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "From Copernicus Sentinel 5P data to panoply visualization of volcanic activity impact to atmosphere" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/756?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Sentinel 5P volcanic data visualization" ; + schema1:sdDatePublished "2024-07-12 13:24:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/756/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2951 ; + schema1:creator ; + schema1:dateCreated "2024-02-15T11:46:22Z" ; + schema1:dateModified "2024-02-15T11:47:12Z" ; + schema1:description "From Copernicus Sentinel 5P data to panoply visualization of volcanic activity impact to atmosphere" ; + schema1:isPartOf ; + schema1:keywords "Climate" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Sentinel 5P volcanic data visualization" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/756?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-07-12 13:18:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=8" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10513 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:10Z" ; + schema1:dateModified "2024-06-11T12:55:10Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + schema1:datePublished "2024-02-14T16:45:00.609259" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/assembly-with-flye" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "assembly-with-flye/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:Dataset ; + schema1:datePublished "2021-12-06T14:19:09.194259" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/gromacs-dctmd" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "gromacs-dctmd/main" ; + schema1:sdDatePublished "2021-12-07 03:00:58 +0000" ; + schema1:softwareVersion "v0.1.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Variant Interpretation Pipeline (VIP) that annotates, filters and reports prioritized causal variants in humans, see https://github.com/molgenis/vip for more information." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/125?version=1" ; + schema1:isBasedOn "https://github.com/molgenis/vip" ; + schema1:license "LGPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for MOLGENIS/VIP: Variant Interpretation Pipeline" ; + schema1:sdDatePublished "2024-07-12 13:18:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/125/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15164 ; + schema1:dateCreated "2021-06-21T08:33:47Z" ; + schema1:dateModified "2024-06-12T09:57:49Z" ; + schema1:description "Variant Interpretation Pipeline (VIP) that annotates, filters and reports prioritized causal variants in humans, see https://github.com/molgenis/vip for more information." ; + schema1:keywords "Annotation, Report, VCF, Classification, SV, Pipeline, Bioinformatics, Genomics, Workflows, Java, SNPs, variation, Nextflow" ; + schema1:license "https://spdx.org/licenses/LGPL-3.0" ; + schema1:name "MOLGENIS/VIP: Variant Interpretation Pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/125?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# The Polygenic Score Catalog Calculator (`pgsc_calc`)\r +\r +[![Documentation Status](https://readthedocs.org/projects/pgsc-calc/badge/?version=latest)](https://pgsc-calc.readthedocs.io/en/latest/?badge=latest)\r +[![pgscatalog/pgsc_calc CI](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml/badge.svg)](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.5970794.svg)](https://doi.org/10.5281/zenodo.5970794)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-≥22.10.0-23aa62.svg?labelColor=000000)](https://www.nextflow.io/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +\r +## Introduction\r +\r +`pgsc_calc` is a bioinformatics best-practice analysis pipeline for calculating\r +polygenic [risk] scores on samples with imputed genotypes using existing scoring\r +files from the [Polygenic Score (PGS) Catalog](https://www.pgscatalog.org/)\r +and/or user-defined PGS/PRS.\r +\r +## Pipeline summary\r +\r +

\r + \r +

\r +\r +The workflow performs the following steps:\r +\r +* Downloading scoring files using the PGS Catalog API in a specified genome build (GRCh37 and GRCh38).\r +* Reading custom scoring files (and performing a liftover if genotyping data is in a different build).\r +* Automatically combines and creates scoring files for efficient parallel computation of multiple PGS\r + - Matching variants in the scoring files against variants in the target dataset (in plink bfile/pfile or VCF format)\r +* Calculates PGS for all samples (linear sum of weights and dosages)\r +* Creates a summary report to visualize score distributions and pipeline metadata (variant matching QC)\r +\r +And optionally:\r +\r +- Genetic Ancestry: calculate similarity of target samples to populations in a\r + reference dataset ([1000 Genomes (1000G)](http://www.nature.com/nature/journal/v526/n7571/full/nature15393.html)), using principal components analysis (PCA)\r +- PGS Normalization: Using reference population data and/or PCA projections to report\r + individual-level PGS predictions (e.g. percentiles, z-scores) that account for genetic ancestry\r +\r +See documentation for a list of planned [features under development](https://pgsc-calc.readthedocs.io/en/latest/index.html#Features-under-development).\r +\r +## Quick start\r +\r +1. Install\r +[`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation)\r +(`>=22.10.0`)\r +\r +2. Install [`Docker`](https://docs.docker.com/engine/installation/) or\r +[`Singularity (v3.8.3 minimum)`](https://www.sylabs.io/guides/3.0/user-guide/)\r +(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort)\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile test,\r + ```\r +\r +4. 
Start running your own analysis!\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile --input samplesheet.csv --pgs_id PGS001229\r + ```\r +\r +See [getting\r +started](https://pgsc-calc.readthedocs.io/en/latest/getting-started.html) for more\r +details.\r +\r +## Documentation\r +\r +[Full documentation is available on Read the Docs](https://pgsc-calc.readthedocs.io/)\r +\r +## Credits\r +\r +pgscatalog/pgsc_calc is developed as part of the PGS Catalog project, a\r +collaboration between the University of Cambridge’s Department of Public Health\r +and Primary Care (Michael Inouye, Samuel Lambert) and the European\r +Bioinformatics Institute (Helen Parkinson, Laura Harris).\r +\r +The pipeline seeks to provide a standardized workflow for PGS calculation and\r +ancestry inference implemented in nextflow derived from an existing set of\r +tools/scripts developed by Inouye lab (Rodrigo Canovas, Scott Ritchie, Jingqin\r +Wu) and PGS Catalog teams (Samuel Lambert, Laurent Gil).\r +\r +The adaptation of the codebase, nextflow implementation, and PGS Catalog features\r +are written by Benjamin Wingfield, Samuel Lambert, Laurent Gil with additional input\r +from Aoife McMahon (EBI). Development of new features, testing, and code review\r +is ongoing including Inouye lab members (Rodrigo Canovas, Scott Ritchie) and others. A\r +manuscript describing the tool is *in preparation*. In the meantime if you use the\r +tool we ask you to cite the repo and the paper describing the PGS Catalog\r +resource:\r +\r +- >PGS Catalog Calculator _(in preparation)_. PGS Catalog\r + Team. [https://github.com/PGScatalog/pgsc_calc](https://github.com/PGScatalog/pgsc_calc)\r +- >Lambert _et al._ (2021) The Polygenic Score Catalog as an open database for\r +reproducibility and systematic evaluation. Nature Genetics. 
53:420–425\r +doi:[10.1038/s41588-021-00783-5](https://doi.org/10.1038/s41588-021-00783-5).\r +\r +This pipeline is distrubuted under an [Apache License](LICENSE) amd uses code and \r +infrastructure developed and maintained by the [nf-core](https://nf-co.re) community \r +(Ewels *et al. Nature Biotech* (2020) doi:[10.1038/s41587-020-0439-x](https://doi.org/10.1038/s41587-020-0439-x)), \r +reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +Additional references of open-source tools and data used in this pipeline are described in\r +[`CITATIONS.md`](CITATIONS.md).\r +\r +This work has received funding from EMBL-EBI core funds, the Baker Institute,\r +the University of Cambridge, Health Data Research UK (HDRUK), and the European\r +Union’s Horizon 2020 research and innovation programme under grant agreement No\r +101016775 INTERVENE.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/556?version=4" ; + schema1:isBasedOn "https://github.com/PGScatalog/pgsc_calc" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for The Polygenic Score Catalog Calculator" ; + schema1:sdDatePublished "2024-07-12 13:27:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/556/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1673 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-08-11T14:30:22Z" ; + schema1:dateModified "2023-08-11T14:30:22Z" ; + schema1:description """# The Polygenic Score Catalog Calculator (`pgsc_calc`)\r +\r +[![Documentation Status](https://readthedocs.org/projects/pgsc-calc/badge/?version=latest)](https://pgsc-calc.readthedocs.io/en/latest/?badge=latest)\r +[![pgscatalog/pgsc_calc CI](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml/badge.svg)](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.5970794.svg)](https://doi.org/10.5281/zenodo.5970794)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-≥22.10.0-23aa62.svg?labelColor=000000)](https://www.nextflow.io/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +\r +## Introduction\r +\r +`pgsc_calc` is a bioinformatics best-practice analysis pipeline for calculating\r +polygenic [risk] scores on samples with imputed genotypes using existing scoring\r +files from the [Polygenic Score (PGS) Catalog](https://www.pgscatalog.org/)\r +and/or user-defined PGS/PRS.\r +\r +## Pipeline summary\r +\r +

\r + \r +

\r +\r +The workflow performs the following steps:\r +\r +* Downloading scoring files using the PGS Catalog API in a specified genome build (GRCh37 and GRCh38).\r +* Reading custom scoring files (and performing a liftover if genotyping data is in a different build).\r +* Automatically combines and creates scoring files for efficient parallel computation of multiple PGS\r + - Matching variants in the scoring files against variants in the target dataset (in plink bfile/pfile or VCF format)\r +* Calculates PGS for all samples (linear sum of weights and dosages)\r +* Creates a summary report to visualize score distributions and pipeline metadata (variant matching QC)\r +\r +And optionally:\r +\r +- Genetic Ancestry: calculate similarity of target samples to populations in a\r + reference dataset ([1000 Genomes (1000G)](http://www.nature.com/nature/journal/v526/n7571/full/nature15393.html)), using principal components analysis (PCA)\r +- PGS Normalization: Using reference population data and/or PCA projections to report\r + individual-level PGS predictions (e.g. percentiles, z-scores) that account for genetic ancestry\r +\r +See documentation for a list of planned [features under development](https://pgsc-calc.readthedocs.io/en/latest/index.html#Features-under-development).\r +\r +## Quick start\r +\r +1. Install\r +[`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation)\r +(`>=22.10.0`)\r +\r +2. Install [`Docker`](https://docs.docker.com/engine/installation/) or\r +[`Singularity (v3.8.3 minimum)`](https://www.sylabs.io/guides/3.0/user-guide/)\r +(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort)\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile test,\r + ```\r +\r +4. 
Start running your own analysis!\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile --input samplesheet.csv --pgs_id PGS001229\r + ```\r +\r +See [getting\r +started](https://pgsc-calc.readthedocs.io/en/latest/getting-started.html) for more\r +details.\r +\r +## Documentation\r +\r +[Full documentation is available on Read the Docs](https://pgsc-calc.readthedocs.io/)\r +\r +## Credits\r +\r +pgscatalog/pgsc_calc is developed as part of the PGS Catalog project, a\r +collaboration between the University of Cambridge’s Department of Public Health\r +and Primary Care (Michael Inouye, Samuel Lambert) and the European\r +Bioinformatics Institute (Helen Parkinson, Laura Harris).\r +\r +The pipeline seeks to provide a standardized workflow for PGS calculation and\r +ancestry inference implemented in nextflow derived from an existing set of\r +tools/scripts developed by Inouye lab (Rodrigo Canovas, Scott Ritchie, Jingqin\r +Wu) and PGS Catalog teams (Samuel Lambert, Laurent Gil).\r +\r +The adaptation of the codebase, nextflow implementation, and PGS Catalog features\r +are written by Benjamin Wingfield, Samuel Lambert, Laurent Gil with additional input\r +from Aoife McMahon (EBI). Development of new features, testing, and code review\r +is ongoing including Inouye lab members (Rodrigo Canovas, Scott Ritchie) and others. A\r +manuscript describing the tool is *in preparation*. In the meantime if you use the\r +tool we ask you to cite the repo and the paper describing the PGS Catalog\r +resource:\r +\r +- >PGS Catalog Calculator _(in preparation)_. PGS Catalog\r + Team. [https://github.com/PGScatalog/pgsc_calc](https://github.com/PGScatalog/pgsc_calc)\r +- >Lambert _et al._ (2021) The Polygenic Score Catalog as an open database for\r +reproducibility and systematic evaluation. Nature Genetics. 
53:420–425\r +doi:[10.1038/s41588-021-00783-5](https://doi.org/10.1038/s41588-021-00783-5).\r +\r +This pipeline is distrubuted under an [Apache License](LICENSE) amd uses code and \r +infrastructure developed and maintained by the [nf-core](https://nf-co.re) community \r +(Ewels *et al. Nature Biotech* (2020) doi:[10.1038/s41587-020-0439-x](https://doi.org/10.1038/s41587-020-0439-x)), \r +reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +Additional references of open-source tools and data used in this pipeline are described in\r +[`CITATIONS.md`](CITATIONS.md).\r +\r +This work has received funding from EMBL-EBI core funds, the Baker Institute,\r +the University of Cambridge, Health Data Research UK (HDRUK), and the European\r +Union’s Horizon 2020 research and innovation programme under grant agreement No\r +101016775 INTERVENE.\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/556?version=3" ; + schema1:keywords "Nextflow, Workflows, polygenic risk score, polygenic score, prediction, GWAS, genomic ancestry" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "The Polygenic Score Catalog Calculator" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/556?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# skim2mt\r +\r +**skim2mt** is a snakemake pipeline for the batch assembly, annotation, and phylogenetic analysis of mitochondrial genomes from low coverage genome skims. The pipeline was designed to work with sequence data from museum collections. 
However, it should also work with genome skims from recently collected samples.\r +\r +## Contents\r + - [Setup](#setup)\r + - [Example data](#example-data)\r + - [Input](#input)\r + - [Output](#output)\r + - [Filtering contaminants](#filtering-contaminants)\r + - [Assembly and annotation only](#assembly-and-annotation-only)\r + - [Running your own data](#running-your-own-data)\r + - [Getting help](#getting-help)\r + - [Citations](#citations)\r +\r +## Setup\r +\r +The pipeline is written in Snakemake and uses conda and singularity to install the necessary tools.\r +\r +It is *strongly recommended* to install conda using Mambaforge. See details here https://snakemake.readthedocs.io/en/stable/getting_started/installation.html\r +\r +Once conda is installed, you can pull the github repo and set up the base conda environment.\r +\r +```\r +# get github repo\r +git clone https://github.com/o-william-white/skim2mt\r +\r +# change dir\r +cd skim2mt\r +\r +# setup conda env\r +conda env create -n snakemake -f workflow/envs/conda_env.yaml\r +```\r +\r +
\r +\r +
\r +\r +## Example data\r +\r +Before you run your own data, it is recommended to run the example datasets provided . This will confirm there are no user-specific issues with the setup and it also installs all the dependencies. The example data includes simulated mitochondrial data from 25 different butterfly species. \r +\r +To run the example data, use the code below. **Note that you need to change the user email to your own address**. The email is required by the Bio Entrez package to fetch reference sequences. The first time you run the pipeline, it will take some time to install each of the conda environments, so it is a good time to take a tea break :).\r +```\r +conda activate snakemake\r +\r +snakemake \\\r + --cores 4 \\\r + --use-conda \\\r + --use-singularity \\ \r + --config user_email=user@example_email.com\r +```\r +\r +
\r +\r +
\r +\r +## Input\r +\r +Snakemake requires a `config.yaml` and `samples.csv` to define input parameters and sequence data for each sample. \r +\r +For the example data provided, the config file is located here `config/config.yaml` and it looks like this:\r +```\r +# path to sample sheet csv with columns for ID,forward,reverse,taxid,seed,gene\r +samples: config/samples.csv\r +\r +# user email\r +user_email: user@example_email.com\r +\r +# getorganelle reference (go_fetch, custom)\r +go_reference: go_fetch\r +\r +# forward adapter\r +forward_adapter: AGATCGGAAGAGCACACGTCTGAACTCCAGTCA\r +\r +# reverse adapter\r +reverse_adapter: AGATCGGAAGAGCGTCGTGTAGGGAAAGAGTGT\r +\r +# fastp deduplication (True/False)\r +fastp_dedup: True\r +\r +# mitos refseq database (refseq39, refseq63f, refseq63m, refseq63o, refseq89f, refseq89m, refseq89o)\r +mitos_refseq: refseq39\r +\r +# mito code (2 = Vertebrate, 4 = Mold, 5 = Invertebrate, 9 = Echinoderm, 13 = Ascidian, 14 = Alternative flatworm)\r +mitos_code: 5\r +\r +# alignment trimming method to use (gblocks or clipkit)\r +alignment_trim: gblocks\r +\r +# alignment missing data threshold for alignment (0.0 - 1.0)\r +missing_threshold: 0.5\r +\r +# name of outgroup sample (optional)\r +# use "NA" if there is no obvious outgroup\r +# if more than one outgroup use a comma separated list i.e. 
"sampleA,sampleB"\r +outgroup: Eurema_blanda\r +\r +# plot dimensions (cm)\r +plot_height: 20\r +plot_width: 20\r +```\r +\r +The example samples.csv file is located here `config/samples.csv` and it looks like this (note that the seed and gene columns are only required if the custom getorganelle database option is specified in the config file):\r +\r +\r + ID | forward | reverse | taxid | seed | gene \r +----|---------|---------|-------|------|------\r +Adelpha_iphiclus | .test/reads/Adelpha_iphiclus_1.fq.gz | .test/reads/Adelpha_iphiclus_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Anartia_jatrophae_saturata | .test/reads/Anartia_jatrophae_saturata_1.fq.gz | .test/reads/Anartia_jatrophae_saturata_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Araschnia_levana | .test/reads/Araschnia_levana_1.fq.gz | .test/reads/Araschnia_levana_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Auzakia_danava | .test/reads/Auzakia_danava_1.fq.gz | .test/reads/Auzakia_danava_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Baeotus_beotus | .test/reads/Baeotus_beotus_1.fq.gz | .test/reads/Baeotus_beotus_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Catacroptera_cloanthe | .test/reads/Catacroptera_cloanthe_1.fq.gz | .test/reads/Catacroptera_cloanthe_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Chalinga_pratti | .test/reads/Chalinga_pratti_1.fq.gz | .test/reads/Chalinga_pratti_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Diaethria_gabaza_eupepla | .test/reads/Diaethria_gabaza_eupepla_1.fq.gz | .test/reads/Diaethria_gabaza_eupepla_2.fq.gz | 127268 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Doleschallia_melana | .test/reads/Doleschallia_melana_1.fq.gz | .test/reads/Doleschallia_melana_2.fq.gz | 
40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Eurema_blanda | .test/reads/Eurema_blanda_1.fq.gz | .test/reads/Eurema_blanda_2.fq.gz | 42450 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Hypolimnas_usambara | .test/reads/Hypolimnas_usambara_1.fq.gz | .test/reads/Hypolimnas_usambara_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Junonia_villida | .test/reads/Junonia_villida_1.fq.gz | .test/reads/Junonia_villida_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Kallima_paralekta | .test/reads/Kallima_paralekta_1.fq.gz | .test/reads/Kallima_paralekta_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Kallimoides_rumia | .test/reads/Kallimoides_rumia_1.fq.gz | .test/reads/Kallimoides_rumia_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Litinga_cottini | .test/reads/Litinga_cottini_1.fq.gz | .test/reads/Litinga_cottini_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Mallika_jacksoni | .test/reads/Mallika_jacksoni_1.fq.gz | .test/reads/Mallika_jacksoni_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Moduza_procris | .test/reads/Moduza_procris_1.fq.gz | .test/reads/Moduza_procris_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Parasarpa_zayla | .test/reads/Parasarpa_zayla_1.fq.gz | .test/reads/Parasarpa_zayla_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Phaedyma_columella | .test/reads/Phaedyma_columella_1.fq.gz | .test/reads/Phaedyma_columella_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Precis_pelarga | .test/reads/Precis_pelarga_1.fq.gz | .test/reads/Precis_pelarga_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Protogoniomorpha_temora | 
.test/reads/Protogoniomorpha_temora_1.fq.gz | .test/reads/Protogoniomorpha_temora_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Salamis_cacta | .test/reads/Salamis_cacta_1.fq.gz | .test/reads/Salamis_cacta_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Smyrna_blomfildia | .test/reads/Smyrna_blomfildia_1.fq.gz | .test/reads/Smyrna_blomfildia_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Tacola_larymna | .test/reads/Tacola_larymna_1.fq.gz | .test/reads/Tacola_larymna_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Yoma_algina | .test/reads/Yoma_algina_1.fq.gz | .test/reads/Yoma_algina_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +\r +\r +
\r +\r +
\r +\r +## Output\r +\r +All output files are saved to the `results` direcotry. Below is a table summarising all of the output files generated by the pipeline.\r +\r +| Directory | Description |\r +|-----------------------|---------------------------|\r +| fastqc_raw | Fastqc reports for raw input reads |\r +| fastp | Fastp reports from quality control of raw reads |\r +| fastqc_qc | Fastqc reports for quality controlled reads |\r +| go_fetch | Optional output containing reference databasesused by GetOrganelle |\r +| getorganelle | GetOrganelle output with a directory for each sample |\r +| assembled_sequence | Assembled sequences selected from GetOrganelle output and renamed |\r +| seqkit | Seqkit summary of each assembly |\r +| blastn | Blastn output of each assembly |\r +| minimap | Mapping output of quality filtered reads against each assembly |\r +| blobtools | Blobtools assembly summary collating blastn and mapping output |\r +| assess_assembly | Plots of annotations, mean depth, GC content and proportion mismatches |\r +| annotations | Annotation outputs of mitos |\r +| summary | Summary per sample (seqkit stats), contig (GC content, length, coverage, taxonomy and annotations) and annotated gene counts |\r +| annotated_genes | Unaligned fasta files of annotated genes identified across all samples |\r +| mafft | Mafft aligned fasta files of annotated genes identified across all samples |\r +| mafft_filtered | Mafft aligned fasta files after the removal of sequences based on a missing data threshold |\r +| alignment_trim | Ambiguous parts of alignment removed using either gblocks or clipkit |\r +| iqtree | Iqtree phylogenetic analysis of annotated genes |\r +| plot_tree | Plots of phylogenetic trees |\r +\r +
\r +\r +
\r +\r +## Filtering contaminants\r +\r +If you are working with museum collections, it is possible that you may assemble and annotate sequences from contaminant/non-target species. *Contaminant sequences can be identified based on the blast search output or unusual placement in the phylogenetic trees* (see blobtools and plot_tree outputs). \r +\r +A supplementary python script `format_alignments.py `is provided to remove putative contaminants from alignments, and format the alignments for downstream phylogenetic analysis.\r +\r +For example, let's say we wanted to remove all sequences from the sample "Kallima_paralekta" and atp6 gene sequences, you could run the script as shown below. The script works by identifying and removing sequences that have names with `Kallima_paralekta` or `atp6` in the sequence names. The filtered alignments are written to a new output directory `filter_alignments_output`.\r +\r +```\r +python workflow/scripts/format_alignments.py \\\r + --input results/mafft_filtered/ \\\r + --cont Kallima_paralekta atp6 \\\r + --output filter_alignments_output\r +```\r +\r +*Note that the output fasta files have been reformatted so each alignment file is named after the gene and each sequence is named after the sample.* This is useful if you would like to run our related pipeline **gene2phylo** for further phylogenetic analyses.\r +\r +
\r +\r +
\r +\r +## Assembly and annotation only\r +\r +If you are only interested in the assembly of mitochondrial sequences and annotation of genes without the phylogenetic analysis, you can stop the pipeline from running the gene alignment and phylogenetic analyses using the `--omit-from` parameter.\r +```\r +snakemake \\\r + --cores 4 \\\r + --use-conda \\\r + --use-singularity \\\r + --config user_email=user@example_email.com \\\r + --omit-from mafft\r +```\r +\r +
\r +\r +
\r +\r +## Running your own data\r +\r +The first thing you need to do is generate your own config.yaml and samples.csv files, using the files provided as a template.\r +\r +GetOrganelle requires reference data in the format of seed and gene reference fasta files. By default the pipeline uses a basic python script called go_fetch.py https://github.com/o-william-white/go_fetch to download and format reference data formatted for GetOrganelle. \r +\r +go_fetch.py works by searching NCBI based on the NCBI taxonomy specified by the taxid column in the samples.csv file. Note that the seed and gene columns in the samples.csv file are only required if you want to provide your own custom GetOrganelle seed and gene reference databases. \r +\r +You can use the default reference data for GetOrganelle, but I would recommend using custom reference databases where possible. See here for details of how to set up your own databases https://github.com/Kinggerm/GetOrganelle/wiki/FAQ#how-to-assemble-a-target-organelle-genome-using-my-own-reference\r +\r +## Getting help\r +\r +If you have any questions, please do get in touch in the issues or by email o.william.white@gmail.com\r +\r +
\r +\r +
\r +\r +## Citations\r +\r +If you use the pipeline, please cite our bioarxiv preprint: https://doi.org/10.1101/2023.08.11.552985\r +\r +Since the pipeline is a wrapper for several other bioinformatic tools we also ask that you cite the tools used by the pipeline:\r + - Fastqc https://github.com/s-andrews/FastQC\r + - Fastp https://doi.org/10.1093/bioinformatics/bty560\r + - GetOrganelle https://doi.org/10.1186/s13059-020-02154-5\r + - Blastn https://doi.org/10.1186/1471-2105-10-421\r + - Minimap2 https://doi.org/10.1093/bioinformatics/bty191\r + - Blobtools https://doi.org/10.12688/f1000research.12232.1\r + - Seqkit https://doi.org/10.1371/journal.pone.0163962\r + - MITOS2 https://doi.org/10.1016/j.ympev.2012.08.023\r + - Gblocks (default) https://doi.org/10.1093/oxfordjournals.molbev.a026334\r + - Clipkit (optional) https://doi.org/10.1371/journal.pbio.3001007\r + - Mafft https://doi.org/10.1093/molbev/mst010\r + - Iqtree https://doi.org/10.1093/molbev/msu300\r + - ete3 https://doi.org/10.1093/molbev/msw046\r + - ggtree https://doi.org/10.1111/2041-210X.12628\r +\r +
\r +\r +
\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/791?version=1" ; + schema1:isBasedOn "https://github.com/o-william-white/skim2mt.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for skim2mt" ; + schema1:sdDatePublished "2024-07-12 13:23:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/791/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2844 ; + schema1:dateCreated "2024-03-12T15:03:20Z" ; + schema1:dateModified "2024-03-12T15:03:20Z" ; + schema1:description """# skim2mt\r +\r +**skim2mt** is a snakemake pipeline for the batch assembly, annotation, and phylogenetic analysis of mitochondrial genomes from low coverage genome skims. The pipeline was designed to work with sequence data from museum collections. However, it should also work with genome skims from recently collected samples.\r +\r +## Contents\r + - [Setup](#setup)\r + - [Example data](#example-data)\r + - [Input](#input)\r + - [Output](#output)\r + - [Filtering contaminants](#filtering-contaminants)\r + - [Assembly and annotation only](#assembly-and-annotation-only)\r + - [Running your own data](#running-your-own-data)\r + - [Getting help](#getting-help)\r + - [Citations](#citations)\r +\r +## Setup\r +\r +The pipeline is written in Snakemake and uses conda and singularity to install the necessary tools.\r +\r +It is *strongly recommended* to install conda using Mambaforge. 
See details here https://snakemake.readthedocs.io/en/stable/getting_started/installation.html\r +\r +Once conda is installed, you can pull the github repo and set up the base conda environment.\r +\r +```\r +# get github repo\r +git clone https://github.com/o-william-white/skim2mt\r +\r +# change dir\r +cd skim2mt\r +\r +# setup conda env\r +conda env create -n snakemake -f workflow/envs/conda_env.yaml\r +```\r +\r +
\r +\r +
\r +\r +## Example data\r +\r +Before you run your own data, it is recommended to run the example datasets provided . This will confirm there are no user-specific issues with the setup and it also installs all the dependencies. The example data includes simulated mitochondrial data from 25 different butterfly species. \r +\r +To run the example data, use the code below. **Note that you need to change the user email to your own address**. The email is required by the Bio Entrez package to fetch reference sequences. The first time you run the pipeline, it will take some time to install each of the conda environments, so it is a good time to take a tea break :).\r +```\r +conda activate snakemake\r +\r +snakemake \\\r + --cores 4 \\\r + --use-conda \\\r + --use-singularity \\ \r + --config user_email=user@example_email.com\r +```\r +\r +
\r +\r +
\r +\r +## Input\r +\r +Snakemake requires a `config.yaml` and `samples.csv` to define input parameters and sequence data for each sample. \r +\r +For the example data provided, the config file is located here `config/config.yaml` and it looks like this:\r +```\r +# path to sample sheet csv with columns for ID,forward,reverse,taxid,seed,gene\r +samples: config/samples.csv\r +\r +# user email\r +user_email: user@example_email.com\r +\r +# getorganelle reference (go_fetch, custom)\r +go_reference: go_fetch\r +\r +# forward adapter\r +forward_adapter: AGATCGGAAGAGCACACGTCTGAACTCCAGTCA\r +\r +# reverse adapter\r +reverse_adapter: AGATCGGAAGAGCGTCGTGTAGGGAAAGAGTGT\r +\r +# fastp deduplication (True/False)\r +fastp_dedup: True\r +\r +# mitos refseq database (refseq39, refseq63f, refseq63m, refseq63o, refseq89f, refseq89m, refseq89o)\r +mitos_refseq: refseq39\r +\r +# mito code (2 = Vertebrate, 4 = Mold, 5 = Invertebrate, 9 = Echinoderm, 13 = Ascidian, 14 = Alternative flatworm)\r +mitos_code: 5\r +\r +# alignment trimming method to use (gblocks or clipkit)\r +alignment_trim: gblocks\r +\r +# alignment missing data threshold for alignment (0.0 - 1.0)\r +missing_threshold: 0.5\r +\r +# name of outgroup sample (optional)\r +# use "NA" if there is no obvious outgroup\r +# if more than one outgroup use a comma separated list i.e. 
"sampleA,sampleB"\r +outgroup: Eurema_blanda\r +\r +# plot dimensions (cm)\r +plot_height: 20\r +plot_width: 20\r +```\r +\r +The example samples.csv file is located here `config/samples.csv` and it looks like this (note that the seed and gene columns are only required if the custom getorganelle database option is specified in the config file):\r +\r +\r + ID | forward | reverse | taxid | seed | gene \r +----|---------|---------|-------|------|------\r +Adelpha_iphiclus | .test/reads/Adelpha_iphiclus_1.fq.gz | .test/reads/Adelpha_iphiclus_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Anartia_jatrophae_saturata | .test/reads/Anartia_jatrophae_saturata_1.fq.gz | .test/reads/Anartia_jatrophae_saturata_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Araschnia_levana | .test/reads/Araschnia_levana_1.fq.gz | .test/reads/Araschnia_levana_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Auzakia_danava | .test/reads/Auzakia_danava_1.fq.gz | .test/reads/Auzakia_danava_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Baeotus_beotus | .test/reads/Baeotus_beotus_1.fq.gz | .test/reads/Baeotus_beotus_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Catacroptera_cloanthe | .test/reads/Catacroptera_cloanthe_1.fq.gz | .test/reads/Catacroptera_cloanthe_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Chalinga_pratti | .test/reads/Chalinga_pratti_1.fq.gz | .test/reads/Chalinga_pratti_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Diaethria_gabaza_eupepla | .test/reads/Diaethria_gabaza_eupepla_1.fq.gz | .test/reads/Diaethria_gabaza_eupepla_2.fq.gz | 127268 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Doleschallia_melana | .test/reads/Doleschallia_melana_1.fq.gz | .test/reads/Doleschallia_melana_2.fq.gz | 
40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Eurema_blanda | .test/reads/Eurema_blanda_1.fq.gz | .test/reads/Eurema_blanda_2.fq.gz | 42450 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Hypolimnas_usambara | .test/reads/Hypolimnas_usambara_1.fq.gz | .test/reads/Hypolimnas_usambara_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Junonia_villida | .test/reads/Junonia_villida_1.fq.gz | .test/reads/Junonia_villida_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Kallima_paralekta | .test/reads/Kallima_paralekta_1.fq.gz | .test/reads/Kallima_paralekta_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Kallimoides_rumia | .test/reads/Kallimoides_rumia_1.fq.gz | .test/reads/Kallimoides_rumia_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Litinga_cottini | .test/reads/Litinga_cottini_1.fq.gz | .test/reads/Litinga_cottini_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Mallika_jacksoni | .test/reads/Mallika_jacksoni_1.fq.gz | .test/reads/Mallika_jacksoni_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Moduza_procris | .test/reads/Moduza_procris_1.fq.gz | .test/reads/Moduza_procris_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Parasarpa_zayla | .test/reads/Parasarpa_zayla_1.fq.gz | .test/reads/Parasarpa_zayla_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Phaedyma_columella | .test/reads/Phaedyma_columella_1.fq.gz | .test/reads/Phaedyma_columella_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Precis_pelarga | .test/reads/Precis_pelarga_1.fq.gz | .test/reads/Precis_pelarga_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Protogoniomorpha_temora | 
.test/reads/Protogoniomorpha_temora_1.fq.gz | .test/reads/Protogoniomorpha_temora_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Salamis_cacta | .test/reads/Salamis_cacta_1.fq.gz | .test/reads/Salamis_cacta_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Smyrna_blomfildia | .test/reads/Smyrna_blomfildia_1.fq.gz | .test/reads/Smyrna_blomfildia_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Tacola_larymna | .test/reads/Tacola_larymna_1.fq.gz | .test/reads/Tacola_larymna_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Yoma_algina | .test/reads/Yoma_algina_1.fq.gz | .test/reads/Yoma_algina_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +\r +\r +
\r +\r +
\r +\r +## Output\r +\r +All output files are saved to the `results` direcotry. Below is a table summarising all of the output files generated by the pipeline.\r +\r +| Directory | Description |\r +|-----------------------|---------------------------|\r +| fastqc_raw | Fastqc reports for raw input reads |\r +| fastp | Fastp reports from quality control of raw reads |\r +| fastqc_qc | Fastqc reports for quality controlled reads |\r +| go_fetch | Optional output containing reference databasesused by GetOrganelle |\r +| getorganelle | GetOrganelle output with a directory for each sample |\r +| assembled_sequence | Assembled sequences selected from GetOrganelle output and renamed |\r +| seqkit | Seqkit summary of each assembly |\r +| blastn | Blastn output of each assembly |\r +| minimap | Mapping output of quality filtered reads against each assembly |\r +| blobtools | Blobtools assembly summary collating blastn and mapping output |\r +| assess_assembly | Plots of annotations, mean depth, GC content and proportion mismatches |\r +| annotations | Annotation outputs of mitos |\r +| summary | Summary per sample (seqkit stats), contig (GC content, length, coverage, taxonomy and annotations) and annotated gene counts |\r +| annotated_genes | Unaligned fasta files of annotated genes identified across all samples |\r +| mafft | Mafft aligned fasta files of annotated genes identified across all samples |\r +| mafft_filtered | Mafft aligned fasta files after the removal of sequences based on a missing data threshold |\r +| alignment_trim | Ambiguous parts of alignment removed using either gblocks or clipkit |\r +| iqtree | Iqtree phylogenetic analysis of annotated genes |\r +| plot_tree | Plots of phylogenetic trees |\r +\r +
\r +\r +
\r +\r +## Filtering contaminants\r +\r +If you are working with museum collections, it is possible that you may assemble and annotate sequences from contaminant/non-target species. *Contaminant sequences can be identified based on the blast search output or unusual placement in the phylogenetic trees* (see blobtools and plot_tree outputs). \r +\r +A supplementary python script `format_alignments.py `is provided to remove putative contaminants from alignments, and format the alignments for downstream phylogenetic analysis.\r +\r +For example, let's say we wanted to remove all sequences from the sample "Kallima_paralekta" and atp6 gene sequences, you could run the script as shown below. The script works by identifying and removing sequences that have names with `Kallima_paralekta` or `atp6` in the sequence names. The filtered alignments are written to a new output directory `filter_alignments_output`.\r +\r +```\r +python workflow/scripts/format_alignments.py \\\r + --input results/mafft_filtered/ \\\r + --cont Kallima_paralekta atp6 \\\r + --output filter_alignments_output\r +```\r +\r +*Note that the output fasta files have been reformatted so each alignment file is named after the gene and each sequence is named after the sample.* This is useful if you would like to run our related pipeline **gene2phylo** for further phylogenetic analyses.\r +\r +
\r +\r +
\r +\r +## Assembly and annotation only\r +\r +If you are only interested in the assembly of mitochondrial sequences and annotation of genes without the phylogenetic analysis, you can stop the pipeline from running the gene alignment and phylogenetic analyses using the `--omit-from` parameter.\r +```\r +snakemake \\\r + --cores 4 \\\r + --use-conda \\\r + --use-singularity \\\r + --config user_email=user@example_email.com \\\r + --omit-from mafft\r +```\r +\r +
\r +\r +
\r +\r +## Running your own data\r +\r +The first thing you need to do is generate your own config.yaml and samples.csv files, using the files provided as a template.\r +\r +GetOrganelle requires reference data in the format of seed and gene reference fasta files. By default the pipeline uses a basic python script called go_fetch.py https://github.com/o-william-white/go_fetch to download and format reference data formatted for GetOrganelle. \r +\r +go_fetch.py works by searching NCBI based on the NCBI taxonomy specified by the taxid column in the samples.csv file. Note that the seed and gene columns in the samples.csv file are only required if you want to provide your own custom GetOrganelle seed and gene reference databases. \r +\r +You can use the default reference data for GetOrganelle, but I would recommend using custom reference databases where possible. See here for details of how to set up your own databases https://github.com/Kinggerm/GetOrganelle/wiki/FAQ#how-to-assemble-a-target-organelle-genome-using-my-own-reference\r +\r +## Getting help\r +\r +If you have any questions, please do get in touch in the issues or by email o.william.white@gmail.com\r +\r +
\r +\r +
\r +\r +## Citations\r +\r +If you use the pipeline, please cite our bioarxiv preprint: https://doi.org/10.1101/2023.08.11.552985\r +\r +Since the pipeline is a wrapper for several other bioinformatic tools we also ask that you cite the tools used by the pipeline:\r + - Fastqc https://github.com/s-andrews/FastQC\r + - Fastp https://doi.org/10.1093/bioinformatics/bty560\r + - GetOrganelle https://doi.org/10.1186/s13059-020-02154-5\r + - Blastn https://doi.org/10.1186/1471-2105-10-421\r + - Minimap2 https://doi.org/10.1093/bioinformatics/bty191\r + - Blobtools https://doi.org/10.12688/f1000research.12232.1\r + - Seqkit https://doi.org/10.1371/journal.pone.0163962\r + - MITOS2 https://doi.org/10.1016/j.ympev.2012.08.023\r + - Gblocks (default) https://doi.org/10.1093/oxfordjournals.molbev.a026334\r + - Clipkit (optional) https://doi.org/10.1371/journal.pbio.3001007\r + - Mafft https://doi.org/10.1093/molbev/mst010\r + - Iqtree https://doi.org/10.1093/molbev/msu300\r + - ete3 https://doi.org/10.1093/molbev/msw046\r + - ggtree https://doi.org/10.1111/2041-210X.12628\r +\r +
\r +\r +
\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "skim2mt" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/791?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """IDR is based on OMERO and thus all what we show in this notebook can be easily adjusted for use against another OMERO server, e.g. your institutional OMERO server instance.\r +\r +The main objective of this notebook is to demonstrate how public resources such as the IDR can be used to train your neural network or validate software tools.\r +\r +The authors of the PLOS Biology paper, "Nessys: A new set of tools for the automated detection of nuclei within intact tissues and dense 3D cultures" published in August 2019: https://doi.org/10.1371/journal.pbio.3000388, considered several image segmenation packages, but they did not use the approach described in this notebook.\r +\r +We will analyse the data using Cellpose and compare the output with the original segmentation produced by the authors. StarDist was not considered by the authors. Our workflow shows how public repository can be accessed and data inside it used to validate software tools or new algorithms.\r +\r +We will use an image (id=6001247) referenced in the paper. The image can be viewed online in the Image Data Resource (IDR).\r +\r +We will use a predefined model from Cellpose as a starting point. 
Steps to access data from IDR could be re-used if you wish to create a new model (outside the scope of this notebook).\r +\r +## Launch\r +This notebook uses the [environment_cellpose.yml](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/environment_cellpose.yml) file.\r +\r +See [Setup](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/setup.md).""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.489.1" ; + schema1:isBasedOn "https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/Cellpose.ipynb" ; + schema1:license "BSD-2-Clause" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Validate a tool against IDR data: Load Image with labels from IDR, re-analyze using Cellpose" ; + schema1:sdDatePublished "2024-07-12 13:33:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/489/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 525382 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-01T09:47:01Z" ; + schema1:dateModified "2023-06-01T10:57:27Z" ; + schema1:description """IDR is based on OMERO and thus all what we show in this notebook can be easily adjusted for use against another OMERO server, e.g. 
your institutional OMERO server instance.\r +\r +The main objective of this notebook is to demonstrate how public resources such as the IDR can be used to train your neural network or validate software tools.\r +\r +The authors of the PLOS Biology paper, "Nessys: A new set of tools for the automated detection of nuclei within intact tissues and dense 3D cultures" published in August 2019: https://doi.org/10.1371/journal.pbio.3000388, considered several image segmenation packages, but they did not use the approach described in this notebook.\r +\r +We will analyse the data using Cellpose and compare the output with the original segmentation produced by the authors. StarDist was not considered by the authors. Our workflow shows how public repository can be accessed and data inside it used to validate software tools or new algorithms.\r +\r +We will use an image (id=6001247) referenced in the paper. The image can be viewed online in the Image Data Resource (IDR).\r +\r +We will use a predefined model from Cellpose as a starting point. Steps to access data from IDR could be re-used if you wish to create a new model (outside the scope of this notebook).\r +\r +## Launch\r +This notebook uses the [environment_cellpose.yml](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/environment_cellpose.yml) file.\r +\r +See [Setup](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/setup.md).""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "imaging, Machine Learning, Python" ; + schema1:license "https://spdx.org/licenses/BSD-2-Clause" ; + schema1:name "Validate a tool against IDR data: Load Image with labels from IDR, re-analyze using Cellpose" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/Cellpose.ipynb" ; + schema1:version 1 . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 513401 ; + schema1:url "https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/includes/CellposeIDR.png" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.280.3" ; + schema1:isBasedOn 
"https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_ligand_parameterization/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python GMX Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-07-12 13:34:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/280/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1728 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-22T09:52:07Z" ; + schema1:dateModified "2023-01-16T13:58:31Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/280?version=3" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python GMX Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_ligand_parameterization/python/workflow.py" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# MoP2- DSL2 version of Master of Pores\r +[![Docker Build Status](https://img.shields.io/docker/automated/biocorecrg/nanopore.svg)](https://cloud.docker.com/u/biocorecrg/repository/docker/biocorecrg/nanopore/builds)\r +[![mop2-CI](https://github.com/biocorecrg/MoP2/actions/workflows/build.yml/badge.svg)](https://github.com/biocorecrg/MoP2/actions/workflows/build.yml)\r +[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)\r +[![Nextflow version](https://img.shields.io/badge/Nextflow-21.04.1-brightgreen)](https://www.nextflow.io/)\r +[![Nextflow DSL2](https://img.shields.io/badge/Nextflow-DSL2-brightgreen)](https://www.nextflow.io/)\r +[![Singularity version](https://img.shields.io/badge/Singularity-v3.2.1-green.svg)](https://www.sylabs.io/)\r +[![Docker version](https://img.shields.io/badge/Docker-v20.10.8-blue)](https://www.docker.com/)\r +\r +
\r +\r +![MOP2](https://github.com/biocorecrg/MoP2/blob/main/img/master_red.jpg?raw=true)\r +\r +\r +Inspired by Metallica's [Master Of Puppets](https://www.youtube.com/watch?v=S7blkui3nQc)\r +\r +## Install\r +Please install nextflow and singularity or docker before.\r +\r +Then download the repo:\r +\r +```\r +git clone --depth 1 --recurse-submodules git@github.com:biocorecrg/MOP2.git\r +```\r +\r +You can use INSTALL.sh to download the version 3.4.5 of guppy or you can replace it with the version you prefer. Please consider that the support of VBZ compression of fast5 started with version 3.4.X. \r +\r +```\r +cd MoP2; sh INSTALL.sh\r +```\r +\r +## Testing\r +You can replace ```-with-singularity``` with ```-with-docker``` if you want to use the docker engine.\r +\r +```\r +cd mop_preprocess\r +nextflow run mop_preprocess.nf -with-singularity -bg -profile local > log\r +\r +```\r +\r +## Reference\r +If you use this tool, please cite our papers:\r +\r +["Nanopore Direct RNA Sequencing Data Processing and Analysis Using MasterOfPores"\r +Cozzuto L, Delgado-Tejedor A, Hermoso Pulido T, Novoa EM, Ponomarenko J. *N. Methods Mol Biol. 2023*;2624:185-205. doi: 10.1007/978-1-0716-2962-8_13.](https://link.springer.com/protocol/10.1007/978-1-0716-2962-8_13)\r +\r +["MasterOfPores: A Workflow for the Analysis of Oxford Nanopore Direct RNA Sequencing Datasets"\r +Luca Cozzuto, Huanle Liu, Leszek P. Pryszcz, Toni Hermoso Pulido, Anna Delgado-Tejedor, Julia Ponomarenko, Eva Maria Novoa.\r +*Front. 
Genet., 17 March 2020.* https://doi.org/10.3389/fgene.2020.00211](https://www.frontiersin.org/articles/10.3389/fgene.2020.00211/full)\r +\r +\r +## Documentation\r +The documentation is available at [https://biocorecrg.github.io/MOP2/docs/](https://biocorecrg.github.io/MOP2/docs/about.html)\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/438?version=1" ; + schema1:isBasedOn "https://github.com/biocorecrg/MOP2.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Master of Pores 2" ; + schema1:sdDatePublished "2024-07-12 13:34:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/438/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 24119 ; + schema1:dateCreated "2023-02-20T15:48:34Z" ; + schema1:dateModified "2023-02-20T15:48:34Z" ; + schema1:description """# MoP2- DSL2 version of Master of Pores\r +[![Docker Build Status](https://img.shields.io/docker/automated/biocorecrg/nanopore.svg)](https://cloud.docker.com/u/biocorecrg/repository/docker/biocorecrg/nanopore/builds)\r +[![mop2-CI](https://github.com/biocorecrg/MoP2/actions/workflows/build.yml/badge.svg)](https://github.com/biocorecrg/MoP2/actions/workflows/build.yml)\r +[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)\r +[![Nextflow version](https://img.shields.io/badge/Nextflow-21.04.1-brightgreen)](https://www.nextflow.io/)\r +[![Nextflow DSL2](https://img.shields.io/badge/Nextflow-DSL2-brightgreen)](https://www.nextflow.io/)\r +[![Singularity version](https://img.shields.io/badge/Singularity-v3.2.1-green.svg)](https://www.sylabs.io/)\r +[![Docker version](https://img.shields.io/badge/Docker-v20.10.8-blue)](https://www.docker.com/)\r +\r +
\r +\r +![MOP2](https://github.com/biocorecrg/MoP2/blob/main/img/master_red.jpg?raw=true)\r +\r +\r +Inspired by Metallica's [Master Of Puppets](https://www.youtube.com/watch?v=S7blkui3nQc)\r +\r +## Install\r +Please install nextflow and singularity or docker before.\r +\r +Then download the repo:\r +\r +```\r +git clone --depth 1 --recurse-submodules git@github.com:biocorecrg/MOP2.git\r +```\r +\r +You can use INSTALL.sh to download the version 3.4.5 of guppy or you can replace it with the version you prefer. Please consider that the support of VBZ compression of fast5 started with version 3.4.X. \r +\r +```\r +cd MoP2; sh INSTALL.sh\r +```\r +\r +## Testing\r +You can replace ```-with-singularity``` with ```-with-docker``` if you want to use the docker engine.\r +\r +```\r +cd mop_preprocess\r +nextflow run mop_preprocess.nf -with-singularity -bg -profile local > log\r +\r +```\r +\r +## Reference\r +If you use this tool, please cite our papers:\r +\r +["Nanopore Direct RNA Sequencing Data Processing and Analysis Using MasterOfPores"\r +Cozzuto L, Delgado-Tejedor A, Hermoso Pulido T, Novoa EM, Ponomarenko J. *N. Methods Mol Biol. 2023*;2624:185-205. doi: 10.1007/978-1-0716-2962-8_13.](https://link.springer.com/protocol/10.1007/978-1-0716-2962-8_13)\r +\r +["MasterOfPores: A Workflow for the Analysis of Oxford Nanopore Direct RNA Sequencing Datasets"\r +Luca Cozzuto, Huanle Liu, Leszek P. Pryszcz, Toni Hermoso Pulido, Anna Delgado-Tejedor, Julia Ponomarenko, Eva Maria Novoa.\r +*Front. 
Genet., 17 March 2020.* https://doi.org/10.3389/fgene.2020.00211](https://www.frontiersin.org/articles/10.3389/fgene.2020.00211/full)\r +\r +\r +## Documentation\r +The documentation is available at [https://biocorecrg.github.io/MOP2/docs/](https://biocorecrg.github.io/MOP2/docs/about.html)\r +""" ; + schema1:keywords "nanopore, ONT, dRNAseq, Transcriptomics, metatranscriptomics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Master of Pores 2" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/438?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=16" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-07-12 13:20:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=16" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17391 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:59Z" ; + schema1:dateModified "2024-06-11T12:54:59Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=16" ; + schema1:version 16 . 
+ + a schema1:Dataset ; + schema1:datePublished "2022-11-29T12:22:16.778376" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "cutandrun/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.2" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# Inclusion Body Myositis Active Subnetwork Identification Workflow\r +\r +Workflow for Creating a large disease network from various datasets and databases for IBM, and applying the active subnetwork identification method MOGAMUN.\r +\r +## Links\r +[WorkflowHub](https://workflowhub.eu/workflows/681)\r +\r +[Docker Image](https://hub.docker.com/R/jdwijnbergen/multi-omics_asi)\r +\r +[Docker Image build requirements](https://doi.org/10.5281/zenodo.10210364)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/681?version=2" ; + schema1:isBasedOn "https://github.com/jdwijnbergen/IBM_ASI_workflow.git" ; + schema1:license "notspecified" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Inclusion Body Myositis Active Subnetwork Identification Workflow" ; + schema1:sdDatePublished "2024-07-12 13:24:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/681/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 27177 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6953 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-27T15:50:23Z" ; + schema1:dateModified "2023-11-27T15:50:23Z" ; + schema1:description """# Inclusion Body Myositis Active Subnetwork Identification Workflow\r +\r +Workflow for Creating a large disease network from various datasets and databases for IBM, and applying the active subnetwork identification method MOGAMUN.\r +\r +## Links\r +[WorkflowHub](https://workflowhub.eu/workflows/681)\r +\r +[Docker Image](https://hub.docker.com/R/jdwijnbergen/multi-omics_asi)\r +\r +[Docker Image build requirements](https://doi.org/10.5281/zenodo.10210364)\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/681?version=6" ; + schema1:keywords "Bioinformatics, CWL, Genomics, Transcriptomics, Protein-Protein Interaction" ; + schema1:license "https://choosealicense.com/no-permission/" ; + schema1:name "Inclusion Body Myositis Active Subnetwork Identification Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/681?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "The workflow takes a trimmed Illumina paired-end reads collection, runs Meryl to create a K-mer database, Genomescope2 to estimate genome properties and Smudgeplot to estimate ploidy. The main results are K-mer ddatabase and genome profiling plots, tables, and values useful for downstream analysis. Default K-mer length and ploidy for Genomescope are 21 and 2, respectively. 
" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/698?version=1" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/galaxy" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ERGA Profiling Illumina v2311 (WF1)" ; + schema1:sdDatePublished "2024-07-12 13:25:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/698/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 39713 ; + schema1:creator , + ; + schema1:dateCreated "2024-01-08T15:55:18Z" ; + schema1:dateModified "2024-01-08T15:57:54Z" ; + schema1:description "The workflow takes a trimmed Illumina paired-end reads collection, runs Meryl to create a K-mer database, Genomescope2 to estimate genome properties and Smudgeplot to estimate ploidy. The main results are K-mer ddatabase and genome profiling plots, tables, and values useful for downstream analysis. Default K-mer length and ploidy for Genomescope are 21 and 2, respectively. " ; + schema1:image ; + schema1:isPartOf , + ; + schema1:keywords "name:PROFILING, ERGA, illumina" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ERGA Profiling Illumina v2311 (WF1)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/1.Genome_Profiling/Galaxy-Workflow-ERGA_Profiling_Illumina_v2311_(WF1).ga" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 118483 ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/1.Genome_Profiling/pics/Prof_illu_2311.png" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-07-12 13:20:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4227 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:03Z" ; + schema1:dateModified "2024-06-11T12:55:03Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=6" ; + schema1:version 6 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.260.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_md_setup/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Amber Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:36:09 +0100" ; + schema1:url "https://workflowhub.eu/workflows/260/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 146391 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 30384 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-08T07:26:33Z" ; + schema1:dateModified "2023-06-08T07:32:27Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/260?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Amber Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_md_setup/cwl/workflow.cwl" ; + schema1:version 2 ; + 
ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Taxonomic classification and profiling of shotgun metagenomic data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1025?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/taxprofiler" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/taxprofiler" ; + schema1:sdDatePublished "2024-07-12 13:18:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1025/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12635 ; + schema1:creator , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:19Z" ; + schema1:dateModified "2024-06-11T12:55:19Z" ; + schema1:description "Taxonomic classification and profiling of shotgun metagenomic data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1025?version=10" ; + schema1:keywords "Classification, illumina, long-reads, Metagenomics, microbiome, nanopore, pathogen, Profiling, shotgun, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/taxprofiler" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1025?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.817.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_amber_abc_md_setup/docker" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Docker ABC MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:23:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/817/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 773 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-22T08:35:16Z" ; + schema1:dateModified "2024-05-22T13:49:45Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Docker ABC MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_amber_abc_md_setup/docker/Dockerfile" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/278?version=3" ; + schema1:isBasedOn "https://gitlab.bsc.es/lrodrig1/structuralvariants_poc/-/tree/1.1.3/structuralvariants/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNV_pipeline" ; + schema1:sdDatePublished "2024-07-12 13:35:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/278/ro_crate?version=3" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 8992 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8794 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-23T14:19:55Z" ; + schema1:dateModified "2022-04-11T09:29:33Z" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/278?version=10" ; + schema1:keywords "cancer, CODEX2, ExomeDepth, manta, TransBioNet, variant calling, GRIDSS, structural variants" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CNV_pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/278?version=3" ; + schema1:version 3 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 65152 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Mutations Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.289.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_md_setup_mutations/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Protein MD Setup tutorial with mutations" ; + schema1:sdDatePublished "2024-07-12 13:35:57 +0100" ; + 
schema1:url "https://workflowhub.eu/workflows/289/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 82519 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 22497 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-07T12:22:06Z" ; + schema1:dateModified "2023-06-07T12:30:54Z" ; + schema1:description """# Mutations Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/289?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Protein MD Setup tutorial with mutations" ; + schema1:producer ; 
+ schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_md_setup_mutations/cwl/workflow.cwl" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=12" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-07-12 13:20:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=12" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14588 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:59Z" ; + schema1:dateModified "2024-06-11T12:54:59Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=12" ; + schema1:version 12 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/976?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/cutandrun" ; + schema1:sdDatePublished "2024-07-12 13:22:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/976/ro_crate?version=8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15374 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:47Z" ; + schema1:dateModified "2024-06-11T12:54:47Z" ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/976?version=7" ; + schema1:keywords "cutandrun, cutandrun-seq, cutandtag, cutandtag-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/cutandrun" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/976?version=8" ; + schema1:version 8 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Purge-duplicates-from-hifiasm-assembly\r +\r +## General recommendations for using `Purge-duplicates-from-hifiasm-assembly`\r +\r +Please see the [`Genome assembly with hifiasm on Galaxy Australia`](https://australianbiocommons.github.io/how-to-guides/genome_assembly/hifi_assembly) guide.\r +\r +## Acknowledgements\r +\r +The workflow & the [doc_guidelines template used](https://github.com/AustralianBioCommons/doc_guidelines) are \r +supported by the Australian BioCommons via Bioplatforms Australia funding, the Australian Research Data Commons \r +(https://doi.org/10.47486/PL105) and the Queensland Government RICF programme. Bioplatforms Australia and the \r +Australian Research Data Commons are enabled by the National Collaborative Research Infrastructure Strategy (NCRIS).\r +\r +\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.237.2" ; + schema1:isBasedOn "https://github.com/AustralianBioCommons/Purge-duplicates-from-hifiasm-assembly" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Purge duplicates from hifiasm assembly v1.0" ; + schema1:sdDatePublished "2024-07-12 13:35:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/237/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 23112 ; + schema1:creator , + ; + schema1:dateCreated "2022-10-17T02:53:20Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """# Purge-duplicates-from-hifiasm-assembly\r +\r +## General recommendations for using `Purge-duplicates-from-hifiasm-assembly`\r +\r +Please see the [`Genome assembly with hifiasm on Galaxy Australia`](https://australianbiocommons.github.io/how-to-guides/genome_assembly/hifi_assembly) guide.\r +\r +## Acknowledgements\r +\r +The workflow & the [doc_guidelines template used](https://github.com/AustralianBioCommons/doc_guidelines) are \r +supported by the Australian BioCommons via Bioplatforms Australia funding, the Australian Research Data Commons \r +(https://doi.org/10.47486/PL105) and the Queensland Government RICF programme. Bioplatforms Australia and the \r +Australian Research Data Commons are enabled by the National Collaborative Research Infrastructure Strategy (NCRIS).\r +\r +\r +\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/237?version=1" ; + schema1:isPartOf , + ; + schema1:keywords "Assembly, purge_dups, HiFi" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Purge duplicates from hifiasm assembly v1.0" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/237?version=2" ; + schema1:version 2 ; + ns1:input , + ; + ns1:output , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-07-12 13:18:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5320 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:01Z" ; + schema1:dateModified "2024-06-11T12:55:01Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + schema1:datePublished "2021-12-20T17:04:47.636637" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-wgs-variant-calling" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-pe-illumina-wgs-variant-calling/COVID-19-PE-WGS-ILLUMINA" ; + schema1:sdDatePublished "2021-12-21 03:01:00 +0000" ; + schema1:softwareVersion "v0.2.2" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Genes and transcripts annotation with Isoseq using uLTRA and TAMA" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/993?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/isoseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/isoseq" ; + schema1:sdDatePublished "2024-07-12 13:21:09 +0100" ; + schema1:url "https://workflowhub.eu/workflows/993/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8438 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:58Z" ; + schema1:dateModified "2024-06-11T12:54:58Z" ; + schema1:description "Genes and transcripts annotation with Isoseq using uLTRA and TAMA" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/993?version=6" ; + schema1:keywords "isoseq, isoseq-3, rna, tama, ultra" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/isoseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/993?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=17" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-07-12 13:18:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=17" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12056 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:40Z" ; + schema1:dateModified "2024-06-11T12:54:40Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=17" ; + schema1:version 17 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:description """**Assembly and quantification metatranscriptome using metagenome data**.\r +\r +Version: see VERSION\r +\r +## Introduction\r +\r +**MetaGT** is a bioinformatics analysis pipeline used for improving and quantification \r +metatranscriptome assembly using metagenome data. The pipeline supports Illumina sequencing \r +data and complete metagenome and metatranscriptome assemblies. The pipeline involves the \r +alignment of metatranscriprome assembly to the metagenome assembly with further extracting CDSs,\r +which are covered by transcripts.\r +\r +The pipeline is built using Nextflow, a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with docker containers making installation trivial and results highly reproducible. 
The Nextflow DSL2 implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies.\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow-%E2%89%A520.04.0-brightgreen.svg)](https://www.nextflow.io/)\r +\r +[![install with bioconda](https://img.shields.io/badge/install%20with-bioconda-brightgreen.svg)](https://bioconda.github.io/)\r +\r +## Quick Start\r +\r +1. Install [`nextflow`](https://nf-co.re/usage/installation)\r +\r +2. Install any of [`Conda`](https://conda.io/miniconda.html) for full pipeline reproducibility \r +\r +3. Download the pipeline, e.g. by cloning metaGT GitHub repository:\r +\r + ```bash\r + git clone git@github.com:ablab/metaGT.git\r + ```\r + \r +4. Test it on a minimal dataset by running:\r +\r + ```bash\r + nextflow run metaGT -profile test,conda\r + ```\r + \r +5. Start running your own analysis!\r + > Typical command for analysis using reads:\r +\r + ```bash\r + nextflow run metaGT -profile --dna_reads '*_R{1,2}.fastq.gz' --rna_reads '*_R{1,2}.fastq.gz'\r + ```\r + > Typical command for analysis using multiple files with reads:\r +\r + ```bash\r + nextflow run metaGT -profile --dna_reads '*.yaml' --rna_reads '*.yaml' --yaml\r + ```\r + > Typical command for analysis using assemblies:\r +\r + ```bash\r + nextflow run metaGT -profile --genome '*.fasta' --transcriptome '*.fasta'\r + ```\r +## Pipeline Summary\r +Optionally, if raw reades are used:\r +\r +\r +\r +* Sequencing quality control (`FastQC`)\r +* Assembly metagenome or metatranscriptome (`metaSPAdes, rnaSPAdes `)\r +\r +By default, the pipeline currently performs the following:\r +\r +* Annotation metagenome (`Prokka`)\r +* Aligning metatranscriptome on metagenome (`minimap2`)\r +* Annotation unaligned transcripts (`TransDecoder`)\r +* Clustering covered CDS and CDS from unaligned transcripts (`MMseqs2`)\r +* Quantifying abundances of transcripts (`kallisto`)\r +\r +## Citation\r +\r +MetaGT was developed by 
Daria Shafranskaya and Andrey Prjibelski.\r +If you use it in your research please cite:\r +\r +[MetaGT: A pipeline for de novo assembly of metatranscriptomes with the aid of metagenomic data](https://doi.org/10.3389/fmicb.2022.981458)\r +\r +## Feedback and bug report\r +\r +If you have any questions, please leave an issue at out [GitHub page](https://github.com/ablab/metaGT/issues).\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/454?version=1" ; + schema1:isBasedOn "https://github.com/ablab/metaGT" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for MetaGT: A pipeline for de novo assembly of metatranscriptomes with the aid of metagenomic data" ; + schema1:sdDatePublished "2024-07-12 13:34:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/454/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12022 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-04-12T09:18:18Z" ; + schema1:dateModified "2023-04-13T06:50:58Z" ; + schema1:description """**Assembly and quantification metatranscriptome using metagenome data**.\r +\r +Version: see VERSION\r +\r +## Introduction\r +\r +**MetaGT** is a bioinformatics analysis pipeline used for improving and quantification \r +metatranscriptome assembly using metagenome data. The pipeline supports Illumina sequencing \r +data and complete metagenome and metatranscriptome assemblies. The pipeline involves the \r +alignment of metatranscriprome assembly to the metagenome assembly with further extracting CDSs,\r +which are covered by transcripts.\r +\r +The pipeline is built using Nextflow, a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with docker containers making installation trivial and results highly reproducible. 
The Nextflow DSL2 implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies.\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow-%E2%89%A520.04.0-brightgreen.svg)](https://www.nextflow.io/)\r +\r +[![install with bioconda](https://img.shields.io/badge/install%20with-bioconda-brightgreen.svg)](https://bioconda.github.io/)\r +\r +## Quick Start\r +\r +1. Install [`nextflow`](https://nf-co.re/usage/installation)\r +\r +2. Install any of [`Conda`](https://conda.io/miniconda.html) for full pipeline reproducibility \r +\r +3. Download the pipeline, e.g. by cloning metaGT GitHub repository:\r +\r + ```bash\r + git clone git@github.com:ablab/metaGT.git\r + ```\r + \r +4. Test it on a minimal dataset by running:\r +\r + ```bash\r + nextflow run metaGT -profile test,conda\r + ```\r + \r +5. Start running your own analysis!\r + > Typical command for analysis using reads:\r +\r + ```bash\r + nextflow run metaGT -profile --dna_reads '*_R{1,2}.fastq.gz' --rna_reads '*_R{1,2}.fastq.gz'\r + ```\r + > Typical command for analysis using multiple files with reads:\r +\r + ```bash\r + nextflow run metaGT -profile --dna_reads '*.yaml' --rna_reads '*.yaml' --yaml\r + ```\r + > Typical command for analysis using assemblies:\r +\r + ```bash\r + nextflow run metaGT -profile --genome '*.fasta' --transcriptome '*.fasta'\r + ```\r +## Pipeline Summary\r +Optionally, if raw reades are used:\r +\r +\r +\r +* Sequencing quality control (`FastQC`)\r +* Assembly metagenome or metatranscriptome (`metaSPAdes, rnaSPAdes `)\r +\r +By default, the pipeline currently performs the following:\r +\r +* Annotation metagenome (`Prokka`)\r +* Aligning metatranscriptome on metagenome (`minimap2`)\r +* Annotation unaligned transcripts (`TransDecoder`)\r +* Clustering covered CDS and CDS from unaligned transcripts (`MMseqs2`)\r +* Quantifying abundances of transcripts (`kallisto`)\r +\r +## Citation\r +\r +MetaGT was developed by 
Daria Shafranskaya and Andrey Prjibelski.\r +If you use it in your research please cite:\r +\r +[MetaGT: A pipeline for de novo assembly of metatranscriptomes with the aid of metagenomic data](https://doi.org/10.3389/fmicb.2022.981458)\r +\r +## Feedback and bug report\r +\r +If you have any questions, please leave an issue at out [GitHub page](https://github.com/ablab/metaGT/issues).\r +""" ; + schema1:keywords "Metagenomics, metatranscriptomics, expression, Multi-omics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "MetaGT: A pipeline for de novo assembly of metatranscriptomes with the aid of metagenomic data" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/454?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-05-30T07:39:05.349250" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "atacseq/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Workflow for Metagenomics from raw reads to annotated bins.\r +Steps:\r + - workflow_quality.cwl:\r + - FastQC (control)\r + - fastp (trimming)\r + - Kraken2 (Taxonomic Read Classification\r + - SPAdes (Assembly)\r + - QUAST (Assembly quality report)\r + - BBmap (Read mapping to assembly)\r + - sam_to_bam (sam to indexed bam)\r + - metabatContigDepths (jgi_summarize_bam_contig_depths)\r + - MetaBat2 (binning)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/64?version=8" ; + schema1:isBasedOn "https://gitlab.com/m-unlock/cwl/-/blob/master/cwl/workflows/workflow_metagenomics_binning.cwl" ; + schema1:license "notspecified" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Metagenomics workflow" ; + schema1:sdDatePublished "2024-07-12 13:34:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/64/ro_crate?version=8" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 51515 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11611 ; + schema1:creator , + ; + schema1:dateCreated "2021-05-06T06:03:42Z" ; + schema1:dateModified "2021-05-06T06:03:42Z" ; + schema1:description """Workflow for Metagenomics from raw reads to annotated bins.\r +Steps:\r + - workflow_quality.cwl:\r + - FastQC (control)\r + - fastp (trimming)\r + - Kraken2 (Taxonomic Read Classification\r + - SPAdes (Assembly)\r + - QUAST (Assembly quality report)\r + - BBmap (Read mapping to assembly)\r + - sam_to_bam (sam to indexed bam)\r + - metabatContigDepths (jgi_summarize_bam_contig_depths)\r + - MetaBat2 (binning)\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/64?version=10" ; + schema1:keywords "Metagenomics, microbial, metagenome, binning" ; + schema1:license "https://choosealicense.com/no-permission/" ; + schema1:name "Metagenomics workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/64?version=8" ; + schema1:version 8 ; + ns1:input , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2023-10-03T10:13:49.682562" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "atacseq/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.12" . 
+ + a schema1:Dataset ; + schema1:datePublished "2021-12-20T17:04:47.641102" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-variation-reporting" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-variation-reporting/COVID-19-VARIATION-REPORTING" ; + schema1:sdDatePublished "2021-12-21 03:00:59 +0000" ; + schema1:softwareVersion "v0.1.2" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=18" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-07-12 13:20:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=18" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8900 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:04Z" ; + schema1:dateModified "2024-06-11T12:55:04Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=18" ; + schema1:version 18 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=19" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-07-12 13:22:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=19" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10110 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:12Z" ; + schema1:dateModified "2024-06-11T12:55:12Z" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=19" ; + schema1:version 19 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.128.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (PDBe REST API)" ; + schema1:sdDatePublished "2024-07-12 13:33:54 
+0100" ; + schema1:url "https://workflowhub.eu/workflows/128/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 37193 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-09-15T11:18:43Z" ; + schema1:dateModified "2023-01-16T13:50:19Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in 
Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/128?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (PDBe REST API)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_virtual-screening/master/biobb_wf_virtual-screening/notebooks/ebi_api/wf_vs_ebi_api.ipynb" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.291.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python GMX OPLS/AA Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-07-12 13:35:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/291/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1758 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-17T14:08:25Z" ; + schema1:dateModified "2022-03-23T10:04:10Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/291?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python GMX OPLS/AA Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/291?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/963?version=11" ; + schema1:isBasedOn "https://github.com/nf-core/airrflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/airrflow" ; + schema1:sdDatePublished "2024-07-12 13:22:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/963/ro_crate?version=11" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12810 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:37Z" ; + schema1:dateModified "2024-06-11T12:54:37Z" ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/963?version=13" ; + schema1:keywords "airr, b-cell, immcantation, immunorepertoire, repseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/airrflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/963?version=11" ; + schema1:version 11 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.276.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:34:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/276/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6826 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-22T09:38:26Z" ; + schema1:dateModified "2023-01-16T13:57:38Z" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/276?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_md_setup/python/workflow.py" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Masking repeats in a genome using RepeatMasker" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/753?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Masking repeats with RepeatMasker" ; + schema1:sdDatePublished "2024-07-12 13:24:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/753/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7029 ; + schema1:creator ; + schema1:dateCreated "2024-02-15T11:38:35Z" ; + schema1:dateModified "2024-02-15T11:38:35Z" ; + schema1:description "Masking repeats in a genome using RepeatMasker" ; + schema1:isPartOf ; + schema1:keywords "genome-annotation" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Masking repeats with RepeatMasker" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/753?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/989?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/hic" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hic" ; + schema1:sdDatePublished "2024-07-12 13:21:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/989/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5332 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:54Z" ; + schema1:dateModified "2024-06-11T12:54:54Z" ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/989?version=7" ; + schema1:keywords "chromosome-conformation-capture, Hi-C" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hic" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/989?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=17" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-07-12 13:18:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=17" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13080 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:11Z" ; + schema1:dateModified "2024-06-11T12:55:11Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=17" ; + schema1:version 17 . + + a schema1:Dataset ; + schema1:datePublished "2023-10-26T10:12:46.371610" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Scaffolding-HiC-VGP8/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.14" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-07-12 13:20:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3978 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:02Z" ; + schema1:dateModified "2024-06-11T12:55:02Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """## Summary\r +\r +The validation process proposed has two pipelines for filtering PPIs predicted by some _IN SILICO_ detection method, both pipelines can be executed separately. The first pipeline (i) filter according to association rules of cellular locations extracted from HINT database. The second pipeline (ii) filter according to scientific papers where both proteins in the PPIs appear in interaction context in the sentences.\r +\r +The pipeline (i) starts extracting cellular component annotations from HINT PPIs building a dataset and then the Apriori algorithm is applied in this dataset in an iterative process that repeat the application of this algorithm till the rules cover 15 main locations in the cell. This process generate a database with association rules with two main columns: antecedent and consequent, meaning that a location that occurs in antecedent also occurs with the location in consequent. 
The filtering task evaluate the PPI checking if some location annotated for the first protein is in the antecedent column and if some location of the second protein is also in the same rule but in the consequent column. If so, the PPI passes according to the criteria.\r +\r +The pipeline (ii) starts getting all papers that mention both proteins in the PPIs and extrating their content using the NCBI [API](https://www.ncbi.nlm.nih.gov/home/develop/api/). These XML files are cleaned removing hypertext markup and references to figures, tables and supplementary materials. The paragraphs of the remaining articles content are processed by Natural language processing steps to extract sentences, tokens, stopwords removal to remove words extremely common in english language and do not help to identify the context of interest, prioritizing tokens using part-of-speech tagging to keep just nouns and verbs. Then the sentences filtered goes to the task that identifies the proteins of the PPI in evaluation among the tokens and also tries to identify tokens or set of tokens that mention experimental methods. The sentences that have the proteins of interest are filtered if the nouns and verbs have some of the items of the list of words indicating interaction relation (recruit, bind, interact, signaling, etc). Finally, a report is made by pair with the article identifiers, the sentences, the proteins and interacting words found.\r +\r +The figure below illustrates all the tasks of these pipelines.\r +\r +
\r + pipeline\r +
\r +\r +## Requirements:\r +* Python packages needed:\r + - pip3 install pandas\r + - pip3 install rdflib\r + - pip3 install mlxtend\r + - pip3 install inflect\r + - pip3 install nltk\r + - pip3 install biopython\r + - pip3 install lxml\r + - pip3 install bs4 (beautiful soup)\r +\r +## Usage Instructions\r +### Preparation:\r +1. ````git clone https://github.com/YasCoMa/ppi_validation_process.git````\r +2. `pip3 install -r requirements.txt`\r +3. ````cd ppi_validation_process/pipe_location_assocRules/````\r +4. ````unzip pygosemsim.zip````\r +5. ````cd ../````\r +\r +### Filtering by association rules of cellular locations (first filtering part) - File ````pipe_location_assocRules/find_pattern.py```` :\r +* Pipeline parameters:\r + - __-fo__ or __--folder__
\r + Folder to store the files (use the folder where the other required file can be found)\r + - __-if__ or __--interactome_file__
\r + File with the pairs (two columns with uniprot identifiers in tsv format)
\r +\r + Example of this file: pipe_location_assocRules/running_example/all_pairs.tsv\r +\r +\r +* Running modes examples:\r + 1. Go to the first filtering part folder:
\r + ````cd pipe_location_assocRules/````\r +\r + 2. Uncompress annotation_data.zip\r + \r + 3. Run:
\r + ````python3 find_pattern.py -fo running_example/ -if all_pairs.tsv````\r +\r +\r +### Filtering by text mining on scientific papers (second filtering part) - File ````ppi_pubminer/pubmed_pmc_literature_pipeline.py````:\r +\r +* Pipeline parameters:\r + - __-em__ or __--execution_mode__
\r + Use to indicate the execution mode desired:
\r + 1 - Mode using a list of protein pairs as bait
\r + 2 - Mode that tries to find sentences of PPI context for any protein pairs given a list of articles\r + \r + - __-fo__ or __--folder__
\r + Folder to store the files (use the folder where the other required file can be found)\r +\r + - __-rtm1__ or __--running_type_mode_1__
\r + Use to indicate which execution step you want to run for mode 1 (it is desirable following the order showed):
\r + 0 (default) - Run all steps
\r + 1 - Run step 1 (Get mentions of both proteins in PMC articles)
\r + 2 - Run step 2 (Get the PMC or Pubmed files, clean and store them)
\r + 3 - Run step 3 (Get the exact sentences where the proteins were found on interacting context)\r +\r + - __-rtm2__ or __--running_type_mode_2__
\r + Use to indicate which execution step you want to run for mode 2 (it is desirable following the order showed):
\r + 0 (default) - Run all steps
\r + 1 - Run step 1 (Get the PMC or Pubmed files from the given list, clean and store them)
\r + 2 - Run step 2 (Get the exact sentences where the proteins were found on an interacting context)\r +\r + - __-fp__ or __--file_pairs__
\r + (For mode 1) File with the pairs (two columns with uniprot identifiers in tsv format)
\r + \r + Example of this file: ppipubminer/running_example/mode_1/all_pairs.tsv\r +\r + - __-fe__ or __--file_evaluation__
\r + (For mode 1) File exported after step 1 execution in tsv format
\r +\r + - __-fa__ or __--file_articles__
\r + (For mode 2) File with the articles (First column indicating if it is from pmc or pubmed and the second one is the article id) in tsv format)
\r + \r + Example of this file: ppipubminer/running_example/mode_2/articles_info.tsv\r +\r +* Running modes examples:\r + - Go to the second filtering part folder:
\r + ````cd ppipubminer/````\r +\r + - Mode 1 - From protein pairs (PPIs) to sentences in articles\r + 1. Running all three steps of mode 1:
\r + ````python3 pubmed_pmc_literature_pipeline.py -em 1 -rtm1 0 -fo running_example/mode_1/ -fp all_pairs.tsv````\r +\r + 2. Running only step 1 of mode 1:
\r + ````python3 pubmed_pmc_literature_pipeline.py -em 1 -rtm1 1 -fo running_example/mode_1/ -fp all_pairs.tsv````\r +\r + 3. Running only step 2 of mode 1:
\r + ````python3 pubmed_pmc_literature_pipeline.py -em 1 -rtm1 2 -fo running_example/mode_1/ -fp all_pairs.tsv -fe literature_evaluation_pairs.tsv````\r +\r + 4. Running only step 3 of mode 1:
\r + ````python3 pubmed_pmc_literature_pipeline.py -em 1 -rtm1 3 -fo running_example/mode_1/ -fp all_pairs.tsv -fe literature_evaluation_pairs.tsv````\r +\r + - Mode 2 - From articles to report of sentences with any protein pairs (PPIs)\r + 1. Running all three steps of mode 2:
\r + ````python3 pubmed_pmc_literature_pipeline.py -em 2 -rtm1 0 -fo running_example/mode_2/ -fa articles_info.tsv````\r +\r + 2. Running only step 1 of mode 2:
\r + ````python3 pubmed_pmc_literature_pipeline.py -em 2 -rtm1 1 -fo running_example/mode_2/ -fa articles_info.tsv````\r +\r + 3. Running only step 2 of mode 2:
\r + ````python3 pubmed_pmc_literature_pipeline.py -em 2 -rtm1 2 -fo running_example/mode_2/ -fa articles_info.tsv ````\r +\r +## Reference\r +Martins YC, Ziviani A, Nicolás MF, de Vasconcelos AT. Large-Scale Protein Interactions Prediction by Multiple Evidence Analysis Associated With an In-Silico Curation Strategy. Frontiers in Bioinformatics. 2021:38.\r +https://www.frontiersin.org/articles/10.3389/fbinf.2021.731345/full\r +\r +## Bug Report\r +Please, use the [Issues](https://github.com/YasCoMa/ppi_validation_process/issues) tab to report any bug.""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/617?version=1" ; + schema1:isBasedOn "https://github.com/YasCoMa/ppi_validation_process" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PPIVPro - PPI Validation Process" ; + schema1:sdDatePublished "2024-07-12 13:27:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/617/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 54801 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 36713 ; + schema1:creator ; + schema1:dateCreated "2023-10-21T23:43:48Z" ; + schema1:dateModified "2023-10-21T23:45:17Z" ; + schema1:description """## Summary\r +\r +The validation process proposed has two pipelines for filtering PPIs predicted by some _IN SILICO_ detection method, both pipelines can be executed separately. The first pipeline (i) filter according to association rules of cellular locations extracted from HINT database. 
The second pipeline (ii) filter according to scientific papers where both proteins in the PPIs appear in interaction context in the sentences.\r +\r +The pipeline (i) starts extracting cellular component annotations from HINT PPIs building a dataset and then the Apriori algorithm is applied in this dataset in an iterative process that repeat the application of this algorithm till the rules cover 15 main locations in the cell. This process generate a database with association rules with two main columns: antecedent and consequent, meaning that a location that occurs in antecedent also occurs with the location in consequent. The filtering task evaluate the PPI checking if some location annotated for the first protein is in the antecedent column and if some location of the second protein is also in the same rule but in the consequent column. If so, the PPI passes according to the criteria.\r +\r +The pipeline (ii) starts getting all papers that mention both proteins in the PPIs and extrating their content using the NCBI [API](https://www.ncbi.nlm.nih.gov/home/develop/api/). These XML files are cleaned removing hypertext markup and references to figures, tables and supplementary materials. The paragraphs of the remaining articles content are processed by Natural language processing steps to extract sentences, tokens, stopwords removal to remove words extremely common in english language and do not help to identify the context of interest, prioritizing tokens using part-of-speech tagging to keep just nouns and verbs. Then the sentences filtered goes to the task that identifies the proteins of the PPI in evaluation among the tokens and also tries to identify tokens or set of tokens that mention experimental methods. The sentences that have the proteins of interest are filtered if the nouns and verbs have some of the items of the list of words indicating interaction relation (recruit, bind, interact, signaling, etc). 
Finally, a report is made by pair with the article identifiers, the sentences, the proteins and interacting words found.\r +\r +The figure below illustrates all the tasks of these pipelines.\r +\r +
\r + pipeline\r +
\r +\r +## Requirements:\r +* Python packages needed:\r + - pip3 install pandas\r + - pip3 install rdflib\r + - pip3 install mlxtend\r + - pip3 install inflect\r + - pip3 install nltk\r + - pip3 install biopython\r + - pip3 install lxml\r + - pip3 install bs4 (beautiful soup)\r +\r +## Usage Instructions\r +### Preparation:\r +1. ````git clone https://github.com/YasCoMa/ppi_validation_process.git````\r +2. `pip3 install -r requirements.txt`\r +3. ````cd ppi_validation_process/pipe_location_assocRules/````\r +4. ````unzip pygosemsim.zip````\r +5. ````cd ../````\r +\r +### Filtering by association rules of cellular locations (first filtering part) - File ````pipe_location_assocRules/find_pattern.py```` :\r +* Pipeline parameters:\r + - __-fo__ or __--folder__
\r + Folder to store the files (use the folder where the other required file can be found)\r + - __-if__ or __--interactome_file__
\r + File with the pairs (two columns with uniprot identifiers in tsv format)
\r +\r + Example of this file: pipe_location_assocRules/running_example/all_pairs.tsv\r +\r +\r +* Running modes examples:\r + 1. Go to the first filtering part folder:
\r + ````cd pipe_location_assocRules/````\r +\r + 2. Uncompress annotation_data.zip\r + \r + 3. Run:
\r + ````python3 find_pattern.py -fo running_example/ -if all_pairs.tsv````\r +\r +\r +### Filtering by text mining on scientific papers (second filtering part) - File ````ppi_pubminer/pubmed_pmc_literature_pipeline.py````:\r +\r +* Pipeline parameters:\r + - __-em__ or __--execution_mode__
\r + Use to indicate the execution mode desired:
\r + 1 - Mode using a list of protein pairs as bait
\r + 2 - Mode that tries to find sentences of PPI context for any protein pairs given a list of articles\r + \r + - __-fo__ or __--folder__
\r + Folder to store the files (use the folder where the other required file can be found)\r +\r + - __-rtm1__ or __--running_type_mode_1__
\r + Use to indicate which execution step you want to run for mode 1 (it is desirable following the order showed):
\r + 0 (default) - Run all steps
\r + 1 - Run step 1 (Get mentions of both proteins in PMC articles)
\r + 2 - Run step 2 (Get the PMC or Pubmed files, clean and store them)
\r + 3 - Run step 3 (Get the exact sentences where the proteins were found on interacting context)\r +\r + - __-rtm2__ or __--running_type_mode_2__
\r + Use to indicate which execution step you want to run for mode 2 (it is desirable following the order showed):
\r + 0 (default) - Run all steps
\r + 1 - Run step 1 (Get the PMC or Pubmed files from the given list, clean and store them)
\r + 2 - Run step 2 (Get the exact sentences where the proteins were found on an interacting context)\r +\r + - __-fp__ or __--file_pairs__
\r + (For mode 1) File with the pairs (two columns with uniprot identifiers in tsv format)
\r + \r + Example of this file: ppipubminer/running_example/mode_1/all_pairs.tsv\r +\r + - __-fe__ or __--file_evaluation__
\r + (For mode 1) File exported after step 1 execution in tsv format
\r +\r + - __-fa__ or __--file_articles__
\r + (For mode 2) File with the articles (First column indicating if it is from pmc or pubmed and the second one is the article id) in tsv format)
\r + \r + Example of this file: ppipubminer/running_example/mode_2/articles_info.tsv\r +\r +* Running modes examples:\r + - Go to the second filtering part folder:
\r + ````cd ppipubminer/````\r +\r + - Mode 1 - From protein pairs (PPIs) to sentences in articles\r + 1. Running all three steps of mode 1:
\r + ````python3 pubmed_pmc_literature_pipeline.py -em 1 -rtm1 0 -fo running_example/mode_1/ -fp all_pairs.tsv````\r +\r + 2. Running only step 1 of mode 1:
\r + ````python3 pubmed_pmc_literature_pipeline.py -em 1 -rtm1 1 -fo running_example/mode_1/ -fp all_pairs.tsv````\r +\r + 3. Running only step 2 of mode 1:
\r + ````python3 pubmed_pmc_literature_pipeline.py -em 1 -rtm1 2 -fo running_example/mode_1/ -fp all_pairs.tsv -fe literature_evaluation_pairs.tsv````\r +\r + 4. Running only step 3 of mode 1:
\r + ````python3 pubmed_pmc_literature_pipeline.py -em 1 -rtm1 3 -fo running_example/mode_1/ -fp all_pairs.tsv -fe literature_evaluation_pairs.tsv````\r +\r + - Mode 2 - From articles to report of sentences with any protein pairs (PPIs)\r + 1. Running all three steps of mode 2:
\r + ````python3 pubmed_pmc_literature_pipeline.py -em 2 -rtm1 0 -fo running_example/mode_2/ -fa articles_info.tsv````\r +\r + 2. Running only step 1 of mode 2:
\r + ````python3 pubmed_pmc_literature_pipeline.py -em 2 -rtm1 1 -fo running_example/mode_2/ -fa articles_info.tsv````\r +\r + 3. Running only step 2 of mode 2:
\r + ````python3 pubmed_pmc_literature_pipeline.py -em 2 -rtm1 2 -fo running_example/mode_2/ -fa articles_info.tsv ````\r +\r +## Reference\r +Martins YC, Ziviani A, Nicolás MF, de Vasconcelos AT. Large-Scale Protein Interactions Prediction by Multiple Evidence Analysis Associated With an In-Silico Curation Strategy. Frontiers in Bioinformatics. 2021:38.\r +https://www.frontiersin.org/articles/10.3389/fbinf.2021.731345/full\r +\r +## Bug Report\r +Please, use the [Issues](https://github.com/YasCoMa/ppi_validation_process/issues) tab to report any bug.""" ; + schema1:image ; + schema1:keywords "Bioinformatics, scientific publication text mining, validaiton o protein interaction predictions" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "PPIVPro - PPI Validation Process" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/617?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). 
\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.281.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_protein_complex_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:34:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/281/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8792 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-22T10:00:20Z" ; + schema1:dateModified "2023-01-16T13:58:31Z" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/281?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher 
; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_protein_complex_md_setup/python/workflow.py" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Automated quantitative analysis of DIA proteomics mass spectrometry measurements." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/980?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/diaproteomics" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/diaproteomics" ; + schema1:sdDatePublished "2024-07-12 13:21:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/980/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7318 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "Automated quantitative analysis of DIA proteomics mass spectrometry measurements." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/980?version=6" ; + schema1:keywords "data-independent-proteomics, dia-proteomics, openms, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/diaproteomics" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/980?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Analysis tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This workflow computes a set of Quality Control (QC) analyses on top of an uploaded MD trajectory. 
QC analyses include positional divergence (RMSd), change of shape (Radius of Gyration), identification of flexible regions (atomic/residue fluctuations), and identification of different molecular conformations (trajectory clustering).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.287.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_protein_md_analysis/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein MD Analysis tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/287/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5395 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-17T13:13:07Z" ; + schema1:dateModified "2022-04-11T09:29:43Z" ; + schema1:description """# Protein MD Analysis tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This workflow computes a set of Quality Control (QC) analyses on top of an uploaded MD trajectory. QC analyses include positional divergence (RMSd), change of shape (Radius of Gyration), identification of flexible regions (atomic/residue fluctuations), and identification of different molecular conformations (trajectory clustering).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/287?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein MD Analysis tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_protein_md_analysis/python/workflow.py" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Conversion of files from BAM to FASTQ, including FASTQC & CCS.BAM file quality control (QC) steps.\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/220?version=1" ; + schema1:isBasedOn "https://github.com/AustralianBioCommons/BAM-to-FASTQ-QC" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CCS.BAM to FASTQ + QC (HiFi genome assembly stage 1)" ; + schema1:sdDatePublished "2024-07-12 13:35:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/220/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10774 ; + schema1:creator ; + schema1:dateCreated "2021-10-21T05:52:36Z" ; + schema1:dateModified "2022-10-17T02:43:04Z" ; + schema1:description """Conversion of files from BAM to FASTQ, including FASTQC & CCS.BAM file quality control (QC) steps.\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/220?version=1" ; + schema1:isPartOf , + ; + schema1:keywords "BAM, FASTQ, Conversion, QC" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "CCS.BAM to FASTQ + QC (HiFi genome assembly stage 1)" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/220?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.293.2" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python CNS/XPLOR MD Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-07-12 13:35:54 +0100" ; + schema1:url 
"https://workflowhub.eu/workflows/293/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1728 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-24T14:22:19Z" ; + schema1:dateModified "2023-01-16T13:58:35Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/293?version=1" ; + schema1:keywords "" ; + schema1:license 
"https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python CNS/XPLOR MD Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/293?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=11" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-07-12 13:20:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=11" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5700 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:03Z" ; + schema1:dateModified "2024-06-11T12:55:03Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=11" ; + schema1:version 11 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# covid-sequence-analysis-workflow\r +\r +This is the official repository of the SARS-CoV-2 variant surveillance pipeline developed by Danish Technical University (DTU), Eotvos Lorand University (ELTE), EMBL-EBI, Erasmus Medical Center (EMC) under the [Versatile Emerging infectious disease Observatory (VEO)](https://www.globalsurveillance.eu/projects/veo-versatile-emerging-infectious-disease-observatory) project. The project consists of 20 European partners. It is funded by the European Commission.\r +\r +The pipeline has been integrated on EMBL-EBI infrastructure to automatically process raw SARS-CoV-2 read data, presenting in the COVID-19 Data Portal: https://www.covid19dataportal.org/sequences?db=sra-analysis-covid19&size=15&crossReferencesOption=all#search-content.\r +\r +## Architecture\r +\r +The pipeline supports sequence reads from both Illumina and Nanopore platforms. It is designed to be highly portable for both Google Cloud Platform and High Performance Computing cluster with IBM Spectrum LSF. We have performed secondary and tertiary analysis on millions of public samples. The pipeline shows good performance for large scale production. \r +\r +![Component diagram](doc/img/pipeline.components.png)\r +\r +The pipeline takes SRA from the public FTP from ENA. It submits analysis objects back to ENA on the fly. The intermediate results and logs are stored in the cloud storage buckets or high performance local POSIX file system. The metadata is stored in Google BigQuery for metadata and status tracking and analysis. The runtime is created with Docker / Singularity containers and NextFlow. \r +\r +## Process to run the pipelines\r +\r +The pipeline requires the Nextflow Tower for the application level monitoring. A free test account can be created for evaluation purposes at https://tower.nf/.\r +\r +### Preparation\r +\r +1. 
Store `export TOWER_ACCESS_TOKEN='...'` in `$HOME/.bash_profile`. Restart the current session or source the updated `$HOME/.bash_profile`.\r +2. Run `git clone https://github.com/enasequence/covid-sequence-analysis-workflow`.\r +3. Create `./covid-sequence-analysis-workflow/data/projects_accounts.csv` with submission_account_id and submission_passwor, for example:\r +> project_id,center_name,meta_key,submission_account_id,submission_password,ftp_password\r +> PRJEB45555,"European Bioinformatics Institute",public,,,\r +\r +### Running pipelines\r +\r +1. Run `./covid-sequence-analysis-workflow/init.sra_index.sh` to initialize or reinitialize the metadata in BigQuery.\r +2. Run `./covid-sequence-analysis-workflow/./start.lsf.jobs.sh` with proper parameters to start the batch jobs on LSF or `./covid-sequence-analysis-workflow/./start.gls.jobs.sh` with proper parameters to start the batch jobs on GCP.\r +\r +### Error handling\r +\r +If a job is killed or died, run the following to update the metadata to avoid reprocessing samples completed successfully.\r +\r +1. Run `./covid-sequence-analysis-workflow/update.receipt.sh ` to collect the submission receipts and to update submission metadata. The script can be run at anytime. It needs to be run if a batch job is killed instead of completed for any reason.\r +2. Run `./covid-sequence-analysis-workflow/set.archived.sh` to update stats for analyses submitted. The script can be run at anytime. 
It needs to be run at least once before ending a snapshot to make sure that the stats are up-to-date.\r +\r +To reprocess the samples failed, delete the record in `sra_processing`.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.664.1" ; + schema1:isBasedOn "https://github.com/enasequence/covid-sequence-analysis-workflow.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for covid-sequence-analysis-workflow" ; + schema1:sdDatePublished "2024-07-12 13:26:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/664/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 272 ; + schema1:creator ; + schema1:dateCreated "2023-11-14T09:42:17Z" ; + schema1:dateModified "2023-11-14T09:42:53Z" ; + schema1:description """# covid-sequence-analysis-workflow\r +\r +This is the official repository of the SARS-CoV-2 variant surveillance pipeline developed by Danish Technical University (DTU), Eotvos Lorand University (ELTE), EMBL-EBI, Erasmus Medical Center (EMC) under the [Versatile Emerging infectious disease Observatory (VEO)](https://www.globalsurveillance.eu/projects/veo-versatile-emerging-infectious-disease-observatory) project. The project consists of 20 European partners. It is funded by the European Commission.\r +\r +The pipeline has been integrated on EMBL-EBI infrastructure to automatically process raw SARS-CoV-2 read data, presenting in the COVID-19 Data Portal: https://www.covid19dataportal.org/sequences?db=sra-analysis-covid19&size=15&crossReferencesOption=all#search-content.\r +\r +## Architecture\r +\r +The pipeline supports sequence reads from both Illumina and Nanopore platforms. It is designed to be highly portable for both Google Cloud Platform and High Performance Computing cluster with IBM Spectrum LSF. 
We have performed secondary and tertiary analysis on millions of public samples. The pipeline shows good performance for large scale production. \r +\r +![Component diagram](doc/img/pipeline.components.png)\r +\r +The pipeline takes SRA from the public FTP from ENA. It submits analysis objects back to ENA on the fly. The intermediate results and logs are stored in the cloud storage buckets or high performance local POSIX file system. The metadata is stored in Google BigQuery for metadata and status tracking and analysis. The runtime is created with Docker / Singularity containers and NextFlow. \r +\r +## Process to run the pipelines\r +\r +The pipeline requires the Nextflow Tower for the application level monitoring. A free test account can be created for evaluation purposes at https://tower.nf/.\r +\r +### Preparation\r +\r +1. Store `export TOWER_ACCESS_TOKEN='...'` in `$HOME/.bash_profile`. Restart the current session or source the updated `$HOME/.bash_profile`.\r +2. Run `git clone https://github.com/enasequence/covid-sequence-analysis-workflow`.\r +3. Create `./covid-sequence-analysis-workflow/data/projects_accounts.csv` with submission_account_id and submission_passwor, for example:\r +> project_id,center_name,meta_key,submission_account_id,submission_password,ftp_password\r +> PRJEB45555,"European Bioinformatics Institute",public,,,\r +\r +### Running pipelines\r +\r +1. Run `./covid-sequence-analysis-workflow/init.sra_index.sh` to initialize or reinitialize the metadata in BigQuery.\r +2. Run `./covid-sequence-analysis-workflow/./start.lsf.jobs.sh` with proper parameters to start the batch jobs on LSF or `./covid-sequence-analysis-workflow/./start.gls.jobs.sh` with proper parameters to start the batch jobs on GCP.\r +\r +### Error handling\r +\r +If a job is killed or died, run the following to update the metadata to avoid reprocessing samples completed successfully.\r +\r +1. 
Run `./covid-sequence-analysis-workflow/update.receipt.sh ` to collect the submission receipts and to update submission metadata. The script can be run at anytime. It needs to be run if a batch job is killed instead of completed for any reason.\r +2. Run `./covid-sequence-analysis-workflow/set.archived.sh` to update stats for analyses submitted. The script can be run at anytime. It needs to be run at least once before ending a snapshot to make sure that the stats are up-to-date.\r +\r +To reprocess the samples failed, delete the record in `sra_processing`.\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "covid-sequence-analysis-workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/664?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1027?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/viralrecon" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/viralrecon" ; + schema1:sdDatePublished "2024-07-12 13:18:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1027/ro_crate?version=8" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10177 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:21Z" ; + schema1:dateModified "2024-06-11T12:55:21Z" ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1027?version=10" ; + schema1:keywords "Amplicon, ARTIC, Assembly, covid-19, covid19, illumina, long-read-sequencing, Metagenomics, nanopore, ONT, oxford-nanopore, SARS-CoV-2, variant-calling, viral, Virus" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/viralrecon" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1027?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.XXXXXXX-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.XXXXXXX)\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.10.1-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![Launch on Nextflow Tower](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Nextflow%20Tower-%234256e7)](https://tower.nf/launch?pipeline=https://github.com/sanger-tol/treeval)\r +\r +## Introduction\r +\r +**sanger-tol/treeval** is a bioinformatics best-practice analysis pipeline for the 
generation of data supplemental to the curation of reference quality genomes. This pipeline has been written to generate flat files compatible with [JBrowse2](https://jbrowse.org/jb2/).\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +The treeval pipeline has a sister pipeline currently named [curationpretext](https://github.com/sanger-tol/curationpretext) which acts to regenerate the pretext maps and accessory files during genomic curation in order to confirm interventions. This pipeline is sufficiently different to the treeval implementation that it is written as it's own pipeline.\r +\r +1. Parse input yaml ( YAML_INPUT )\r +2. Generate my.genome file ( GENERATE_GENOME )\r +3. Generate insilico digests of the input assembly ( INSILICO_DIGEST )\r +4. Generate gene alignments with high quality data against the input assembly ( GENE_ALIGNMENT )\r +5. Generate a repeat density graph ( REPEAT_DENSITY )\r +6. Generate a gap track ( GAP_FINDER )\r +7. Generate a map of self complementary sequence ( SELFCOMP )\r +8. Generate syntenic alignments with a closely related high quality assembly ( SYNTENY )\r +9. Generate a coverage track using PacBio data ( LONGREAD_COVERAGE )\r +10. Generate HiC maps, pretext and higlass using HiC cram files ( HIC_MAPPING )\r +11. 
Generate a telomere track based on input motif ( TELO_FINDER )\r +12. Run Busco and convert results into bed format ( BUSCO_ANNOTATION )\r +13. Ancestral Busco linkage if available for clade ( BUSCO_ANNOTATION:ANCESTRAL_GENE )\r +\r +## Usage\r +\r +> **Note**\r +> If you are new to Nextflow and nf-core, please refer to [this page](https://nf-co.re/docs/usage/installation) on how\r +> to set-up Nextflow. Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline)\r +> with `-profile test` before running the workflow on actual data.\r +\r +Currently, it is advised to run the pipeline with docker or singularity as a small number of major modules do not currently have a conda env associated with them.\r +\r +Now, you can run the pipeline using:\r +\r +```bash\r +# For the FULL pipeline\r +nextflow run main.nf -profile singularity --input treeval.yaml --outdir {OUTDIR}\r +\r +# For the RAPID subset\r +nextflow run main.nf -profile singularity --input treeval.yaml -entry RAPID --outdir {OUTDIR}\r +```\r +\r +An example treeval.yaml can be found [here](assets/local_testing/nxOscDF5033.yaml).\r +\r +Further documentation about the pipeline can be found in the following files: [usage](https://pipelines.tol.sanger.ac.uk/treeval/dev/usage), [parameters](https://pipelines.tol.sanger.ac.uk/treeval/dev/parameters) and [output](https://pipelines.tol.sanger.ac.uk/treeval/dev/output).\r +\r +> **Warning:**\r +> Please provide pipeline parameters via the CLI or Nextflow `-params-file` option. 
Custom config files including those\r +> provided by the `-c` Nextflow option can be used to provide any configuration _**except for parameters**_;\r +> see [docs](https://nf-co.re/usage/configuration#custom-configuration-files).\r +\r +## Credits\r +\r +sanger-tol/treeval has been written by Damon-Lee Pointon (@DLBPointon), Yumi Sims (@yumisims) and William Eagles (@weaglesBio).\r +\r +We thank the following people for their extensive assistance in the development of this pipeline:\r +\r +
    \r +
  • @gq1 - For building the infrastructure around TreeVal and helping with code review
  • \r +
  • @ksenia-krasheninnikova - For help with C code implementation and YAML parsing
  • \r +
  • @mcshane - For guidance on algorithms
  • \r +
  • @muffato - For code reviews and code support
  • \r +
  • @priyanka-surana - For help with the majority of code reviews and code support
  • \r +
\r +\r +## Contributions and Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +## Citations\r +\r +\r +\r +If you use sanger-tol/treeval for your analysis, please cite it using the following doi: [10.5281/zenodo.XXXXXX](https://doi.org/10.5281/zenodo.XXXXXX).\r +\r +### Tools\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +You can cite the `nf-core` publication as follows:\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/668?version=1" ; + schema1:isBasedOn "https://github.com/sanger-tol/treeval" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for sanger-tol/treeval v1.0 - Ancient Atlantis" ; + schema1:sdDatePublished "2024-07-12 13:26:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/668/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1736 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2023-11-14T12:10:54Z" ; + schema1:dateModified "2023-11-14T12:10:54Z" ; + schema1:description """[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.XXXXXXX-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.XXXXXXX)\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.10.1-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![Launch on Nextflow Tower](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Nextflow%20Tower-%234256e7)](https://tower.nf/launch?pipeline=https://github.com/sanger-tol/treeval)\r +\r +## Introduction\r +\r +**sanger-tol/treeval** is a bioinformatics best-practice analysis pipeline for the generation of data supplemental to the curation of reference quality genomes. This pipeline has been written to generate flat files compatible with [JBrowse2](https://jbrowse.org/jb2/).\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. 
Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +The treeval pipeline has a sister pipeline currently named [curationpretext](https://github.com/sanger-tol/curationpretext) which acts to regenerate the pretext maps and accessory files during genomic curation in order to confirm interventions. This pipeline is sufficiently different to the treeval implementation that it is written as it's own pipeline.\r +\r +1. Parse input yaml ( YAML_INPUT )\r +2. Generate my.genome file ( GENERATE_GENOME )\r +3. Generate insilico digests of the input assembly ( INSILICO_DIGEST )\r +4. Generate gene alignments with high quality data against the input assembly ( GENE_ALIGNMENT )\r +5. Generate a repeat density graph ( REPEAT_DENSITY )\r +6. Generate a gap track ( GAP_FINDER )\r +7. Generate a map of self complementary sequence ( SELFCOMP )\r +8. Generate syntenic alignments with a closely related high quality assembly ( SYNTENY )\r +9. Generate a coverage track using PacBio data ( LONGREAD_COVERAGE )\r +10. Generate HiC maps, pretext and higlass using HiC cram files ( HIC_MAPPING )\r +11. Generate a telomere track based on input motif ( TELO_FINDER )\r +12. Run Busco and convert results into bed format ( BUSCO_ANNOTATION )\r +13. Ancestral Busco linkage if available for clade ( BUSCO_ANNOTATION:ANCESTRAL_GENE )\r +\r +## Usage\r +\r +> **Note**\r +> If you are new to Nextflow and nf-core, please refer to [this page](https://nf-co.re/docs/usage/installation) on how\r +> to set-up Nextflow. 
Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline)\r +> with `-profile test` before running the workflow on actual data.\r +\r +Currently, it is advised to run the pipeline with docker or singularity as a small number of major modules do not currently have a conda env associated with them.\r +\r +Now, you can run the pipeline using:\r +\r +```bash\r +# For the FULL pipeline\r +nextflow run main.nf -profile singularity --input treeval.yaml --outdir {OUTDIR}\r +\r +# For the RAPID subset\r +nextflow run main.nf -profile singularity --input treeval.yaml -entry RAPID --outdir {OUTDIR}\r +```\r +\r +An example treeval.yaml can be found [here](assets/local_testing/nxOscDF5033.yaml).\r +\r +Further documentation about the pipeline can be found in the following files: [usage](https://pipelines.tol.sanger.ac.uk/treeval/dev/usage), [parameters](https://pipelines.tol.sanger.ac.uk/treeval/dev/parameters) and [output](https://pipelines.tol.sanger.ac.uk/treeval/dev/output).\r +\r +> **Warning:**\r +> Please provide pipeline parameters via the CLI or Nextflow `-params-file` option. Custom config files including those\r +> provided by the `-c` Nextflow option can be used to provide any configuration _**except for parameters**_;\r +> see [docs](https://nf-co.re/usage/configuration#custom-configuration-files).\r +\r +## Credits\r +\r +sanger-tol/treeval has been written by Damon-Lee Pointon (@DLBPointon), Yumi Sims (@yumisims) and William Eagles (@weaglesBio).\r +\r +We thank the following people for their extensive assistance in the development of this pipeline:\r +\r +
    \r +
  • @gq1 - For building the infrastructure around TreeVal and helping with code review
  • \r +
  • @ksenia-krasheninnikova - For help with C code implementation and YAML parsing
  • \r +
  • @mcshane - For guidance on algorithms
  • \r +
  • @muffato - For code reviews and code support
  • \r +
  • @priyanka-surana - For help with the majority of code reviews and code support
  • \r +
\r +\r +## Contributions and Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +## Citations\r +\r +\r +\r +If you use sanger-tol/treeval for your analysis, please cite it using the following doi: [10.5281/zenodo.XXXXXX](https://doi.org/10.5281/zenodo.XXXXXX).\r +\r +### Tools\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +You can cite the `nf-core` publication as follows:\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "sanger-tol/treeval v1.0 - Ancient Atlantis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/668?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Global Run-On sequencing analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1004?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/nascent" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nascent" ; + schema1:sdDatePublished "2024-07-12 13:20:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1004/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8566 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:06Z" ; + schema1:dateModified "2024-06-11T12:55:06Z" ; + schema1:description "Global Run-On sequencing analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1004?version=4" ; + schema1:keywords "gro-seq, nascent, pro-seq, rna, transcription, tss" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nascent" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1004?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Pre-processing of mass spectrometry-based metabolomics data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/57?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/metaboigniter" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/metaboigniter" ; + schema1:sdDatePublished "2024-07-12 13:22:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/57/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 23675 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:00Z" ; + schema1:dateModified "2024-06-11T12:55:00Z" ; + schema1:description "Pre-processing of mass spectrometry-based metabolomics data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/57?version=3" ; + schema1:keywords "Metabolomics, identification, quantification, mass-spectrometry, ms1, ms2" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/metaboigniter" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/57?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# RASflow: RNA-Seq Analysis Snakemake Workflow\r +RASflow is a modular, flexible and user-friendly RNA-Seq analysis workflow. \r +\r +RASflow can be applied to both model and non-model organisms. It supports mapping RNA-Seq raw reads to both genome and transcriptome (can be downloaded from public database or can be homemade by users) and it can do both transcript- and gene-level Differential Expression Analysis (DEA) when transcriptome is used as mapping reference. It requires little programming skill for basic use. If you're good at programming, you can do more magic with RASflow!\r +\r +You can help support RASflow by citing our publication:\r +\r +**Zhang, X., Jonassen, I. RASflow: an RNA-Seq analysis workflow with Snakemake. BMC Bioinformatics 21, 110 (2020). 
https://doi.org/10.1186/s12859-020-3433-x**\r +""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/451?version=1" ; + schema1:isBasedOn "https://github.com/zhxiaokang/RASflow.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for RASflow: RNA-Seq Analysis Snakemake Workflow" ; + schema1:sdDatePublished "2024-07-12 13:34:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/451/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 2091 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5359 ; + schema1:creator ; + schema1:dateCreated "2023-04-06T07:54:59Z" ; + schema1:dateModified "2023-04-06T07:54:59Z" ; + schema1:description """# RASflow: RNA-Seq Analysis Snakemake Workflow\r +RASflow is a modular, flexible and user-friendly RNA-Seq analysis workflow. \r +\r +RASflow can be applied to both model and non-model organisms. It supports mapping RNA-Seq raw reads to both genome and transcriptome (can be downloaded from public database or can be homemade by users) and it can do both transcript- and gene-level Differential Expression Analysis (DEA) when transcriptome is used as mapping reference. It requires little programming skill for basic use. If you're good at programming, you can do more magic with RASflow!\r +\r +You can help support RASflow by citing our publication:\r +\r +**Zhang, X., Jonassen, I. RASflow: an RNA-Seq analysis workflow with Snakemake. BMC Bioinformatics 21, 110 (2020). 
https://doi.org/10.1186/s12859-020-3433-x**\r +""" ; + schema1:image ; + schema1:keywords "Transcriptomics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "RASflow: RNA-Seq Analysis Snakemake Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/451?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 733943 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Integration of viral sequences in genomic data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1026?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/viralintegration" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/viralintegration" ; + schema1:sdDatePublished "2024-07-12 13:18:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1026/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9792 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:20Z" ; + schema1:dateModified "2024-06-11T12:55:20Z" ; + schema1:description "Integration of viral sequences in genomic data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1026?version=1" ; + schema1:keywords "chimeric-alignment, ctat, viral-integration, Virus, virusintegrationfinder" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/viralintegration" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1026?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-03-13T15:58:39.020374" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Analysis tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This workflow computes a set of Quality Control (QC) analyses on top of an uploaded MD trajectory. QC analyses include positional divergence (RMSd), change of shape (Radius of Gyration), identification of flexible regions (atomic/residue fluctuations), and identification of different molecular conformations (trajectory clustering).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.287.2" ; + schema1:isBasedOn 
"https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_protein_md_analysis/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein MD Analysis tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/287/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5395 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-23T08:37:12Z" ; + schema1:dateModified "2023-01-16T13:58:32Z" ; + schema1:description """# Protein MD Analysis tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This workflow computes a set of Quality Control (QC) analyses on top of an uploaded MD trajectory. QC analyses include positional divergence (RMSd), change of shape (Radius of Gyration), identification of flexible regions (atomic/residue fluctuations), and identification of different molecular conformations (trajectory clustering).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/287?version=2" ; + 
schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein MD Analysis tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_protein_md_analysis/python/workflow.py" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """## Summary\r +\r +This pipeline contains the following functions: \r +(1) Data processing to handle the tansformations needed to obtain the original pathway scores of the samples according to single sample analysis GSEA\r +(2) Model training based on the disease and healthy sample pathway scores, to classify them\r +(3) Scoring matrix weights optimization according to a gold standard list of drugs (those that went on clinical trials or are approved for the disease).It tests the weights in a range of 0 to 30 (you may change as you want). 
The evaluation function tests and try to maximize the number of approved drugs whose modified pathway scores for disease samples is changed from disease to healthy sample classification, according to the trained model.\r +(4) Computation of the calibrated disease samples pathwa scores according to the interaction among drug and targets found in the sample pathways & Drug ranking based on the disease samples whose calibrated matrix were responsible to change the trained model decision from disease to healthy state.\r +(5) Drug combination ranking evaluated the same way as in option (4) but adding the effects of multiple drugs in each sample while calculating the calibrated scoring matrix\r + \r +## Input configuration file:\r +* The pipeline only needs a configuration file and the step number you want to run.\r +- Configuration file keys (see also the example in config.json):\r + - **identifier**: project identifier to be used in the result files\r + - **type_normalization**: normalization type (possible values: tpm, fpkm, tmm, cpm or fpkm_uq)\r + - **genome_assembly**: the supported assemblies are the 37 and 38 (values may be: g37 or g38)\r + - **pathway_geneset**: pathway-based gene sets, choose one identifier from the list in [genesets_available.txt](https://github.com/YasCoMa/caliscoma_pipeline/blob/master/genesets_available.txt)\r + - **folder**: working directory\r + - **expression_file**: compressed gene expression file for the desired icgc project, it must be separated by tabulation. The following columns are mandatory: submitted_file_id (sample names), raw_read_count (the read counts without normalization) and gene_id (genes in ensembl or hgnc symbol). File expected to be in {folder}.\r + - **labels_file** (optional for function 1): file with two columns, one named 'sample' corresponding to the unique values of submitted_sample_id; the second named 'label' corresponding to a disease (or confirmed tumour) (1) or a healthy (0) case. 
File expected to be in {folder}.\r + - **trained_model** (optional for function 1): file with the trained model to separate healthy and disease cases. Full path is expected.\r + - **means_table_file** (optional for function 1): file with the means table calculated when the model is trained by the function 3. Full path is expected.\r + - **samples_pathway_scores** (optional for function 1): file with the original model calculated pathway scores by function 1, in order to check the number of features expected by the original model. Full path is expected.\r + - **optimized_weights_file**: tab separated table file with two columns representing the weights (w1, w2, w3) and their respective values.\r + - **drug_list_file** (only mandatory for function 3): file with the gold standard drug list (one drugbank id per line), this file is expected to be in the in the experiment item folder results ({folder}/{identifier})\r + - **drug_combination_file** (only mandatory for function 5): file with the drug combination candidates list (drugbank ids concatenated with comma in each line). Full path is expected.\r +\r +- Observation: \r + * The "labels_file" parameter is mandatory for the weights optimization, scoring matrix calculation, model traning and drug (or drug combination) ranking \r + * In case of transfer learning, "labels_file" may be ignored only if both "trained_model", "means_table_file" and "samples_pathway_scores" are present. This is only possible for the functions 2, 4 and 5. For weights optimization, only labels file is accepted.\r + * If type_normalization and/or genome_assembly are missing or empty, it will switch to the default fpkm_uq\r + * If pathway_geneset is missing or empty, it will switch to the default KEGG_2021_HUMAN\r + * If optimized_weights_file is missing or empty, it will switch to the default values (w1: 20, w2: 5, w3: 10)\r + \r +## Usage Instructions\r +### Preparation:\r +1. 
````git clone https://github.com/YasCoMa/caliscoma_pipeline.git````\r +2. ````cd caliscoma_pipeline````\r +3. Create conda environment to handle dependencies: ````conda env create -f drugresponse_env.yml````\r +4. ````conda activate drugresponse_env````\r +5. Setup an environment variable named "path_workflow" with the full path to this workflow folder\r +\r +### Getting data for the running example in the LICA-FR and LIRI-JP projects from ICGC\r +1. Download the [expression file for LICA-FR](https://dcc.icgc.org/api/v1/download?fn=/current/Projects/LICA-FR/exp_seq.LICA-FR.tsv.gz) and put it in data_icgc folder\r +2. Download the [expression file for LIRI-JP](https://dcc.icgc.org/api/v1/download?fn=/current/Projects/LIRI-JP/exp_seq.LIRI-JP.tsv.gz) and put it in data_icgc folder\r +3. For the liri-jp project, the labels file is already processed, to given an example of a project that run all steps proposed by this workflow\r +\r +### Run analysis\r +- Run all steps: ````python3 main.py -rt 0 -cf config.json````\r +- Run all steps: ````python3 main.py -rt 0 -cf config_transfer_options.json````\r +\r +- Run only data processing: ````python3 main.py -rt 1 -cf config.json````\r +- Run only data processing: ````python3 main.py -rt 1 -cf config_transfer_options.json````\r +\r +- Run only model training & modified pathway score matrix: ````python3 main.py -rt 2 -cf config.json````\r +- Run only model training & modified pathway score matrix: ````python3 main.py -rt 2 -cf config_transfer_options.json````\r +\r +- Run only weights optimization: ````python3 main.py -rt 3 -cf config.json````\r +\r +- Run only drug ranking: ````python3 main.py -rt 4 -cf config.json````\r +- Run only drug ranking: ````python3 main.py -rt 4 -cf config_transfer_options.json````\r +\r +- Run only drug combination evaluation: ````python3 main.py -rt 5 -cf config.json````\r +- Run only drug combination evaluation: ````python3 main.py -rt 5 -cf config_transfer_options.json````\r +\r +## Reference\r 
+Martins, Y. C. (2023). Multi-task analysis of gene expression data on cancer public datasets. medRxiv, 2023-09.\r +\r +## Bug Report\r +Please, use the [Issues](https://github.com/YasCoMa/caliscoma_pipeline/issues) tab to report any bug.""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/620?version=1" ; + schema1:isBasedOn "https://github.com/YasCoMa/caliscoma_pipeline" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for DReCaS - Pipeline for drug ranking based on computed pathway scores of disease and healthy samples" ; + schema1:sdDatePublished "2024-07-12 13:27:10 +0100" ; + schema1:url "https://workflowhub.eu/workflows/620/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16450 ; + schema1:creator ; + schema1:dateCreated "2023-10-22T00:11:33Z" ; + schema1:dateModified "2023-10-22T00:11:33Z" ; + schema1:description """## Summary\r +\r +This pipeline contains the following functions: \r +(1) Data processing to handle the tansformations needed to obtain the original pathway scores of the samples according to single sample analysis GSEA\r +(2) Model training based on the disease and healthy sample pathway scores, to classify them\r +(3) Scoring matrix weights optimization according to a gold standard list of drugs (those that went on clinical trials or are approved for the disease).It tests the weights in a range of 0 to 30 (you may change as you want). 
The evaluation function tests and tries to maximize the number of approved drugs whose modified pathway scores for disease samples are changed from disease to healthy sample classification, according to the trained model.\r +(4) Computation of the calibrated disease samples pathway scores according to the interaction among drug and targets found in the sample pathways & Drug ranking based on the disease samples whose calibrated matrix were responsible to change the trained model decision from disease to healthy state.\r +(5) Drug combination ranking evaluated the same way as in option (4) but adding the effects of multiple drugs in each sample while calculating the calibrated scoring matrix\r + \r +## Input configuration file:\r +* The pipeline only needs a configuration file and the step number you want to run.\r +- Configuration file keys (see also the example in config.json):\r + - **identifier**: project identifier to be used in the result files\r + - **type_normalization**: normalization type (possible values: tpm, fpkm, tmm, cpm or fpkm_uq)\r + - **genome_assembly**: the supported assemblies are the 37 and 38 (values may be: g37 or g38)\r + - **pathway_geneset**: pathway-based gene sets, choose one identifier from the list in [genesets_available.txt](https://github.com/YasCoMa/caliscoma_pipeline/blob/master/genesets_available.txt)\r + - **folder**: working directory\r + - **expression_file**: compressed gene expression file for the desired icgc project, it must be separated by tabulation. The following columns are mandatory: submitted_file_id (sample names), raw_read_count (the read counts without normalization) and gene_id (genes in ensembl or hgnc symbol). File expected to be in {folder}.\r + - **labels_file** (optional for function 1): file with two columns, one named 'sample' corresponding to the unique values of submitted_sample_id; the second named 'label' corresponding to a disease (or confirmed tumour) (1) or a healthy (0) case. 
File expected to be in {folder}.\r + - **trained_model** (optional for function 1): file with the trained model to separate healthy and disease cases. Full path is expected.\r + - **means_table_file** (optional for function 1): file with the means table calculated when the model is trained by the function 3. Full path is expected.\r + - **samples_pathway_scores** (optional for function 1): file with the original model calculated pathway scores by function 1, in order to check the number of features expected by the original model. Full path is expected.\r + - **optimized_weights_file**: tab separated table file with two columns representing the weights (w1, w2, w3) and their respective values.\r + - **drug_list_file** (only mandatory for function 3): file with the gold standard drug list (one drugbank id per line), this file is expected to be in the experiment item folder results ({folder}/{identifier})\r + - **drug_combination_file** (only mandatory for function 5): file with the drug combination candidates list (drugbank ids concatenated with comma in each line). Full path is expected.\r +\r +- Observation: \r + * The "labels_file" parameter is mandatory for the weights optimization, scoring matrix calculation, model training and drug (or drug combination) ranking \r + * In case of transfer learning, "labels_file" may be ignored only if both "trained_model", "means_table_file" and "samples_pathway_scores" are present. This is only possible for the functions 2, 4 and 5. For weights optimization, only labels file is accepted.\r + * If type_normalization and/or genome_assembly are missing or empty, it will switch to the default fpkm_uq\r + * If pathway_geneset is missing or empty, it will switch to the default KEGG_2021_HUMAN\r + * If optimized_weights_file is missing or empty, it will switch to the default values (w1: 20, w2: 5, w3: 10)\r + \r +## Usage Instructions\r +### Preparation:\r +1. 
````git clone https://github.com/YasCoMa/caliscoma_pipeline.git````\r +2. ````cd caliscoma_pipeline````\r +3. Create conda environment to handle dependencies: ````conda env create -f drugresponse_env.yml````\r +4. ````conda activate drugresponse_env````\r +5. Setup an environment variable named "path_workflow" with the full path to this workflow folder\r +\r +### Getting data for the running example in the LICA-FR and LIRI-JP projects from ICGC\r +1. Download the [expression file for LICA-FR](https://dcc.icgc.org/api/v1/download?fn=/current/Projects/LICA-FR/exp_seq.LICA-FR.tsv.gz) and put it in data_icgc folder\r +2. Download the [expression file for LIRI-JP](https://dcc.icgc.org/api/v1/download?fn=/current/Projects/LIRI-JP/exp_seq.LIRI-JP.tsv.gz) and put it in data_icgc folder\r +3. For the liri-jp project, the labels file is already processed, to give an example of a project that runs all steps proposed by this workflow\r +\r +### Run analysis\r +- Run all steps: ````python3 main.py -rt 0 -cf config.json````\r +- Run all steps: ````python3 main.py -rt 0 -cf config_transfer_options.json````\r +\r +- Run only data processing: ````python3 main.py -rt 1 -cf config.json````\r +- Run only data processing: ````python3 main.py -rt 1 -cf config_transfer_options.json````\r +\r +- Run only model training & modified pathway score matrix: ````python3 main.py -rt 2 -cf config.json````\r +- Run only model training & modified pathway score matrix: ````python3 main.py -rt 2 -cf config_transfer_options.json````\r +\r +- Run only weights optimization: ````python3 main.py -rt 3 -cf config.json````\r +\r +- Run only drug ranking: ````python3 main.py -rt 4 -cf config.json````\r +- Run only drug ranking: ````python3 main.py -rt 4 -cf config_transfer_options.json````\r +\r +- Run only drug combination evaluation: ````python3 main.py -rt 5 -cf config.json````\r +- Run only drug combination evaluation: ````python3 main.py -rt 5 -cf config_transfer_options.json````\r +\r +## Reference\r 
+Martins, Y. C. (2023). Multi-task analysis of gene expression data on cancer public datasets. medRxiv, 2023-09.\r +\r +## Bug Report\r +Please, use the [Issues](https://github.com/YasCoMa/caliscoma_pipeline/issues) tab to report any bug.""" ; + schema1:keywords "Workflows, drug response simulation, gene set enrichment analysis, personalized medicine, data retrieval and transformation" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "DReCaS - Pipeline for drug ranking based on computed pathway scores of disease and healthy samples" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/620?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-15T16:30:18.151248" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Purge-duplicate-contigs-VGP6/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicate-contigs-VGP6/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:version "0.3.1" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# Pangenome databases provide superior host removal and mycobacteria classification from clinical metagenomic data\r +\r +> Hall, M, Coin, L., Pangenome databases provide superior host removal and mycobacteria classification from clinical metagenomic data. bioRxiv 2023. doi: [10.1101/2023.09.18.558339][doi]\r +\r +Benchmarking different ways of doing read (taxonomic) classification, with a focus on\r +removal of contamination and classification of _M. 
tuberculosis_ reads.\r +\r +This repository contains the code and snakemake pipeline to build/download the\r +databases, obtain all results from [the paper][doi], along with accompanying configuration\r +files.\r +\r +Custom databases have all been uploaded to Zenodo, along with the simulated reads:\r +\r +- Nanopore simulated metagenomic reads - \r +- Illumina simulated metagenomic reads - \r +- Nanopore and Illumina artificial real reads - \r +- Kraken2 database built from the Human Pangenome Reference Consortium\r + genomes - \r +- Kraken2 database built from the kraken2 Human\r + library - \r +- Kraken2 database built from a *Mycobacterium* representative set of\r + genomes - \r +- A (fasta) database of representative genomes from the *Mycobacterium*\r + genus - \r +- A (fasta) database of *M. tuberculosis* genomes from a variety of\r + lineages - \r +- The fasta file built from the [Clockwork](https://github.com/iqbal-lab-org/clockwork)\r + decontamination pipeline - \r +\r +## Example usage\r +\r +We provide some usage examples showing how to download the databases and then use them\r +on your reads.\r +\r +### Human read removal\r +\r +The method we found to give the best balance of runtime, memory usage, and precision and\r +recall was kraken2 with a database built from the Human Pangenome Reference Consortium\r +genomes.\r +\r +This example has been wrapped into a standalone tool called [`nohuman`](https://github.com/mbhall88/nohuman/) which takes a fastq as input and returns a fastq with human reads removed.\r +\r +#### Download human database\r +\r +```\r +mkdir HPRC_db/\r +cd HPRC_db\r +URL="https://zenodo.org/record/8339732/files/k2_HPRC_20230810.tar.gz"\r +wget "$URL"\r +tar -xzf k2_HPRC_20230810.tar.gz\r +rm k2_HPRC_20230810.tar.gz\r +```\r +\r +#### Run kraken2 with HPRC database\r +\r +You'll need [kraken2](https://github.com/DerrickWood/kraken2) installed for this step.\r +\r +```\r +kraken2 --threads 4 --db HPRC_db/ --output classifications.tsv 
reads.fq\r +```\r +\r +If you are using Illumina reads, a slight adjustment is needed\r +\r +```\r +kraken2 --paired --threads 4 --db HPRC_db/ --output classifications.tsv reads_1.fq reads_2.fq\r +```\r +\r +#### Extract non-human reads\r +\r +You'll need [seqkit](https://github.com/shenwei356/seqkit) installed for this step\r +\r +For Nanopore data\r +\r +```\r +awk -F'\\t' '$1=="U" {print $2}' classifications.tsv | \\\r + seqkit grep -f - -o reads.depleted.fq reads.fq\r +```\r +\r +For Illumina data\r +\r +```\r +awk -F'\\t' '$1=="U" {print $2}' classifications.tsv > ids.txt\r +seqkit grep --id-regexp '^(\\S+)/[12]' -f ids.txt -o reads_1.depleted.fq reads_1.fq\r +seqkit grep --id-regexp '^(\\S+)/[12]' -f ids.txt -o reads_2.depleted.fq reads_2.fq\r +```\r +\r +### *M. tuberculosis* classification/enrichment\r +\r +For this step we recommend either [minimap2](https://github.com/lh3/minimap2) or kraken2\r +with a *Mycobacterium* genus database. We leave it to the user to decide which approach\r +they prefer based on the results in our manuscript.\r +\r +#### Download databases\r +\r +```\r +mkdir Mycobacterium_db\r +cd Mycobacterium_db\r +# download database for use with minimap2\r +URL="https://zenodo.org/record/8339941/files/Mycobacterium.rep.fna.gz"\r +wget "$URL"\r +IDS_URL="https://zenodo.org/record/8343322/files/mtb.ids"\r +wget "$IDS_URL"\r +# download kraken database\r +URL="https://zenodo.org/record/8339822/files/k2_Mycobacterium_20230817.tar.gz"\r +wget "$URL"\r +tar -xzf k2_Mycobacterium_20230817.tar.gz\r +rm k2_Mycobacterium_20230817.tar.gz\r +```\r +\r +#### Classify reads\r +\r +**minimap2**\r +\r +```\r +# nanopore\r +minimap2 --secondary=no -c -t 4 -x map-ont -o reads.aln.paf Mycobacterium_db/Mycobacterium.rep.fna.gz reads.depleted.fq\r +# illumina\r +minimap2 --secondary=no -c -t 4 -x sr -o reads.aln.paf Mycobacterium_db/Mycobacterium.rep.fna.gz reads_1.depleted.fq reads_2.depleted.fq\r +```\r +\r +**kraken2**\r +\r +```\r +# nanopore\r +kraken2 
--db Mycobacterium_db --threads 4 --report myco.kreport --output classifications.myco.tsv reads.depleted.fq\r +# illumina\r +kraken2 --db Mycobacterium_db --paired --threads 4 --report myco.kreport --output classifications.myco.tsv reads_1.depleted.fq reads_2.depleted.fq\r +```\r +\r +#### Extract *M. tuberculosis* reads\r +\r +**minimap2**\r +\r +```\r +# nanopore\r +grep -Ff Mycobacterium_db/mtb.ids reads.aln.paf | cut -f1 | \\\r + seqkit grep -f - -o reads.enriched.fq reads.depleted.fq\r +# illumina\r +grep -Ff Mycobacterium_db/mtb.ids reads.aln.paf | cut -f1 > keep.ids\r +seqkit grep -f keep.ids -o reads_1.enriched.fq reads_1.depleted.fq\r +seqkit grep -f keep.ids -o reads_2.enriched.fq reads_2.depleted.fq\r +```\r +\r +**kraken2**\r +\r +We'll use\r +the [`extract_kraken_reads.py` script](https://github.com/jenniferlu717/KrakenTools#extract_kraken_readspy)\r +for this\r +\r +```\r +# nanopore\r +python extract_kraken_reads.py -k classifications.myco.tsv -1 reads.depleted.fq -o reads.enriched.fq -t 1773 -r myco.kreport --include-children\r +# illumina\r +python extract_kraken_reads.py -k classifications.myco.tsv -1 reads_1.depleted.fq -2 reads_2.depleted.fq -o reads_1.enriched.fq -o2 reads_2.enriched.fq -t 1773 -r myco.kreport --include-children\r +```\r +\r +[doi]: https://doi.org/10.1101/2023.09.18.558339 \r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.700.2" ; + schema1:isBasedOn "https://github.com/mbhall88/classification_benchmark.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Pangenome databases provide superior host removal and mycobacteria classification from clinical metagenomic data" ; + schema1:sdDatePublished "2024-07-12 13:25:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/700/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1531 ; + schema1:creator ; + schema1:dateCreated "2024-01-10T01:19:39Z" ; + schema1:dateModified "2024-01-10T01:20:12Z" ; + schema1:description """# Pangenome databases provide superior host removal and mycobacteria classification from clinical metagenomic data\r +\r +> Hall, M, Coin, L., Pangenome databases provide superior host removal and mycobacteria classification from clinical metagenomic data. bioRxiv 2023. doi: [10.1101/2023.09.18.558339][doi]\r +\r +Benchmarking different ways of doing read (taxonomic) classification, with a focus on\r +removal of contamination and classification of _M. tuberculosis_ reads.\r +\r +This repository contains the code and snakemake pipeline to build/download the\r +databases, obtain all results from [the paper][doi], along with accompanying configuration\r +files.\r +\r +Custom databases have all been uploaded to Zenodo, along with the simulated reads:\r +\r +- Nanopore simulated metagenomic reads - \r +- Illumina simulated metagenomic reads - \r +- Nanopore and Illumina artificial real reads - \r +- Kraken2 database built from the Human Pangenome Reference Consortium\r + genomes - \r +- Kraken2 database built from the kraken2 Human\r + library - \r +- Kraken2 database built from a *Mycobacterium* representative set of\r + genomes - \r +- A (fasta) database of representative genomes from the *Mycobacterium*\r + genus - \r +- A (fasta) database of *M. 
tuberculosis* genomes from a variety of\r + lineages - \r +- The fasta file built from the [Clockwork](https://github.com/iqbal-lab-org/clockwork)\r + decontamination pipeline - \r +\r +## Example usage\r +\r +We provide some usage examples showing how to download the databases and then use them\r +on your reads.\r +\r +### Human read removal\r +\r +The method we found to give the best balance of runtime, memory usage, and precision and\r +recall was kraken2 with a database built from the Human Pangenome Reference Consortium\r +genomes.\r +\r +This example has been wrapped into a standalone tool called [`nohuman`](https://github.com/mbhall88/nohuman/) which takes a fastq as input and returns a fastq with human reads removed.\r +\r +#### Download human database\r +\r +```\r +mkdir HPRC_db/\r +cd HPRC_db\r +URL="https://zenodo.org/record/8339732/files/k2_HPRC_20230810.tar.gz"\r +wget "$URL"\r +tar -xzf k2_HPRC_20230810.tar.gz\r +rm k2_HPRC_20230810.tar.gz\r +```\r +\r +#### Run kraken2 with HPRC database\r +\r +You'll need [kraken2](https://github.com/DerrickWood/kraken2) installed for this step.\r +\r +```\r +kraken2 --threads 4 --db HPRC_db/ --output classifications.tsv reads.fq\r +```\r +\r +If you are using Illumina reads, a slight adjustment is needed\r +\r +```\r +kraken2 --paired --threads 4 --db HPRC_db/ --output classifications.tsv reads_1.fq reads_2.fq\r +```\r +\r +#### Extract non-human reads\r +\r +You'll need [seqkit](https://github.com/shenwei356/seqkit) installed for this step\r +\r +For Nanopore data\r +\r +```\r +awk -F'\\t' '$1=="U" {print $2}' classifications.tsv | \\\r + seqkit grep -f - -o reads.depleted.fq reads.fq\r +```\r +\r +For Illumina data\r +\r +```\r +awk -F'\\t' '$1=="U" {print $2}' classifications.tsv > ids.txt\r +seqkit grep --id-regexp '^(\\S+)/[12]' -f ids.txt -o reads_1.depleted.fq reads_1.fq\r +seqkit grep --id-regexp '^(\\S+)/[12]' -f ids.txt -o reads_2.depleted.fq reads_2.fq\r +```\r +\r +### *M. 
tuberculosis* classification/enrichment\r +\r +For this step we recommend either [minimap2](https://github.com/lh3/minimap2) or kraken2\r +with a *Mycobacterium* genus database. We leave it to the user to decide which approach\r +they prefer based on the results in our manuscript.\r +\r +#### Download databases\r +\r +```\r +mkdir Mycobacterium_db\r +cd Mycobacterium_db\r +# download database for use with minimap2\r +URL="https://zenodo.org/record/8339941/files/Mycobacterium.rep.fna.gz"\r +wget "$URL"\r +IDS_URL="https://zenodo.org/record/8343322/files/mtb.ids"\r +wget "$IDS_URL"\r +# download kraken database\r +URL="https://zenodo.org/record/8339822/files/k2_Mycobacterium_20230817.tar.gz"\r +wget "$URL"\r +tar -xzf k2_Mycobacterium_20230817.tar.gz\r +rm k2_Mycobacterium_20230817.tar.gz\r +```\r +\r +#### Classify reads\r +\r +**minimap2**\r +\r +```\r +# nanopore\r +minimap2 --secondary=no -c -t 4 -x map-ont -o reads.aln.paf Mycobacterium_db/Mycobacterium.rep.fna.gz reads.depleted.fq\r +# illumina\r +minimap2 --secondary=no -c -t 4 -x sr -o reads.aln.paf Mycobacterium_db/Mycobacterium.rep.fna.gz reads_1.depleted.fq reads_2.depleted.fq\r +```\r +\r +**kraken2**\r +\r +```\r +# nanopore\r +kraken2 --db Mycobacterium_db --threads 4 --report myco.kreport --output classifications.myco.tsv reads.depleted.fq\r +# illumina\r +kraken2 --db Mycobacterium_db --paired --threads 4 --report myco.kreport --output classifications.myco.tsv reads_1.depleted.fq reads_2.depleted.fq\r +```\r +\r +#### Extract *M. 
tuberculosis* reads\r +\r +**minimap2**\r +\r +```\r +# nanopore\r +grep -Ff Mycobacterium_db/mtb.ids reads.aln.paf | cut -f1 | \\\r + seqkit grep -f - -o reads.enriched.fq reads.depleted.fq\r +# illumina\r +grep -Ff Mycobacterium_db/mtb.ids reads.aln.paf | cut -f1 > keep.ids\r +seqkit grep -f keep.ids -o reads_1.enriched.fq reads_1.depleted.fq\r +seqkit grep -f keep.ids -o reads_2.enriched.fq reads_2.depleted.fq\r +```\r +\r +**kraken2**\r +\r +We'll use\r +the [`extract_kraken_reads.py` script](https://github.com/jenniferlu717/KrakenTools#extract_kraken_readspy)\r +for this\r +\r +```\r +# nanopore\r +python extract_kraken_reads.py -k classifications.myco.tsv -1 reads.depleted.fq -o reads.enriched.fq -t 1773 -r myco.kreport --include-children\r +# illumina\r +python extract_kraken_reads.py -k classifications.myco.tsv -1 reads_1.depleted.fq -2 reads_2.depleted.fq -o reads_1.enriched.fq -o2 reads_2.enriched.fq -t 1773 -r myco.kreport --include-children\r +```\r +\r +[doi]: https://doi.org/10.1101/2023.09.18.558339 \r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/700?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Pangenome databases provide superior host removal and mycobacteria classification from clinical metagenomic data" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/700?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """The workflows in this collection are from the '16S Microbial Analysis with mothur' tutorial for analysis of 16S data (Saskia Hiltemann, Bérénice Batut, Dave Clements), adapted for pipeline use on galaxy australia (Ahmed Mehdi). The workflows developed in galaxy use mothur software package developed by Schloss et al https://pubmed.ncbi.nlm.nih.gov/19801464/. 
\r +\r +Please also refer to the 16S tutorials available at Galaxy https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html and [https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop/tutorial.html\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/650?version=1" ; + schema1:isBasedOn "https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Workflow 3: Classification [Galaxy Training: 16S Microbial Analysis With Mothur]" ; + schema1:sdDatePublished "2024-07-12 13:26:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/650/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12146 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2023-11-09T05:16:29Z" ; + schema1:dateModified "2023-11-09T05:16:29Z" ; + schema1:description """The workflows in this collection are from the '16S Microbial Analysis with mothur' tutorial for analysis of 16S data (Saskia Hiltemann, Bérénice Batut, Dave Clements), adapted for pipeline use on galaxy australia (Ahmed Mehdi). The workflows developed in galaxy use mothur software package developed by Schloss et al https://pubmed.ncbi.nlm.nih.gov/19801464/. 
\r +\r +Please also refer to the 16S tutorials available at Galaxy https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html and [https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop/tutorial.html\r +""" ; + schema1:isPartOf ; + schema1:keywords "Metagenomics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Workflow 3: Classification [Galaxy Training: 16S Microbial Analysis With Mothur]" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/650?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + ; + ns1:output , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for 
details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.131.5" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/131/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 76457 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-04T15:31:10Z" ; + schema1:dateModified "2024-05-14T10:16:26Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see 
the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/131?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_amber_md_setup/blob/main/biobb_wf_amber_md_setup/notebooks/mdsetup_lig/biobb_amber_complex_setup_notebook.ipynb" ; + schema1:version 5 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-23T06:34:49+00:00" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/AustralianBioCommons/Genome-assessment-post-assembly.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Genome-assessment-post-assembly" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Genome-assessment-post-assembly" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/AustralianBioCommons/Genome-assessment-post-assembly.git" ; + schema1:version "main" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.818.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_amber_complex_md_setup/docker" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Docker Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:23:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/818/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 780 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-22T10:11:25Z" ; + schema1:dateModified "2024-05-22T13:49:10Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Docker Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_amber_complex_md_setup/docker/Dockerfile" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. 
Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. 
\r +- At the **biological level**, it is important to link observed **conformational ensembles**, to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDKe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.486.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_flexdyn" ; + schema1:license "other-open" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein conformational ensembles generation" ; + schema1:sdDatePublished "2024-07-12 13:33:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/486/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 108623 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-04T15:18:56Z" ; + schema1:dateModified "2024-04-22T10:40:33Z" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. 
Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. 
\r +- At the **biological level**, it is important to link observed **conformational ensembles**, to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDKe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/486?version=2" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "" ; + schema1:name "Jupyter Notebook Protein conformational ensembles generation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_flexdyn/blob/main/biobb_wf_flexdyn/notebooks/biobb_wf_flexdyn.ipynb" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:description """# RNA-seq Scientific Workflow\r +Workflow for RNA sequencing using the Parallel Scripting Library - Parsl.\r +\r +**Reference:** Cruz, L., Coelho, M., Terra, R., Carvalho, D., Gadelha, L., Osthoff, C., & Ocaña, K. (2021). *Workflows* Científicos de RNA-Seq em Ambientes Distribuídos de Alto Desempenho: Otimização de Desempenho e Análises de Dados de Expressão Diferencial de Genes. In *Anais do XV Brazilian e-Science Workshop*, p. 57-64. Porto Alegre: SBC. 
DOI: https://doi.org/10.5753/bresci.2021.15789\r +\r +## Requirements\r +\r +In order to use RNA-seq Workflow the following tools must be available:\r +\r +- [Bowtie2](http://bowtie-bio.sourceforge.net/bowtie2/index.shtml)\r +\r +You can install Bowtie2 by running:\r +\r +> bowtie2-2.3.5.1-linux-x86_64.zip\r +\r +Or\r +\r +> sudo yum install bowtie2-2.3.5-linux-x86_64\r +\r +- [Samtools](http://www.htslib.org/)\r +\r +Samtools is a suite of programs for interacting with high-throughput sequencing data.\r +\r +- [Picard](https://github.com/broadinstitute/picard)\r +\r +Picard is a set of Java command line tools for manipulating high-throughput sequencing (HTS) data and formats.\r +\r +- [HTSeq](https://htseq.readthedocs.io/en/master/)\r +\r +HTSeq is a native Python library that folows conventions of many Python packages. You can install it by running:\r +\r +> pip install HTSeq\r +\r +HTSeq uses [NumPy](https://numpy.org/), [Pysam](https://github.com/pysam-developers/pysam) and [matplotlib](https://matplotlib.org/). Be sure this tools are installed.\r +\r +- [R](https://www.r-project.org/)\r +\r +To use [DESEq2](https://bioconductor.org/packages/release/bioc/html/DESeq2.html) script make sure R language is also installed. You can install it by running:\r +\r +\r +> sudo apt install r-base\r +\r +- [Parsl - Parallel Scripting Library](https://parsl.readthedocs.io/en/stable/index.html)\r +\r +The recommended way to install Parsl is the suggest approach from Parsl's documentation:\r +\r +\r +> python3 -m pip install parsl\r +\r +- [Python (version >= 3.5)](https://www.python.org/)\r +\r +To use Parsl, you need Python 3.5 or above. You also need Python to use HTSeq, so you should load only one Python version.\r +\r +## Workflow invocation\r +\r +First of all, make a Comma Separated Values (CSV) file. So, onto the first line type: ``sampleName,fileName,condition``. **Remember, there must be no spaces between items**. 
You can use the file *"table.csv"* in this repository as an example. Your CSV file will be like this:\r +\r + | sampleName | fileName |condition|\r + |------------------|------------------|---------|\r + | tissue control 1 | SRR5445794.merge.count | control |\r + | tissue control 2 | SRR5445795.merge.count | control |\r + | tissue control 3 | SRR5445796.merge.count | control |\r + | tissue wntup 1 | SRR5445797.merge.count | wntup |\r + | tissue wntup 2 | SRR5445798.merge.count | wntup |\r + | tissue wntup 3 | SRR5445799.merge.count | wntup |\r +\r +The list of command line arguments passed to Python script, beyond the script's name, must be: \r +\r + 1. The indexed genome; \r + 2. The number of threads for bowtie task, sort task, number of splitted files for split_picard task and number of CPU running in htseq task; \r + 3. Path to read fastaq file, which is the path of the input files; \r + 4. Directory's name where the output files must be placed; \r + 5. GTF file;\r + 7. and, lastly the DESeq script. \r + \r +Make sure all the files necessary to run the workflow are in the same directory and the fastaq files in a dedicated folder, as a input directory. The command line will be like this:\r +\r +> python3 rna-seq.py ../mm9/mm9 24 ../inputs/ ../outputs ../Mus_musculus.NCBIM37.67.gtf ../DESeq.R\r +\r +**Remember to adjust the parameter multithreaded and multicore according with your computational environment.** \r +Example: If your machine has 8 cores, you should set the parameter on 8.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.411.1" ; + schema1:isBasedOn "https://github.com/lucruzz/RNA-seq" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ParslRNA-seq Scientific Workflow" ; + schema1:sdDatePublished "2024-07-12 13:34:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/411/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4579 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-12-06T19:17:17Z" ; + schema1:dateModified "2023-01-16T14:04:54Z" ; + schema1:description """# RNA-seq Scientific Workflow\r +Workflow for RNA sequencing using the Parallel Scripting Library - Parsl.\r +\r +**Reference:** Cruz, L., Coelho, M., Terra, R., Carvalho, D., Gadelha, L., Osthoff, C., & Ocaña, K. (2021). *Workflows* Científicos de RNA-Seq em Ambientes Distribuídos de Alto Desempenho: Otimização de Desempenho e Análises de Dados de Expressão Diferencial de Genes. In *Anais do XV Brazilian e-Science Workshop*, p. 57-64. Porto Alegre: SBC. DOI: https://doi.org/10.5753/bresci.2021.15789\r +\r +## Requirements\r +\r +In order to use RNA-seq Workflow the following tools must be available:\r +\r +- [Bowtie2](http://bowtie-bio.sourceforge.net/bowtie2/index.shtml)\r +\r +You can install Bowtie2 by running:\r +\r +> bowtie2-2.3.5.1-linux-x86_64.zip\r +\r +Or\r +\r +> sudo yum install bowtie2-2.3.5-linux-x86_64\r +\r +- [Samtools](http://www.htslib.org/)\r +\r +Samtools is a suite of programs for interacting with high-throughput sequencing data.\r +\r +- [Picard](https://github.com/broadinstitute/picard)\r +\r +Picard is a set of Java command line tools for manipulating high-throughput sequencing (HTS) data and formats.\r +\r +- [HTSeq](https://htseq.readthedocs.io/en/master/)\r +\r +HTSeq is a native Python library that folows conventions of many Python packages. You can install it by running:\r +\r +> pip install HTSeq\r +\r +HTSeq uses [NumPy](https://numpy.org/), [Pysam](https://github.com/pysam-developers/pysam) and [matplotlib](https://matplotlib.org/). 
Be sure this tools are installed.\r +\r +- [R](https://www.r-project.org/)\r +\r +To use [DESEq2](https://bioconductor.org/packages/release/bioc/html/DESeq2.html) script make sure R language is also installed. You can install it by running:\r +\r +\r +> sudo apt install r-base\r +\r +- [Parsl - Parallel Scripting Library](https://parsl.readthedocs.io/en/stable/index.html)\r +\r +The recommended way to install Parsl is the suggest approach from Parsl's documentation:\r +\r +\r +> python3 -m pip install parsl\r +\r +- [Python (version >= 3.5)](https://www.python.org/)\r +\r +To use Parsl, you need Python 3.5 or above. You also need Python to use HTSeq, so you should load only one Python version.\r +\r +## Workflow invocation\r +\r +First of all, make a Comma Separated Values (CSV) file. So, onto the first line type: ``sampleName,fileName,condition``. **Remember, there must be no spaces between items**. You can use the file *"table.csv"* in this repository as an example. Your CSV file will be like this:\r +\r + | sampleName | fileName |condition|\r + |------------------|------------------|---------|\r + | tissue control 1 | SRR5445794.merge.count | control |\r + | tissue control 2 | SRR5445795.merge.count | control |\r + | tissue control 3 | SRR5445796.merge.count | control |\r + | tissue wntup 1 | SRR5445797.merge.count | wntup |\r + | tissue wntup 2 | SRR5445798.merge.count | wntup |\r + | tissue wntup 3 | SRR5445799.merge.count | wntup |\r +\r +The list of command line arguments passed to Python script, beyond the script's name, must be: \r +\r + 1. The indexed genome; \r + 2. The number of threads for bowtie task, sort task, number of splitted files for split_picard task and number of CPU running in htseq task; \r + 3. Path to read fastaq file, which is the path of the input files; \r + 4. Directory's name where the output files must be placed; \r + 5. GTF file;\r + 7. and, lastly the DESeq script. 
\r + \r +Make sure all the files necessary to run the workflow are in the same directory and the fastaq files in a dedicated folder, as a input directory. The command line will be like this:\r +\r +> python3 rna-seq.py ../mm9/mm9 24 ../inputs/ ../outputs ../Mus_musculus.NCBIM37.67.gtf ../DESeq.R\r +\r +**Remember to adjust the parameter multithreaded and multicore according with your computational environment.** \r +Example: If your machine has 8 cores, you should set the parameter on 8.\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "ParslRNA-seq Scientific Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/411?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 19053 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Automated quantitative analysis of DIA proteomics mass spectrometry measurements." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/980?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/diaproteomics" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/diaproteomics" ; + schema1:sdDatePublished "2024-07-12 13:21:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/980/ro_crate?version=7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7318 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "Automated quantitative analysis of DIA proteomics mass spectrometry measurements." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/980?version=6" ; + schema1:keywords "data-independent-proteomics, dia-proteomics, openms, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/diaproteomics" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/980?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/972?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/circdna" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/circdna" ; + schema1:sdDatePublished "2024-07-12 13:22:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/972/ro_crate?version=7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10158 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:45Z" ; + schema1:dateModified "2024-06-11T12:54:45Z" ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/972?version=7" ; + schema1:keywords "ampliconarchitect, ampliconsuite, circle-seq, circular, DNA, eccdna, ecdna, extrachromosomal-circular-dna, Genomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/circdna" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/972?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """\r +Author: AMBARISH KUMAR er.ambarish@gmail.com & ambari73_sit@jnu.ac.in\r +\r +This is a proposed standard operating procedure for genomic variant detection using GATK4.\r +\r +It is hoped to be effective and useful for getting SARS-CoV-2 genome variants.\r +\r +\r +\r +It uses Illumina RNASEQ reads and genome sequence.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/33?version=1" ; + schema1:isBasedOn "https://github.com/ambarishK/bio-cwl-tools/blob/release/gatk4W-spark.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genomic variants - SNPs and INDELs detection using GATK4 spark based tools." ; + schema1:sdDatePublished "2024-07-12 13:37:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/33/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4305 ; + schema1:creator ; + schema1:dateCreated "2020-06-17T06:38:32Z" ; + schema1:dateModified "2023-01-16T13:42:33Z" ; + schema1:description """\r +Author: AMBARISH KUMAR er.ambarish@gmail.com & ambari73_sit@jnu.ac.in\r +\r +This is a proposed standard operating procedure for genomic variant detection using GATK4.\r +\r +It is hoped to be effective and useful for getting SARS-CoV-2 genome variants.\r +\r +\r +\r +It uses Illumina RNASEQ reads and genome sequence.\r +""" ; + schema1:image ; + schema1:keywords "CWL, GATK4, SNPs, INDELs, SPARK" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Genomic variants - SNPs and INDELs detection using GATK4 spark based tools." ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/33?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + ; + ns1:output , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 72307 . + + a schema1:Dataset ; + schema1:datePublished "2024-03-06T13:06:49.225296" ; + schema1:hasPart , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/fastq-to-matrix-10x" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + ; + schema1:name "fastq-to-matrix-10x/scrna-seq-fastq-to-matrix-10x-cellplex" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + , + ; + schema1:name "fastq-to-matrix-10x/scrna-seq-fastq-to-matrix-10x-v3" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/fastq-to-matrix-10x" ; + schema1:version "0.3" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# ![sanger-tol/ensemblrepeatdownload](docs/images/sanger-tol-ensemblrepeatdownload_logo.png)\r +\r +[![GitHub Actions CI Status](https://github.com/sanger-tol/ensemblrepeatdownload/workflows/nf-core%20CI/badge.svg)](https://github.com/sanger-tol/ensemblrepeatdownload/actions?query=workflow%3A%22nf-core+CI%22)\r +\r +\r +\r +[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.7183380-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.7183380)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.04.0-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +\r +[![Get help on Slack](http://img.shields.io/badge/slack-SangerTreeofLife%20%23pipelines-4A154B?labelColor=000000&logo=slack)](https://SangerTreeofLife.slack.com/channels/pipelines)\r +[![Follow on Twitter](http://img.shields.io/badge/twitter-%40sangertol-1DA1F2?labelColor=000000&logo=twitter)](https://twitter.com/sangertol)\r +[![Watch on YouTube](http://img.shields.io/badge/youtube-tree--of--life-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/channel/UCFeDpvjU58SA9V0ycRXejhA)\r +\r +## Introduction\r +\r +**sanger-tol/ensemblrepeatdownload** is a pipeline 
that downloads repeat annotations from Ensembl into a Tree of Life directory structure.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +On release, automated continuous integration tests run the pipeline on a full-sized dataset on the GitHub CI infrastructure. This ensures that the pipeline runs in a third-party environment, and has sensible resource allocation defaults set to run on real-world datasets.\r +\r +## Pipeline summary\r +\r +## Overview\r +\r +The pipeline takes a CSV file that contains assembly accession number, Ensembl species names (as they may differ from Tree of Life ones !), output directories.\r +Assembly accession numbers are optional too. If missing, the pipeline assumes it can be retrieved from files named `ACCESSION` in the standard location on disk.\r +The pipeline downloads the repeat annotation as the masked Fasta file and a BED file.\r +All files are compressed with `bgzip`, and indexed with `samtools faidx` or `tabix`.\r +\r +Steps involved:\r +\r +- Download the masked fasta file from Ensembl.\r +- Extract the coordinates of the masked regions into a BED file.\r +- Compress and index the BED file with `bgzip` and `tabix`.\r +\r +## Quick Start\r +\r +1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=22.04.0`)\r +\r +2. 
Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) (you can follow [this tutorial](https://singularity-tutorial.github.io/01-installation/)), [`Podman`](https://podman.io/), [`Shifter`](https://nersc.gitlab.io/development/shifter/how-to-use/) or [`Charliecloud`](https://hpc.github.io/charliecloud/) for full pipeline reproducibility _(you can use [`Conda`](https://conda.io/miniconda.html) both to install Nextflow itself and also to manage software within pipelines. Please only use it within pipelines as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_.\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```bash\r + nextflow run sanger-tol/ensemblrepeatdownload -profile test,YOURPROFILE --outdir \r + ```\r +\r + Note that some form of configuration will be needed so that Nextflow knows how to fetch the required software. This is usually done in the form of a config profile (`YOURPROFILE` in the example command above). You can chain multiple config profiles in a comma-separated string.\r +\r + > - The pipeline comes with config profiles called `docker`, `singularity`, `podman`, `shifter`, `charliecloud` and `conda` which instruct the pipeline to use the named tool for software management. For example, `-profile test,docker`.\r + > - Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment.\r + > - If you are using `singularity`, please use the [`nf-core download`](https://nf-co.re/tools/#downloading-pipelines-for-offline-use) command to download images first, before running the pipeline. 
Setting the [`NXF_SINGULARITY_CACHEDIR` or `singularity.cacheDir`](https://www.nextflow.io/docs/latest/singularity.html?#singularity-docker-hub) Nextflow options enables you to store and re-use the images from a central location for future pipeline runs.\r + > - If you are using `conda`, it is highly recommended to use the [`NXF_CONDA_CACHEDIR` or `conda.cacheDir`](https://www.nextflow.io/docs/latest/conda.html) settings to store the environments in a central location for future pipeline runs.\r +\r +4. Start running your own analysis!\r +\r + ```console\r + nextflow run sanger-tol/ensemblrepeatdownload --input $PWD/assets/samplesheet.csv --outdir -profile \r + ```\r +\r +## Documentation\r +\r +The sanger-tol/ensemblrepeatdownload pipeline comes with documentation about the pipeline [usage](docs/usage.md) and [output](docs/output.md).\r +\r +## Credits\r +\r +sanger-tol/ensemblrepeatdownload was originally written by @muffato.\r +\r +## Contributions and Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +For further information or help, don't hesitate to get in touch on the [Slack `#pipelines` channel](https://sangertreeoflife.slack.com/channels/pipelines). 
Please [create an issue](https://github.com/sanger-tol/ensemblrepeatdownload/issues/new/choose) on GitHub if you are not on the Sanger slack channel.\r +\r +## Citations\r +\r +If you use sanger-tol/ensemblrepeatdownload for your analysis, please cite it using the following doi: [10.5281/zenodo.7183380](https://doi.org/10.5281/zenodo.7183380)\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +This pipeline uses code and infrastructure developed and maintained by the [nf-core](https://nf-co.re) community, reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/667?version=1" ; + schema1:isBasedOn "https://github.com/sanger-tol/ensemblrepeatdownload" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for sanger-tol/ensemblrepeatdownload v1.0.0 - Gwaihir the Windlord" ; + schema1:sdDatePublished "2024-07-12 13:26:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/667/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1721 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-14T12:06:10Z" ; + schema1:dateModified "2023-11-14T12:06:10Z" ; + schema1:description """# ![sanger-tol/ensemblrepeatdownload](docs/images/sanger-tol-ensemblrepeatdownload_logo.png)\r +\r +[![GitHub Actions CI Status](https://github.com/sanger-tol/ensemblrepeatdownload/workflows/nf-core%20CI/badge.svg)](https://github.com/sanger-tol/ensemblrepeatdownload/actions?query=workflow%3A%22nf-core+CI%22)\r +\r +\r +\r +[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.7183380-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.7183380)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.04.0-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +\r +[![Get help on Slack](http://img.shields.io/badge/slack-SangerTreeofLife%20%23pipelines-4A154B?labelColor=000000&logo=slack)](https://SangerTreeofLife.slack.com/channels/pipelines)\r +[![Follow on Twitter](http://img.shields.io/badge/twitter-%40sangertol-1DA1F2?labelColor=000000&logo=twitter)](https://twitter.com/sangertol)\r +[![Watch on YouTube](http://img.shields.io/badge/youtube-tree--of--life-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/channel/UCFeDpvjU58SA9V0ycRXejhA)\r +\r +## Introduction\r +\r +**sanger-tol/ensemblrepeatdownload** is a pipeline that downloads repeat annotations from Ensembl into a Tree of Life directory 
structure.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +On release, automated continuous integration tests run the pipeline on a full-sized dataset on the GitHub CI infrastructure. This ensures that the pipeline runs in a third-party environment, and has sensible resource allocation defaults set to run on real-world datasets.\r +\r +## Pipeline summary\r +\r +## Overview\r +\r +The pipeline takes a CSV file that contains assembly accession number, Ensembl species names (as they may differ from Tree of Life ones !), output directories.\r +Assembly accession numbers are optional too. If missing, the pipeline assumes it can be retrieved from files named `ACCESSION` in the standard location on disk.\r +The pipeline downloads the repeat annotation as the masked Fasta file and a BED file.\r +All files are compressed with `bgzip`, and indexed with `samtools faidx` or `tabix`.\r +\r +Steps involved:\r +\r +- Download the masked fasta file from Ensembl.\r +- Extract the coordinates of the masked regions into a BED file.\r +- Compress and index the BED file with `bgzip` and `tabix`.\r +\r +## Quick Start\r +\r +1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=22.04.0`)\r +\r +2. 
Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) (you can follow [this tutorial](https://singularity-tutorial.github.io/01-installation/)), [`Podman`](https://podman.io/), [`Shifter`](https://nersc.gitlab.io/development/shifter/how-to-use/) or [`Charliecloud`](https://hpc.github.io/charliecloud/) for full pipeline reproducibility _(you can use [`Conda`](https://conda.io/miniconda.html) both to install Nextflow itself and also to manage software within pipelines. Please only use it within pipelines as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_.\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```bash\r + nextflow run sanger-tol/ensemblrepeatdownload -profile test,YOURPROFILE --outdir \r + ```\r +\r + Note that some form of configuration will be needed so that Nextflow knows how to fetch the required software. This is usually done in the form of a config profile (`YOURPROFILE` in the example command above). You can chain multiple config profiles in a comma-separated string.\r +\r + > - The pipeline comes with config profiles called `docker`, `singularity`, `podman`, `shifter`, `charliecloud` and `conda` which instruct the pipeline to use the named tool for software management. For example, `-profile test,docker`.\r + > - Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment.\r + > - If you are using `singularity`, please use the [`nf-core download`](https://nf-co.re/tools/#downloading-pipelines-for-offline-use) command to download images first, before running the pipeline. 
Setting the [`NXF_SINGULARITY_CACHEDIR` or `singularity.cacheDir`](https://www.nextflow.io/docs/latest/singularity.html?#singularity-docker-hub) Nextflow options enables you to store and re-use the images from a central location for future pipeline runs.\r + > - If you are using `conda`, it is highly recommended to use the [`NXF_CONDA_CACHEDIR` or `conda.cacheDir`](https://www.nextflow.io/docs/latest/conda.html) settings to store the environments in a central location for future pipeline runs.\r +\r +4. Start running your own analysis!\r +\r + ```console\r + nextflow run sanger-tol/ensemblrepeatdownload --input $PWD/assets/samplesheet.csv --outdir -profile \r + ```\r +\r +## Documentation\r +\r +The sanger-tol/ensemblrepeatdownload pipeline comes with documentation about the pipeline [usage](docs/usage.md) and [output](docs/output.md).\r +\r +## Credits\r +\r +sanger-tol/ensemblrepeatdownload was originally written by @muffato.\r +\r +## Contributions and Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +For further information or help, don't hesitate to get in touch on the [Slack `#pipelines` channel](https://sangertreeoflife.slack.com/channels/pipelines). 
Please [create an issue](https://github.com/sanger-tol/ensemblrepeatdownload/issues/new/choose) on GitHub if you are not on the Sanger slack channel.\r +\r +## Citations\r +\r +If you use sanger-tol/ensemblrepeatdownload for your analysis, please cite it using the following doi: [10.5281/zenodo.7183380](https://doi.org/10.5281/zenodo.7183380)\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +This pipeline uses code and infrastructure developed and maintained by the [nf-core](https://nf-co.re) community, reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "sanger-tol/ensemblrepeatdownload v1.0.0 - Gwaihir the Windlord" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/667?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. 
The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. 
With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.55.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_pmx_tutorial" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)" ; + schema1:sdDatePublished "2024-07-12 13:24:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/55/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 48363 ; + schema1:creator , + ; + schema1:dateCreated "2021-07-01T10:33:20Z" ; + schema1:dateModified "2022-06-10T09:43:14Z" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. 
With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/55?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/55?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. 
\r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +For this workflow:\r +\r +Inputs:\r +* assembled-genome.fasta\r +* hard-repeat-masked-genome.fasta\r +* If using the mRNAs option, the additional inputs required are .cdna, .pro and .dat files. \r +\r +What it does:\r +* This workflow splits the input genomes into single sequences (to decrease computation time), annotates using FgenesH++, and merges the output. \r +\r +Outputs:\r +* genome annotation in gff3 format\r +* fasta files of mRNAs, cDNAs and proteins\r +* Busco report\r +\r +\r +\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.881.4" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Fgenesh annotation -TSI" ; + schema1:sdDatePublished "2024-07-12 13:18:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/881/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 18063 ; + schema1:creator ; + schema1:dateCreated "2024-06-18T09:46:52Z" ; + schema1:dateModified "2024-06-18T09:49:24Z" ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. 
\r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +For this workflow:\r +\r +Inputs:\r +* assembled-genome.fasta\r +* hard-repeat-masked-genome.fasta\r +* If using the mRNAs option, the additional inputs required are .cdna, .pro and .dat files. \r +\r +What it does:\r +* This workflow splits the input genomes into single sequences (to decrease computation time), annotates using FgenesH++, and merges the output. \r +\r +Outputs:\r +* genome annotation in gff3 format\r +* fasta files of mRNAs, cDNAs and proteins\r +* Busco report\r +\r +\r +\r +\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/881?version=3" ; + schema1:isPartOf ; + schema1:keywords "TSI-annotation" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Fgenesh annotation -TSI" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/881?version=4" ; + schema1:version 4 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 189968 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.283.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Amber Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/283/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6933 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-17T10:27:30Z" ; + schema1:dateModified "2022-04-11T09:29:43Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/283?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Amber Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_md_setup/python/workflow.py" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + schema1:datePublished "2022-10-19T12:24:27.604420" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "cutandrun/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.11" . + + a schema1:Dataset ; + schema1:datePublished "2023-12-21T09:59:05.410152" ; + schema1:hasPart , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/fastq-to-matrix-10x" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + ; + schema1:name "fastq-to-matrix-10x/scrna-seq-fastq-to-matrix-10x-cellplex" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.19" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.19" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + , + ; + schema1:name "fastq-to-matrix-10x/scrna-seq-fastq-to-matrix-10x-v3" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/fastq-to-matrix-10x" ; + schema1:version "0.1" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """\r +\r +![Logo](https://cbg-ethz.github.io/V-pipe/img/logo.svg)\r +\r +[![bio.tools](https://img.shields.io/badge/bio-tools-blue.svg)](https://bio.tools/V-Pipe)\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥6.8.1-blue.svg)](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe)\r +[![Deploy Docker image](https://github.com/cbg-ethz/V-pipe/actions/workflows/deploy-docker.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe)\r +[![Tests](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml)\r +[![Mega-Linter](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml)\r +[![License: Apache-2.0](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\r +\r +V-pipe is a workflow designed for the analysis of next generation sequencing (NGS) data from viral pathogens. It produces a number of results in a curated format (e.g., consensus sequences, SNV calls, local/global haplotypes).\r +V-pipe is written using the Snakemake workflow management system.\r +\r +## Usage\r +\r +Different ways of initializing V-pipe are presented below. We strongly encourage you to deploy it [using the quick install script](#using-quick-install-script), as this is our preferred method.\r +\r +To configure V-pipe refer to the documentation present in [config/README.md](config/README.md).\r +\r +V-pipe expects the input samples to be organized in a [two-level](config/README.md#samples) directory hierarchy,\r +and the sequencing reads must be provided in a sub-folder named `raw_data`. 
Further details can be found on the [website](https://cbg-ethz.github.io/V-pipe/usage/).\r +Check the utils subdirectory for [mass-importers tools](utils/README.md#samples-mass-importers) that can assist you in generating this hierarchy.\r +\r +We provide [virus-specific base configuration files](config/README.md#virus-base-config) which contain handy defaults for, e.g., HIV and SARS-CoV-2. Set the virus in the general section of the configuration file:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +```\r +\r +Also see [snakemake's documentation](https://snakemake.readthedocs.io/en/stable/executing/cli.html) to learn more about the command-line options available when executing the workflow.\r +\r +### Using quick install script\r +\r +To deploy V-pipe, use the [installation script](utils/README.md#quick-installer) with the following parameters:\r +\r +```bash\r +curl -O 'https://raw.githubusercontent.com/cbg-ethz/V-pipe/master/utils/quick_install.sh'\r +./quick_install.sh -w work\r +```\r +\r +This script will download and install miniconda, checkout the V-pipe git repository (use `-b` to specify which branch/tag) and setup a work directory (specified with `-w`) with an executable script that will execute the workflow:\r +\r +```bash\r +cd work\r +# edit config.yaml and provide samples/ directory\r +./vpipe --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Docker\r +\r +Note: the [docker image](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe) is only setup with components to run the workflow for HIV and SARS-CoV-2 virus base configurations.\r +Using V-pipe with other viruses or configurations might require internet connectivity for additional software components.\r +\r +Create `config.yaml` or `vpipe.config` and then populate the `samples/` directory.\r +For example, the following config file could be used:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +\r +output:\r + snv: true\r + local: true\r + global: false\r + 
visualization: true\r + QA: true\r +```\r +\r +Then execute:\r +\r +```bash\r +docker run --rm -it -v $PWD:/work ghcr.io/cbg-ethz/v-pipe:master --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Snakedeploy\r +\r +First install [mamba](https://github.com/conda-forge/miniforge#mambaforge), then create and activate an environment with Snakemake and Snakedeploy:\r +\r +```bash\r +mamba create -c bioconda -c conda-forge --name snakemake snakemake snakedeploy\r +conda activate snakemake\r +```\r +\r +Snakemake's [official workflow installer Snakedeploy](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe) can now be used:\r +\r +```bash\r +snakedeploy deploy-workflow https://github.com/cbg-ethz/V-pipe --tag master .\r +# edit config/config.yaml and provide samples/ directory\r +snakemake --use-conda --jobs 4 --printshellcmds --dry-run\r +```\r +\r +## Dependencies\r +\r +- **[Conda](https://conda.io/docs/index.html)**\r +\r + Conda is a cross-platform package management system and an environment manager application. Snakemake uses mamba as a package manager.\r +\r +- **[Snakemake](https://snakemake.readthedocs.io/)**\r +\r + Snakemake is the central workflow and dependency manager of V-pipe. It determines the order in which individual tools are invoked and checks that programs do not exit unexpectedly.\r +\r +- **[VICUNA](https://www.broadinstitute.org/viral-genomics/vicuna)**\r +\r + VICUNA is a *de novo* assembly software designed for populations with high mutation rates. It is used to build an initial reference for mapping reads with ngshmmalign aligner when a `references/cohort_consensus.fasta` file is not provided. 
Further details can be found in the [wiki](https://github.com/cbg-ethz/V-pipe/wiki/getting-started#input-files) pages.\r +\r +### Computational tools\r +\r +Other dependencies are managed by using isolated conda environments per rule, and below we list some of the computational tools integrated in V-pipe:\r +\r +- **[FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)**\r +\r + FastQC gives an overview of the raw sequencing data. Flowcells that have been overloaded or otherwise fail during sequencing can easily be determined with FastQC.\r +\r +- **[PRINSEQ](http://prinseq.sourceforge.net/)**\r +\r + Trimming and clipping of reads is performed by PRINSEQ. It is currently the most versatile raw read processor with many customization options.\r +\r +- **[ngshmmalign](https://github.com/cbg-ethz/ngshmmalign)**\r +\r + We perform the alignment of the curated NGS data using our custom ngshmmalign that takes structural variants into account. It produces multiple consensus sequences that include either majority bases or ambiguous bases.\r +\r +- **[bwa](https://github.com/lh3/bwa)**\r +\r + In order to detect specific cross-contaminations with other probes, the Burrows-Wheeler aligner is used. It quickly yields estimates for foreign genomic material in an experiment.\r + Additionally, It can be used as an alternative aligner to ngshmmalign.\r +\r +- **[MAFFT](http://mafft.cbrc.jp/alignment/software/)**\r +\r + To standardise multiple samples to the same reference genome (say HXB2 for HIV-1), the multiple sequence aligner MAFFT is employed. The multiple sequence alignment helps in determining regions of low conservation and thus makes standardisation of alignments more robust.\r +\r +- **[Samtools and bcftools](https://www.htslib.org/)**\r +\r + The Swiss Army knife of alignment postprocessing and diagnostics. 
bcftools is also used to generate consensus sequence with indels.\r +\r +- **[SmallGenomeUtilities](https://github.com/cbg-ethz/smallgenomeutilities)**\r +\r + We perform genomic liftovers to standardised reference genomes using our in-house developed python library of utilities for rewriting alignments.\r +\r +- **[ShoRAH](https://github.com/cbg-ethz/shorah)**\r +\r + ShoRAh performs SNV calling and local haplotype reconstruction by using bayesian clustering.\r +\r +- **[LoFreq](https://csb5.github.io/lofreq/)**\r +\r + LoFreq (version 2) is SNVs and indels caller from next-generation sequencing data, and can be used as an alternative engine for SNV calling.\r +\r +- **[SAVAGE](https://bitbucket.org/jbaaijens/savage) and [Haploclique](https://github.com/cbg-ethz/haploclique)**\r +\r + We use HaploClique or SAVAGE to perform global haplotype reconstruction for heterogeneous viral populations by using an overlap graph.\r +\r +## Citation\r +\r +If you use this software in your research, please cite:\r +\r +Posada-Céspedes S., Seifert D., Topolsky I., Jablonski K.P., Metzner K.J., and Beerenwinkel N. 2021.\r +"V-pipe: a computational pipeline for assessing viral genetic diversity from high-throughput sequencing data."\r +_Bioinformatics_, January. 
doi:[10.1093/bioinformatics/btab015](https://doi.org/10.1093/bioinformatics/btab015).\r +\r +## Contributions\r +\r +- [Ivan Topolsky\\* ![orcid]](https://orcid.org/0000-0002-7561-0810), [![github]](https://github.com/dryak)\r +- [Kim Philipp Jablonski ![orcid]](https://orcid.org/0000-0002-4166-4343), [![github]](https://github.com/kpj)\r +- [Lara Fuhrmann ![orcid]](https://orcid.org/0000-0001-6405-0654), [![github]](https://github.com/LaraFuhrmann)\r +- [Uwe Schmitt ![orcid]](https://orcid.org/0000-0002-4658-0616), [![github]](https://github.com/uweschmitt)\r +- [Monica Dragan ![orcid]](https://orcid.org/0000-0002-7719-5892), [![github]](https://github.com/monicadragan)\r +- [Susana Posada Céspedes ![orcid]](https://orcid.org/0000-0002-7459-8186), [![github]](https://github.com/sposadac)\r +- [David Seifert ![orcid]](https://orcid.org/0000-0003-4739-5110), [![github]](https://github.com/SoapZA)\r +- Tobias Marschall\r +- [Niko Beerenwinkel\\*\\* ![orcid]](https://orcid.org/0000-0002-0573-6119)\r +\r +\\* software maintainer ;\r +\\** group leader\r +\r +[github]: https://cbg-ethz.github.io/V-pipe/img/mark-github.svg\r +[orcid]: https://cbg-ethz.github.io/V-pipe/img/ORCIDiD_iconvector.svg\r +\r +## Contact\r +\r +We encourage users to use the [issue tracker](https://github.com/cbg-ethz/V-pipe/issues). For further enquiries, you can also contact the V-pipe Dev Team .\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/301?version=1" ; + schema1:isBasedOn "https://github.com/cbg-ethz/V-pipe.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for V-pipe (main multi-virus version)" ; + schema1:sdDatePublished "2024-07-12 13:18:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/301/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1308 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-30T16:50:14Z" ; + schema1:dateModified "2022-03-30T16:50:14Z" ; + schema1:description """\r +\r +![Logo](https://cbg-ethz.github.io/V-pipe/img/logo.svg)\r +\r +[![bio.tools](https://img.shields.io/badge/bio-tools-blue.svg)](https://bio.tools/V-Pipe)\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥6.8.1-blue.svg)](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe)\r +[![Deploy Docker image](https://github.com/cbg-ethz/V-pipe/actions/workflows/deploy-docker.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe)\r +[![Tests](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml)\r +[![Mega-Linter](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml)\r +[![License: Apache-2.0](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\r +\r +V-pipe is a workflow designed for the analysis of next generation sequencing (NGS) data from viral pathogens. It produces a number of results in a curated format (e.g., consensus sequences, SNV calls, local/global haplotypes).\r +V-pipe is written using the Snakemake workflow management system.\r +\r +## Usage\r +\r +Different ways of initializing V-pipe are presented below. 
We strongly encourage you to deploy it [using the quick install script](#using-quick-install-script), as this is our preferred method.\r +\r +To configure V-pipe refer to the documentation present in [config/README.md](config/README.md).\r +\r +V-pipe expects the input samples to be organized in a [two-level](config/README.md#samples) directory hierarchy,\r +and the sequencing reads must be provided in a sub-folder named `raw_data`. Further details can be found on the [website](https://cbg-ethz.github.io/V-pipe/usage/).\r +Check the utils subdirectory for [mass-importers tools](utils/README.md#samples-mass-importers) that can assist you in generating this hierarchy.\r +\r +We provide [virus-specific base configuration files](config/README.md#virus-base-config) which contain handy defaults for, e.g., HIV and SARS-CoV-2. Set the virus in the general section of the configuration file:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +```\r +\r +Also see [snakemake's documentation](https://snakemake.readthedocs.io/en/stable/executing/cli.html) to learn more about the command-line options available when executing the workflow.\r +\r +### Using quick install script\r +\r +To deploy V-pipe, use the [installation script](utils/README.md#quick-installer) with the following parameters:\r +\r +```bash\r +curl -O 'https://raw.githubusercontent.com/cbg-ethz/V-pipe/master/utils/quick_install.sh'\r +./quick_install.sh -w work\r +```\r +\r +This script will download and install miniconda, checkout the V-pipe git repository (use `-b` to specify which branch/tag) and setup a work directory (specified with `-w`) with an executable script that will execute the workflow:\r +\r +```bash\r +cd work\r +# edit config.yaml and provide samples/ directory\r +./vpipe --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Docker\r +\r +Note: the [docker image](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe) is only setup with components to run the workflow for HIV and 
SARS-CoV-2 virus base configurations.\r +Using V-pipe with other viruses or configurations might require internet connectivity for additional software components.\r +\r +Create `config.yaml` or `vpipe.config` and then populate the `samples/` directory.\r +For example, the following config file could be used:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +\r +output:\r + snv: true\r + local: true\r + global: false\r + visualization: true\r + QA: true\r +```\r +\r +Then execute:\r +\r +```bash\r +docker run --rm -it -v $PWD:/work ghcr.io/cbg-ethz/v-pipe:master --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Snakedeploy\r +\r +First install [mamba](https://github.com/conda-forge/miniforge#mambaforge), then create and activate an environment with Snakemake and Snakedeploy:\r +\r +```bash\r +mamba create -c bioconda -c conda-forge --name snakemake snakemake snakedeploy\r +conda activate snakemake\r +```\r +\r +Snakemake's [official workflow installer Snakedeploy](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe) can now be used:\r +\r +```bash\r +snakedeploy deploy-workflow https://github.com/cbg-ethz/V-pipe --tag master .\r +# edit config/config.yaml and provide samples/ directory\r +snakemake --use-conda --jobs 4 --printshellcmds --dry-run\r +```\r +\r +## Dependencies\r +\r +- **[Conda](https://conda.io/docs/index.html)**\r +\r + Conda is a cross-platform package management system and an environment manager application. Snakemake uses mamba as a package manager.\r +\r +- **[Snakemake](https://snakemake.readthedocs.io/)**\r +\r + Snakemake is the central workflow and dependency manager of V-pipe. It determines the order in which individual tools are invoked and checks that programs do not exit unexpectedly.\r +\r +- **[VICUNA](https://www.broadinstitute.org/viral-genomics/vicuna)**\r +\r + VICUNA is a *de novo* assembly software designed for populations with high mutation rates. 
It is used to build an initial reference for mapping reads with ngshmmalign aligner when a `references/cohort_consensus.fasta` file is not provided. Further details can be found in the [wiki](https://github.com/cbg-ethz/V-pipe/wiki/getting-started#input-files) pages.\r +\r +### Computational tools\r +\r +Other dependencies are managed by using isolated conda environments per rule, and below we list some of the computational tools integrated in V-pipe:\r +\r +- **[FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)**\r +\r + FastQC gives an overview of the raw sequencing data. Flowcells that have been overloaded or otherwise fail during sequencing can easily be determined with FastQC.\r +\r +- **[PRINSEQ](http://prinseq.sourceforge.net/)**\r +\r + Trimming and clipping of reads is performed by PRINSEQ. It is currently the most versatile raw read processor with many customization options.\r +\r +- **[ngshmmalign](https://github.com/cbg-ethz/ngshmmalign)**\r +\r + We perform the alignment of the curated NGS data using our custom ngshmmalign that takes structural variants into account. It produces multiple consensus sequences that include either majority bases or ambiguous bases.\r +\r +- **[bwa](https://github.com/lh3/bwa)**\r +\r + In order to detect specific cross-contaminations with other probes, the Burrows-Wheeler aligner is used. It quickly yields estimates for foreign genomic material in an experiment.\r + Additionally, It can be used as an alternative aligner to ngshmmalign.\r +\r +- **[MAFFT](http://mafft.cbrc.jp/alignment/software/)**\r +\r + To standardise multiple samples to the same reference genome (say HXB2 for HIV-1), the multiple sequence aligner MAFFT is employed. The multiple sequence alignment helps in determining regions of low conservation and thus makes standardisation of alignments more robust.\r +\r +- **[Samtools and bcftools](https://www.htslib.org/)**\r +\r + The Swiss Army knife of alignment postprocessing and diagnostics. 
bcftools is also used to generate consensus sequence with indels.\r +\r +- **[SmallGenomeUtilities](https://github.com/cbg-ethz/smallgenomeutilities)**\r +\r + We perform genomic liftovers to standardised reference genomes using our in-house developed python library of utilities for rewriting alignments.\r +\r +- **[ShoRAH](https://github.com/cbg-ethz/shorah)**\r +\r + ShoRAh performs SNV calling and local haplotype reconstruction by using bayesian clustering.\r +\r +- **[LoFreq](https://csb5.github.io/lofreq/)**\r +\r + LoFreq (version 2) is SNVs and indels caller from next-generation sequencing data, and can be used as an alternative engine for SNV calling.\r +\r +- **[SAVAGE](https://bitbucket.org/jbaaijens/savage) and [Haploclique](https://github.com/cbg-ethz/haploclique)**\r +\r + We use HaploClique or SAVAGE to perform global haplotype reconstruction for heterogeneous viral populations by using an overlap graph.\r +\r +## Citation\r +\r +If you use this software in your research, please cite:\r +\r +Posada-Céspedes S., Seifert D., Topolsky I., Jablonski K.P., Metzner K.J., and Beerenwinkel N. 2021.\r +"V-pipe: a computational pipeline for assessing viral genetic diversity from high-throughput sequencing data."\r +_Bioinformatics_, January. 
doi:[10.1093/bioinformatics/btab015](https://doi.org/10.1093/bioinformatics/btab015).\r +\r +## Contributions\r +\r +- [Ivan Topolsky\\* ![orcid]](https://orcid.org/0000-0002-7561-0810), [![github]](https://github.com/dryak)\r +- [Kim Philipp Jablonski ![orcid]](https://orcid.org/0000-0002-4166-4343), [![github]](https://github.com/kpj)\r +- [Lara Fuhrmann ![orcid]](https://orcid.org/0000-0001-6405-0654), [![github]](https://github.com/LaraFuhrmann)\r +- [Uwe Schmitt ![orcid]](https://orcid.org/0000-0002-4658-0616), [![github]](https://github.com/uweschmitt)\r +- [Monica Dragan ![orcid]](https://orcid.org/0000-0002-7719-5892), [![github]](https://github.com/monicadragan)\r +- [Susana Posada Céspedes ![orcid]](https://orcid.org/0000-0002-7459-8186), [![github]](https://github.com/sposadac)\r +- [David Seifert ![orcid]](https://orcid.org/0000-0003-4739-5110), [![github]](https://github.com/SoapZA)\r +- Tobias Marschall\r +- [Niko Beerenwinkel\\*\\* ![orcid]](https://orcid.org/0000-0002-0573-6119)\r +\r +\\* software maintainer ;\r +\\** group leader\r +\r +[github]: https://cbg-ethz.github.io/V-pipe/img/mark-github.svg\r +[orcid]: https://cbg-ethz.github.io/V-pipe/img/ORCIDiD_iconvector.svg\r +\r +## Contact\r +\r +We encourage users to use the [issue tracker](https://github.com/cbg-ethz/V-pipe/issues). For further enquiries, you can also contact the V-pipe Dev Team .\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/301?version=4" ; + schema1:keywords "Alignment, Assembly, covid-19, Genomics, INDELs, rna, SNPs, variant_calling, workflow" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "V-pipe (main multi-virus version)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/301?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=18" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-07-12 13:19:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=18" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17485 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:16Z" ; + schema1:dateModified "2024-06-11T12:55:16Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=18" ; + schema1:version 18 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/976?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/cutandrun" ; + schema1:sdDatePublished "2024-07-12 13:22:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/976/ro_crate?version=7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15235 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:47Z" ; + schema1:dateModified "2024-06-11T12:54:47Z" ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/976?version=7" ; + schema1:keywords "cutandrun, cutandrun-seq, cutandtag, cutandtag-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/cutandrun" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/976?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """This workflow demonstrates the usage of EODIE, a toolkit to extract object based timeseries information from Earth Observation data.\r +\r +EODIE is a toolkit to extract object based timeseries information from Earth Observation data.\r +\r +The EODIE code can be found on [Gitlab](https://gitlab.com/fgi_nls/public/EODIE) .\r +\r +The goal of EODIE is to ease the extraction of time series information at object level. Today, vast amounts of Earth Observation data are available to the users via for example earth explorer or scihub. 
Often, not the whole images are needed for exploitation, but only the timeseries of a certain feature on object level. Objects may be polygons depicting agricultural field parcels, forest plots, or areas of a certain land cover type.\r +\r +EODIE takes the objects in as polygons in a shapefile as well as the timeframe of interest and the features (eg vegetation indices) to be extracted. The output is a per polygon timeseries of the selected features over the timeframe of interest.\r +\r +**Online documentation**\r +EODIE documentation can be found [here](https://eodie.readthedocs.io/en/latest/).\r +\r +**Abstract CWL**\r +Automatically generated from the Galaxy workflow file: Workflow constructed from history 'EODIE Sentinel'""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.274.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy workflow demonstrating the usage of EODIE Galaxy Tool" ; + schema1:sdDatePublished "2024-07-12 13:36:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/274/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1581 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8775 ; + schema1:creator ; + schema1:dateCreated "2022-03-11T12:32:55Z" ; + schema1:dateModified "2023-01-16T13:57:33Z" ; + schema1:description """This workflow demonstrates the usage of EODIE, a toolkit to extract object based timeseries information from Earth Observation data.\r +\r +EODIE is a toolkit to extract object based timeseries information from Earth Observation data.\r +\r +The EODIE code can be found on [Gitlab](https://gitlab.com/fgi_nls/public/EODIE) .\r +\r +The goal of EODIE is to ease the extraction of time series information at object level. Today, vast amounts of Earth Observation data are available to the users via for example earth explorer or scihub. Often, not the whole images are needed for exploitation, but only the timeseries of a certain feature on object level. Objects may be polygons depicting agricultural field parcels, forest plots, or areas of a certain land cover type.\r +\r +EODIE takes the objects in as polygons in a shapefile as well as the timeframe of interest and the features (eg vegetation indices) to be extracted. 
The output is a per polygon timeseries of the selected features over the timeframe of interest.\r +\r +**Online documentation**\r +EODIE documentation can be found [here](https://eodie.readthedocs.io/en/latest/).\r +\r +**Abstract CWL**\r +Automatically generated from the Galaxy workflow file: Workflow constructed from history 'EODIE Sentinel'""" ; + schema1:image ; + schema1:keywords "earth observation, copernicus, ndvi, sentinel-2 data" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy workflow demonstrating the usage of EODIE Galaxy Tool" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/274?version=1" ; + schema1:version 1 ; + ns1:input , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 4128 . + + a schema1:Dataset ; + schema1:datePublished "2021-12-06T14:19:09.191770" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/fragment-based-docking-scoring" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "fragment-based-docking-scoring/main" ; + schema1:sdDatePublished "2021-12-07 03:00:58 +0000" ; + schema1:softwareVersion "v0.1.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "RNA-RNA interactome analysis using ChiRA tools suite. The aligner used is CLAN." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/67?version=1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for RNA-RNA interactome analysis using CLAN" ; + schema1:sdDatePublished "2024-07-12 13:37:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/67/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 31282 ; + schema1:creator ; + schema1:dateCreated "2020-11-03T19:49:27Z" ; + schema1:dateModified "2023-02-13T14:06:45Z" ; + schema1:description "RNA-RNA interactome analysis using ChiRA tools suite. The aligner used is CLAN." ; + schema1:keywords "rna, Transcriptomics" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "RNA-RNA interactome analysis using CLAN" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/67?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 28082 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 3 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "energy.selection" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "genion.group" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1044 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "ions.mdp" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 971 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "minim.mdp" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 109917 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1aki.pdb" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2998215 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1u3m.pdb" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2918025 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1xyw.pdb" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88246 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1aki.gro" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577586 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1aki.top" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1280044 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1aki_em.tpr" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1aki_em_energy.edr" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1279304 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1aki_ions.tpr" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88246 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1aki_newbox.gro" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2142 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1aki_potential.xvg" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1525186 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1aki_solv.gro" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1524475 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1aki_solv_ions.gro" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 82046 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1u3m.gro" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 537920 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1u3m.top" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1416272 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1u3m_em.tpr" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1u3m_em_energy.edr" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1415196 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1u3m_ions.tpr" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 82046 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1u3m_newbox.gro" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2160 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1u3m_potential.xvg" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1837046 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1u3m_solv.gro" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1836965 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1u3m_solv_ions.gro" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 80112 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1xyw.gro" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 523996 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1xyw.top" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1408872 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1xyw_em.tpr" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1xyw_em_energy.edr" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1407844 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1xyw_ions.tpr" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 80112 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1xyw_newbox.gro" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2144 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1xyw_potential.xvg" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1845237 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1xyw_solv.gro" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1845066 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1xyw_solv_ions.gro" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Workflow converts one or multiple bam files back to the fastq format" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/968?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/bamtofastq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for qbic-pipelines/bamtofastq" ; + schema1:sdDatePublished "2024-07-12 13:22:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/968/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4134 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:44Z" ; + schema1:dateModified "2024-06-11T12:54:44Z" ; + schema1:description "Workflow converts one or multiple bam files back to the fastq format" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/968?version=5" ; + schema1:keywords "bamtofastq, Conversion, cramtofastq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "qbic-pipelines/bamtofastq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/968?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/963?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/airrflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/airrflow" ; + schema1:sdDatePublished "2024-07-12 13:22:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/963/ro_crate?version=10" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11664 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:37Z" ; + schema1:dateModified "2024-06-11T12:54:37Z" ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/963?version=13" ; + schema1:keywords "airr, b-cell, immcantation, immunorepertoire, repseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/airrflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/963?version=10" ; + schema1:version 10 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1023?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/smrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/smrnaseq" ; + schema1:sdDatePublished "2024-07-12 13:18:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1023/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8315 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:18Z" ; + schema1:dateModified "2024-06-11T12:55:18Z" ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/1023?version=10" ; + schema1:keywords "small-rna, smrna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/smrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1023?version=3" ; + schema1:version 3 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 2317017 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description "Basic workflows inspired by the Nanopolish tutorials" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/50?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ONT --Tutorial-Nanopolish-variants" ; + schema1:sdDatePublished "2024-07-12 13:37:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/50/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9930 ; + schema1:creator ; + schema1:dateCreated "2020-08-05T12:52:50Z" ; + schema1:dateModified "2023-02-13T14:06:46Z" ; + schema1:description "Basic workflows inspired by the Nanopolish tutorials" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ONT --Tutorial-Nanopolish-variants" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/50?version=1" ; + schema1:version 1 ; + ns1:input , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=12" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-07-12 13:18:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=12" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10551 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:10Z" ; + schema1:dateModified "2024-06-11T12:55:10Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=12" ; + schema1:version 12 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1021?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/scrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/scrnaseq" ; + schema1:sdDatePublished "2024-07-12 13:19:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1021/ro_crate?version=7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9948 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:17Z" ; + schema1:dateModified "2024-06-11T12:55:17Z" ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:keywords "10x-genomics, 10xgenomics, alevin, bustools, Cellranger, kallisto, rna-seq, single-cell, star-solo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/scrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1021?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """CWL version of the md_list.py workflow for HPC. This performs a system setup and runs a molecular dynamics simulation on the structure passed to this workflow. This workflow uses the md\\_gather.cwl sub-workflow to gather the outputs together to return these.\r +To work with more than one structure this workflow can be called from either the md\\_launch.cwl workflow, or the md\\_launch\\_mutate.cwl workflow. These use scatter for parallelising the workflow. md\\_launch.cwl operates on a list of individual input molecule files. md\\_launch\\_mutate.cwl operates on a single input molecule file, and a list of mutations to apply to that molecule. 
Within that list of mutations, a value of 'WT' will indicate that the molecule should be simulated without any mutation being applied.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.121.1" ; + schema1:isBasedOn "https://github.com/douglowe/biobb_hpc_cwl_md_list" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Molecular Dynamics Simulation" ; + schema1:sdDatePublished "2024-07-12 13:37:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/121/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9242 ; + schema1:dateCreated "2021-05-20T14:41:19Z" ; + schema1:dateModified "2023-01-16T13:49:52Z" ; + schema1:description """CWL version of the md_list.py workflow for HPC. This performs a system setup and runs a molecular dynamics simulation on the structure passed to this workflow. This workflow uses the md\\_gather.cwl sub-workflow to gather the outputs together to return these.\r +To work with more than one structure this workflow can be called from either the md\\_launch.cwl workflow, or the md\\_launch\\_mutate.cwl workflow. These use scatter for parallelising the workflow. md\\_launch.cwl operates on a list of individual input molecule files. md\\_launch\\_mutate.cwl operates on a single input molecule file, and a list of mutations to apply to that molecule. 
Within that list of mutations, a value of 'WT' will indicate that the molecule should be simulated without any mutation being applied.\r +""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Molecular Dynamics Simulation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/121?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + ; + ns1:output . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 34463 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Pre-processing of mass spectrometry-based metabolomics data with quantification and identification based on MS1 and MS2 data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/57?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/metaboigniter" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/metaboigniter" ; + schema1:sdDatePublished "2024-07-12 13:22:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/57/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5375 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:00Z" ; + schema1:dateModified "2024-06-11T12:55:00Z" ; + schema1:description "Pre-processing of mass spectrometry-based metabolomics data with quantification and identification based on MS1 and MS2 data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/57?version=3" ; + schema1:keywords "Metabolomics, identification, quantification, mass-spectrometry, ms1, ms2" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/metaboigniter" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/57?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + schema1:datePublished "2021-09-14T16:21:52.393735" ; + schema1:description "Cryo-EM processing workflow" ; + schema1:hasPart , + , + ; + schema1:image "workflow.svg" ; + schema1:keywords "cryoem" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "testEntryTitleNew" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:image ; + schema1:name "workflow" ; + schema1:programmingLanguage . + + a schema1:MediaObject . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/989?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/hic" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hic" ; + schema1:sdDatePublished "2024-07-12 13:21:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/989/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3924 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:54Z" ; + schema1:dateModified "2024-06-11T12:54:54Z" ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/989?version=7" ; + schema1:keywords "chromosome-conformation-capture, Hi-C" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hic" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/989?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Scipion is a workflow engine mostly for Cryo-Electron Microscopy image processing. In this extremely simple workflow, we load the Relion 3.0 tutorial data and process it to 2.9A resolution." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/69?version=1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Scipion Tutorial example reaching 2.9A resolution" ; + schema1:sdDatePublished "2024-07-12 13:37:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/69/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16475 ; + schema1:dateCreated "2020-11-19T13:29:51Z" ; + schema1:dateModified "2023-06-27T12:39:11Z" ; + schema1:description "Scipion is a workflow engine mostly for Cryo-Electron Microscopy image processing. In this extremely simple workflow, we load the Relion 3.0 tutorial data and process it to 2.9A resolution." 
; + schema1:keywords "Electron microscopy, image processing, single particle analysis" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Scipion Tutorial example reaching 2.9A resolution" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/69?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for the analysis of crispr data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/975?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/crisprseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/crisprseq" ; + schema1:sdDatePublished "2024-07-12 13:18:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/975/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10090 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:46Z" ; + schema1:dateModified "2024-06-11T12:54:46Z" ; + schema1:description "Pipeline for the analysis of crispr data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/975?version=4" ; + schema1:keywords "crispr, crispr-analysis, crispr-cas, NGS" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/crisprseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/975?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1027?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/viralrecon" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/viralrecon" ; + schema1:sdDatePublished "2024-07-12 13:18:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1027/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9965 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:20Z" ; + schema1:dateModified "2024-06-11T12:55:20Z" ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1027?version=10" ; + schema1:keywords "Amplicon, ARTIC, Assembly, covid-19, covid19, illumina, long-read-sequencing, Metagenomics, nanopore, ONT, oxford-nanopore, SARS-CoV-2, variant-calling, viral, Virus" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/viralrecon" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1027?version=5" ; + schema1:version 5 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# TronFlow BAM preprocessing pipeline\r +\r +![GitHub tag (latest SemVer)](https://img.shields.io/github/v/release/tron-bioinformatics/tronflow-bam-preprocessing?sort=semver)\r +[![Automated tests](https://github.com/TRON-Bioinformatics/tronflow-bam-preprocessing/actions/workflows/automated_tests.yml/badge.svg)](https://github.com/TRON-Bioinformatics/tronflow-bam-preprocessing/actions/workflows/automated_tests.yml)\r +[![DOI](https://zenodo.org/badge/358400957.svg)](https://zenodo.org/badge/latestdoi/358400957)\r +[![License](https://img.shields.io/badge/license-MIT-green)](https://opensource.org/licenses/MIT)\r +[![Powered by Nextflow](https://img.shields.io/badge/powered%20by-Nextflow-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://www.nextflow.io/)\r +\r +The TronFlow BAM preprocessing pipeline is part of a collection of computational workflows for tumor-normal pair \r +somatic variant calling. These workflows are implemented in the Nextflow (Di Tommaso, 2017) framework.\r +\r +Find the documentation here [![Documentation Status](https://readthedocs.org/projects/tronflow-docs/badge/?version=latest)](https://tronflow-docs.readthedocs.io/en/latest/?badge=latest)\r +\r +\r +The aim of this workflow is to preprocess BAM files based on Picard and GATK (DePristo, 2011) best practices.\r +\r +\r +## Background\r +\r +In order to have a variant calling ready BAM file there are a number of operations that need to be applied on the BAM. 
\r +This pipeline depends on the particular variant caller, but there are some common operations.\r +\r +GATK has been providing a well known best practices document on BAM preprocessing, the latest best practices for \r +GATK4 (https://software.broadinstitute.org/gatk/best-practices/workflow?id=11165) does not perform anymore realignment around indels as opposed to best practices for GATK3 (https://software.broadinstitute.org/gatk/documentation/article?id=3238). This pipeline is based on both Picard and GATK. These best practices have been implemented a number of times, see for instance this implementation in Workflow Definition Language https://github.com/gatk-workflows/gatk4-data-processing/blob/master/processing-for-variant-discovery-gatk4.wdl.\r +\r +\r +## Objectives\r +\r +We aim at providing a single implementation of the BAM preprocessing pipeline that can be used across different \r +use cases. \r +For this purpose there are some required steps and some optional steps. \r +\r +The input can be either a tab-separated values file (`--input_files`) where each line corresponds to one input BAM or a single BAM (`--input_bam` and `--input_name`).\r +\r +## Implementation\r +\r +Steps:\r +\r +* **Clean BAM**. Sets the mapping quality to 0 for all unmapped reads and avoids soft clipping going beyond the reference genome boundaries. Implemented in Picard\r +* **Reorder chromosomes**. Makes the chromosomes in the BAM follow the same order as the reference genome. Implemented in Picard\r +* **Add read groups**. GATK requires that some headers are adde to the BAM, also we want to flag somehow the normal and tumor BAMs in the header as some callers, such as Mutect2 require it. Implemented in Picard.\r +* **Mark duplicates** (optional). Identify the PCR and the optical duplications and marks those reads. This uses the parallelized version on Spark, it is reported to scale linearly up to 16 CPUs.\r +* **Realignment around indels** (optional). 
This procedure is important for locus based variant callers, but for any variant caller doing haplotype assembly it is not needed. This is computing intensive as it first finds regions for realignment where there are indication of indels and then it performs a local realignment over those regions. Implemented in GATK3, deprecated in GATK4\r +* **Base Quality Score Recalibration (BQSR)** (optional). It aims at correcting systematic errors in the sequencer when assigning the base call quality errors, as these scores are used by variant callers it improves variant calling in some situations. Implemented in GATK4\r +* **Metrics** (optional). A number of metrics are obtained from the BAM file with Picard's CollectMetrics, CollectHsMetrics and samtools' coverage and depth.\r +\r +![Pipeline](figures/bam_preprocessing2.png)\r +\r +\r +## How to run it\r +\r +```\r +$ nextflow run tron-bioinformatics/tronflow-bam-preprocessing --help\r +\r +N E X T F L O W ~ version 19.07.0\r +Launching `main.nf` [intergalactic_shannon] - revision: e707c77d7b\r +\r +Usage:\r + main.nf --input_files input_files\r +\r +Input:\r + * --input_bam: the path to a single BAM (this option is not compatible with --input_files)\r + * --input_files: the path to a tab-separated values file containing in each row the sample name, sample type (eg: tumor or normal) and path to the BAM file (this option is not compatible with --input_bam)\r + Sample type will be added to the BAM header @SN sample name\r + The input file does not have header!\r + Example input file:\r + name1 tumor tumor.1.bam\r + name1 normal normal.1.bam\r + name2 tumor tumor.2.bam\r + * --reference: path to the FASTA genome reference (indexes expected *.fai, *.dict)\r +\r +Optional input:\r + * --input_name: the name of the sample. 
Only used when --input_bam is provided (default: normal)\r + * --dbsnp: path to the dbSNP VCF (required to perform BQSR)\r + * --known_indels1: path to a VCF of known indels (optional to perform realignment around indels)\r + * --known_indels2: path to a second VCF of known indels (optional to perform realignment around indels)\r + * --intervals: path to a BED file to collect coverage and HS metrics from (default: None)\r + * --collect_hs_minimum_base_quality: minimum base quality for a base to contribute coverage (default: 20).\r + * --collect_hs_minimum_mapping_quality: minimum mapping quality for a read to contribute coverage (default: 20).\r + * --skip_bqsr: optionally skip BQSR (default: false)\r + * --skip_realignment: optionally skip realignment (default: false)\r + * --skip_deduplication: optionally skip deduplication (default: false)\r + * --remove_duplicates: removes duplicate reads from output BAM instead of flagging them (default: true)\r + * --skip_metrics: optionally skip metrics (default: false)\r + * --output: the folder where to publish output (default: ./output)\r + * --platform: the platform to be added to the BAM header. 
Valid values: [ILLUMINA, SOLID, LS454, HELICOS and PACBIO] (default: ILLUMINA)\r +\r +Computational resources:\r + * --prepare_bam_cpus: (default: 3)\r + * --prepare_bam_memory: (default: 8g)\r + * --mark_duplicates_cpus: (default: 16)\r + * --mark_duplicates_memory: (default: 64g)\r + * --realignment_around_indels_cpus: (default: 2)\r + * --realignment_around_indels_memory: (default: 31g)\r + * --bqsr_cpus: (default: 3)\r + * --bqsr_memory: (default: 4g)\r + * --metrics_cpus: (default: 1)\r + * --metrics_memory: (default: 8g)\r +\r + Output:\r + * Preprocessed and indexed BAMs\r + * Tab-separated values file with the absolute paths to the preprocessed BAMs, preprocessed_bams.txt\r +\r +Optional output:\r + * Recalibration report\r + * Deduplication metrics\r + * Realignment intervals\r + * GATK multiple metrics\r + * HS metrics\r + * Horizontal and vertical coverage metrics\r +```\r +\r +### Input table\r +\r +The table with FASTQ files expects two tab-separated columns **without a header**\r +\r +| Sample name | Sample type | BAM |\r +|----------------------|---------------------------------|------------------------------|\r +| sample_1 | normal | /path/to/sample_1.normal.bam |\r +| sample_1 | tumor | /path/to/sample_1.tumor.bam |\r +| sample_2 | normal | /path/to/sample_2.normal.bam |\r +| sample_2 | tumor | /path/to/sample_2.tumor.bam |\r +\r +The values used in `sample type` are arbitrary. These will be set in the BAM header tag @RG:SM for sample. 
There may be some downstream constraints, eg: Mutect2 pipeline requires that the sample type between normal and tumor samples of the same pair are not the same.\r +\r +### References\r +\r +The BAM preprocessing workflow requires the human reference genome (`--reference`)\r +Base Quality Score Recalibration (BQSR) requires dbSNP to avoid extracting error metrics from polymorphic sites (`--dbsnp`)\r +Realignment around indels requires a set of known indels (`--known_indels1` and `--known_indels2`).\r +These resources can be fetched from the GATK bundle https://gatk.broadinstitute.org/hc/en-us/articles/360035890811-Resource-bundle.\r +\r +Optionally, in order to run Picard's CollectHsMetrics a BED file will need to be provided (`--intervals`).\r +This BED file will also be used for `samtools coverage`.\r +\r +## Troubleshooting\r +\r +### Too new Java version for MarkDuplicatesSpark\r +\r +When using Java 11 the cryptic error messsage `java.lang.IllegalArgumentException: Unsupported class file major version 55` has been observed.\r +This issue is described here and the solution is to use Java 8 https://gatk.broadinstitute.org/hc/en-us/community/posts/360056174592-MarkDuplicatesSpark-crash.\r +\r +\r +\r +## Bibliography\r +\r +* DePristo M, Banks E, Poplin R, Garimella K, Maguire J, Hartl C, Philippakis A, del Angel G, Rivas MA, Hanna M, McKenna A, Fennell T, Kernytsky A, Sivachenko A, Cibulskis K, Gabriel S, Altshuler D, Daly M. (2011). A framework for variation discovery and genotyping using next-generation DNA sequencing data. Nat Genet, 43:491-498. DOI: 10.1038/ng.806.\r +* Di Tommaso, P., Chatzou, M., Floden, E. W., Barja, P. P., Palumbo, E., & Notredame, C. (2017). Nextflow enables reproducible computational workflows. Nature Biotechnology, 35(4), 316–319. 
10.1038/nbt.3820\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/419?version=1" ; + schema1:isBasedOn "https://github.com/TRON-Bioinformatics/tronflow-bam-preprocessing" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for TronFlow BAM preprocessing pipeline" ; + schema1:sdDatePublished "2024-07-12 13:34:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/419/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4865 ; + schema1:dateCreated "2023-01-17T16:54:14Z" ; + schema1:dateModified "2023-01-17T16:54:14Z" ; + schema1:description """# TronFlow BAM preprocessing pipeline\r +\r +![GitHub tag (latest SemVer)](https://img.shields.io/github/v/release/tron-bioinformatics/tronflow-bam-preprocessing?sort=semver)\r +[![Automated tests](https://github.com/TRON-Bioinformatics/tronflow-bam-preprocessing/actions/workflows/automated_tests.yml/badge.svg)](https://github.com/TRON-Bioinformatics/tronflow-bam-preprocessing/actions/workflows/automated_tests.yml)\r +[![DOI](https://zenodo.org/badge/358400957.svg)](https://zenodo.org/badge/latestdoi/358400957)\r +[![License](https://img.shields.io/badge/license-MIT-green)](https://opensource.org/licenses/MIT)\r +[![Powered by Nextflow](https://img.shields.io/badge/powered%20by-Nextflow-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://www.nextflow.io/)\r +\r +The TronFlow BAM preprocessing pipeline is part of a collection of computational workflows for tumor-normal pair \r +somatic variant calling. 
These workflows are implemented in the Nextflow (Di Tommaso, 2017) framework.\r +\r +Find the documentation here [![Documentation Status](https://readthedocs.org/projects/tronflow-docs/badge/?version=latest)](https://tronflow-docs.readthedocs.io/en/latest/?badge=latest)\r +\r +\r +The aim of this workflow is to preprocess BAM files based on Picard and GATK (DePristo, 2011) best practices.\r +\r +\r +## Background\r +\r +In order to have a variant calling ready BAM file there are a number of operations that need to be applied on the BAM. \r +This pipeline depends on the particular variant caller, but there are some common operations.\r +\r +GATK has been providing a well known best practices document on BAM preprocessing, the latest best practices for \r +GATK4 (https://software.broadinstitute.org/gatk/best-practices/workflow?id=11165) does not perform anymore realignment around indels as opposed to best practices for GATK3 (https://software.broadinstitute.org/gatk/documentation/article?id=3238). This pipeline is based on both Picard and GATK. These best practices have been implemented a number of times, see for instance this implementation in Workflow Definition Language https://github.com/gatk-workflows/gatk4-data-processing/blob/master/processing-for-variant-discovery-gatk4.wdl.\r +\r +\r +## Objectives\r +\r +We aim at providing a single implementation of the BAM preprocessing pipeline that can be used across different \r +use cases. \r +For this purpose there are some required steps and some optional steps. \r +\r +The input can be either a tab-separated values file (`--input_files`) where each line corresponds to one input BAM or a single BAM (`--input_bam` and `--input_name`).\r +\r +## Implementation\r +\r +Steps:\r +\r +* **Clean BAM**. Sets the mapping quality to 0 for all unmapped reads and avoids soft clipping going beyond the reference genome boundaries. Implemented in Picard\r +* **Reorder chromosomes**. 
Makes the chromosomes in the BAM follow the same order as the reference genome. Implemented in Picard\r +* **Add read groups**. GATK requires that some headers are adde to the BAM, also we want to flag somehow the normal and tumor BAMs in the header as some callers, such as Mutect2 require it. Implemented in Picard.\r +* **Mark duplicates** (optional). Identify the PCR and the optical duplications and marks those reads. This uses the parallelized version on Spark, it is reported to scale linearly up to 16 CPUs.\r +* **Realignment around indels** (optional). This procedure is important for locus based variant callers, but for any variant caller doing haplotype assembly it is not needed. This is computing intensive as it first finds regions for realignment where there are indication of indels and then it performs a local realignment over those regions. Implemented in GATK3, deprecated in GATK4\r +* **Base Quality Score Recalibration (BQSR)** (optional). It aims at correcting systematic errors in the sequencer when assigning the base call quality errors, as these scores are used by variant callers it improves variant calling in some situations. Implemented in GATK4\r +* **Metrics** (optional). 
A number of metrics are obtained from the BAM file with Picard's CollectMetrics, CollectHsMetrics and samtools' coverage and depth.\r +\r +![Pipeline](figures/bam_preprocessing2.png)\r +\r +\r +## How to run it\r +\r +```\r +$ nextflow run tron-bioinformatics/tronflow-bam-preprocessing --help\r +\r +N E X T F L O W ~ version 19.07.0\r +Launching `main.nf` [intergalactic_shannon] - revision: e707c77d7b\r +\r +Usage:\r + main.nf --input_files input_files\r +\r +Input:\r + * --input_bam: the path to a single BAM (this option is not compatible with --input_files)\r + * --input_files: the path to a tab-separated values file containing in each row the sample name, sample type (eg: tumor or normal) and path to the BAM file (this option is not compatible with --input_bam)\r + Sample type will be added to the BAM header @SN sample name\r + The input file does not have header!\r + Example input file:\r + name1 tumor tumor.1.bam\r + name1 normal normal.1.bam\r + name2 tumor tumor.2.bam\r + * --reference: path to the FASTA genome reference (indexes expected *.fai, *.dict)\r +\r +Optional input:\r + * --input_name: the name of the sample. 
Only used when --input_bam is provided (default: normal)\r + * --dbsnp: path to the dbSNP VCF (required to perform BQSR)\r + * --known_indels1: path to a VCF of known indels (optional to perform realignment around indels)\r + * --known_indels2: path to a second VCF of known indels (optional to perform realignment around indels)\r + * --intervals: path to a BED file to collect coverage and HS metrics from (default: None)\r + * --collect_hs_minimum_base_quality: minimum base quality for a base to contribute coverage (default: 20).\r + * --collect_hs_minimum_mapping_quality: minimum mapping quality for a read to contribute coverage (default: 20).\r + * --skip_bqsr: optionally skip BQSR (default: false)\r + * --skip_realignment: optionally skip realignment (default: false)\r + * --skip_deduplication: optionally skip deduplication (default: false)\r + * --remove_duplicates: removes duplicate reads from output BAM instead of flagging them (default: true)\r + * --skip_metrics: optionally skip metrics (default: false)\r + * --output: the folder where to publish output (default: ./output)\r + * --platform: the platform to be added to the BAM header. 
Valid values: [ILLUMINA, SOLID, LS454, HELICOS and PACBIO] (default: ILLUMINA)\r +\r +Computational resources:\r + * --prepare_bam_cpus: (default: 3)\r + * --prepare_bam_memory: (default: 8g)\r + * --mark_duplicates_cpus: (default: 16)\r + * --mark_duplicates_memory: (default: 64g)\r + * --realignment_around_indels_cpus: (default: 2)\r + * --realignment_around_indels_memory: (default: 31g)\r + * --bqsr_cpus: (default: 3)\r + * --bqsr_memory: (default: 4g)\r + * --metrics_cpus: (default: 1)\r + * --metrics_memory: (default: 8g)\r +\r + Output:\r + * Preprocessed and indexed BAMs\r + * Tab-separated values file with the absolute paths to the preprocessed BAMs, preprocessed_bams.txt\r +\r +Optional output:\r + * Recalibration report\r + * Deduplication metrics\r + * Realignment intervals\r + * GATK multiple metrics\r + * HS metrics\r + * Horizontal and vertical coverage metrics\r +```\r +\r +### Input table\r +\r +The table with FASTQ files expects two tab-separated columns **without a header**\r +\r +| Sample name | Sample type | BAM |\r +|----------------------|---------------------------------|------------------------------|\r +| sample_1 | normal | /path/to/sample_1.normal.bam |\r +| sample_1 | tumor | /path/to/sample_1.tumor.bam |\r +| sample_2 | normal | /path/to/sample_2.normal.bam |\r +| sample_2 | tumor | /path/to/sample_2.tumor.bam |\r +\r +The values used in `sample type` are arbitrary. These will be set in the BAM header tag @RG:SM for sample. 
There may be some downstream constraints, eg: Mutect2 pipeline requires that the sample type between normal and tumor samples of the same pair are not the same.\r +\r +### References\r +\r +The BAM preprocessing workflow requires the human reference genome (`--reference`)\r +Base Quality Score Recalibration (BQSR) requires dbSNP to avoid extracting error metrics from polymorphic sites (`--dbsnp`)\r +Realignment around indels requires a set of known indels (`--known_indels1` and `--known_indels2`).\r +These resources can be fetched from the GATK bundle https://gatk.broadinstitute.org/hc/en-us/articles/360035890811-Resource-bundle.\r +\r +Optionally, in order to run Picard's CollectHsMetrics a BED file will need to be provided (`--intervals`).\r +This BED file will also be used for `samtools coverage`.\r +\r +## Troubleshooting\r +\r +### Too new Java version for MarkDuplicatesSpark\r +\r +When using Java 11 the cryptic error messsage `java.lang.IllegalArgumentException: Unsupported class file major version 55` has been observed.\r +This issue is described here and the solution is to use Java 8 https://gatk.broadinstitute.org/hc/en-us/community/posts/360056174592-MarkDuplicatesSpark-crash.\r +\r +\r +\r +## Bibliography\r +\r +* DePristo M, Banks E, Poplin R, Garimella K, Maguire J, Hartl C, Philippakis A, del Angel G, Rivas MA, Hanna M, McKenna A, Fennell T, Kernytsky A, Sivachenko A, Cibulskis K, Gabriel S, Altshuler D, Daly M. (2011). A framework for variation discovery and genotyping using next-generation DNA sequencing data. Nat Genet, 43:491-498. DOI: 10.1038/ng.806.\r +* Di Tommaso, P., Chatzou, M., Floden, E. W., Barja, P. P., Palumbo, E., & Notredame, C. (2017). Nextflow enables reproducible computational workflows. Nature Biotechnology, 35(4), 316–319. 
10.1038/nbt.3820\r +""" ; + schema1:isPartOf ; + schema1:keywords "Bioinformatics, GATK4, sambamba" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "TronFlow BAM preprocessing pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/419?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """This workflow has been created as part of Demonstrator 6 of the project EOSC-Life (within WP3) and is focused on reusing publicly available RNAi screens to gain insights into the nucleolus biology. The workflow downloads images from the Image Data Resource (IDR), performs object segmentation (of nuclei and nucleoli) and feature extraction of the images and objects identified.\r +\r +Tutorial: https://training.galaxyproject.org/training-material/topics/imaging/tutorials/tutorial-CP/tutorial.html""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/41?version=2" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Nucleoli segmentation using CellProfiler (EOSC-Life D6)" ; + schema1:sdDatePublished "2024-07-12 13:33:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/41/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 56581 ; + schema1:dateCreated "2020-11-03T22:12:24Z" ; + schema1:dateModified "2023-07-03T10:15:31Z" ; + schema1:description """This workflow has been created as part of Demonstrator 6 of the project EOSC-Life (within WP3) and is focused on reusing publicly available RNAi screens to gain insights into the nucleolus biology. 
The workflow downloads images from the Image Data Resource (IDR), performs object segmentation (of nuclei and nucleoli) and feature extraction of the images and objects identified.\r +\r +Tutorial: https://training.galaxyproject.org/training-material/topics/imaging/tutorials/tutorial-CP/tutorial.html""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/41?version=1" ; + schema1:keywords "CellProfiler, Galaxy, image processing, imaging" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Nucleoli segmentation using CellProfiler (EOSC-Life D6)" ; + schema1:producer , + , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/41?version=2" ; + schema1:version 2 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """The aim of this workflow is to handle the routine part of shotgun metagenomics data processing on Galaxy Australia. \r +\r +The workflow is using the tools MetaPhlAn2 for taxonomy classification and HUMAnN2 for functional profiling of the metagenomes. The workflow is based on the Galaxy Training tutorial 'Analyses of metagenomics data - The global picture' (Saskia Hiltemann, Bérénice Batut) https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/general-tutorial/tutorial.html#shotgun-metagenomics-data. 
\r +\r +The how-to guide is available here: https://vmurigneu.github.io/shotgun_howto_ga_workflows/\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.624.1" ; + schema1:isBasedOn "https://github.com/vmurigneu/shotgun_howto_ga_workflows" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Analyses of shotgun metagenomics data with MetaPhlAn2" ; + schema1:sdDatePublished "2024-07-12 13:26:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/624/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 156545 ; + schema1:creator , + , + , + , + , + ; + schema1:dateCreated "2023-10-26T19:34:08Z" ; + schema1:dateModified "2024-04-05T05:20:07Z" ; + schema1:description """The aim of this workflow is to handle the routine part of shotgun metagenomics data processing on Galaxy Australia. \r +\r +The workflow is using the tools MetaPhlAn2 for taxonomy classification and HUMAnN2 for functional profiling of the metagenomes. The workflow is based on the Galaxy Training tutorial 'Analyses of metagenomics data - The global picture' (Saskia Hiltemann, Bérénice Batut) https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/general-tutorial/tutorial.html#shotgun-metagenomics-data. \r +\r +The how-to guide is available here: https://vmurigneu.github.io/shotgun_howto_ga_workflows/\r +""" ; + schema1:image ; + schema1:keywords "Metagenomics, GUCFG2galaxy, shotgun" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Analyses of shotgun metagenomics data with MetaPhlAn2" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/624?version=1" ; + schema1:version 1 . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 115054 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 6396 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + schema1:dateModified "2024-05-29T10:05:27+00:00" ; + schema1:hasPart , + , + , + ; + schema1:name "A" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16 ; + schema1:dateModified "2024-06-18T13:37:20+00:00" ; + schema1:name "A.0.0" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16 ; + schema1:dateModified "2024-06-18T13:37:20+00:00" ; + schema1:name "A.0.1" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16 ; + schema1:dateModified "2024-06-18T13:37:20+00:00" ; + schema1:name "A.1.0" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16 ; + schema1:dateModified "2024-06-18T13:37:20+00:00" ; + schema1:name "A.1.1" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . + + a schema1:Dataset ; + schema1:dateModified "2022-05-09T10:23:59+00:00" ; + schema1:hasPart , + , + , + ; + schema1:name "B" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16 ; + schema1:dateModified "2024-06-18T13:37:20+00:00" ; + schema1:name "B.0.0" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16 ; + schema1:dateModified "2024-06-18T13:37:20+00:00" ; + schema1:name "B.0.1" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16 ; + schema1:dateModified "2024-06-18T13:37:20+00:00" ; + schema1:name "B.1.0" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16 ; + schema1:dateModified "2024-06-18T13:37:20+00:00" ; + schema1:name "B.1.1" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 20 ; + schema1:dateModified "2024-06-18T13:37:24+00:00" ; + schema1:name "C.0.0" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 20 ; + schema1:dateModified "2024-06-18T13:37:24+00:00" ; + schema1:name "C.0.1" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 20 ; + schema1:dateModified "2024-06-18T13:37:24+00:00" ; + schema1:name "C.1.0" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 20 ; + schema1:dateModified "2024-06-18T13:37:24+00:00" ; + schema1:name "C.1.1" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1023?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/smrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/smrnaseq" ; + schema1:sdDatePublished "2024-07-12 13:18:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1023/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6277 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:18Z" ; + schema1:dateModified "2024-06-11T12:55:18Z" ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1023?version=10" ; + schema1:keywords "small-rna, smrna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/smrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1023?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# HiC contact map generation\r +\r +Snakemake pipeline for the generation of `.pretext` and `.mcool` files for visualisation of HiC contact maps with the softwares PretextView and HiGlass, respectively.\r +\r +## Prerequisites\r +\r +This pipeine has been tested using `Snakemake v7.32.4` and requires conda for installation of required tools. To run the pipline use the command:\r +\r +`snakemake --use-conda`\r +\r +There are provided a set of configuration and running scripts for exectution on a slurm queueing system. After configuring the `cluster.json` file run:\r +\r +`./run_cluster`\r +\r +## Before starting\r +\r +You need to create a temporary folder and specify the path in the `config.yaml` file. This should be able to hold the temporary files created when sorting the `.pairsam` file (100s of GB or even many TBs)\r +\r +The path to the genome assemly must be given in the `config.yaml`.\r +\r +The HiC reads should be paired and named as follows: `Library_1.fastq.gz Library_2.fastq.gz`. 
The pipeline can accept any number of paired HiC read files, but the naming must be consistent. The folder containing these files must be provided in the `config.yaml`.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.795.2" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for HiC contact map generation" ; + schema1:sdDatePublished "2024-07-12 13:23:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/795/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5790 ; + schema1:creator ; + schema1:dateCreated "2024-04-29T12:22:21Z" ; + schema1:dateModified "2024-04-29T12:25:04Z" ; + schema1:description """# HiC contact map generation\r +\r +Snakemake pipeline for the generation of `.pretext` and `.mcool` files for visualisation of HiC contact maps with the softwares PretextView and HiGlass, respectively.\r +\r +## Prerequisites\r +\r +This pipeine has been tested using `Snakemake v7.32.4` and requires conda for installation of required tools. To run the pipline use the command:\r +\r +`snakemake --use-conda`\r +\r +There are provided a set of configuration and running scripts for exectution on a slurm queueing system. After configuring the `cluster.json` file run:\r +\r +`./run_cluster`\r +\r +## Before starting\r +\r +You need to create a temporary folder and specify the path in the `config.yaml` file. This should be able to hold the temporary files created when sorting the `.pairsam` file (100s of GB or even many TBs)\r +\r +The path to the genome assemly must be given in the `config.yaml`.\r +\r +The HiC reads should be paired and named as follows: `Library_1.fastq.gz Library_2.fastq.gz`. The pipeline can accept any number of paired HiC read files, but the naming must be consistent. 
The folder containing these files must be provided in the `config.yaml`.\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/795?version=1" ; + schema1:isPartOf ; + schema1:keywords "Bioinformatics, Genomics, Snakemake, Genome assembly" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "HiC contact map generation" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/795?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/987?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/funcscan" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/funcscan" ; + schema1:sdDatePublished "2024-07-12 13:21:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/987/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14829 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:53Z" ; + schema1:dateModified "2024-06-11T12:54:53Z" ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/987?version=8" ; + schema1:keywords "amp, AMR, antibiotic-resistance, antimicrobial-peptides, antimicrobial-resistance-genes, arg, Assembly, bgc, biosynthetic-gene-clusters, contigs, function, Metagenomics, natural-products, screening, secondary-metabolites" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/funcscan" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/987?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 260152 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "application/pdf" ; + schema1:name "complete_graph.pdf" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """This workflow is used to process timeseries from meteorological stations in Finland but can be applied to any timeseries according it follows the same format.\r +\r +Take a temperature timeseries from any meteorological station. Input format is csv and it must be standardized with 6 columns:\r +\r +1. Year (ex: 2021)\r +2. month (ex: 1)\r +3. day (ex: 15) \r +4. Time (ex: 16:56)\r +5. Time zone (such as UTC)\r +6. 
Air temperature (degC)""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/123?version=1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Compute daily and monthly mean from meteorological station measurements" ; + schema1:sdDatePublished "2024-07-12 13:37:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/123/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 27186 ; + schema1:dateCreated "2021-05-23T19:28:06Z" ; + schema1:dateModified "2023-01-16T13:50:01Z" ; + schema1:description """This workflow is used to process timeseries from meteorological stations in Finland but can be applied to any timeseries according it follows the same format.\r +\r +Take a temperature timeseries from any meteorological station. Input format is csv and it must be standardized with 6 columns:\r +\r +1. Year (ex: 2021)\r +2. month (ex: 1)\r +3. day (ex: 15) \r +4. Time (ex: 16:56)\r +5. Time zone (such as UTC)\r +6. Air temperature (degC)""" ; + schema1:keywords "Climate, eosc-nordic, observation" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Compute daily and monthly mean from meteorological station measurements" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/123?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Abstract CWL Automatically generated from the Galaxy workflow file: Workflow with Copernicus Essential Climate Variable - select and plot" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/46?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Copernicus Essential Climate Variable - select and plot" ; + schema1:sdDatePublished "2024-07-12 13:37:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/46/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 2916 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15139 ; + schema1:dateCreated "2020-07-23T18:22:21Z" ; + schema1:dateModified "2023-01-16T13:44:09Z" ; + schema1:description "Abstract CWL Automatically generated from the Galaxy workflow file: Workflow with Copernicus Essential Climate Variable - select and plot" ; + schema1:image ; + schema1:keywords "Galaxy, Climate, copernicus" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Copernicus Essential Climate Variable - select and plot" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/46?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 7145 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Take a scRNAseq counts matrix from a single sample, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. 
Produces a processed AnnData \r +object.""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/514?version=2" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq Single Sample Processing Counts Matrix" ; + schema1:sdDatePublished "2024-07-12 13:22:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/514/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 105641 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-11-09T01:56:24Z" ; + schema1:dateModified "2023-11-09T03:52:51Z" ; + schema1:description """Take a scRNAseq counts matrix from a single sample, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. Produces a processed AnnData \r +object.""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/514?version=2" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "scRNAseq Single Sample Processing Counts Matrix" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/514?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/278?version=11" ; + schema1:isBasedOn "https://gitlab.bsc.es/lrodrig1/structuralvariants_poc/-/tree/1.1.3/structuralvariants/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNV_pipeline" ; + schema1:sdDatePublished "2024-07-12 13:35:30 +0100" ; + schema1:url "https://workflowhub.eu/workflows/278/ro_crate?version=11" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9694 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9474 ; + schema1:creator , + ; + schema1:dateCreated "2022-10-11T11:04:44Z" ; + schema1:dateModified "2023-01-16T13:57:59Z" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/278?version=10" ; + schema1:keywords "cancer, CODEX2, ExomeDepth, manta, TransBioNet, variant calling, GRIDSS, structural variants" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CNV_pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/278?version=11" ; + schema1:version 11 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 72914 . + + a schema1:Dataset ; + schema1:datePublished "2024-01-12T17:00:57.136582" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-Trio-phasing-VGP5/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.19" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# CMIP tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of computing **classical molecular interaction potentials** from **protein structures**, step by step, using the **BioExcel Building Blocks library (biobb)**. Examples shown are **Molecular Interaction Potentials (MIPs) grids, protein-protein/ligand interaction potentials, and protein titration**. The particular structures used are the **Lysozyme** protein (PDB code [1AKI](https://www.rcsb.org/structure/1aki)), and a MD simulation of the complex formed by the **SARS-CoV-2 Receptor Binding Domain and the human Angiotensin Converting Enzyme 2** (PDB code [6VW1](https://www.rcsb.org/structure/6vw1)).\r +\r +The code wrapped is the ***Classical Molecular Interaction Potentials (CMIP)*** code:\r +\r +**Classical molecular interaction potentials: Improved setup procedure in molecular dynamics simulations of proteins.**\r +*Gelpí, J.L., Kalko, S.G., Barril, X., Cirera, J., de la Cruz, X., Luque, F.J. and Orozco, M. (2001)*\r +*Proteins, 45: 428-437. 
[https://doi.org/10.1002/prot.1159](https://doi.org/10.1002/prot.1159)*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.820.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_cmip/docker" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Docker Classical Molecular Interaction Potentials" ; + schema1:sdDatePublished "2024-07-12 13:23:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/820/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 737 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-22T10:28:04Z" ; + schema1:dateModified "2024-04-22T10:32:32Z" ; + schema1:description """# CMIP tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of computing **classical molecular interaction potentials** from **protein structures**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
Examples shown are **Molecular Interaction Potentials (MIPs) grids, protein-protein/ligand interaction potentials, and protein titration**. The particular structures used are the **Lysozyme** protein (PDB code [1AKI](https://www.rcsb.org/structure/1aki)), and a MD simulation of the complex formed by the **SARS-CoV-2 Receptor Binding Domain and the human Angiotensin Converting Enzyme 2** (PDB code [6VW1](https://www.rcsb.org/structure/6vw1)).\r +\r +The code wrapped is the ***Classical Molecular Interaction Potentials (CMIP)*** code:\r +\r +**Classical molecular interaction potentials: Improved setup procedure in molecular dynamics simulations of proteins.**\r +*Gelpí, J.L., Kalko, S.G., Barril, X., Cirera, J., de la Cruz, X., Luque, F.J. and Orozco, M. (2001)*\r +*Proteins, 45: 428-437. [https://doi.org/10.1002/prot.1159](https://doi.org/10.1002/prot.1159)*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Docker Classical Molecular Interaction Potentials" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_cmip/docker/Dockerfile" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Workflow for Metagenomics from raw reads to annotated bins.\r +**Steps:**\r +* workflow_quality.cwl:\r + * FastQC (control)\r + * fastp (quality trimming)\r + * bbmap contamination filter\r +* SPAdes (Assembly)\r +* QUAST (Assembly quality report)\r +* BBmap (Read mapping to assembly)\r +* MetaBat2 (binning)\r +* CheckM (bin completeness and contamination)\r +* GTDB-Tk (bin taxonomic classification)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/64?version=9" ; + schema1:isBasedOn "https://gitlab.com/m-unlock/cwl/-/blob/master/cwl/workflows/workflow_metagenomics_binning.cwl" ; + schema1:license "CC0-1.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Metagenomics Binning Workflow" ; + schema1:sdDatePublished "2024-07-12 13:34:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/64/ro_crate?version=9" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 61045 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13780 ; + schema1:creator , + ; + schema1:dateCreated "2021-06-01T10:43:13Z" ; + schema1:dateModified "2021-06-07T17:04:02Z" ; + schema1:description """Workflow for Metagenomics from raw reads to annotated bins.\r +**Steps:**\r +* workflow_quality.cwl:\r + * FastQC (control)\r + * fastp (quality trimming)\r + * bbmap contamination filter\r +* SPAdes (Assembly)\r +* QUAST (Assembly quality report)\r +* BBmap (Read mapping to assembly)\r +* MetaBat2 (binning)\r +* CheckM (bin completeness and contamination)\r +* GTDB-Tk (bin taxonomic classification)\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/64?version=10" ; + schema1:keywords "Metagenomics, microbial, metagenome, binning" ; + schema1:license "https://spdx.org/licenses/CC0-1.0" ; + schema1:name "Metagenomics Binning Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/64?version=9" ; + schema1:version 9 ; + ns1:input , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Conformational Transitions calculations tutorial using BioExcel Building Blocks (biobb) and GOdMD\r +\r +This tutorial aims to illustrate the process of computing a conformational transition between two known structural conformations of a protein, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.548.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_godmd" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein Conformational Transitions calculations tutorial" ; + schema1:sdDatePublished "2024-07-12 13:32:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/548/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 20808 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-04T15:26:51Z" ; + schema1:dateModified "2024-05-14T10:15:26Z" ; + schema1:description """# Protein Conformational Transitions calculations tutorial using BioExcel Building Blocks (biobb) and GOdMD\r +\r +This tutorial aims to illustrate the process of computing a conformational transition between two known structural conformations of a protein, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/548?version=1" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein Conformational Transitions calculations tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_godmd/blob/main/biobb_wf_godmd/notebooks/biobb_wf_godmd.ipynb" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-07-12 13:20:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3843 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:02Z" ; + schema1:dateModified "2024-06-11T12:55:02Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2020-12-18T14:44:57.573950" ; + schema1:description "Continuous flexibility analysis of SARS-CoV-2 Spike prefusion structures" ; + schema1:hasPart , + ; + schema1:image "workflow.svg" ; + schema1:keywords "cryoem", + "scipion" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Cryo electron microscopy of SARS-CoV-2 spike in prefusion state" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:image ; + schema1:name "workflow" ; + schema1:programmingLanguage . + + a schema1:MediaObject . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=23" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-07-12 13:18:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=23" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15178 ; + schema1:creator , + ; + schema1:dateCreated "2024-07-02T03:02:47Z" ; + schema1:dateModified "2024-07-02T03:02:47Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=23" ; + schema1:version 23 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """A CWL-based pipeline for calling small germline variants, namely SNPs and small INDELs, by processing data from Whole-genome Sequencing (WGS) or Targeted Sequencing (e.g., Whole-exome sequencing; WES) experiments.\r +\r +On the respective GitHub folder are available:\r +\r +- The CWL wrappers and subworkflows for the workflow\r +- A pre-configured YAML template, based on validation analysis of publicly available HTS data\r +\r +Briefly, the workflow performs the following steps:\r +\r +1. Quality control of Illumina reads (FastQC)\r +2. Trimming of the reads (e.g., removal of adapter and/or low quality sequences) (Trim galore)\r +3. Mapping to reference genome (BWA-MEM)\r +4. Convertion of mapped reads from SAM (Sequence Alignment Map) to BAM (Binary Alignment Map) format (samtools)\r +5. Sorting mapped reads based on read names (samtools)\r +6. Adding information regarding paired end reads (e.g., CIGAR field information) (samtools)\r +7. Re-sorting mapped reads based on chromosomal coordinates (samtools)\r +8. Adding basic Read-Group information regarding sample name, platform unit, platform (e.g., ILLUMINA), library and identifier (picard AddOrReplaceReadGroups)\r +9. Marking PCR and/or optical duplicate reads (picard MarkDuplicates)\r +10. Collection of summary statistics (samtools) \r +11. Creation of indexes for coordinate-sorted BAM files to enable fast random access (samtools)\r +12. Splitting the reference genome into a predefined number of intervals for parallel processing (GATK SplitIntervals)\r +\r +At this point the application of single-sample workflow follows, during which multiple samples are accepted as input and they are not merged into a unified VCF file but are rather processed separately in each step of the workflow, leading to the production of a VCF file for each sample:\r +\r +13. 
Application of Base Quality Score Recalibration (BQSR) (GATK BaseRecalibrator, GatherBQSRReports and ApplyBQSR tools)\r +14. Variant calling (GATK HaplotypeCaller) \r +15. Merging of all genomic interval-split gVCF files for each sample (GATK MergeVCFs)\r +16. Separate annotation of SNPs and INDELs based on pretrained Convolutional Neural Network (CNN) models (GATK SelectVariants, CNNScoreVariants and FilterVariantTranches tools)\r +17. (Optional) Independent step of hard-filtering (GATK VariantFiltration)\r +18. Variant filtering based on the information added during VQSR and/or custom filters (bcftools)\r +19. Normalization of INDELs (split multiallelic sites) (bcftools)\r +20. Annotation of the final dataset of filtered variants with genomic, population-related and/or clinical information (ANNOVAR)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.527.1" ; + schema1:isBasedOn "https://github.com/BiodataAnalysisGroup/CWL_HTS_pipelines/tree/main/Germline_Variant_calling/single-sample_analysis/with_BQSR_CNN_%26_hard_filtering" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL-based (single-sample) workflow for germline variant calling" ; + schema1:sdDatePublished "2024-07-12 13:32:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/527/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 36026 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-07-05T09:48:09Z" ; + schema1:dateModified "2023-07-05T09:49:33Z" ; + schema1:description """A CWL-based pipeline for calling small germline variants, namely SNPs and small INDELs, by processing data from Whole-genome Sequencing (WGS) or Targeted Sequencing (e.g., Whole-exome sequencing; WES) experiments.\r +\r +On the respective GitHub folder are available:\r +\r +- The CWL wrappers and subworkflows for the workflow\r +- A pre-configured YAML template, based on validation analysis of publicly available HTS data\r +\r +Briefly, the workflow performs the following steps:\r +\r +1. Quality control of Illumina reads (FastQC)\r +2. Trimming of the reads (e.g., removal of adapter and/or low quality sequences) (Trim galore)\r +3. Mapping to reference genome (BWA-MEM)\r +4. Convertion of mapped reads from SAM (Sequence Alignment Map) to BAM (Binary Alignment Map) format (samtools)\r +5. Sorting mapped reads based on read names (samtools)\r +6. Adding information regarding paired end reads (e.g., CIGAR field information) (samtools)\r +7. Re-sorting mapped reads based on chromosomal coordinates (samtools)\r +8. Adding basic Read-Group information regarding sample name, platform unit, platform (e.g., ILLUMINA), library and identifier (picard AddOrReplaceReadGroups)\r +9. Marking PCR and/or optical duplicate reads (picard MarkDuplicates)\r +10. Collection of summary statistics (samtools) \r +11. Creation of indexes for coordinate-sorted BAM files to enable fast random access (samtools)\r +12. 
Splitting the reference genome into a predefined number of intervals for parallel processing (GATK SplitIntervals)\r +\r +At this point the application of single-sample workflow follows, during which multiple samples are accepted as input and they are not merged into a unified VCF file but are rather processed separately in each step of the workflow, leading to the production of a VCF file for each sample:\r +\r +13. Application of Base Quality Score Recalibration (BQSR) (GATK BaseRecalibrator, GatherBQSRReports and ApplyBQSR tools)\r +14. Variant calling (GATK HaplotypeCaller) \r +15. Merging of all genomic interval-split gVCF files for each sample (GATK MergeVCFs)\r +16. Separate annotation of SNPs and INDELs based on pretrained Convolutional Neural Network (CNN) models (GATK SelectVariants, CNNScoreVariants and FilterVariantTranches tools)\r +17. (Optional) Independent step of hard-filtering (GATK VariantFiltration)\r +18. Variant filtering based on the information added during VQSR and/or custom filters (bcftools)\r +19. Normalization of INDELs (split multiallelic sites) (bcftools)\r +20. 
Annotation of the final dataset of filtered variants with genomic, population-related and/or clinical information (ANNOVAR)\r +""" ; + schema1:image ; + schema1:keywords "CWL, workflow, Germline, variant calling, Genomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "CWL-based (single-sample) workflow for germline variant calling" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/527?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 221255 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """Phylogenetic reconstruction using genome-wide and single-gene alignment data. Here we use maximum likelihood reconstruction program IQTree. \r +Data can be prepared using the [phylogenetic data preparation workflow](http://workflowhub.eu/workflows/358) prior to phylogenetic reconstruction.\r +Resulting trees can be viewed interactively using Galaxy's 'Phyloviz' or 'Phylogenetic Tree Visualization'""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/359?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ML phylogenetic reconstruction" ; + schema1:sdDatePublished "2024-07-12 13:35:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/359/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 21956 ; + schema1:creator ; + schema1:dateCreated "2022-06-06T14:14:25Z" ; + schema1:dateModified "2023-02-13T14:06:44Z" ; + schema1:description """Phylogenetic reconstruction using genome-wide and single-gene alignment data. Here we use maximum likelihood reconstruction program IQTree. \r +Data can be prepared using the [phylogenetic data preparation workflow](http://workflowhub.eu/workflows/358) prior to phylogenetic reconstruction.\r +Resulting trees can be viewed interactively using Galaxy's 'Phyloviz' or 'Phylogenetic Tree Visualization'""" ; + schema1:keywords "phylogenetics, phylogenomics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "ML phylogenetic reconstruction" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/359?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# HiFi *de novo* genome assembly workflow\r +\r +HiFi-assembly-workflow is a bioinformatics pipeline that can be used to analyse Pacbio CCS reads for *de novo* genome assembly using PacBio Circular Consensus Sequencing (CCS) reads. This workflow is implemented in Nextflow and has 3 major sections. 
\r + \r +Please refer to the following documentation for detailed description of each workflow section:\r + \r +- [Adapter filtration and pre-assembly quality control (QC)](https://australianbiocommons.github.io/hifi-assembly-workflow/recommendations#stage-1-adapter-filtration-and-pre-assembly-quality-control)\r +- [Assembly](https://australianbiocommons.github.io/hifi-assembly-workflow/recommendations#stage-2-assembly)\r +- [Post-assembly QC](https://australianbiocommons.github.io/hifi-assembly-workflow/recommendations#stage-3-post-assembly-quality-control)\r +\r +\r +## General recommendations \r +\r +A more detailed module and workflow description as well as execution examples on Gadi and Setonix are [available here](https://australianbiocommons.github.io/hifi-assembly-workflow/workflows).\r +\r +\r +## Attributions\r +\r +This work was developed at AGRF and supported by the Australian BioCommons via Bioplatforms Australia funding, the Australian Research Data Commons (https://doi.org/10.47486/PL105) and the Queensland Government RICF programme. Bioplatforms Australia and the Australian Research Data Commons are enabled by the National Collaborative Research Infrastructure Strategy (NCRIS).\r +\r +The documentation in this repository is based on Australian BioCommons guidelines. \r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/560?version=1" ; + schema1:isBasedOn "https://github.com/AustralianBioCommons/hifi-assembly-workflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for HiFi de novo genome assembly workflow" ; + schema1:sdDatePublished "2024-07-12 13:27:30 +0100" ; + schema1:url "https://workflowhub.eu/workflows/560/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7193 ; + schema1:creator , + , + , + , + , + ; + schema1:dateCreated "2023-08-31T07:41:03Z" ; + schema1:dateModified "2024-02-05T03:12:14Z" ; + schema1:description """# HiFi *de novo* genome assembly workflow\r +\r +HiFi-assembly-workflow is a bioinformatics pipeline that can be used to analyse Pacbio CCS reads for *de novo* genome assembly using PacBio Circular Consensus Sequencing (CCS) reads. This workflow is implemented in Nextflow and has 3 major sections. \r + \r +Please refer to the following documentation for detailed description of each workflow section:\r + \r +- [Adapter filtration and pre-assembly quality control (QC)](https://australianbiocommons.github.io/hifi-assembly-workflow/recommendations#stage-1-adapter-filtration-and-pre-assembly-quality-control)\r +- [Assembly](https://australianbiocommons.github.io/hifi-assembly-workflow/recommendations#stage-2-assembly)\r +- [Post-assembly QC](https://australianbiocommons.github.io/hifi-assembly-workflow/recommendations#stage-3-post-assembly-quality-control)\r +\r +\r +## General recommendations \r +\r +A more detailed module and workflow description as well as execution examples on Gadi and Setonix are [available here](https://australianbiocommons.github.io/hifi-assembly-workflow/workflows).\r +\r +\r +## Attributions\r +\r +This work was developed at AGRF and supported by the Australian BioCommons via Bioplatforms Australia funding, the Australian Research Data Commons (https://doi.org/10.47486/PL105) and the Queensland Government RICF programme. Bioplatforms Australia and the Australian Research Data Commons are enabled by the National Collaborative Research Infrastructure Strategy (NCRIS).\r +\r +The documentation in this repository is based on Australian BioCommons guidelines. 
\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "HiFi de novo genome assembly workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/560?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Simple bacterial assembly and annotation" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/966?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/bacass" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bacass" ; + schema1:sdDatePublished "2024-07-12 13:22:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/966/ro_crate?version=8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12331 ; + schema1:creator ; + schema1:dateCreated "2024-06-25T03:02:47Z" ; + schema1:dateModified "2024-06-25T03:02:47Z" ; + schema1:description "Simple bacterial assembly and annotation" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/966?version=7" ; + schema1:keywords "Assembly, bacterial-genomes, denovo, denovo-assembly, genome-assembly, hybrid-assembly, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bacass" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/966?version=8" ; + schema1:version 8 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """ORSON combine state-of-the-art tools for annotation processes within a Nextflow pipeline: sequence similarity search (PLAST, BLAST or Diamond), functional annotation retrieval (BeeDeeM) and functional prediction (InterProScan). When required, BUSCO completness evaluation and eggNOG Orthogroup annotation can be activated. While ORSON results can be analyzed through the command-line, it also offers the possibility to be compatible with BlastViewer or Blast2GO graphical tools.\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.136.1" ; + schema1:isBasedOn "https://gitlab.ifremer.fr/bioinfo/orson" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ORSON: workflow for prOteome and tRanScriptome functiOnal aNnotation" ; + schema1:sdDatePublished "2024-07-12 13:36:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/136/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 480384 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 0 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2021-07-08T14:18:03Z" ; + schema1:dateModified "2023-01-16T13:50:20Z" ; + schema1:description """ORSON combine state-of-the-art tools for annotation processes within a Nextflow pipeline: sequence similarity search (PLAST, BLAST or Diamond), functional annotation retrieval (BeeDeeM) and functional prediction (InterProScan). When required, BUSCO completness evaluation and eggNOG Orthogroup annotation can be activated. 
While ORSON results can be analyzed through the command-line, it also offers the possibility to be compatible with BlastViewer or Blast2GO graphical tools.\r +\r +""" ; + schema1:image ; + schema1:keywords "Annotation, Transcriptomics, Genomics, Proteomics, Nextflow" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ORSON: workflow for prOteome and tRanScriptome functiOnal aNnotation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://gitlab.ifremer.fr/bioinfo/orson/-/raw/master/main.nf" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier 
"https://doi.org/10.48546/workflowhub.workflow.130.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Amber Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:24:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/130/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 64833 ; + schema1:creator , + ; + schema1:dateCreated "2021-06-30T11:47:18Z" ; + schema1:dateModified "2022-09-15T12:29:05Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn 
"https://workflowhub.eu/workflows/130?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Amber Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_amber_md_setup/8bcda75405183a84476acd7ba733e4cb666ce397/biobb_wf_amber_md_setup/notebooks/mdsetup/biobb_amber_setup_notebook.ipynb" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# COnSensus Interaction Network InFErence Service\r +Inference framework for reconstructing networks using a consensus approach between multiple methods and data sources.\r +\r +![alt text](https://raw.githubusercontent.com/PhosphorylatedRabbits/cosifer/master/docs/_static/logo.png)\r +\r +## Reference\r +[Manica, Matteo, Charlotte, Bunne, Roland, Mathis, Joris, Cadow, Mehmet Eren, Ahsen, Gustavo A, Stolovitzky, and María Rodríguez, Martínez. "COSIFER: a python package for the consensus inference of molecular interaction networks".Bioinformatics (2020)](https://doi.org/10.1093/bioinformatics/btaa942).""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/119?version=1" ; + schema1:isBasedOn "https://github.com/inab/ipc_workflows/tree/main/cosifer/nextflow" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for COSIFER" ; + schema1:sdDatePublished "2024-07-12 13:37:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/119/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1362 ; + schema1:creator , + ; + schema1:dateCreated "2021-05-05T15:53:28Z" ; + schema1:dateModified "2023-04-21T11:04:40Z" ; + schema1:description """# COnSensus Interaction Network InFErence Service\r +Inference framework for reconstructing networks using a consensus approach between multiple methods and data sources.\r +\r +![alt text](https://raw.githubusercontent.com/PhosphorylatedRabbits/cosifer/master/docs/_static/logo.png)\r +\r +## Reference\r +[Manica, Matteo, Charlotte, Bunne, Roland, Mathis, Joris, Cadow, Mehmet Eren, Ahsen, Gustavo A, Stolovitzky, and María Rodríguez, Martínez. "COSIFER: a python package for the consensus inference of molecular interaction networks".Bioinformatics (2020)](https://doi.org/10.1093/bioinformatics/btaa942).""" ; + schema1:keywords "cosifer, cancer, pediatric, rna-seq" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "COSIFER" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/119?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Summary\r +\r +This tutorial aims to illustrate how to compute a fast-growth mutation free energy calculation, step by step, using the BioExcel Building Blocks library (biobb). The particular example used is the Staphylococcal nuclease protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +Workflow engine is a jupyter notebook. Auxiliary libraries used are nb\\_conda\\_kernels, os, and plotly. Environment setup can be carried out using the environment.yml in the code repository. 
The tutorial uses docker for running pmx - a local setup can be used instead, see notes in the tutorial.\r +\r +# Parameters\r +\r +## Inputs\r +\r +Workflow Input files needed:\r +\r +* **stateA_traj**: Equilibrium trajectory for the WT protein.\r +\r +* **stateB_traj**: Equilibrium trajectory for the Mutated protein.\r +\r +* **stateA_tpr**: WT protein topology (GROMACS tpr format).\r +\r +* **stateB_tpr**: Mutated protein topology (GROMACS tpr format).\r +\r +Auxiliar force field libraries needed:\r +\r +* **mutff45 (folder)**: pmx mutation force field libraries. \r +\r +\r +## Outputs\r +\r +* **pmx.outputs**: Final free energy estimation. Summary of information got applying the different methods.\r +\r +* **pmx.plots.png**: Final free energy plot of the Mutation free energy pipeline.\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.55.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_pmx_tutorial" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Mutation Free Energy Calculations using BioExcel Building Blocks (biobb) (jupyter notebook)" ; + schema1:sdDatePublished "2024-07-12 13:24:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/55/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 203732 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 49901 ; + schema1:creator , + ; + schema1:dateCreated "2020-09-15T11:56:52Z" ; + schema1:dateModified "2021-05-07T13:30:53Z" ; + schema1:description """# Summary\r +\r +This tutorial aims to illustrate how to compute a fast-growth mutation free energy calculation, step by step, using the BioExcel Building Blocks library (biobb). 
The particular example used is the Staphylococcal nuclease protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +Workflow engine is a jupyter notebook. Auxiliary libraries used are nb\\_conda\\_kernels, os, and plotly. Environment setup can be carried out using the environment.yml in the code repository. The tutorial uses docker for running pmx - a local setup can be used instead, see notes in the tutorial.\r +\r +# Parameters\r +\r +## Inputs\r +\r +Workflow Input files needed:\r +\r +* **stateA_traj**: Equilibrium trajectory for the WT protein.\r +\r +* **stateB_traj**: Equilibrium trajectory for the Mutated protein.\r +\r +* **stateA_tpr**: WT protein topology (GROMACS tpr format).\r +\r +* **stateB_tpr**: Mutated protein topology (GROMACS tpr format).\r +\r +Auxiliar force field libraries needed:\r +\r +* **mutff45 (folder)**: pmx mutation force field libraries. \r +\r +\r +## Outputs\r +\r +* **pmx.outputs**: Final free energy estimation. Summary of information got applying the different methods.\r +\r +* **pmx.plots.png**: Final free energy plot of the Mutation free energy pipeline.\r +\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/55?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Mutation Free Energy Calculations using BioExcel Building Blocks (biobb) (jupyter notebook)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/55?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# COVID-19 Multiscale Modelling of the Virus and Patients’ Tissue Workflow\r +\r +## Table of Contents\r +\r +- [COVID-19 Multiscale Modelling of the Virus and Patients’ Tissue Workflow](#covid-19-multiscale-modelling-of-the-virus-and-patients-tissue-workflow)\r + - [Table of Contents](#table-of-contents)\r + - [Description](#description)\r + - [Contents](#contents)\r + - [Building Blocks](#building-blocks)\r + - [Workflows](#workflows)\r + - [Resources](#resources)\r + - [Tests](#tests)\r + - [Instructions](#instructions)\r + - [Local machine](#local-machine)\r + - [Requirements](#requirements)\r + - [Usage steps](#usage-steps)\r + - [MareNostrum 4](#marenostrum-4)\r + - [Requirements in MN4](#requirements-in-mn4)\r + - [Usage steps in MN4](#usage-steps-in-mn4)\r + - [Mahti or Puhti](#mahti-or-puhti)\r + - [Requirements](#requirements)\r + - [Steps](#steps)\r + - [License](#license)\r + - [Contact](#contact)\r +\r +## Description\r +\r +Uses multiscale simulations to predict patient-specific SARS‑CoV‑2 severity subtypes\r +(moderate, severe or control), using single-cell RNA-Seq data, MaBoSS and PhysiBoSS.\r +Boolean models are used to determine the behaviour of individual agents as a function\r +of extracellular conditions and the concentration of different substrates, including\r +the number of virions. Predictions of severity subtypes are based on a meta-analysis of\r +personalised model outputs simulating cellular apoptosis regulation in epithelial cells\r +infected by SARS‑CoV‑2.\r +\r +The workflow uses the following building blocks, described in order of execution:\r +\r +1. High-throughput mutant analysis\r +2. Single-cell processing\r +3. Personalise patient\r +4. PhysiBoSS\r +5. 
Analysis of all simulations\r +\r +For details on individual workflow steps, see the user documentation for each building block.\r +\r +[`GitHub repository`]()\r +\r +\r +## Contents\r +\r +### Building Blocks\r +\r +The ``BuildingBlocks`` folder contains the script to install the\r +Building Blocks used in the COVID-19 Workflow.\r +\r +### Workflows\r +\r +The ``Workflow`` folder contains the workflows implementations.\r +\r +Currently contains the implementation using PyCOMPSs and Snakemake (in progress).\r +\r +### Resources\r +\r +The ``Resources`` folder contains dataset files.\r +\r +### Tests\r +\r +The ``Tests`` folder contains the scripts that run each Building Block\r +used in the workflow for the given small dataset.\r +They can be executed individually for testing purposes.\r +\r +## Instructions\r +\r +### Local machine\r +\r +This section explains the requirements and usage for the COVID19 Workflow in a laptop or desktop computer.\r +\r +#### Requirements\r +\r +- [`permedcoe`](https://github.com/PerMedCoE/permedcoe) package\r +- [PyCOMPSs](https://pycompss.readthedocs.io/en/stable/Sections/00_Quickstart.html) / [Snakemake](https://snakemake.readthedocs.io/en/stable/)\r +- [Singularity](https://sylabs.io/guides/3.0/user-guide/installation.html)\r +\r +#### Usage steps\r +\r +1. Clone this repository:\r +\r + ```bash\r + git clone https://github.com/PerMedCoE/covid-19-workflow.git\r + ```\r +\r +2. Install the Building Blocks required for the COVID19 Workflow:\r +\r + ```bash\r + covid-19-workflow/BuildingBlocks/./install_BBs.sh\r + ```\r +\r +3. 
Get the required Building Block images from the project [B2DROP](https://b2drop.bsc.es/index.php/f/444350):\r +\r + - Required images:\r + - MaBoSS.singularity\r + - meta_analysis.singularity\r + - PhysiCell-COVID19.singularity\r + - single_cell.singularity\r +\r + The path where these files are stored **MUST be exported in the `PERMEDCOE_IMAGES`** environment variable.\r +\r + > :warning: **TIP**: These containers can be built manually as follows (be patient since some of them may take some time):\r + 1. Clone the `BuildingBlocks` repository\r + ```bash\r + git clone https://github.com/PerMedCoE/BuildingBlocks.git\r + ```\r + 2. Build the required Building Block images\r + ```bash\r + cd BuildingBlocks/Resources/images\r + sudo singularity build MaBoSS.sif MaBoSS.singularity\r + sudo singularity build meta_analysis.sif meta_analysis.singularity\r + sudo singularity build PhysiCell-COVID19.sif PhysiCell-COVID19.singularity\r + sudo singularity build single_cell.sif single_cell.singularity\r + cd ../../..\r + ```\r +\r +**If using PyCOMPSs in local PC** (make sure that PyCOMPSs in installed):\r +\r +4. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflows/PyCOMPSs\r + ```\r +\r +5. Execute `./run.sh`\r +\r +**If using Snakemake in local PC** (make sure that SnakeMake is installed):\r +\r +4. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflows/SnakeMake\r + ```\r +\r +5. Execute `./run.sh`\r + > **TIP**: If you want to run the workflow with a different dataset, please update the `run.sh` script setting the `dataset` variable to the new dataset folder and their file names.\r +\r +\r +### MareNostrum 4\r +\r +This section explains the requirements and usage for the COVID19 Workflow in the MareNostrum 4 supercomputer.\r +\r +#### Requirements in MN4\r +\r +- Access to MN4\r +\r +All Building Blocks are already installed in MN4, and the COVID19 Workflow available.\r +\r +#### Usage steps in MN4\r +\r +1. 
Load the `COMPSs`, `Singularity` and `permedcoe` modules\r +\r + ```bash\r + export COMPSS_PYTHON_VERSION=3\r + module load COMPSs/3.1\r + module load singularity/3.5.2\r + module use /apps/modules/modulefiles/tools/COMPSs/libraries\r + module load permedcoe\r + ```\r +\r + > **TIP**: Include the loading into your `${HOME}/.bashrc` file to load it automatically on the session start.\r +\r + This commands will load COMPSs and the permedcoe package which provides all necessary dependencies, as well as the path to the singularity container images (`PERMEDCOE_IMAGES` environment variable) and testing dataset (`COVID19WORKFLOW_DATASET` environment variable).\r +\r +2. Get a copy of the pilot workflow into your desired folder\r +\r + ```bash\r + mkdir desired_folder\r + cd desired_folder\r + get_covid19workflow\r + ```\r +\r +3. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflow/PyCOMPSs\r + ```\r +\r +4. Execute `./launch.sh`\r +\r + This command will launch a job into the job queuing system (SLURM) requesting 2 nodes (one node acting half master and half worker, and other full worker node) for 20 minutes, and is prepared to use the singularity images that are already deployed in MN4 (located into the `PERMEDCOE_IMAGES` environment variable). It uses the dataset located into `../../Resources/data` folder.\r +\r + > :warning: **TIP**: If you want to run the workflow with a different dataset, please edit the `launch.sh` script and define the appropriate dataset path.\r +\r + After the execution, a `results` folder will be available with with COVID19 Workflow results.\r +\r +### Mahti or Puhti\r +\r +This section explains how to run the COVID19 workflow on CSC supercomputers using SnakeMake.\r +\r +#### Requirements\r +\r +- Install snakemake (or check if there is a version installed using `module spider snakemake`)\r +- Install workflow, using the same steps as for the local machine. 
With the exception that containers have to be built elsewhere.\r +\r +#### Steps\r +\r +\r +1. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflow/SnakeMake\r + ```\r +\r +2. Edit `launch.sh` with the correct partition, account, and resource specifications. \r +\r +3. Execute `./launch.sh`\r +\r + > :warning: Snakemake provides a `--cluster` flag, but this functionality should be avoided as it's really not suited for HPC systems.\r +\r +## License\r +\r +[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\r +\r +## Contact\r +\r +\r +\r +This software has been developed for the [PerMedCoE project](https://permedcoe.eu/), funded by the European Commission (EU H2020 [951773](https://cordis.europa.eu/project/id/951773)).\r +\r +![](https://permedcoe.eu/wp-content/uploads/2020/11/logo_1.png "PerMedCoE")\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/481?version=1" ; + schema1:isBasedOn "https://github.com/PerMedCoE/covid-19-workflow.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PerMedCoE Covid19 Pilot workflow (Snakemake)" ; + schema1:sdDatePublished "2024-07-12 13:33:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/481/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3913 ; + schema1:creator ; + schema1:dateCreated "2023-05-23T13:24:53Z" ; + schema1:dateModified "2023-05-23T13:24:53Z" ; + schema1:description """# COVID-19 Multiscale Modelling of the Virus and Patients’ Tissue Workflow\r +\r +## Table of Contents\r +\r +- [COVID-19 Multiscale Modelling of the Virus and Patients’ Tissue Workflow](#covid-19-multiscale-modelling-of-the-virus-and-patients-tissue-workflow)\r + - [Table of Contents](#table-of-contents)\r + - [Description](#description)\r + - [Contents](#contents)\r + - [Building Blocks](#building-blocks)\r + - [Workflows](#workflows)\r + - [Resources](#resources)\r + - [Tests](#tests)\r + - [Instructions](#instructions)\r + - [Local machine](#local-machine)\r + - [Requirements](#requirements)\r + - [Usage steps](#usage-steps)\r + - [MareNostrum 4](#marenostrum-4)\r + - [Requirements in MN4](#requirements-in-mn4)\r + - [Usage steps in MN4](#usage-steps-in-mn4)\r + - [Mahti or Puhti](#mahti-or-puhti)\r + - [Requirements](#requirements)\r + - [Steps](#steps)\r + - [License](#license)\r + - [Contact](#contact)\r +\r +## Description\r +\r +Uses multiscale simulations to predict patient-specific SARS‑CoV‑2 severity subtypes\r +(moderate, severe or control), using single-cell RNA-Seq data, MaBoSS and PhysiBoSS.\r +Boolean models are used to determine the behaviour of individual agents as a function\r +of extracellular conditions and the concentration of different substrates, including\r +the number of virions. Predictions of severity subtypes are based on a meta-analysis of\r +personalised model outputs simulating cellular apoptosis regulation in epithelial cells\r +infected by SARS‑CoV‑2.\r +\r +The workflow uses the following building blocks, described in order of execution:\r +\r +1. High-throughput mutant analysis\r +2. Single-cell processing\r +3. 
Personalise patient\r +4. PhysiBoSS\r +5. Analysis of all simulations\r +\r +For details on individual workflow steps, see the user documentation for each building block.\r +\r +[`GitHub repository`]()\r +\r +\r +## Contents\r +\r +### Building Blocks\r +\r +The ``BuildingBlocks`` folder contains the script to install the\r +Building Blocks used in the COVID-19 Workflow.\r +\r +### Workflows\r +\r +The ``Workflow`` folder contains the workflows implementations.\r +\r +Currently contains the implementation using PyCOMPSs and Snakemake (in progress).\r +\r +### Resources\r +\r +The ``Resources`` folder contains dataset files.\r +\r +### Tests\r +\r +The ``Tests`` folder contains the scripts that run each Building Block\r +used in the workflow for the given small dataset.\r +They can be executed individually for testing purposes.\r +\r +## Instructions\r +\r +### Local machine\r +\r +This section explains the requirements and usage for the COVID19 Workflow in a laptop or desktop computer.\r +\r +#### Requirements\r +\r +- [`permedcoe`](https://github.com/PerMedCoE/permedcoe) package\r +- [PyCOMPSs](https://pycompss.readthedocs.io/en/stable/Sections/00_Quickstart.html) / [Snakemake](https://snakemake.readthedocs.io/en/stable/)\r +- [Singularity](https://sylabs.io/guides/3.0/user-guide/installation.html)\r +\r +#### Usage steps\r +\r +1. Clone this repository:\r +\r + ```bash\r + git clone https://github.com/PerMedCoE/covid-19-workflow.git\r + ```\r +\r +2. Install the Building Blocks required for the COVID19 Workflow:\r +\r + ```bash\r + covid-19-workflow/BuildingBlocks/./install_BBs.sh\r + ```\r +\r +3. 
Get the required Building Block images from the project [B2DROP](https://b2drop.bsc.es/index.php/f/444350):\r +\r + - Required images:\r + - MaBoSS.singularity\r + - meta_analysis.singularity\r + - PhysiCell-COVID19.singularity\r + - single_cell.singularity\r +\r + The path where these files are stored **MUST be exported in the `PERMEDCOE_IMAGES`** environment variable.\r +\r + > :warning: **TIP**: These containers can be built manually as follows (be patient since some of them may take some time):\r + 1. Clone the `BuildingBlocks` repository\r + ```bash\r + git clone https://github.com/PerMedCoE/BuildingBlocks.git\r + ```\r + 2. Build the required Building Block images\r + ```bash\r + cd BuildingBlocks/Resources/images\r + sudo singularity build MaBoSS.sif MaBoSS.singularity\r + sudo singularity build meta_analysis.sif meta_analysis.singularity\r + sudo singularity build PhysiCell-COVID19.sif PhysiCell-COVID19.singularity\r + sudo singularity build single_cell.sif single_cell.singularity\r + cd ../../..\r + ```\r +\r +**If using PyCOMPSs in local PC** (make sure that PyCOMPSs in installed):\r +\r +4. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflows/PyCOMPSs\r + ```\r +\r +5. Execute `./run.sh`\r +\r +**If using Snakemake in local PC** (make sure that SnakeMake is installed):\r +\r +4. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflows/SnakeMake\r + ```\r +\r +5. Execute `./run.sh`\r + > **TIP**: If you want to run the workflow with a different dataset, please update the `run.sh` script setting the `dataset` variable to the new dataset folder and their file names.\r +\r +\r +### MareNostrum 4\r +\r +This section explains the requirements and usage for the COVID19 Workflow in the MareNostrum 4 supercomputer.\r +\r +#### Requirements in MN4\r +\r +- Access to MN4\r +\r +All Building Blocks are already installed in MN4, and the COVID19 Workflow available.\r +\r +#### Usage steps in MN4\r +\r +1. 
Load the `COMPSs`, `Singularity` and `permedcoe` modules\r +\r + ```bash\r + export COMPSS_PYTHON_VERSION=3\r + module load COMPSs/3.1\r + module load singularity/3.5.2\r + module use /apps/modules/modulefiles/tools/COMPSs/libraries\r + module load permedcoe\r + ```\r +\r + > **TIP**: Include the loading into your `${HOME}/.bashrc` file to load it automatically on the session start.\r +\r + This commands will load COMPSs and the permedcoe package which provides all necessary dependencies, as well as the path to the singularity container images (`PERMEDCOE_IMAGES` environment variable) and testing dataset (`COVID19WORKFLOW_DATASET` environment variable).\r +\r +2. Get a copy of the pilot workflow into your desired folder\r +\r + ```bash\r + mkdir desired_folder\r + cd desired_folder\r + get_covid19workflow\r + ```\r +\r +3. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflow/PyCOMPSs\r + ```\r +\r +4. Execute `./launch.sh`\r +\r + This command will launch a job into the job queuing system (SLURM) requesting 2 nodes (one node acting half master and half worker, and other full worker node) for 20 minutes, and is prepared to use the singularity images that are already deployed in MN4 (located into the `PERMEDCOE_IMAGES` environment variable). It uses the dataset located into `../../Resources/data` folder.\r +\r + > :warning: **TIP**: If you want to run the workflow with a different dataset, please edit the `launch.sh` script and define the appropriate dataset path.\r +\r + After the execution, a `results` folder will be available with with COVID19 Workflow results.\r +\r +### Mahti or Puhti\r +\r +This section explains how to run the COVID19 workflow on CSC supercomputers using SnakeMake.\r +\r +#### Requirements\r +\r +- Install snakemake (or check if there is a version installed using `module spider snakemake`)\r +- Install workflow, using the same steps as for the local machine. 
With the exception that containers have to be built elsewhere.\r +\r +#### Steps\r +\r +\r +1. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflow/SnakeMake\r + ```\r +\r +2. Edit `launch.sh` with the correct partition, account, and resource specifications. \r +\r +3. Execute `./launch.sh`\r +\r + > :warning: Snakemake provides a `--cluster` flag, but this functionality should be avoided as it's really not suited for HPC systems.\r +\r +## License\r +\r +[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\r +\r +## Contact\r +\r +\r +\r +This software has been developed for the [PerMedCoE project](https://permedcoe.eu/), funded by the European Commission (EU H2020 [951773](https://cordis.europa.eu/project/id/951773)).\r +\r +![](https://permedcoe.eu/wp-content/uploads/2020/11/logo_1.png "PerMedCoE")\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "PerMedCoE Covid19 Pilot workflow (Snakemake)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/481?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2023-09-04T13:04:29.526383" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + , + ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.10" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-atac-cutandrun" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.10" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-pe" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.10" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/278?version=2" ; + schema1:isBasedOn "https://gitlab.bsc.es/lrodrig1/structuralvariants_poc/-/tree/1.1.3/structuralvariants/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNV_pipeline" ; + schema1:sdDatePublished "2024-07-12 13:35:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/278/ro_crate?version=2" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 8862 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8665 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-14T15:00:04Z" ; + schema1:dateModified "2022-03-14T15:01:57Z" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/278?version=10" ; + schema1:keywords "cancer, CODEX2, ExomeDepth, manta, TransBioNet, variant calling, GRIDSS, structural variants" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CNV_pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/278?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 66395 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/965?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/atacseq" ; + schema1:sdDatePublished "2024-07-12 13:22:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/965/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5510 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:41Z" ; + schema1:dateModified "2024-06-11T12:54:41Z" ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/965?version=8" ; + schema1:keywords "ATAC-seq, chromatin-accessibiity" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/atacseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/965?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Alignment, assembly RNASEQ reads and annotation of generated transcripts." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/38?version=1" ; + schema1:isBasedOn "https://usegalaxy.eu/u/ambarishk/w/covid-19-unicycler-assembly-and-annotation" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Unicycler assembly and annotation" ; + schema1:sdDatePublished "2024-07-12 13:37:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/38/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 37964 ; + schema1:dateCreated "2020-06-18T23:07:23Z" ; + schema1:dateModified "2023-02-13T14:06:46Z" ; + schema1:description "Alignment, assembly RNASEQ reads and annotation of generated transcripts." 
; + schema1:image ; + schema1:keywords "Unicycler, Alignment, Assembly, Annotation, RNASEQ, covid-19" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Unicycler assembly and annotation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/38?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 192982 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1025?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/taxprofiler" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/taxprofiler" ; + schema1:sdDatePublished "2024-07-12 13:18:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1025/ro_crate?version=8" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15728 ; + schema1:creator , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:19Z" ; + schema1:dateModified "2024-06-11T12:55:19Z" ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1025?version=10" ; + schema1:keywords "Classification, illumina, long-reads, Metagenomics, microbiome, nanopore, pathogen, Profiling, shotgun, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/taxprofiler" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1025?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """microPIPE was developed to automate high-quality complete bacterial genome assembly using Oxford Nanopore Sequencing in combination with Illumina sequencing.\r +\r +To build microPIPE we evaluated the performance of several tools at each step of bacterial genome assembly, including basecalling, assembly, and polishing. Results at each step were validated using the high-quality ST131 Escherichia coli strain EC958 (GenBank: HG941718.1). After appraisal of each step, we selected the best combination of tools to achieve the most consistent and best quality bacterial genome assemblies.\r +\r +The workflow below summarises the different steps of the pipeline (with each selected tool) and the approximate run time (using GPU basecalling, averaged over 12 E. coli isolates sequenced on a R9.4 MinION flow cell). 
Dashed boxes correspond to optional steps in the pipeline.\r +\r +Micropipe has been written in Nextflow and uses Singularity containers. It can use both GPU and CPU resources.\r +\r +For more information please see our publication here: https://bmcgenomics.biomedcentral.com/articles/10.1186/s12864-021-07767-z\r +\r +Infrastructure\\_deployment\\_metadata: Zeus (Pawsey)""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.140.1" ; + schema1:isBasedOn "https://github.com/BeatsonLab-MicrobialGenomics/micropipe" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for microPIPE: a pipeline for high-quality bacterial genome construction using ONT and Illumina sequencing" ; + schema1:sdDatePublished "2024-07-12 13:36:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/140/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 82123 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 39326 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2021-08-09T00:17:36Z" ; + schema1:dateModified "2023-01-16T13:51:29Z" ; + schema1:description """microPIPE was developed to automate high-quality complete bacterial genome assembly using Oxford Nanopore Sequencing in combination with Illumina sequencing.\r +\r +To build microPIPE we evaluated the performance of several tools at each step of bacterial genome assembly, including basecalling, assembly, and polishing. Results at each step were validated using the high-quality ST131 Escherichia coli strain EC958 (GenBank: HG941718.1). 
After appraisal of each step, we selected the best combination of tools to achieve the most consistent and best quality bacterial genome assemblies.\r +\r +The workflow below summarises the different steps of the pipeline (with each selected tool) and the approximate run time (using GPU basecalling, averaged over 12 E. coli isolates sequenced on a R9.4 MinION flow cell). Dashed boxes correspond to optional steps in the pipeline.\r +\r +Micropipe has been written in Nextflow and uses Singularity containers. It can use both GPU and CPU resources.\r +\r +For more information please see our publication here: https://bmcgenomics.biomedcentral.com/articles/10.1186/s12864-021-07767-z\r +\r +Infrastructure\\_deployment\\_metadata: Zeus (Pawsey)""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "ONT, bacterial-genomics, Assembly, Nextflow, workflow" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "microPIPE: a pipeline for high-quality bacterial genome construction using ONT and Illumina sequencing" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/140?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Simple bacterial assembly and annotation pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/966?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/bacass" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bacass" ; + schema1:sdDatePublished "2024-07-12 13:22:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/966/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5135 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:42Z" ; + schema1:dateModified "2024-06-11T12:54:42Z" ; + schema1:description "Simple bacterial assembly and annotation pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/966?version=7" ; + schema1:keywords "Assembly, bacterial-genomes, denovo, denovo-assembly, genome-assembly, hybrid-assembly, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bacass" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/966?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.195.4" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_dna_helparms" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Structural DNA helical parameters tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/195/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 74219 ; + schema1:creator , + ; + schema1:dateCreated "2023-07-26T13:28:37Z" ; + schema1:dateModified "2023-07-26T13:32:13Z" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/195?version=4" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Structural DNA helical parameters tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_dna_helparms/master/biobb_wf_dna_helparms/notebooks/biobb_dna_helparms_tutorial.ipynb" ; + schema1:version 4 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# CroMaSt: A workflow for assessing protein domain classification by cross-mapping of structural instances between domain databases and structural alignment\r +\r +CroMaSt (**Cro**ss **Ma**pper of domain **St**ructural instances) is an automated iterative workflow to clarify the assignment of protein domains to a given domain type of interest, based on their 3D structure and by cross-mapping of domain structural instances between domain databases. CroMaSt (for Cross-Mapper of domain Structural instances) will classify all structural instances of a given domain type into 4 different categories (**Core**, **True**, **Domain-like**, and **Failed**). \r +\r +\r +## Requirements\r +1. [Conda](https://docs.conda.io/projects/conda/en/latest/) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html)\r +2. [Kpax](http://kpax.loria.fr/download.php) \r +Download and install conda (or Miniconda) and Kpax by following the instructions from their official site.\r +\r +\r +## Get it running \r +(Considering the requirements are already met)\r +\r +1. Clone the repository and change the directory\r +\r +```\r +git clone https://gitlab.inria.fr/capsid.public_codes/CroMaSt.git\r +cd CroMaSt\r +```\r +\r +2. Create the conda environment for the workflow\r +```\r +conda env create --file yml/environment.yml\r +conda activate CroMaSt\r +```\r +\r +3. Change the path of variables in paramter file\r +```\r +sed -i 's/\\/home\\/hdhondge\\/CroMaSt\\//\\/YOUR\\/PATH\\/TO_CroMaSt\\//g' yml/CroMaSt_input.yml \r +```\r +\r +4. Create the directory to store files from PDB and SIFTS (if not already)\r +```\r +mkdir PDB_files SIFTS\r +```\r +\r +5. Download the source input data\r +```\r +cwl-runner Tools/download_data.cwl yml/download_data.yml\r +```\r +\r +## Basic example\r +\r +### 1. 
First, we will run the workflow for the KH domain with family identifiers `RRM_1` and `RRM` in Pfam and CATH, respectively.\r +Run the workflow -\r +\r +```\r +cwl-runner --parallel --outdir=Results/ CroMaSt.cwl yml/CroMaSt_input.yml\r +```\r +\r +### 2. Once the iteration is complete, check the `new_param.yml` file from the `outputdir` (Results), if there is any family identifier in either `pfam` or `cath`; run the next iteration using following command (Until there is no new families explored by workflow) -\r +\r +```\r +cwl-runner --parallel --outdir=Results/ CroMaSt.cwl Results/new_param.yml\r +```\r + \r +### **Extra:** Start the workflow with multiple families from one or both databases \r +If you would like to start the workflow with multiple families from one or both databases, then simply add a comma in between two family identifiers. \r +```\r +pfam: ['PF00076', 'PF08777']\r +cath: ['3.30.70.330']\r +```\r +\r +- **Pro Tip**: Don't forget to give different path to `--outdir` option while running the workflow multiple times or at least move the results to some other location after first run.\r +\r +## Run the workflow for protein domain of your choice \r +### 1. You can run the workflow for the domain of your choice by simply changing the family identifers in `yml/CroMaSt_input.yml` file.\r +\r +Simply replace the following values of family identifiers (for pfam and cath) with the family identifiers of your choice in `yml/CroMaSt_input.yml` file. \r +```\r +pfam: ['PF00076']\r +cath: ['3.30.70.330']\r +```\r +\r +\r +\r +## Data files used in current version are as follows:\r +**Files in Data directory can be downloaded as follows**:\r +\r +1. File used from Pfam database: [pdbmap.gz](http://ftp.ebi.ac.uk/pub/databases/Pfam/releases/Pfam35.0/pdbmap.gz)\r +\r +2. 
File used from CATH database: [cath-domain-description-file.txt](ftp://orengoftp.biochem.ucl.ac.uk:21/cath/releases/latest-release/cath-classification-data/cath-domain-description-file.txt) \r +\r +3. Obsolete entries from RCSB PDB\r +[obsolete_PDB_entry_ids.txt](https://data.rcsb.org/rest/v1/holdings/removed/entry_ids) \r +\r +\r +CATH Version - 4.3.0 (Ver_Date - 11-Sep-2019) [FTP site](ftp://orengoftp.biochem.ucl.ac.uk/cath/releases/latest-release/cath-classification-data/)\r +Pfam Version - 35.0 (Ver_Date - November-2021) [FTP site](http://ftp.ebi.ac.uk/pub/databases/Pfam/releases/Pfam35.0/)\r +\r +## Reference\r +```\r +Poster - \r +1. Hrishikesh Dhondge, Isaure Chauvot de Beauchêne, Marie-Dominique Devignes. CroMaSt: A workflow for domain family curation through cross-mapping of structural instances between protein domain databases. 21st European Conference on Computational Biology, Sep 2022, Sitges, Spain. ⟨hal-03789541⟩\r +\r +```\r +\r +## Acknowledgements\r +This project has received funding from the Marie Skłodowska-Curie Innovative Training Network (MSCA-ITN) RNAct supported by European Union’s Horizon 2020 research and innovation programme under granta greement No 813239.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.390.2" ; + schema1:isBasedOn "https://github.com/HrishiDhondge/CroMaSt.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CroMaSt: A workflow for assessing protein domain classification by cross-mapping of structural instances between domain databases and structural alignment" ; + schema1:sdDatePublished "2024-07-12 13:33:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/390/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 21410 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-06-20T12:06:49Z" ; + schema1:dateModified "2023-06-20T12:08:47Z" ; + schema1:description """# CroMaSt: A workflow for assessing protein domain classification by cross-mapping of structural instances between domain databases and structural alignment\r +\r +CroMaSt (**Cro**ss **Ma**pper of domain **St**ructural instances) is an automated iterative workflow to clarify the assignment of protein domains to a given domain type of interest, based on their 3D structure and by cross-mapping of domain structural instances between domain databases. CroMaSt (for Cross-Mapper of domain Structural instances) will classify all structural instances of a given domain type into 4 different categories (**Core**, **True**, **Domain-like**, and **Failed**). \r +\r +\r +## Requirements\r +1. [Conda](https://docs.conda.io/projects/conda/en/latest/) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html)\r +2. [Kpax](http://kpax.loria.fr/download.php) \r +Download and install conda (or Miniconda) and Kpax by following the instructions from their official site.\r +\r +\r +## Get it running \r +(Considering the requirements are already met)\r +\r +1. Clone the repository and change the directory\r +\r +```\r +git clone https://gitlab.inria.fr/capsid.public_codes/CroMaSt.git\r +cd CroMaSt\r +```\r +\r +2. Create the conda environment for the workflow\r +```\r +conda env create --file yml/environment.yml\r +conda activate CroMaSt\r +```\r +\r +3. Change the path of variables in paramter file\r +```\r +sed -i 's/\\/home\\/hdhondge\\/CroMaSt\\//\\/YOUR\\/PATH\\/TO_CroMaSt\\//g' yml/CroMaSt_input.yml \r +```\r +\r +4. Create the directory to store files from PDB and SIFTS (if not already)\r +```\r +mkdir PDB_files SIFTS\r +```\r +\r +5. 
Download the source input data\r +```\r +cwl-runner Tools/download_data.cwl yml/download_data.yml\r +```\r +\r +## Basic example\r +\r +### 1. First, we will run the workflow for the KH domain with family identifiers `RRM_1` and `RRM` in Pfam and CATH, respectively.\r +Run the workflow -\r +\r +```\r +cwl-runner --parallel --outdir=Results/ CroMaSt.cwl yml/CroMaSt_input.yml\r +```\r +\r +### 2. Once the iteration is complete, check the `new_param.yml` file from the `outputdir` (Results), if there is any family identifier in either `pfam` or `cath`; run the next iteration using following command (Until there is no new families explored by workflow) -\r +\r +```\r +cwl-runner --parallel --outdir=Results/ CroMaSt.cwl Results/new_param.yml\r +```\r + \r +### **Extra:** Start the workflow with multiple families from one or both databases \r +If you would like to start the workflow with multiple families from one or both databases, then simply add a comma in between two family identifiers. \r +```\r +pfam: ['PF00076', 'PF08777']\r +cath: ['3.30.70.330']\r +```\r +\r +- **Pro Tip**: Don't forget to give different path to `--outdir` option while running the workflow multiple times or at least move the results to some other location after first run.\r +\r +## Run the workflow for protein domain of your choice \r +### 1. You can run the workflow for the domain of your choice by simply changing the family identifers in `yml/CroMaSt_input.yml` file.\r +\r +Simply replace the following values of family identifiers (for pfam and cath) with the family identifiers of your choice in `yml/CroMaSt_input.yml` file. \r +```\r +pfam: ['PF00076']\r +cath: ['3.30.70.330']\r +```\r +\r +\r +\r +## Data files used in current version are as follows:\r +**Files in Data directory can be downloaded as follows**:\r +\r +1. File used from Pfam database: [pdbmap.gz](http://ftp.ebi.ac.uk/pub/databases/Pfam/releases/Pfam35.0/pdbmap.gz)\r +\r +2. 
File used from CATH database: [cath-domain-description-file.txt](ftp://orengoftp.biochem.ucl.ac.uk:21/cath/releases/latest-release/cath-classification-data/cath-domain-description-file.txt) \r +\r +3. Obsolete entries from RCSB PDB\r +[obsolete_PDB_entry_ids.txt](https://data.rcsb.org/rest/v1/holdings/removed/entry_ids) \r +\r +\r +CATH Version - 4.3.0 (Ver_Date - 11-Sep-2019) [FTP site](ftp://orengoftp.biochem.ucl.ac.uk/cath/releases/latest-release/cath-classification-data/)\r +Pfam Version - 35.0 (Ver_Date - November-2021) [FTP site](http://ftp.ebi.ac.uk/pub/databases/Pfam/releases/Pfam35.0/)\r +\r +## Reference\r +```\r +Poster - \r +1. Hrishikesh Dhondge, Isaure Chauvot de Beauchêne, Marie-Dominique Devignes. CroMaSt: A workflow for domain family curation through cross-mapping of structural instances between protein domain databases. 21st European Conference on Computational Biology, Sep 2022, Sitges, Spain. ⟨hal-03789541⟩\r +\r +```\r +\r +## Acknowledgements\r +This project has received funding from the Marie Skłodowska-Curie Innovative Training Network (MSCA-ITN) RNAct supported by European Union’s Horizon 2020 research and innovation programme under granta greement No 813239.\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/390?version=1" ; + schema1:keywords "Pfam, CATH, Protein domains, data integration" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "CroMaSt: A workflow for assessing protein domain classification by cross-mapping of structural instances between domain databases and structural alignment" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/390?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 106918 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=19" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-07-12 13:20:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=19" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8900 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:04Z" ; + schema1:dateModified "2024-06-11T12:55:04Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=19" ; + schema1:version 19 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-09T11:15:18.484186" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/kmer-profiling-hifi-trio-VGP2" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "kmer-profiling-hifi-trio-VGP2/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.17" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-07-12 13:20:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3635 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:58Z" ; + schema1:dateModified "2024-06-11T12:54:58Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. 
The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. 
With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.827.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_pmx_tutorial" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Docker Mutation Free Energy Calculations" ; + schema1:sdDatePublished "2024-07-12 13:23:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/827/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 766 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-22T11:05:41Z" ; + schema1:dateModified "2024-05-22T13:43:56Z" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. 
With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Docker Mutation Free Energy Calculations" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_pmx_tutorial/docker/Dockerfile" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Optional workflow to purge duplicates from the contig assembly.\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/237?version=1" ; + schema1:isBasedOn "https://github.com/AustralianBioCommons/Purge-duplicates-from-hifiasm-assembly" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Purge duplicates from hifiasm assembly v1.0 (HiFi genome assembly stage 3)" ; + schema1:sdDatePublished "2024-07-12 13:34:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/237/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 23345 ; + schema1:creator ; + schema1:dateCreated "2021-11-15T01:39:22Z" ; + schema1:dateModified "2022-10-17T02:51:28Z" ; + schema1:description """Optional workflow to purge duplicates from the contig assembly.\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/237?version=1" ; + schema1:isPartOf , + ; + schema1:keywords "Assembly, purge_dups, HiFi" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Purge duplicates from hifiasm assembly v1.0 (HiFi genome assembly stage 3)" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/237?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 118673 . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-06-10T13:19:59.881478" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + , + ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-atac-cutandrun" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "1.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-pe" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "1.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + schema1:datePublished "2022-11-30T12:34:43.169128" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "rnaseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.3" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Call somatic, germline and LoH event variants from PE Illumina sequencing data obtained from matched pairs of tumor and normal tissue samples.\r +\r +This workflow can be used with whole-genome and whole-exome sequencing data as input. 
For WES data, parts of the analysis can be restricted to the exome capture kits target regions by providing the optional "Regions of Interest" bed dataset.\r +\r +The current version uses bwa-mem for read mapping and varscan somatic for variant calling and somatic status classification.""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.628.1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Variant calling from matched tumor/normal sample pair (hg38 version)" ; + schema1:sdDatePublished "2024-07-12 13:26:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/628/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 129767 ; + schema1:creator ; + schema1:dateCreated "2023-10-27T13:48:44Z" ; + schema1:dateModified "2023-10-27T13:50:45Z" ; + schema1:description """Call somatic, germline and LoH event variants from PE Illumina sequencing data obtained from matched pairs of tumor and normal tissue samples.\r +\r +This workflow can be used with whole-genome and whole-exome sequencing data as input. 
For WES data, parts of the analysis can be restricted to the exome capture kits target regions by providing the optional "Regions of Interest" bed dataset.\r +\r +The current version uses bwa-mem for read mapping and varscan somatic for variant calling and somatic status classification.""" ; + schema1:keywords "EOSC4Cancer" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Variant calling from matched tumor/normal sample pair (hg38 version)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://usegalaxy.eu/api/workflows/af30010868c97316/download?format=json-download" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """\r +# Github: https://github.com/Lcornet/GENERA\r +\r +# BCCM GEN-ERA tools repository\r +\r +Please visit the wiki for tutorials and access to the tools:\r +https://github.com/Lcornet/GENERA/wiki \r +\r +# NEWS\r +Mantis is now installed in a singularity container for the Metabolic workflow (install is no longer necessary). \r +\r +# Information about the GEN-ERA project\r +Please visit \r +https://bccm.belspo.be/content/bccm-collections-genomic-era \r +\r +# Publications\r +1. ToRQuEMaDA: tool for retrieving queried Eubacteria, metadata and dereplicating assemblies. \r + Léonard, R. R., Leleu, M., Vlierberghe, M. V., Cornet, L., Kerff, F., and Baurain, D. (2021). \r + PeerJ 9, e11348. doi:10.7717/peerj.11348. \r + https://peerj.com/articles/11348/ \r +2. The taxonomy of the Trichophyton rubrum complex: a phylogenomic approach. \r + Cornet, L., D’hooge, E., Magain, N., Stubbe, D., Packeu, A., Baurain, D., and Becker P. (2021). \r + Microbial Genomics 7, 000707. doi:10.1099/mgen.0.000707. \r + https://www.microbiologyresearch.org/content/journal/mgen/10.1099/mgen.0.000707 \r +3. 
ORPER: A Workflow for Constrained SSU rRNA Phylogenies. \r + Cornet, L., Ahn, A.-C., Wilmotte, A., and Baurain, D. (2021). \r + Genes 12, 1741. doi:10.3390/genes12111741. \r + https://www.mdpi.com/2073-4425/12/11/1741/html \r +4. AMAW: automated gene annotation for non-model eukaryotic genomes. \r + Meunier, L., Baurain, D., Cornet, L. (2021) \r + https://www.biorxiv.org/content/10.1101/2021.12.07.471566v1 \r +5. Phylogenomic analyses of Snodgrassella isolates from honeybees and bumblebees reveals taxonomic and functional diversity. \r + Cornet, L., Cleenwerck, I., Praet, J., Leonard, R., Vereecken, N.J., Michez, D., Smagghe, G., Baurain, D., Vandamme, P. (2021) \r + https://www.biorxiv.org/content/10.1101/2021.12.10.472130v1 \r +6. Contamination detection in genomic data: more is not enough. \r + Cornet, L & Baurain, D (2022) \r + Genome Biology. 2022;23:60. \r + https://genomebiology.biomedcentral.com/articles/10.1186/s13059-022-02619-9 \r +7. The GEN-ERA toolbox: unified and reproducible workflows for research in microbial genomics \r + Cornet, L., Durieu, B., Baert, F., D’hooge, E., Colignon, D., Meunier, L., Lupo, V., Cleenwerck I.,\r + Daniel, HM., Rigouts, L., Sirjacobs, D., Declerck, D., Vandamme, P., Wilmotte, A., Baurain, D., Becker P (2022). \r + https://www.biorxiv.org/content/10.1101/2022.10.20.513017v1 \r +8. CRitical Assessment of genomic COntamination detection at several Taxonomic ranks (CRACOT) \r + Cornet, L., Lupo, V., Declerck, S., Baurain, D. (2022). 
\r + https://www.biorxiv.org/content/10.1101/2022.11.14.516442v1 \r +\r +# Copyright and License\r +\r +This softwares is copyright (c) 2017-2021 by University of Liege / Sciensano / BCCM collection by Luc CORNET\r +This is free softwares; you can redistribute it and/or modify.\r +\r +![BCCM](https://github.com/Lcornet/GENERA/blob/main/images/GENERA-logo.png) \r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.416.1" ; + schema1:isBasedOn "https://github.com/Lcornet/GENERA" ; + schema1:license "LGPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for GEN-ERA toolbox" ; + schema1:sdDatePublished "2024-07-12 13:34:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/416/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17141 ; + schema1:creator ; + schema1:dateCreated "2023-01-13T14:30:04Z" ; + schema1:dateModified "2023-03-14T14:35:27Z" ; + schema1:description """\r +# Github: https://github.com/Lcornet/GENERA\r +\r +# BCCM GEN-ERA tools repository\r +\r +Please visit the wiki for tutorials and access to the tools:\r +https://github.com/Lcornet/GENERA/wiki \r +\r +# NEWS\r +Mantis is now installed in a singularity container for the Metabolic workflow (install is no longer necessary). \r +\r +# Information about the GEN-ERA project\r +Please visit \r +https://bccm.belspo.be/content/bccm-collections-genomic-era \r +\r +# Publications\r +1. ToRQuEMaDA: tool for retrieving queried Eubacteria, metadata and dereplicating assemblies. \r + Léonard, R. R., Leleu, M., Vlierberghe, M. V., Cornet, L., Kerff, F., and Baurain, D. (2021). \r + PeerJ 9, e11348. doi:10.7717/peerj.11348. \r + https://peerj.com/articles/11348/ \r +2. The taxonomy of the Trichophyton rubrum complex: a phylogenomic approach. 
\r + Cornet, L., D’hooge, E., Magain, N., Stubbe, D., Packeu, A., Baurain, D., and Becker P. (2021). \r + Microbial Genomics 7, 000707. doi:10.1099/mgen.0.000707. \r + https://www.microbiologyresearch.org/content/journal/mgen/10.1099/mgen.0.000707 \r +3. ORPER: A Workflow for Constrained SSU rRNA Phylogenies. \r + Cornet, L., Ahn, A.-C., Wilmotte, A., and Baurain, D. (2021). \r + Genes 12, 1741. doi:10.3390/genes12111741. \r + https://www.mdpi.com/2073-4425/12/11/1741/html \r +4. AMAW: automated gene annotation for non-model eukaryotic genomes. \r + Meunier, L., Baurain, D., Cornet, L. (2021) \r + https://www.biorxiv.org/content/10.1101/2021.12.07.471566v1 \r +5. Phylogenomic analyses of Snodgrassella isolates from honeybees and bumblebees reveals taxonomic and functional diversity. \r + Cornet, L., Cleenwerck, I., Praet, J., Leonard, R., Vereecken, N.J., Michez, D., Smagghe, G., Baurain, D., Vandamme, P. (2021) \r + https://www.biorxiv.org/content/10.1101/2021.12.10.472130v1 \r +6. Contamination detection in genomic data: more is not enough. \r + Cornet, L & Baurain, D (2022) \r + Genome Biology. 2022;23:60. \r + https://genomebiology.biomedcentral.com/articles/10.1186/s13059-022-02619-9 \r +7. The GEN-ERA toolbox: unified and reproducible workflows for research in microbial genomics \r + Cornet, L., Durieu, B., Baert, F., D’hooge, E., Colignon, D., Meunier, L., Lupo, V., Cleenwerck I.,\r + Daniel, HM., Rigouts, L., Sirjacobs, D., Declerck, D., Vandamme, P., Wilmotte, A., Baurain, D., Becker P (2022). \r + https://www.biorxiv.org/content/10.1101/2022.10.20.513017v1 \r +8. CRitical Assessment of genomic COntamination detection at several Taxonomic ranks (CRACOT) \r + Cornet, L., Lupo, V., Declerck, S., Baurain, D. (2022). 
\r + https://www.biorxiv.org/content/10.1101/2022.11.14.516442v1 \r +\r +# Copyright and License\r +\r +This softwares is copyright (c) 2017-2021 by University of Liege / Sciensano / BCCM collection by Luc CORNET\r +This is free softwares; you can redistribute it and/or modify.\r +\r +![BCCM](https://github.com/Lcornet/GENERA/blob/main/images/GENERA-logo.png) \r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/LGPL-3.0" ; + schema1:name "GEN-ERA toolbox" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/416?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A workflow to simulate reads" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1015?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/readsimulator" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/readsimulator" ; + schema1:sdDatePublished "2024-07-12 13:19:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1015/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11824 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:09Z" ; + schema1:dateModified "2024-06-11T12:55:09Z" ; + schema1:description "A workflow to simulate reads" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1015?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/readsimulator" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1015?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# Galaxy Workflow Documentation: MS Finder Pipeline\r +\r +This document outlines a MSFinder Galaxy workflow designed for peak annotation. The workflow consists of several steps aimed at preprocessing MS data, filtering, enhancing, and running MSFinder.\r +\r +## Step 1: Data Collection and Preprocessing\r +Collect if the inchi and smiles are missing from the dataset, and subsequently filter out the spectra which are missing inchi and smiles.\r +\r +### 1.1 MSMetaEnhancer: Collect InChi, Isomeric_smiles, and Nominal_mass\r +- Utilizes MSMetaEnhancer to collect InChi and Isomeric_smiles using PubChem and IDSM databases.\r +- Utilizes MSMetaEnhancer to collect MW using RDkit (For GOLM).\r +\r +### 1.2 replace key\r +- replace isomeric_smiles key to smiles using replace text tool\r +- replace MW key to parent_mass using replace text tool (For GOLM)\r +\r +### 1.3 Matchms Filtering\r +- Filters out invalid SMILES and InChi from the dataset using Matchms filtering.\r +\r +## Step 2: Complex Removal and Subsetting Dataset\r +Removes coordination complexes from the dataset.\r +\r +### 2.1 Remove Complexes and Subset Data\r +- Removes complexes from the dataset.\r +- Exports metadata using Matchms metadata export, cuts the SMILES column, removes complexes using Rem_Complex tool, and updates the dataset using Matchms subsetting.\r +\r +## Step 3: Data Key Manipulation\r +Add missing metadata required by the MSFinder for annotation.\r +\r +### 3.1 Matchms Remove Key\r +- Removes existing keys such as adduct, charge, and ionmode from the dataset.\r +\r +### 3.2 Matchms Add Key\r +- Adds necessary keys like charge, ionmode, and adduct to the dataset.\r +\r +### 3.3 Matchms Filtering\r +- Derives precursor m/z using parent mass and adduct information using matchms filtering.\r +\r +### 3.4 Matchms Convert\r +- 
Converts the dataset to Riken format for compatibility with MSFinder using matchms convert.\r +\r +## Step 4: Peak Annotation\r +### 4.1 Recetox-MSFinder\r +- Executes MSFinder with a 0.5 Da tolerance for both MS1 and MS2, including all element checks and an extended range for peak annotation.\r +\r +## Step 5: Error Handling and Refinement\r +Check the MSFinder output to see if the output is the results or the log file. If the output is log file remove the smile from the dataset using matchms subsetting tool and rerun MSFinder.\r +\r +### 5.1 Error Handling\r +- Handles errors in peak annotation by removing SMILES that are not accepted by MSFinder.\r +- Reruns MSFinder after error correction or with different parameter (if applicable).\r +\r +## Step 6: High-res Annotation\r +### 6.1 High-Res Peak Overwriting\r +- Utilizes the Use_Theoretical_mz_Annotations tool to Overwrite experimentally measured mz values for peaks with theoretical values from peak comments.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.888.1" ; + schema1:isBasedOn "https://github.com/RECETOX/workflow-testing/blob/main/msfinder_workflow/Galaxy_Workflow_MsFinder_Workflow_GOLM_V2.ga" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Theoretical fragment substructure generation and in silico mass spectral library high-resolution upcycling workflow" ; + schema1:sdDatePublished "2024-07-12 13:18:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/888/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 47532 ; + schema1:creator , + , + , + , + , + , + ; + schema1:dateCreated "2024-05-20T10:05:05Z" ; + schema1:dateModified "2024-06-06T09:58:39Z" ; + schema1:description """# Galaxy Workflow Documentation: MS Finder Pipeline\r +\r +This document outlines a MSFinder Galaxy workflow designed for peak annotation. The workflow consists of several steps aimed at preprocessing MS data, filtering, enhancing, and running MSFinder.\r +\r +## Step 1: Data Collection and Preprocessing\r +Collect if the inchi and smiles are missing from the dataset, and subsequently filter out the spectra which are missing inchi and smiles.\r +\r +### 1.1 MSMetaEnhancer: Collect InChi, Isomeric_smiles, and Nominal_mass\r +- Utilizes MSMetaEnhancer to collect InChi and Isomeric_smiles using PubChem and IDSM databases.\r +- Utilizes MSMetaEnhancer to collect MW using RDkit (For GOLM).\r +\r +### 1.2 replace key\r +- replace isomeric_smiles key to smiles using replace text tool\r +- replace MW key to parent_mass using replace text tool (For GOLM)\r +\r +### 1.3 Matchms Filtering\r +- Filters out invalid SMILES and InChi from the dataset using Matchms filtering.\r +\r +## Step 2: Complex Removal and Subsetting Dataset\r +Removes coordination complexes from the dataset.\r +\r +### 2.1 Remove Complexes and Subset Data\r +- Removes complexes from the dataset.\r +- Exports metadata using Matchms metadata export, cuts the SMILES column, removes complexes using Rem_Complex tool, and updates the dataset using Matchms subsetting.\r +\r +## Step 3: Data Key Manipulation\r +Add missing metadata required by the MSFinder for annotation.\r +\r +### 3.1 Matchms Remove Key\r +- Removes existing keys such as adduct, charge, and ionmode from the dataset.\r +\r +### 3.2 Matchms Add Key\r +- Adds necessary keys like charge, ionmode, and adduct to 
the dataset.\r +\r +### 3.3 Matchms Filtering\r +- Derives precursor m/z using parent mass and adduct information using matchms filtering.\r +\r +### 3.4 Matchms Convert\r +- Converts the dataset to Riken format for compatibility with MSFinder using matchms convert.\r +\r +## Step 4: Peak Annotation\r +### 4.1 Recetox-MSFinder\r +- Executes MSFinder with a 0.5 Da tolerance for both MS1 and MS2, including all element checks and an extended range for peak annotation.\r +\r +## Step 5: Error Handling and Refinement\r +Check the MSFinder output to see if the output is the results or the log file. If the output is log file remove the smile from the dataset using matchms subsetting tool and rerun MSFinder.\r +\r +### 5.1 Error Handling\r +- Handles errors in peak annotation by removing SMILES that are not accepted by MSFinder.\r +- Reruns MSFinder after error correction or with different parameter (if applicable).\r +\r +## Step 6: High-res Annotation\r +### 6.1 High-Res Peak Overwriting\r +- Utilizes the Use_Theoretical_mz_Annotations tool to Overwrite experimentally measured mz values for peaks with theoretical values from peak comments.\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/888?version=1" ; + schema1:keywords "Bioinformatics, Cheminformatics, Metabolomics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Theoretical fragment substructure generation and in silico mass spectral library high-resolution upcycling workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/888?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/644?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Workflow 2: Sciensano" ; + schema1:sdDatePublished "2024-07-12 13:26:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/644/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2664 ; + schema1:dateCreated "2023-11-07T17:29:22Z" ; + schema1:dateModified "2023-11-07T18:20:09Z" ; + schema1:description "" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Workflow 2: Sciensano" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/644?version=1" ; + schema1:version 1 ; + ns1:output , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Simple bacterial assembly and annotation pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/966?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/bacass" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bacass" ; + schema1:sdDatePublished "2024-07-12 13:22:18 +0100" ; + schema1:url "https://workflowhub.eu/workflows/966/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4381 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:42Z" ; + schema1:dateModified "2024-06-11T12:54:42Z" ; + schema1:description "Simple bacterial assembly and annotation pipeline." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/966?version=7" ; + schema1:keywords "Assembly, bacterial-genomes, denovo, denovo-assembly, genome-assembly, hybrid-assembly, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bacass" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/966?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Rare disease researchers workflow is that they submit their raw data (fastq), run the mapping and variant calling RD-Connect pipeline and obtain unannotated gvcf files to further submit to the RD-Connect GPAP or analyse on their own.\r +\r +This demonstrator focuses on the variant calling pipeline. The raw genomic data is processed using the RD-Connect pipeline ([Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/27604516)) running on the standards (GA4GH) compliant, interoperable container orchestration platform.\r +\r +This demonstrator will be aligned with the current implementation study on [Development of Architecture for Software Containers at ELIXIR and its use by EXCELERATE use-case communities](docs/Appendix%201%20-%20Project%20Plan%202018-biocontainers%2020171117.pdf) \r +\r +For this implementation, different steps are required:\r +\r +1. Adapt the pipeline to CWL and dockerise elements \r +2. Align with IS efforts on software containers to package the different components (Nextflow) \r +3. Submit trio of Illumina NA12878 Platinum Genome or Exome to the GA4GH platform cloud (by Aspera or ftp server)\r +4. Run the RD-Connect pipeline on the container platform\r +5. Return corresponding gvcf files\r +6. OPTIONAL: annotate and update to RD-Connect playground instance\r +\r +N.B: The demonstrator might have some manual steps, which will not be in production. 
\r +\r +## RD-Connect pipeline\r +\r +Detailed information about the RD-Connect pipeline can be found in [Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/?term=27604516)\r +\r +![alt text](https://raw.githubusercontent.com/inab/Wetlab2Variations/eosc-life/docs/RD-Connect_pipeline.jpg)\r +\r +## The applications\r +\r +**1\\. Name of the application: Adaptor removal**\r +Function: remove sequencing adaptors \r +Container (readiness status, location, version): [cutadapt (v.1.18)](https://hub.docker.com/r/cnag/cutadapt) \r +Required resources in cores and RAM: current container size 169MB \r +Input data (amount, format, directory..): raw fastq \r +Output data: paired fastq without adaptors \r +\r +**2\\. Name of the application: Mapping and bam sorting**\r +Function: align data to reference genome \r +Container : [bwa-mem (v.0.7.17)](https://hub.docker.com/r/cnag/bwa) / [Sambamba (v. 0.6.8 )](https://hub.docker.com/r/cnag/sambamba)(or samtools) \r +Resources :current container size 111MB / 32MB \r +Input data: paired fastq without adaptors \r +Output data: sorted bam \r +\r +**3\\. Name of the application: MarkDuplicates** \r +Function: Mark (and remove) duplicates \r +Container: [Picard (v.2.18.25)](https://hub.docker.com/r/cnag/picard)\r +Resources: current container size 261MB \r +Input data:sorted bam \r +Output data: Sorted bam with marked (or removed) duplicates \r +\r +**4\\. Name of the application: Base quality recalibration (BQSR)** \r +Function: Base quality recalibration \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data: Sorted bam with marked (or removed) duplicates \r +Output data: Sorted bam with marked duplicates & base quality recalculated \r +\r +**5\\. 
Name of the application: Variant calling** \r +Function: variant calling \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data:Sorted bam with marked duplicates & base quality recalculated \r +Output data: unannotated gvcf per sample \r +\r +**6\\. (OPTIONAL)Name of the application: Quality of the fastq** \r +Function: report on the sequencing quality \r +Container: [fastqc 0.11.8](https://hub.docker.com/r/cnag/fastqc)\r +Resources: current container size 173MB \r +Input data: raw fastq \r +Output data: QC report \r +\r +## Licensing\r +\r +GATK declares that archived packages are made available for free to academic researchers under a limited license for non-commercial use. If you need to use one of these packages for commercial use. https://software.broadinstitute.org/gatk/download/archive """ ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/106?version=2" ; + schema1:isBasedOn "https://github.com/inab/Wetlab2Variations/tree/eosc-life/nextflow" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for VariantCaller_GATK3.6" ; + schema1:sdDatePublished "2024-07-12 13:37:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/106/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 22429 ; + schema1:creator ; + schema1:dateCreated "2021-05-21T08:43:08Z" ; + schema1:dateModified "2021-05-21T08:43:08Z" ; + schema1:description """Rare disease researchers workflow is that they submit their raw data (fastq), run the mapping and variant calling RD-Connect pipeline and obtain unannotated gvcf files to further submit to the RD-Connect GPAP or analyse on their own.\r +\r +This demonstrator focuses on the variant calling pipeline. 
The raw genomic data is processed using the RD-Connect pipeline ([Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/27604516)) running on the standards (GA4GH) compliant, interoperable container orchestration platform.\r +\r +This demonstrator will be aligned with the current implementation study on [Development of Architecture for Software Containers at ELIXIR and its use by EXCELERATE use-case communities](docs/Appendix%201%20-%20Project%20Plan%202018-biocontainers%2020171117.pdf) \r +\r +For this implementation, different steps are required:\r +\r +1. Adapt the pipeline to CWL and dockerise elements \r +2. Align with IS efforts on software containers to package the different components (Nextflow) \r +3. Submit trio of Illumina NA12878 Platinum Genome or Exome to the GA4GH platform cloud (by Aspera or ftp server)\r +4. Run the RD-Connect pipeline on the container platform\r +5. Return corresponding gvcf files\r +6. OPTIONAL: annotate and update to RD-Connect playground instance\r +\r +N.B: The demonstrator might have some manual steps, which will not be in production. \r +\r +## RD-Connect pipeline\r +\r +Detailed information about the RD-Connect pipeline can be found in [Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/?term=27604516)\r +\r +![alt text](https://raw.githubusercontent.com/inab/Wetlab2Variations/eosc-life/docs/RD-Connect_pipeline.jpg)\r +\r +## The applications\r +\r +**1\\. Name of the application: Adaptor removal**\r +Function: remove sequencing adaptors \r +Container (readiness status, location, version): [cutadapt (v.1.18)](https://hub.docker.com/r/cnag/cutadapt) \r +Required resources in cores and RAM: current container size 169MB \r +Input data (amount, format, directory..): raw fastq \r +Output data: paired fastq without adaptors \r +\r +**2\\. Name of the application: Mapping and bam sorting**\r +Function: align data to reference genome \r +Container : [bwa-mem (v.0.7.17)](https://hub.docker.com/r/cnag/bwa) / [Sambamba (v. 
0.6.8 )](https://hub.docker.com/r/cnag/sambamba)(or samtools) \r +Resources :current container size 111MB / 32MB \r +Input data: paired fastq without adaptors \r +Output data: sorted bam \r +\r +**3\\. Name of the application: MarkDuplicates** \r +Function: Mark (and remove) duplicates \r +Container: [Picard (v.2.18.25)](https://hub.docker.com/r/cnag/picard)\r +Resources: current container size 261MB \r +Input data:sorted bam \r +Output data: Sorted bam with marked (or removed) duplicates \r +\r +**4\\. Name of the application: Base quality recalibration (BQSR)** \r +Function: Base quality recalibration \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data: Sorted bam with marked (or removed) duplicates \r +Output data: Sorted bam with marked duplicates & base quality recalculated \r +\r +**5\\. Name of the application: Variant calling** \r +Function: variant calling \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data:Sorted bam with marked duplicates & base quality recalculated \r +Output data: unannotated gvcf per sample \r +\r +**6\\. (OPTIONAL)Name of the application: Quality of the fastq** \r +Function: report on the sequencing quality \r +Container: [fastqc 0.11.8](https://hub.docker.com/r/cnag/fastqc)\r +Resources: current container size 173MB \r +Input data: raw fastq \r +Output data: QC report \r +\r +## Licensing\r +\r +GATK declares that archived packages are made available for free to academic researchers under a limited license for non-commercial use. If you need to use one of these packages for commercial use. 
https://software.broadinstitute.org/gatk/download/archive """ ; + schema1:isBasedOn "https://workflowhub.eu/workflows/106?version=2" ; + schema1:keywords "Nextflow, variant_calling" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "VariantCaller_GATK3.6" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/106?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + schema1:datePublished "2024-07-01T18:43:44.016669" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Performs Long Read assembly using PacBio data and Hifiasm. Part of VGP assembly pipeline. This workflow generate a phased assembly." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/318?version=1" ; + schema1:isBasedOn "https://github.com/Delphine-L/iwc/tree/VGP/workflows/VGP-assembly-v2" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for VGP HiFi phased assembly with hifiasm and HiC data" ; + schema1:sdDatePublished "2024-07-12 13:35:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/318/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 11731 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 51495 ; + schema1:creator ; + schema1:dateCreated "2022-04-05T21:55:13Z" ; + schema1:dateModified "2023-01-16T13:59:26Z" ; + schema1:description "Performs Long Read assembly using PacBio data and Hifiasm. Part of VGP assembly pipeline. This workflow generate a phased assembly." ; + schema1:keywords "vgp, Assembly, Galaxy" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "VGP HiFi phased assembly with hifiasm and HiC data" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/318?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.415.1" ; + schema1:isBasedOn "https://renkulab.io/gitlab/dsavchenko/gw-backend/-/raw/master/notebooks/conesearch.ipynb" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Gravitational Wave source Cone Search" ; + schema1:sdDatePublished "2024-07-12 13:34:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/415/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1406 ; + schema1:creator ; + schema1:dateCreated "2023-01-11T12:22:37Z" ; + schema1:dateModified "2023-01-16T14:05:00Z" ; + schema1:description "" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "astronomy, Gravitational Waves, FAIR workflows" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Gravitational Wave source Cone Search" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/415?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + ; + ns1:output , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 4924 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Alternative splicing analysis using RNA-seq." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1018?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/rnasplice" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnasplice" ; + schema1:sdDatePublished "2024-07-12 13:19:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1018/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14104 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:14Z" ; + schema1:dateModified "2024-06-11T12:55:14Z" ; + schema1:description "Alternative splicing analysis using RNA-seq." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/1018?version=4" ; + schema1:keywords "alternative-splicing, rna, rna-seq, splicing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnasplice" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1018?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1021?version=11" ; + schema1:isBasedOn "https://github.com/nf-core/scrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/scrnaseq" ; + schema1:sdDatePublished "2024-07-12 13:19:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1021/ro_crate?version=11" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10731 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:17Z" ; + schema1:dateModified "2024-06-11T12:55:17Z" ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:keywords "10x-genomics, 10xgenomics, alevin, bustools, Cellranger, kallisto, rna-seq, single-cell, star-solo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/scrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1021?version=11" ; + schema1:version 11 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """

\r +

+VIRify

\r +

Sankey plot

\r +

VIRify is a recently developed pipeline for the detection, annotation, and taxonomic classification of viral contigs in metagenomic and metatranscriptomic assemblies. The pipeline is part of the repertoire of analysis services offered by MGnify. VIRify’s taxonomic classification relies on the detection of taxon-specific profile hidden Markov models (HMMs), built upon a set of 22,014 orthologous protein domains and referred to as ViPhOGs.

\r +

VIRify was implemented in CWL.

\r +

+What do I need?

\r +

The current implementation uses CWL version 1.2 dev+2. It was tested using Toil version 4.10 as the workflow engine and conda to manage the software dependencies.

\r +

+Docker - Singularity support

\r +

Soon…

\r +

+Setup environment

\r +
conda env create -f cwl/requirements/conda_env.yml\r
+conda activate viral_pipeline\r
+
\r +

+Basic execution

\r +
cd cwl/\r
+virify.sh -h\r
+
\r +

+A note about metatranscriptomes

\r +

Although VIRify has been benchmarked and validated with metagenomic data in mind, it is also possible to use this tool to detect RNA viruses in metatranscriptome assemblies (e.g. SARS-CoV-2). However, some additional considerations for this purpose are outlined below:
\r +1. Quality control: As for metagenomic data, a thorough quality control of the FASTQ sequence reads to remove low-quality bases, adapters and host contamination (if appropriate) is required prior to assembly. This is especially important for metatranscriptomes as small errors can further decrease the quality and contiguity of the assembly obtained. We have used TrimGalore for this purpose.

\r +

2. Assembly: There are many assemblers available that are appropriate for either metagenomic or single-species transcriptomic data. However, to our knowledge, there is no assembler currently available specifically for metatranscriptomic data. From our preliminary investigations, we have found that transcriptome-specific assemblers (e.g. rnaSPAdes) generate more contiguous and complete metatranscriptome assemblies compared to metagenomic alternatives (e.g. MEGAHIT and metaSPAdes).

\r +

3. Post-processing: Metatranscriptomes generate highly fragmented assemblies. Therefore, filtering contigs based on a set minimum length has a substantial impact in the number of contigs processed in VIRify. It has also been observed that the number of false-positive detections of VirFinder (one of the tools included in VIRify) is lower among larger contigs. The choice of a length threshold will depend on the complexity of the sample and the sequencing technology used, but in our experience any contigs <2 kb should be analysed with caution.

\r +

4. Classification: The classification module of VIRify depends on the presence of a minimum number and proportion of phylogenetically-informative genes within each contig in order to confidently assign a taxonomic lineage. Therefore, short contigs typically obtained from metatranscriptome assemblies remain generally unclassified. For targeted classification of RNA viruses (for instance, to search for Coronavirus-related sequences), alternative DNA- or protein-based classification methods can be used. Two of the possible options are: (i) using MashMap to screen the VIRify contigs against a database of RNA viruses (e.g. Coronaviridae) or (ii) using hmmsearch to screen the proteins obtained in the VIRify contigs against marker genes of the taxon of interest.

\r +

Contact us

\r +MGnify helpdesk""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.26.1" ; + schema1:isBasedOn "https://github.com/EBI-Metagenomics/emg-viral-pipeline/blob/master/cwl/src/pipeline.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for VIRify" ; + schema1:sdDatePublished "2024-07-12 13:37:08 +0100" ; + schema1:url "https://workflowhub.eu/workflows/26/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8472 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2020-06-08T10:21:08Z" ; + schema1:dateModified "2023-01-16T13:41:53Z" ; + schema1:description """

\r +

+VIRify

\r +

Sankey plot

\r +

VIRify is a recently developed pipeline for the detection, annotation, and taxonomic classification of viral contigs in metagenomic and metatranscriptomic assemblies. The pipeline is part of the repertoire of analysis services offered by MGnify. VIRify’s taxonomic classification relies on the detection of taxon-specific profile hidden Markov models (HMMs), built upon a set of 22,014 orthologous protein domains and referred to as ViPhOGs.

\r +

VIRify was implemented in CWL.

\r +

+What do I need?

\r +

The current implementation uses CWL version 1.2 dev+2. It was tested using Toil version 4.10 as the workflow engine and conda to manage the software dependencies.

\r +

+Docker - Singularity support

\r +

Soon…

\r +

+Setup environment

\r +
conda env create -f cwl/requirements/conda_env.yml\r
+conda activate viral_pipeline\r
+
\r +

+Basic execution

\r +
cd cwl/\r
+virify.sh -h\r
+
\r +

+A note about metatranscriptomes

\r +

Although VIRify has been benchmarked and validated with metagenomic data in mind, it is also possible to use this tool to detect RNA viruses in metatranscriptome assemblies (e.g. SARS-CoV-2). However, some additional considerations for this purpose are outlined below:
\r +1. Quality control: As for metagenomic data, a thorough quality control of the FASTQ sequence reads to remove low-quality bases, adapters and host contamination (if appropriate) is required prior to assembly. This is especially important for metatranscriptomes as small errors can further decrease the quality and contiguity of the assembly obtained. We have used TrimGalore for this purpose.

\r +

2. Assembly: There are many assemblers available that are appropriate for either metagenomic or single-species transcriptomic data. However, to our knowledge, there is no assembler currently available specifically for metatranscriptomic data. From our preliminary investigations, we have found that transcriptome-specific assemblers (e.g. rnaSPAdes) generate more contiguous and complete metatranscriptome assemblies compared to metagenomic alternatives (e.g. MEGAHIT and metaSPAdes).

\r +

3. Post-processing: Metatranscriptomes generate highly fragmented assemblies. Therefore, filtering contigs based on a set minimum length has a substantial impact in the number of contigs processed in VIRify. It has also been observed that the number of false-positive detections of VirFinder (one of the tools included in VIRify) is lower among larger contigs. The choice of a length threshold will depend on the complexity of the sample and the sequencing technology used, but in our experience any contigs <2 kb should be analysed with caution.

\r +

4. Classification: The classification module of VIRify depends on the presence of a minimum number and proportion of phylogenetically-informative genes within each contig in order to confidently assign a taxonomic lineage. Therefore, short contigs typically obtained from metatranscriptome assemblies remain generally unclassified. For targeted classification of RNA viruses (for instance, to search for Coronavirus-related sequences), alternative DNA- or protein-based classification methods can be used. Two of the possible options are: (i) using MashMap to screen the VIRify contigs against a database of RNA viruses (e.g. Coronaviridae) or (ii) using hmmsearch to screen the proteins obtained in the VIRify contigs against marker genes of the taxon of interest.

\r +

Contact us

\r +MGnify helpdesk""" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "VIRify" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/26?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 47487 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "A workflow for the quality assessment of mass spectrometry (MS) based proteomics analyses" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.343.1" ; + schema1:license "BSD-3-Clause" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for MaCProQC" ; + schema1:sdDatePublished "2024-07-12 13:35:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/343/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 199601 ; + schema1:creator ; + schema1:dateCreated "2022-05-20T09:38:29Z" ; + schema1:dateModified "2023-01-16T13:59:52Z" ; + schema1:description "A workflow for the quality assessment of mass spectrometry (MS) based proteomics analyses" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/BSD-3-Clause" ; + schema1:name "MaCProQC" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/343?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 136866 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Differential abundance analysis" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/981?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/differentialabundance" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/differentialabundance" ; + schema1:sdDatePublished "2024-07-12 13:21:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/981/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11181 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "Differential abundance analysis" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/981?version=8" ; + schema1:keywords "ATAC-seq, ChIP-seq, deseq2, differential-abundance, differential-expression, gsea, limma, microarray, rna-seq, shiny" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/differentialabundance" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/981?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-07-12 13:20:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=10" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5097 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:03Z" ; + schema1:dateModified "2024-06-11T12:55:03Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=10" ; + schema1:version 10 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 231159 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 1583 ; + schema1:dateModified "2024-03-13T10:40:28+00:00" ; + schema1:name "kmeans.csv" ; + schema1:sdDatePublished "2024-03-22T17:53:30+00:00" . + + a schema1:Dataset ; + schema1:datePublished "2022-10-20T13:35:37.242644" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "atacseq/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.11" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# HiC contact map generation\r +\r +Snakemake pipeline for the generation of `.pretext` and `.mcool` files for visualisation of HiC contact maps with the softwares PretextView and HiGlass, respectively.\r +\r +## Prerequisites\r +\r +This pipeine has been tested using `Snakemake v7.32.4` and requires conda for installation of required tools. To run the pipline use the command:\r +\r +`snakemake --use-conda`\r +\r +There are provided a set of configuration and running scripts for exectution on a slurm queueing system. After configuring the `cluster.json` file run:\r +\r +`./run_cluster`\r +\r +## Before starting\r +\r +You need to create a temporary folder and specify the path in the `config.yaml` file. This should be able to hold the temporary files created when sorting the `.pairsam` file (100s of GB or even many TBs)\r +\r +The path to the genome assemly must be given in the `config.yaml`.\r +\r +The HiC reads should be paired and named as follows: `Library_1.fastq.gz Library_2.fastq.gz`. The pipeline can accept any number of paired HiC read files, but the naming must be consistent. The folder containing these files must be provided in the `config.yaml`.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.795.1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for HiC contact map generation" ; + schema1:sdDatePublished "2024-07-12 13:23:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/795/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5789 ; + schema1:creator ; + schema1:dateCreated "2024-03-14T09:50:42Z" ; + schema1:dateModified "2024-03-14T09:54:40Z" ; + schema1:description """# HiC contact map generation\r +\r +Snakemake pipeline for the generation of `.pretext` and `.mcool` files for visualisation of HiC contact maps with the softwares PretextView and HiGlass, respectively.\r +\r +## Prerequisites\r +\r +This pipeine has been tested using `Snakemake v7.32.4` and requires conda for installation of required tools. To run the pipline use the command:\r +\r +`snakemake --use-conda`\r +\r +There are provided a set of configuration and running scripts for exectution on a slurm queueing system. After configuring the `cluster.json` file run:\r +\r +`./run_cluster`\r +\r +## Before starting\r +\r +You need to create a temporary folder and specify the path in the `config.yaml` file. This should be able to hold the temporary files created when sorting the `.pairsam` file (100s of GB or even many TBs)\r +\r +The path to the genome assemly must be given in the `config.yaml`.\r +\r +The HiC reads should be paired and named as follows: `Library_1.fastq.gz Library_2.fastq.gz`. The pipeline can accept any number of paired HiC read files, but the naming must be consistent. The folder containing these files must be provided in the `config.yaml`.\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/795?version=1" ; + schema1:isPartOf ; + schema1:keywords "Bioinformatics, Genomics, Snakemake, Genome assembly" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "HiC contact map generation" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/795?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# BatchConvert ![DOI:10.5281](https://zenodo.org/badge/doi/10.5281/zenodo.7955974.svg)\r +\r +A command line tool for converting image data into either of the standard file formats OME-TIFF or OME-Zarr. \r +\r +The tool wraps the dedicated file converters bfconvert and bioformats2raw to convert into OME-TIFF or OME-Zarr,\r +respectively. The workflow management system NextFlow is used to perform conversion in parallel for batches of images. \r +\r +The tool also wraps s3 and Aspera clients (go-mc and aspera-cli, respectively). Therefore, input and output locations can \r +be specified as local or remote storage and file transfer will be performed automatically. The conversion can be run on \r +HPC with Slurm. \r +\r +![](figures/diagram.png)\r +\r +## Installation & Dependencies\r +\r +**Important** note: The package has been so far only tested on Ubuntu 20.04.\r +\r +The minimal dependency to run the tool is NextFlow, which should be installed and made accessible from the command line.\r +\r +If conda exists on your system, you can install BatchConvert together with NextFlow using the following script:\r +```\r +git clone https://github.com/Euro-BioImaging/BatchConvert.git && \\ \r +source BatchConvert/installation/install_with_nextflow.sh\r +```\r +\r +\r +If you already have NextFlow installed and accessible from the command line (or if you prefer to install it manually \r +e.g., as shown [here](https://www.nextflow.io/docs/latest/getstarted.html)), you can also install BatchConvert alone, using the following script:\r +```\r +git clone https://github.com/Euro-BioImaging/BatchConvert.git && \\ \r +source BatchConvert/installation/install.sh\r +```\r +\r +\r +Other dependencies (which will be **automatically** installed):\r +- bioformats2raw (entrypoint bioformats2raw)\r +- bftools (entrypoint bfconvert)\r +- go-mc (entrypoint mc)\r +- 
aspera-cli (entrypoint ascp)\r +\r +These dependencies will be pulled and cached automatically at the first execution of the conversion command. \r +The mode of dependency management can be specified by using the command line option ``--profile`` or `-pf`. Depending \r +on how this option is specified, the dependencies will be acquired / run either via conda or via docker/singularity containers. \r +\r +Specifying ``--profile conda`` (default) will install the dependencies to an \r +environment at ``./.condaCache`` and use this environment to run the workflow. This option \r +requires that miniconda/anaconda is installed on your system. \r +\r +Alternatively, specifying ``--profile docker`` or ``--profile singularity`` will pull a docker or \r +singularity image with the dependencies, respectively, and use this image to run the workflow.\r +These options assume that the respective container runtime (docker or singularity) is available on \r +your system. If singularity is being used, a cache directory will be created at the path \r +``./.singularityCache`` where the singularity image is stored. \r +\r +Finally, you can still choose to install the dependencies manually and use your own installations to run\r +the workflow. In this case, you should specify ``--profile standard`` and make sure the entrypoints\r +specified above are recognised by your shell. \r +\r +\r +## Configuration\r +\r +BatchConvert can be configured to have default options for file conversion and transfer. Probably, the most important sets of parameters\r +to be configured include credentials for the remote ends. The easiest way to configure remote stores is by running the interactive \r +configuration command as indicated below.\r +\r +### Configuration of the s3 object store\r +\r +Run the interactive configuration command: \r +\r +`batchconvert configure_s3_remote`\r +\r +This will start a sequence of requests for s3 credentials such as name, url, access, etc. 
Provide each requested credential and click\r +enter. Continue this cycle until the process is finished. Upon completing the configuration, the sequence of commands should roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_s3_remote\r +enter remote name (for example s3)\r +s3\r +enter url:\r +https://s3.embl.de\r +enter access key:\r +"your-access-key"\r +enter secret key:\r +"your-secret-key"\r +enter bucket name:\r +"your-bucket"\r +Configuration of the default s3 credentials is complete\r +```\r +\r +\r +### Configuration of the BioStudies user space\r +\r +Run the interactive configuration command: \r +\r +`batchconvert configure_bia_remote`\r +\r +This will prompt a request for the secret directory to connect to. Enter the secret directory for your user space and click enter. \r +Upon completing the configuration, the sequence of commands should roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_bia_remote\r +enter the secret directory for BioImage Archive user space:\r +"your-secret-directory"\r +configuration of the default bia credentials is complete\r +```\r +\r +### Configuration of the slurm options\r +\r +BatchConvert can also run on slurm clusters. In order to configure the slurm parameters, run the interactive configuration command: \r +\r +`batchconvert configure_slurm`\r +\r +This will start a sequence of requests for slurm options. Provide each requested option and click enter. \r +Continue this cycle until the process is finished. 
Upon completing the configuration, the sequence of commands should \r +roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_slurm\r +Please enter value for queue_size\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´50´\r +s\r +Please enter value for submit_rate_limit\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´10/2min´\r +s\r +Please enter value for cluster_options\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´--mem-per-cpu=3140 --cpus-per-task=16´\r +s\r +Please enter value for time\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´6h´\r +s\r +configuration of the default slurm parameters is complete\r +```\r +\r +### Configuration of the default conversion parameters\r +\r +While all conversion parameters can be specified as command line arguments, it can\r +be useful for the users to set their own default parameters to avoid re-entering those\r +parameters for subsequent executions. BatchConvert allows for interactive configuration of \r +conversion in the same way as configuration of the remote stores described above.\r +\r +To configure the conversion into OME-TIFF, run the following command:\r +\r +`batchconvert configure_ometiff`\r +\r +This will prompt the user to enter a series of parameters, which will then be saved as the \r +default parameters to be passed to the `batchconvert ometiff` command. 
Upon completing the \r +configuration, the sequence of commands should look similar to:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_ometiff\r +Please enter value for noflat\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is "bfconvert defaults"\r +s\r +Please enter value for series\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is "bfconvert defaults"\r +s\r +Please enter value for timepoint\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is "bfconvert defaults"\r +s\r +...\r +...\r +...\r +...\r +...\r +...\r +Configuration of the default parameters for 'bfconvert' is complete\r +```\r +\r +\r +To configure the conversion into OME-Zarr, run the following command:\r +\r +`batchconvert configure_omezarr`\r +\r +Similarly, this will prompt the user to enter a series of parameters, which will then be saved as the \r +default parameters to be passed to the `batchconvert omezarr` command. 
Upon completing the configuration, \r +the sequence of commands should look similar to:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_omezarr\r +Please enter value for resolutions_zarr\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is "bioformats2raw defaults"\r +s\r +Please enter value for chunk_h\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is "bioformats2raw defaults"\r +s\r +Please enter value for chunk_w\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is "bioformats2raw defaults"\r +...\r +...\r +...\r +...\r +...\r +...\r +Configuration of the default parameters for 'bioformats2raw' is complete\r +```\r +\r +It is important to note that the initial defaults for the conversion parameters are the same as the defaults\r +of the backend tools bfconvert and bioformats2raw, as noted in the prompt excerpt above. Through interactive configuration, \r +the user is overriding these initial defaults and setting their own defaults. It is possible to reset the initial\r +defaults by running the following command.\r +\r +`batchconvert reset_defaults`\r +\r +Another important point is that any of these configured parameters can be overridden by passing a value to that\r +parameter in the commandline. For instance, in the following command, the value of 20 will be assigned to `chunk_h` parameter \r +even if the value for the same parameter might be different in the configuration file. \r +\r +`batchconvert omezarr --chunk_h 20 "path/to/input" "path/to/output"`\r +\r +\r +## Examples\r +\r +### Local conversion\r +\r +#### Parallel conversion of files to separate OME-TIFFs / OME-Zarrs:\r +Convert a batch of images on your local storage into OME-TIFF format. 
\r +Note that the `input_path` in the command given below is typically a \r +directory with multiple image files but a single image file can also be passed:\\\r +`batchconvert ometiff -pf conda "input_path" "output_path"` \r +\r +Note that if this is your first conversion with the profile `conda`, \r +it will take a while for a conda environment with the dependencies to be\r +created. All the subsequent conversion commands with the profile `conda`,\r +however, will use this environment, and thus show no such delay.\r +\r +Since conda is the default profile, it does not have to be \r +explicitly included in the command line. Thus, the command can be shortened to:\\\r +`batchconvert ometiff "input_path" "output_path"`\r +\r +Convert only the first channel of the images:\\\r +`batchconvert ometiff -chn 0 "input_path" "output_path"`\r +\r +Crop the images being converted along x and y axis by 150 pixels:\\\r +`batchconvert ometiff -cr 0,0,150,150 "input_path" "output_path"`\r +\r +Convert into OME-Zarr instead:\\\r +`batchconvert omezarr "input_path" "output_path"`\r +\r +Convert into OME-Zarr with 3 resolution levels:\\\r +`batchconvert omezarr -rz 3 "input_path" "output_path"`\r +\r +Select a subset of images with a matching string such as "mutation":\\\r +`batchconvert omezarr -p mutation "input_path" "output_path"`\r +\r +Select a subset of images using wildcards. 
Note that the use of "" around \r +the input path is necessary when using wildcards:\\\r +`batchconvert omezarr "input_path/*D3*.oir" "output_path"`\r +\r +Convert by using a singularity container instead of conda environment (requires\r +singularity to be installed on your system):\\\r +`batchconvert omezarr -pf singularity "input_path/*D3*.oir" "output_path"`\r +\r +Convert by using a docker container instead of conda environment (requires docker\r +to be installed on your system):\\\r +`batchconvert omezarr -pf docker "input_path/*D3*.oir" "output_path"`\r +\r +Note that similarly to the case with the profile `conda`, the first execution of\r +a conversion with the profile `singularity` or `docker` will take a while for the\r +container image to be pulled. All the subsequent conversion commands using a \r +container option will use this image, and thus show no such delay. \r +\r +Convert local data and upload the output to an s3 bucket. Note that the output \r +path is created relative to the bucket specified in your s3 configuration:\\\r +`batchconvert omezarr -dt s3 "input_path" "output_path"`\r +\r +Receive input files from an s3 bucket, convert locally and upload the output to \r +the same bucket. Note that wildcards cannot be used when the input is from s3. \r +Use pattern matching option `-p` for selecting a subset of input files:\\\r +`batchconvert omezarr -p mutation -st s3 -dt s3 "input_path" "output_path"`\r +\r +Receive input files from your private BioStudies user space and convert them locally.\r +Use pattern matching option `-p` for selecting a subset of input files:\\\r +`batchconvert omezarr -p mutation -st bia "input_path" "output_path"`\r +\r +Receive an input from an s3 bucket, convert locally and upload the output to your \r +private BioStudies user space. 
Use pattern matching option `-p` for selecting a subset \r
+of input files:\\\r
+`batchconvert omezarr -p mutation -st s3 -dt bia "input_path" "output_path"`\r
+\r
+Note that in all the examples shown above, BatchConvert treats each input file as a separate,\r
+standalone data point, disregarding the possibility that some of the input files might belong to \r
+the same multidimensional array. Thus, each input file is converted to an independent \r
+OME-TIFF / OME-Zarr and the number of outputs will thus equal the number of selected input files.\r
+An alternative scenario is discussed below.\r
+\r
+#### Parallel conversion of file groups by stacking multiple files into single OME-TIFFs / OME-Zarrs:\r
+\r
+When the flag `--merge_files` is specified, BatchConvert tries to detect which input files might \r
+belong to the same multidimensional array based on the patterns in the filenames. Then a "grouped conversion" \r
+is performed, meaning that the files belonging to the same dataset will be incorporated into \r
+a single OME-TIFF / OME-Zarr series, in that files will be concatenated along specific dimension(s) \r
+during the conversion. Multiple file groups in the input directory can be detected and converted \r
+in parallel. \r
+\r
+This feature uses Bio-Formats's pattern files as described [here](https://docs.openmicroscopy.org/bio-formats/6.6.0/formats/pattern-file.html).\r
+However, BatchConvert generates pattern files automatically, allowing the user to directly use the \r
+input directory in the conversion command. BatchConvert also has the option of specifying the \r
+concatenation axes in the command line, which is especially useful in cases where the filenames \r
+may not contain dimension information. \r
+\r
+To be able to use the `--merge_files` flag, the input file names must obey certain rules:\r
+1. File names in the same group must be uniform, except for one or more **numeric field(s)**, which\r
+should show incremental change across the files. 
These so-called **variable fields** \r +will be detected and used as the dimension(s) of concatenation.\r +2. The length of variable fields must be uniform within the group. For instance, if the\r +variable field has values reaching multi-digit numbers, leading "0"s should be included where needed \r +in the file names to make the variable field length uniform within the group.\r +3. Typically, each variable field should follow a dimension specifier. What patterns can be used as \r +dimension specifiers are explained [here](https://docs.openmicroscopy.org/bio-formats/6.6.0/formats/pattern-file.html).\r +However, BatchConvert also has the option `--concatenation_order`, which allows the user to\r +specify from the command line, the dimension(s), along which the files must be concatenated.\r +4. File names that are unique and cannot be associated with any group will be assumed as\r +standalone images and converted accordingly. \r +\r +Below are some examples of grouped conversion commands in the context of different possible use-case scenarios:\r +\r +**Example 1:**\r +\r +This is an example of a folder with non-uniform filename lengths:\r +```\r +time-series/test_img_T2\r +time-series/test_img_T4\r +time-series/test_img_T6\r +time-series/test_img_T8\r +time-series/test_img_T10\r +time-series/test_img_T12\r +```\r +In this example, leading zeroes are missing in the variable fields of some filenames. \r +A typical command to convert this folder to a single OME-TIFF would look like: \\\r +`batchconvert --ometiff --merge_files "input_dir/time-series" "output_path"`\r +\r +However, this command would fail to create a single OME-Zarr folder due to the non-uniform \r +lengths of the filenames. 
Instead, the files would be split into two groups based on the\r +filename length, leading to two separate OME-Zarrs with names:\r +\r +`test_img_TRange{2-8-2}.ome.zarr` and `test_img_TRange{10-12-2}.ome.zarr`\r +\r +Here is the corrected version of the folder for the above example-\r +```\r +time-series/test_img_T02\r +time-series/test_img_T04\r +time-series/test_img_T06\r +time-series/test_img_T08\r +time-series/test_img_T10\r +time-series/test_img_T12\r +```\r +\r +Executing the same command on this folder would result in a single OME-Zarr with the name:\r +`test_img_TRange{02-12-2}.ome.zarr`\r +\r +**Example 2**- \r +\r +In this example, the filename lengths are uniform but the incrementation within the variable field is not.\r +```\r +time-series/test_img_T2\r +time-series/test_img_T4\r +time-series/test_img_T5\r +time-series/test_img_T7\r +```\r +\r +A typical command to convert this folder to a single OME-Zarr would look like: \\\r +`batchconvert --omezarr --merge_files "input_dir/time-series" "output_path"`\r +\r +However, the command would fail to assume these files as a single group due to the\r +non-uniform incrementation in the variable field of the filenames. 
Instead, the dataset \r +would be split into two groups, leading to two separate OME-Zarrs with the following names:\r +`test_img_TRange{2-4-2}.ome.zarr` and `test_img_TRange{5-7-2}.ome.zarr` \r +\r +\r +**Example 3**\r +\r +This is an example of a case where the conversion attempts to concatenate files along two\r +dimensions, channel and time.\r +```\r +multichannel_time-series/test_img_C1-T1\r +multichannel_time-series/test_img_C1-T2\r +multichannel_time-series/test_img_C1-T3\r +multichannel_time-series/test_img_C2-T1\r +multichannel_time-series/test_img_C2-T2\r +```\r +To convert this folder to a single OME-Zarr, one could try the following command: \\\r +`batchconvert --omezarr --merge_files "input_dir/multichannel_time-series" "output_path"`\r +\r +However, since the channel-2 does not have the same number of timeframes as the channel-1, \r +BatchConvert will fail to assume these two channels as part of the same series and\r +will instead split the two channels into two separate OME-Zarrs. 
\r +\r +The output would look like: \\\r +`test_img_C1-TRange{1-3-1}.ome.zarr` \\\r +`test_img_C2-TRange{1-2-1}.ome.zarr`\r +\r +To be able to really incorporate all files into a single OME-Zarr, the folder should have equal\r +number of images corresponding to both channels, as shown below:\r +```\r +multichannel_time-series/test_img_C1-T1\r +multichannel_time-series/test_img_C1-T2\r +multichannel_time-series/test_img_C1-T3\r +multichannel_time-series/test_img_C2-T1\r +multichannel_time-series/test_img_C2-T2\r +multichannel_time-series/test_img_C2-T3\r +```\r +The same conversion command on this version of the input folder would result in a single \r +OME-Zarr with the name: \\\r +`test_img_CRange{1-2-1}-TRange{1-3-1}.ome.zarr`\r +\r +\r +**Example 4**\r +\r +This is another example of a case, where there are multiple filename patterns in the input folder.\r +\r +```\r +folder_with_multiple_groups/test_img_C1-T1\r +folder_with_multiple_groups/test_img_C1-T2\r +folder_with_multiple_groups/test_img_C2-T1\r +folder_with_multiple_groups/test_img_C2-T2\r +folder_with_multiple_groups/test_img_T1-Z1\r +folder_with_multiple_groups/test_img_T1-Z2\r +folder_with_multiple_groups/test_img_T1-Z3\r +folder_with_multiple_groups/test_img_T2-Z1\r +folder_with_multiple_groups/test_img_T2-Z2\r +folder_with_multiple_groups/test_img_T2-Z3\r +```\r +\r +One can convert this folder with- \\\r +`batchconvert --omezarr --merge_files "input_dir/folder_with_multiple_groups" "output_path"`\r + \r +BatchConvert will detect the two patterns in this folder and perform two grouped conversions. \r +The output folders will be named as `test_img_CRange{1-2-1}-TRange{1-2-1}.ome.zarr` and \r +`test_img_TRange{1-2-1}-ZRange{1-3-1}.ome.zarr`. 
\r +\r +\r +**Example 5**\r +\r +Now imagine that we have the same files as in the example 4 but the filenames of the\r +first group lack any dimension specifier, so we have the following folder:\r +\r +```\r +folder_with_multiple_groups/test_img_1-1\r +folder_with_multiple_groups/test_img_1-2\r +folder_with_multiple_groups/test_img_2-1\r +folder_with_multiple_groups/test_img_2-2\r +folder_with_multiple_groups/test_img_T1-Z1\r +folder_with_multiple_groups/test_img_T1-Z2\r +folder_with_multiple_groups/test_img_T1-Z3\r +folder_with_multiple_groups/test_img_T2-Z1\r +folder_with_multiple_groups/test_img_T2-Z2\r +folder_with_multiple_groups/test_img_T2-Z3\r +```\r +\r +In such a scenario, BatchConvert allows the user to specify the concatenation axes \r +via `--concatenation_order` option. This option expects comma-separated strings of dimensions \r +for each group. In this example, the user must provide a string of 2 characters, such as `ct` for \r +channel and time, for group 1, since there are two variable fields for this group. Since group 2 \r +already has dimension specifiers (T and Z as specified in the filenames preceding the variable fields),\r +the user does not need to specify anything for this group, and can enter `auto` or `aa` for automatic\r +detection of the specifiers. \r +\r +So the following line can be used to convert this folder: \\\r +`batchconvert --omezarr --merge_files --concatenation_order ct,aa "input_dir/folder_with_multiple_groups" "output_path"`\r +\r +The resulting OME-Zarrs will have the names:\r +`test_img_CRange{1-2-1}-TRange{1-2-1}.ome.zarr` and\r +`test_img_TRange{1-2-1}-ZRange{1-3-1}.ome.zarr`\r +\r +Note that `--concatenation_order` will override any dimension specifiers already\r +existing in the filenames.\r +\r +\r +**Example 6**\r +\r +There can be scenarios where the user may want to have further control over the axes along \r +which to concatenate the images. 
For example, the filenames might contain the data acquisition\r +date, which can be recognised by BatchConvert as a concatenation axis in the automatic \r +detection mode. An example of such a fileset might look like:\r +\r +```\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T1\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T2\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T3\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T1\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T2\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T3\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T1\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T2\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T3\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T1\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T2\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T3\r +```\r +\r +One may try the following command to convert this folder:\r +\r +`batchconvert --omezarr --merge_files "input_dir/filenames_with_dates" "output_path"`\r +\r +Since the concatenation axes are not specified, this command would try to create\r +a single OME-Zarr with name: `test_data_dateRange{03-04-1}.03.2023_imageZRange{1-2-1}-TRange{1-3-1}`.\r +\r +In order to force BatchConvert to ignore the date field, the user can restrict the concatenation \r +axes to the last two numeric fields. This can be done by using a command such as: \\\r +`batchconvert --omezarr --merge_files --concatenation_order aa "input_dir/filenames_with_dates" "output_path"` \\\r +This command will avoid concatenation along the date field, and therefore, there will be two\r +OME-Zarrs corresponding to the two dates. The number of characters being passed to the \r +`--concatenation_order` option specifies the number of numeric fields (starting from the right \r +end of the filename) that are recognised by the BatchConvert as valid concatenation axes. 
\r +Passing `aa`, therefore, means that the last two numeric fields must be recognised as \r +concatenation axes and the dimension type should be automatically detected (`a` for automatic). \r +In the same logic, one could, for example, convert each Z section into a separate OME-Zarr by \r +specifying `--concatenation_order a`.\r +\r +\r +\r +### Conversion on slurm\r +\r +All the examples given above can also be run on slurm by specifying `-pf cluster` option. \r +Note that this option automatically uses the singularity profile:\\\r +`batchconvert omezarr -pf cluster -p .oir "input_path" "output_path"`\r +\r +\r +\r +\r +\r +\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.453.3" ; + schema1:isBasedOn "https://github.com/Euro-BioImaging/BatchConvert.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for BatchConvert" ; + schema1:sdDatePublished "2024-07-12 13:32:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/453/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 89 ; + schema1:creator ; + schema1:dateCreated "2023-07-05T13:47:29Z" ; + schema1:dateModified "2023-07-11T10:35:35Z" ; + schema1:description """# BatchConvert ![DOI:10.5281](https://zenodo.org/badge/doi/10.5281/zenodo.7955974.svg)\r +\r +A command line tool for converting image data into either of the standard file formats OME-TIFF or OME-Zarr. \r +\r +The tool wraps the dedicated file converters bfconvert and bioformats2raw to convert into OME-TIFF or OME-Zarr,\r +respectively. The workflow management system NextFlow is used to perform conversion in parallel for batches of images. \r +\r +The tool also wraps s3 and Aspera clients (go-mc and aspera-cli, respectively). 
Therefore, input and output locations can \r +be specified as local or remote storage and file transfer will be performed automatically. The conversion can be run on \r +HPC with Slurm. \r +\r +![](figures/diagram.png)\r +\r +## Installation & Dependencies\r +\r +**Important** note: The package has been so far only tested on Ubuntu 20.04.\r +\r +The minimal dependency to run the tool is NextFlow, which should be installed and made accessible from the command line.\r +\r +If conda exists on your system, you can install BatchConvert together with NextFlow using the following script:\r +```\r +git clone https://github.com/Euro-BioImaging/BatchConvert.git && \\ \r +source BatchConvert/installation/install_with_nextflow.sh\r +```\r +\r +\r +If you already have NextFlow installed and accessible from the command line (or if you prefer to install it manually \r +e.g., as shown [here](https://www.nextflow.io/docs/latest/getstarted.html)), you can also install BatchConvert alone, using the following script:\r +```\r +git clone https://github.com/Euro-BioImaging/BatchConvert.git && \\ \r +source BatchConvert/installation/install.sh\r +```\r +\r +\r +Other dependencies (which will be **automatically** installed):\r +- bioformats2raw (entrypoint bioformats2raw)\r +- bftools (entrypoint bfconvert)\r +- go-mc (entrypoint mc)\r +- aspera-cli (entrypoint ascp)\r +\r +These dependencies will be pulled and cached automatically at the first execution of the conversion command. \r +The mode of dependency management can be specified by using the command line option ``--profile`` or `-pf`. Depending \r +on how this option is specified, the dependencies will be acquired / run either via conda or via docker/singularity containers. \r +\r +Specifying ``--profile conda`` (default) will install the dependencies to an \r +environment at ``./.condaCache`` and use this environment to run the workflow. This option \r +requires that miniconda/anaconda is installed on your system. 
\r +\r +Alternatively, specifying ``--profile docker`` or ``--profile singularity`` will pull a docker or \r +singularity image with the dependencies, respectively, and use this image to run the workflow.\r +These options assume that the respective container runtime (docker or singularity) is available on \r +your system. If singularity is being used, a cache directory will be created at the path \r +``./.singularityCache`` where the singularity image is stored. \r +\r +Finally, you can still choose to install the dependencies manually and use your own installations to run\r +the workflow. In this case, you should specify ``--profile standard`` and make sure the entrypoints\r +specified above are recognised by your shell. \r +\r +\r +## Configuration\r +\r +BatchConvert can be configured to have default options for file conversion and transfer. Probably, the most important sets of parameters\r +to be configured include credentials for the remote ends. The easiest way to configure remote stores is by running the interactive \r +configuration command as indicated below.\r +\r +### Configuration of the s3 object store\r +\r +Run the interactive configuration command: \r +\r +`batchconvert configure_s3_remote`\r +\r +This will start a sequence of requests for s3 credentials such as name, url, access, etc. Provide each requested credential and click\r +enter. Continue this cycle until the process is finished. 
Upon completing the configuration, the sequence of commands should roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_s3_remote\r +enter remote name (for example s3)\r +s3\r +enter url:\r +https://s3.embl.de\r +enter access key:\r +"your-access-key"\r +enter secret key:\r +"your-secret-key"\r +enter bucket name:\r +"your-bucket"\r +Configuration of the default s3 credentials is complete\r +```\r +\r +\r +### Configuration of the BioStudies user space\r +\r +Run the interactive configuration command: \r +\r +`batchconvert configure_bia_remote`\r +\r +This will prompt a request for the secret directory to connect to. Enter the secret directory for your user space and click enter. \r +Upon completing the configuration, the sequence of commands should roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_bia_remote\r +enter the secret directory for BioImage Archive user space:\r +"your-secret-directory"\r +configuration of the default bia credentials is complete\r +```\r +\r +### Configuration of the slurm options\r +\r +BatchConvert can also run on slurm clusters. In order to configure the slurm parameters, run the interactive configuration command: \r +\r +`batchconvert configure_slurm`\r +\r +This will start a sequence of requests for slurm options. Provide each requested option and click enter. \r +Continue this cycle until the process is finished. 
Upon completing the configuration, the sequence of commands should \r +roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_slurm\r +Please enter value for queue_size\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´50´\r +s\r +Please enter value for submit_rate_limit\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´10/2min´\r +s\r +Please enter value for cluster_options\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´--mem-per-cpu=3140 --cpus-per-task=16´\r +s\r +Please enter value for time\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´6h´\r +s\r +configuration of the default slurm parameters is complete\r +```\r +\r +### Configuration of the default conversion parameters\r +\r +While all conversion parameters can be specified as command line arguments, it can\r +be useful for the users to set their own default parameters to avoid re-entering those\r +parameters for subsequent executions. BatchConvert allows for interactive configuration of \r +conversion in the same way as configuration of the remote stores described above.\r +\r +To configure the conversion into OME-TIFF, run the following command:\r +\r +`batchconvert configure_ometiff`\r +\r +This will prompt the user to enter a series of parameters, which will then be saved as the \r +default parameters to be passed to the `batchconvert ometiff` command. 
Upon completing the \r +configuration, the sequence of commands should look similar to:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_ometiff\r +Please enter value for noflat\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is "bfconvert defaults"\r +s\r +Please enter value for series\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is "bfconvert defaults"\r +s\r +Please enter value for timepoint\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is "bfconvert defaults"\r +s\r +...\r +...\r +...\r +...\r +...\r +...\r +Configuration of the default parameters for 'bfconvert' is complete\r +```\r +\r +\r +To configure the conversion into OME-Zarr, run the following command:\r +\r +`batchconvert configure_omezarr`\r +\r +Similarly, this will prompt the user to enter a series of parameters, which will then be saved as the \r +default parameters to be passed to the `batchconvert omezarr` command. 
Upon completing the configuration, \r +the sequence of commands should look similar to:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_omezarr\r +Please enter value for resolutions_zarr\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is "bioformats2raw defaults"\r +s\r +Please enter value for chunk_h\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is "bioformats2raw defaults"\r +s\r +Please enter value for chunk_w\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is "bioformats2raw defaults"\r +...\r +...\r +...\r +...\r +...\r +...\r +Configuration of the default parameters for 'bioformats2raw' is complete\r +```\r +\r +It is important to note that the initial defaults for the conversion parameters are the same as the defaults\r +of the backend tools bfconvert and bioformats2raw, as noted in the prompt excerpt above. Through interactive configuration, \r +the user is overriding these initial defaults and setting their own defaults. It is possible to reset the initial\r +defaults by running the following command.\r +\r +`batchconvert reset_defaults`\r +\r +Another important point is that any of these configured parameters can be overridden by passing a value to that\r +parameter in the commandline. For instance, in the following command, the value of 20 will be assigned to `chunk_h` parameter \r +even if the value for the same parameter might be different in the configuration file. \r +\r +`batchconvert omezarr --chunk_h 20 "path/to/input" "path/to/output"`\r +\r +\r +## Examples\r +\r +### Local conversion\r +\r +#### Parallel conversion of files to separate OME-TIFFs / OME-Zarrs:\r +Convert a batch of images on your local storage into OME-TIFF format. 
\r +Note that the `input_path` in the command given below is typically a \r +directory with multiple image files but a single image file can also be passed:\\\r +`batchconvert ometiff -pf conda "input_path" "output_path"` \r +\r +Note that if this is your first conversion with the profile `conda`, \r +it will take a while for a conda environment with the dependencies to be\r +created. All the subsequent conversion commands with the profile `conda`,\r +however, will use this environment, and thus show no such delay.\r +\r +Since conda is the default profile, it does not have to be \r +explicitly included in the command line. Thus, the command can be shortened to:\\\r +`batchconvert ometiff "input_path" "output_path"`\r +\r +Convert only the first channel of the images:\\\r +`batchconvert ometiff -chn 0 "input_path" "output_path"`\r +\r +Crop the images being converted along x and y axis by 150 pixels:\\\r +`batchconvert ometiff -cr 0,0,150,150 "input_path" "output_path"`\r +\r +Convert into OME-Zarr instead:\\\r +`batchconvert omezarr "input_path" "output_path"`\r +\r +Convert into OME-Zarr with 3 resolution levels:\\\r +`batchconvert omezarr -rz 3 "input_path" "output_path"`\r +\r +Select a subset of images with a matching string such as "mutation":\\\r +`batchconvert omezarr -p mutation "input_path" "output_path"`\r +\r +Select a subset of images using wildcards. 
Note that the use of "" around \r +the input path is necessary when using wildcards:\\\r +`batchconvert omezarr "input_path/*D3*.oir" "output_path"`\r +\r +Convert by using a singularity container instead of conda environment (requires\r +singularity to be installed on your system):\\\r +`batchconvert omezarr -pf singularity "input_path/*D3*.oir" "output_path"`\r +\r +Convert by using a docker container instead of conda environment (requires docker\r +to be installed on your system):\\\r +`batchconvert omezarr -pf docker "input_path/*D3*.oir" "output_path"`\r +\r +Note that similarly to the case with the profile `conda`, the first execution of\r +a conversion with the profile `singularity` or `docker` will take a while for the\r +container image to be pulled. All the subsequent conversion commands using a \r +container option will use this image, and thus show no such delay. \r +\r +Convert local data and upload the output to an s3 bucket. Note that the output \r +path is created relative to the bucket specified in your s3 configuration:\\\r +`batchconvert omezarr -dt s3 "input_path" "output_path"`\r +\r +Receive input files from an s3 bucket, convert locally and upload the output to \r +the same bucket. Note that wildcards cannot be used when the input is from s3. \r +Use pattern matching option `-p` for selecting a subset of input files:\\\r +`batchconvert omezarr -p mutation -st s3 -dt s3 "input_path" "output_path"`\r +\r +Receive input files from your private BioStudies user space and convert them locally.\r +Use pattern matching option `-p` for selecting a subset of input files:\\\r +`batchconvert omezarr -p mutation -st bia "input_path" "output_path"`\r +\r +Receive an input from an s3 bucket, convert locally and upload the output to your \r +private BioStudies user space. 
Use pattern matching option `-p` for selecting a subset \r
+of input files:\\\r
+`batchconvert omezarr -p mutation -st s3 -dt bia "input_path" "output_path"`\r
+\r
+Note that in all the examples shown above, BatchConvert treats each input file as a separate,\r
+standalone data point, disregarding the possibility that some of the input files might belong to \r
+the same multidimensional array. Thus, each input file is converted to an independent \r
+OME-TIFF / OME-Zarr and the number of outputs will thus equal the number of selected input files.\r
+An alternative scenario is discussed below.\r
+\r
+#### Parallel conversion of file groups by stacking multiple files into single OME-TIFFs / OME-Zarrs:\r
+\r
+When the flag `--merge_files` is specified, BatchConvert tries to detect which input files might \r
+belong to the same multidimensional array based on the patterns in the filenames. Then a "grouped conversion" \r
+is performed, meaning that the files belonging to the same dataset will be incorporated into \r
+a single OME-TIFF / OME-Zarr series, in that files will be concatenated along specific dimension(s) \r
+during the conversion. Multiple file groups in the input directory can be detected and converted \r
+in parallel. \r
+\r
+This feature uses Bio-Formats's pattern files as described [here](https://docs.openmicroscopy.org/bio-formats/6.6.0/formats/pattern-file.html).\r
+However, BatchConvert generates pattern files automatically, allowing the user to directly use the \r
+input directory in the conversion command. BatchConvert also has the option of specifying the \r
+concatenation axes in the command line, which is especially useful in cases where the filenames \r
+may not contain dimension information. \r
+\r
+To be able to use the `--merge_files` flag, the input file names must obey certain rules:\r
+1. File names in the same group must be uniform, except for one or more **numeric field(s)**, which\r
+should show incremental change across the files. 
These so-called **variable fields** \r +will be detected and used as the dimension(s) of concatenation.\r +2. The length of variable fields must be uniform within the group. For instance, if the\r +variable field has values reaching multi-digit numbers, leading "0"s should be included where needed \r +in the file names to make the variable field length uniform within the group.\r +3. Typically, each variable field should follow a dimension specifier. What patterns can be used as \r +dimension specifiers are explained [here](https://docs.openmicroscopy.org/bio-formats/6.6.0/formats/pattern-file.html).\r +However, BatchConvert also has the option `--concatenation_order`, which allows the user to\r +specify from the command line, the dimension(s), along which the files must be concatenated.\r +4. File names that are unique and cannot be associated with any group will be assumed as\r +standalone images and converted accordingly. \r +\r +Below are some examples of grouped conversion commands in the context of different possible use-case scenarios:\r +\r +**Example 1:**\r +\r +This is an example of a folder with non-uniform filename lengths:\r +```\r +time-series/test_img_T2\r +time-series/test_img_T4\r +time-series/test_img_T6\r +time-series/test_img_T8\r +time-series/test_img_T10\r +time-series/test_img_T12\r +```\r +In this example, leading zeroes are missing in the variable fields of some filenames. \r +A typical command to convert this folder to a single OME-TIFF would look like: \\\r +`batchconvert --ometiff --merge_files "input_dir/time-series" "output_path"`\r +\r +However, this command would fail to create a single OME-Zarr folder due to the non-uniform \r +lengths of the filenames. 
Instead, the files would be split into two groups based on the\r +filename length, leading to two separate OME-Zarrs with names:\r +\r +`test_img_TRange{2-8-2}.ome.zarr` and `test_img_TRange{10-12-2}.ome.zarr`\r +\r +Here is the corrected version of the folder for the above example-\r +```\r +time-series/test_img_T02\r +time-series/test_img_T04\r +time-series/test_img_T06\r +time-series/test_img_T08\r +time-series/test_img_T10\r +time-series/test_img_T12\r +```\r +\r +Executing the same command on this folder would result in a single OME-Zarr with the name:\r +`test_img_TRange{02-12-2}.ome.zarr`\r +\r +**Example 2**- \r +\r +In this example, the filename lengths are uniform but the incrementation within the variable field is not.\r +```\r +time-series/test_img_T2\r +time-series/test_img_T4\r +time-series/test_img_T5\r +time-series/test_img_T7\r +```\r +\r +A typical command to convert this folder to a single OME-Zarr would look like: \\\r +`batchconvert --omezarr --merge_files "input_dir/time-series" "output_path"`\r +\r +However, the command would fail to assume these files as a single group due to the\r +non-uniform incrementation in the variable field of the filenames. 
Instead, the dataset \r
+would be split into two groups, leading to two separate OME-Zarrs with the following names:\r
+`test_img_TRange{2-4-2}.ome.zarr` and `test_img_TRange{5-7-2}.ome.zarr` \r
+\r
+\r
+**Example 3**\r
+\r
+This is an example of a case where the conversion attempts to concatenate files along two\r
+dimensions, channel and time.\r
+```\r
+multichannel_time-series/test_img_C1-T1\r
+multichannel_time-series/test_img_C1-T2\r
+multichannel_time-series/test_img_C1-T3\r
+multichannel_time-series/test_img_C2-T1\r
+multichannel_time-series/test_img_C2-T2\r
+```\r
+To convert this folder to a single OME-Zarr, one could try the following command: \\\r
+`batchconvert --omezarr --merge_files "input_dir/multichannel_time-series" "output_path"`\r
+\r
+However, since channel 2 does not have the same number of timeframes as channel 1, \r
+BatchConvert will fail to recognise these two channels as part of the same series and\r
+will instead split the two channels into two separate OME-Zarrs. 
\r +\r +The output would look like: \\\r +`test_img_C1-TRange{1-3-1}.ome.zarr` \\\r +`test_img_C2-TRange{1-2-1}.ome.zarr`\r +\r +To be able to really incorporate all files into a single OME-Zarr, the folder should have equal\r +number of images corresponding to both channels, as shown below:\r +```\r +multichannel_time-series/test_img_C1-T1\r +multichannel_time-series/test_img_C1-T2\r +multichannel_time-series/test_img_C1-T3\r +multichannel_time-series/test_img_C2-T1\r +multichannel_time-series/test_img_C2-T2\r +multichannel_time-series/test_img_C2-T3\r +```\r +The same conversion command on this version of the input folder would result in a single \r +OME-Zarr with the name: \\\r +`test_img_CRange{1-2-1}-TRange{1-3-1}.ome.zarr`\r +\r +\r +**Example 4**\r +\r +This is another example of a case, where there are multiple filename patterns in the input folder.\r +\r +```\r +folder_with_multiple_groups/test_img_C1-T1\r +folder_with_multiple_groups/test_img_C1-T2\r +folder_with_multiple_groups/test_img_C2-T1\r +folder_with_multiple_groups/test_img_C2-T2\r +folder_with_multiple_groups/test_img_T1-Z1\r +folder_with_multiple_groups/test_img_T1-Z2\r +folder_with_multiple_groups/test_img_T1-Z3\r +folder_with_multiple_groups/test_img_T2-Z1\r +folder_with_multiple_groups/test_img_T2-Z2\r +folder_with_multiple_groups/test_img_T2-Z3\r +```\r +\r +One can convert this folder with- \\\r +`batchconvert --omezarr --merge_files "input_dir/folder_with_multiple_groups" "output_path"`\r + \r +BatchConvert will detect the two patterns in this folder and perform two grouped conversions. \r +The output folders will be named as `test_img_CRange{1-2-1}-TRange{1-2-1}.ome.zarr` and \r +`test_img_TRange{1-2-1}-ZRange{1-3-1}.ome.zarr`. 
\r +\r +\r +**Example 5**\r +\r +Now imagine that we have the same files as in the example 4 but the filenames of the\r +first group lack any dimension specifier, so we have the following folder:\r +\r +```\r +folder_with_multiple_groups/test_img_1-1\r +folder_with_multiple_groups/test_img_1-2\r +folder_with_multiple_groups/test_img_2-1\r +folder_with_multiple_groups/test_img_2-2\r +folder_with_multiple_groups/test_img_T1-Z1\r +folder_with_multiple_groups/test_img_T1-Z2\r +folder_with_multiple_groups/test_img_T1-Z3\r +folder_with_multiple_groups/test_img_T2-Z1\r +folder_with_multiple_groups/test_img_T2-Z2\r +folder_with_multiple_groups/test_img_T2-Z3\r +```\r +\r +In such a scenario, BatchConvert allows the user to specify the concatenation axes \r +via `--concatenation_order` option. This option expects comma-separated strings of dimensions \r +for each group. In this example, the user must provide a string of 2 characters, such as `ct` for \r +channel and time, for group 1, since there are two variable fields for this group. Since group 2 \r +already has dimension specifiers (T and Z as specified in the filenames preceding the variable fields),\r +the user does not need to specify anything for this group, and can enter `auto` or `aa` for automatic\r +detection of the specifiers. \r +\r +So the following line can be used to convert this folder: \\\r +`batchconvert --omezarr --merge_files --concatenation_order ct,aa "input_dir/folder_with_multiple_groups" "output_path"`\r +\r +The resulting OME-Zarrs will have the names:\r +`test_img_CRange{1-2-1}-TRange{1-2-1}.ome.zarr` and\r +`test_img_TRange{1-2-1}-ZRange{1-3-1}.ome.zarr`\r +\r +Note that `--concatenation_order` will override any dimension specifiers already\r +existing in the filenames.\r +\r +\r +**Example 6**\r +\r +There can be scenarios where the user may want to have further control over the axes along \r +which to concatenate the images. 
For example, the filenames might contain the data acquisition\r +date, which can be recognised by BatchConvert as a concatenation axis in the automatic \r +detection mode. An example of such a fileset might look like:\r +\r +```\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T1\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T2\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T3\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T1\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T2\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T3\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T1\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T2\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T3\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T1\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T2\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T3\r +```\r +\r +One may try the following command to convert this folder:\r +\r +`batchconvert --omezarr --merge_files "input_dir/filenames_with_dates" "output_path"`\r +\r +Since the concatenation axes are not specified, this command would try to create\r +a single OME-Zarr with name: `test_data_dateRange{03-04-1}.03.2023_imageZRange{1-2-1}-TRange{1-3-1}`.\r +\r +In order to force BatchConvert to ignore the date field, the user can restrict the concatenation \r +axes to the last two numeric fields. This can be done by using a command such as: \\\r +`batchconvert --omezarr --merge_files --concatenation_order aa "input_dir/filenames_with_dates" "output_path"` \\\r +This command will avoid concatenation along the date field, and therefore, there will be two\r +OME-Zarrs corresponding to the two dates. The number of characters being passed to the \r +`--concatenation_order` option specifies the number of numeric fields (starting from the right \r +end of the filename) that are recognised by the BatchConvert as valid concatenation axes. 
\r +Passing `aa`, therefore, means that the last two numeric fields must be recognised as \r +concatenation axes and the dimension type should be automatically detected (`a` for automatic). \r +In the same logic, one could, for example, convert each Z section into a separate OME-Zarr by \r +specifying `--concatenation_order a`.\r +\r +\r +\r +### Conversion on slurm\r +\r +All the examples given above can also be run on slurm by specifying `-pf cluster` option. \r +Note that this option automatically uses the singularity profile:\\\r +`batchconvert omezarr -pf cluster -p .oir "input_path" "output_path"`\r +\r +\r +\r +\r +\r +\r +\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/453?version=2" ; + schema1:keywords "Nextflow, bash, Python, NGFF, OME-Zarr, Conversion, imaging, bioimaging, image file format, file conversion, OME-TIFF, S3, BioStudies, bioformats, bioformats2raw" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "BatchConvert" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/453?version=3" ; + schema1:version 3 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 151518 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Galaxy workflow example that illustrate the process of setting up a simulation system containing a protein, step by step, using the [BioExcel Building Blocks](/projects/11) library (biobb). The particular example used is the Lysozyme protein (PDB code 1AKI). 
This workflow returns a resulting protein structure and simulated 3D trajectories.\r +\r +Designed for running on the https://dev.usegalaxy.es Galaxy instance.""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.194.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Protein MD Setup tutorial using BioExcel Building Blocks (biobb) in Galaxy" ; + schema1:sdDatePublished "2024-07-12 13:36:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/194/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 60779 ; + schema1:dateCreated "2021-09-26T19:50:02Z" ; + schema1:dateModified "2023-01-16T13:53:08Z" ; + schema1:description """Galaxy workflow example that illustrate the process of setting up a simulation system containing a protein, step by step, using the [BioExcel Building Blocks](/projects/11) library (biobb). The particular example used is the Lysozyme protein (PDB code 1AKI). This workflow returns a resulting protein structure and simulated 3D trajectories.\r +\r +Designed for running on the https://dev.usegalaxy.es Galaxy instance.""" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Protein MD Setup tutorial using BioExcel Building Blocks (biobb) in Galaxy" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/194?version=1" ; + schema1:version 1 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ChIP-seq peak-calling and differential analysis pipeline." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/971?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/chipseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/chipseq" ; + schema1:sdDatePublished "2024-07-12 13:22:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/971/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5685 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:44Z" ; + schema1:dateModified "2024-06-11T12:54:44Z" ; + schema1:description "ChIP-seq peak-calling and differential analysis pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/971?version=5" ; + schema1:keywords "ChIP, ChIP-seq, chromatin-immunoprecipitation, macs2, peak-calling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/chipseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/971?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. 
The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. 
With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.328.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_pmx_tutorial/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)" ; + schema1:sdDatePublished "2024-07-12 13:35:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/328/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8378 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-22T10:07:20Z" ; + schema1:dateModified "2023-01-16T13:59:42Z" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. 
With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/328?version=3" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_pmx_tutorial/python/workflow.py" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.286.3" ; + schema1:isBasedOn 
"https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_dna_helparms/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Structural DNA helical parameters tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/286/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 22900 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T08:48:00Z" ; + schema1:dateModified "2023-04-14T08:48:53Z" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/286?version=3" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Structural DNA helical parameters tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_dna_helparms/python/workflow.py" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.819.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_amber_md_setup/docker" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Docker Amber Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:23:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/819/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 764 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-22T10:22:47Z" ; + schema1:dateModified "2024-05-22T13:48:33Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Docker Amber Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_amber_md_setup/docker/Dockerfile" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , 
+ , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.259.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_virtual_screening/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Protein-ligand Docking tutorial (Fpocket)" ; + schema1:sdDatePublished "2024-07-12 13:36:12 +0100" ; + schema1:url "https://workflowhub.eu/workflows/259/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 29165 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5518 ; + schema1:creator , + ; + schema1:dateCreated "2022-01-10T11:55:05Z" ; + schema1:dateModified "2023-06-06T14:56:34Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/259?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Protein-ligand Docking tutorial (Fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/259?version=1" ; + 
schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """To discover causal mutations of inherited diseases it’s common practice to do a trio analysis. In a trio analysis DNA is sequenced of both the patient and parents. Using this method, it’s possible to identify multiple inheritance patterns. Some examples of these patterns are autosomal recessive, autosomal dominant, and de-novo variants, which are represented in the figure below. To elaborate, the most left tree shows an autosomal dominant inhertitance pattern where the offspring inherits a faulty copy of the gene from one of the parents.\r +\r +To discover these mutations either whole exome sequencing (WES) or whole genome sequencing (WGS) can be used. With these technologies it is possible to uncover the DNA of the parents and offspring to find (shared) mutations in the DNA. These mutations can include insertions/deletions (indels), loss of heterozygosity (LOH), single nucleotide variants (SNVs), copy number variations (CNVs), and fusion genes.\r +\r +In this workflow we will also make use of the HTSGET protocol, which is a program to download our data securely and savely. This protocol has been implemented in the EGA Download Client Tool: toolshed.g2.bx.psu.edu/repos/iuc/ega_download_client/pyega3/4.0.0+galaxy0 tool, so we don’t have to leave Galaxy to retrieve our data.\r +\r +We will not start our analysis from scratch, since the main goal of this tutorial is to use the HTSGET protocol to download variant information from an online archive and to find the causative variant from those variants. 
If you want to learn how to do the analysis from scratch, using the raw reads, you can have a look at the Exome sequencing data analysis for diagnosing a genetic disease tutorial.""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.363.2" ; + schema1:isBasedOn "https://github.com/galaxyproject/training-material/blob/main/topics/variant-analysis/tutorials/trio-analysis/workflows/main_workflow.ga" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Trio Analysis" ; + schema1:sdDatePublished "2024-07-12 13:34:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/363/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 29328 ; + schema1:creator ; + schema1:dateCreated "2023-03-01T15:35:32Z" ; + schema1:dateModified "2023-09-05T08:11:49Z" ; + schema1:description """To discover causal mutations of inherited diseases it’s common practice to do a trio analysis. In a trio analysis DNA is sequenced of both the patient and parents. Using this method, it’s possible to identify multiple inheritance patterns. Some examples of these patterns are autosomal recessive, autosomal dominant, and de-novo variants, which are represented in the figure below. To elaborate, the most left tree shows an autosomal dominant inhertitance pattern where the offspring inherits a faulty copy of the gene from one of the parents.\r +\r +To discover these mutations either whole exome sequencing (WES) or whole genome sequencing (WGS) can be used. With these technologies it is possible to uncover the DNA of the parents and offspring to find (shared) mutations in the DNA. 
These mutations can include insertions/deletions (indels), loss of heterozygosity (LOH), single nucleotide variants (SNVs), copy number variations (CNVs), and fusion genes.\r +\r +In this workflow we will also make use of the HTSGET protocol, which is a program to download our data securely and savely. This protocol has been implemented in the EGA Download Client Tool: toolshed.g2.bx.psu.edu/repos/iuc/ega_download_client/pyega3/4.0.0+galaxy0 tool, so we don’t have to leave Galaxy to retrieve our data.\r +\r +We will not start our analysis from scratch, since the main goal of this tutorial is to use the HTSGET protocol to download variant information from an online archive and to find the causative variant from those variants. If you want to learn how to do the analysis from scratch, using the raw reads, you can have a look at the Exome sequencing data analysis for diagnosing a genetic disease tutorial.""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/363?version=1" ; + schema1:keywords "variant-analysis" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Trio Analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/galaxyproject/training-material/blob/main/topics/variant-analysis/tutorials/trio-analysis/workflows/main_workflow.ga" ; + schema1:version 2 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=22" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-07-12 13:18:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=22" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14671 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:40Z" ; + schema1:dateModified "2024-06-11T12:54:40Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=22" ; + schema1:version 22 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Genome-wide alternative splicing analysis v.2" ; + schema1:hasPart , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.482.1" ; + schema1:license "CC-BY-NC-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genome-wide alternative splicing analysis v.2" ; + schema1:sdDatePublished "2024-07-12 13:33:04 +0100" ; + schema1:url "https://workflowhub.eu/workflows/482/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 214419 . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 30436 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 115942 ; + schema1:creator ; + schema1:dateCreated "2023-05-25T22:01:31Z" ; + schema1:dateModified "2023-06-07T16:01:06Z" ; + schema1:description "Genome-wide alternative splicing analysis v.2" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/482?version=6" ; + schema1:keywords "Transcriptomics, Alternative splicing, isoform switching" ; + schema1:license "https://spdx.org/licenses/CC-BY-NC-4.0" ; + schema1:name "Genome-wide alternative splicing analysis v.2" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/482?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/978?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/demultiplex" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/demultiplex" ; + schema1:sdDatePublished "2024-07-12 13:21:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/978/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9518 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:47Z" ; + schema1:dateModified "2024-06-11T12:54:47Z" ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/978?version=7" ; + schema1:keywords "bases2fastq, bcl2fastq, demultiplexing, elementbiosciences, illumina" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/demultiplex" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/978?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "An example workflow to allow users to run the Specimen Data Refinery tools on data provided in an input CSV file." ; + schema1:identifier "https://workflowhub.eu/workflows/373?version=2" ; + schema1:license "CC-BY-4.0" ; + schema1:name "Research Object Crate for De novo digitisation" ; + schema1:sdDatePublished "2024-07-12 13:35:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/373/ro_crate?version=2" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Macromolecular Coarse-Grained Flexibility (FlexServ) tutorial using BioExcel Building Blocks (biobb)\r +\r +This tutorial aims to illustrate the process of generating protein conformational ensembles from 3D structures and analysing its molecular flexibility, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.557.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_flexserv/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Macromolecular Coarse-Grained Flexibility tutorial" ; + schema1:sdDatePublished "2024-07-12 13:27:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/557/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 56921 ; + schema1:creator , + ; + schema1:dateCreated "2023-08-11T08:32:23Z" ; + schema1:dateModified "2023-08-11T08:34:07Z" ; + schema1:description """# Macromolecular Coarse-Grained Flexibility (FlexServ) tutorial using BioExcel Building Blocks (biobb)\r +\r +This tutorial aims to illustrate the process of generating protein conformational ensembles from 3D structures and analysing its molecular flexibility, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Macromolecular Coarse-Grained Flexibility tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_flexserv/galaxy/biobb_wf_flexserv.ga" ; + schema1:version 1 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """From the R1 and R2 fastq files of a single samples, make a scRNAseq counts matrix, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. Produces a processed AnnData \r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/465?version=2" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq Single Sample Processing STARSolo" ; + schema1:sdDatePublished "2024-07-12 13:22:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/465/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 126308 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-06-22T06:36:33Z" ; + schema1:dateModified "2023-06-23T06:46:18Z" ; + schema1:description """From the R1 and R2 fastq files of a single samples, make a scRNAseq counts matrix, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. Produces a processed AnnData \r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/465?version=3" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "scRNAseq Single Sample Processing STARSolo" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/465?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-09-05T08:29:48.601016" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "parallel-accession-download/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.10" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-07-12 13:20:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4227 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:03Z" ; + schema1:dateModified "2024-06-11T12:55:03Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=5" ; + schema1:version 5 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """[![Cite with Zenodo](https://zenodo.org/badge/509096312.svg)](https://zenodo.org/doi/10.5281/zenodo.10047653)\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.10.1-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![Launch on Nextflow Tower](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Nextflow%20Tower-%234256e7)](https://tower.nf/launch?pipeline=https://github.com/sanger-tol/treeval)\r +\r +## Introduction\r +\r +**sanger-tol/treeval [1.1.0 - Ancient Aurora]** is a bioinformatics best-practice analysis pipeline for the generation of data supplemental to the curation of reference quality genomes. This pipeline has been written to generate flat files compatible with [JBrowse2](https://jbrowse.org/jb2/) as well as HiC maps for use in Juicebox, PretextView and HiGlass.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. 
Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +You can also set up and attempt to run the pipeline here: https://gitpod.io/#https://github.com/BGAcademy23/treeval-curation\r +This is a gitpod set up for BGA23 with a version of TreeVal, although for now gitpod will not run a nextflow pipeline die to issues with using singularity. We will be replacing this with an AWS instance soon.\r +\r +The treeval pipeline has a sister pipeline currently named [curationpretext](https://github.com/sanger-tol/curationpretext) which acts to regenerate the pretext maps and accessory files during genomic curation in order to confirm interventions. This pipeline is sufficiently different to the treeval implementation that it is written as it's own pipeline.\r +\r +1. Parse input yaml ( YAML_INPUT )\r +2. Generate my.genome file ( GENERATE_GENOME )\r +3. Generate insilico digests of the input assembly ( INSILICO_DIGEST )\r +4. Generate gene alignments with high quality data against the input assembly ( GENE_ALIGNMENT )\r +5. Generate a repeat density graph ( REPEAT_DENSITY )\r +6. Generate a gap track ( GAP_FINDER )\r +7. Generate a map of self complementary sequence ( SELFCOMP )\r +8. Generate syntenic alignments with a closely related high quality assembly ( SYNTENY )\r +9. Generate a coverage track using PacBio data ( LONGREAD_COVERAGE )\r +10. Generate HiC maps, pretext and higlass using HiC cram files ( HIC_MAPPING )\r +11. Generate a telomere track based on input motif ( TELO_FINDER )\r +12. Run Busco and convert results into bed format ( BUSCO_ANNOTATION )\r +13. Ancestral Busco linkage if available for clade ( BUSCO_ANNOTATION:ANCESTRAL_GENE )\r +14. Count KMERs with FastK and plot the spectra using MerquryFK ( KMER )\r +15. 
Generate a coverge track using KMER data ( KMER_READ_COVERAGE )\r +\r +## Usage\r +\r +> **Note**\r +> If you are new to Nextflow and nf-core, please refer to [this page](https://nf-co.re/docs/usage/installation) on how\r +> to set-up Nextflow. Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline)\r +> with `-profile test` before running the workflow on actual data.\r +\r +Currently, it is advised to run the pipeline with docker or singularity as a small number of major modules do not currently have a conda env associated with them.\r +\r +Now, you can run the pipeline using:\r +\r +```bash\r +# For the FULL pipeline\r +nextflow run main.nf -profile singularity --input treeval.yaml --outdir {OUTDIR}\r +\r +# For the RAPID subset\r +nextflow run main.nf -profile singularity --input treeval.yaml -entry RAPID --outdir {OUTDIR}\r +```\r +\r +An example treeval.yaml can be found [here](assets/local_testing/nxOscDF5033.yaml).\r +\r +Further documentation about the pipeline can be found in the following files: [usage](https://pipelines.tol.sanger.ac.uk/treeval/dev/usage), [parameters](https://pipelines.tol.sanger.ac.uk/treeval/dev/parameters) and [output](https://pipelines.tol.sanger.ac.uk/treeval/dev/output).\r +\r +> **Warning:**\r +> Please provide pipeline parameters via the CLI or Nextflow `-params-file` option. Custom config files including those\r +> provided by the `-c` Nextflow option can be used to provide any configuration _**except for parameters**_;\r +> see [docs](https://nf-co.re/usage/configuration#custom-configuration-files).\r +\r +## Credits\r +\r +sanger-tol/treeval has been written by Damon-Lee Pointon (@DLBPointon), Yumi Sims (@yumisims) and William Eagles (@weaglesBio).\r +\r +We thank the following people for their extensive assistance in the development of this pipeline:\r +\r +
    \r +
  • @gq1 - For building the infrastructure around TreeVal and helping with code review
  • \r +
  • @ksenia-krasheninnikova - For help with C code implementation and YAML parsing
  • \r +
  • @mcshane - For guidance on algorithms
  • \r +
  • @muffato - For code reviews and code support
  • \r +
  • @priyanka-surana - For help with the majority of code reviews and code support
  • \r +
\r +\r +## Contributions and Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +## Citations\r +\r +\r +\r +If you use sanger-tol/treeval for your analysis, please cite it using the following doi: [10.5281/zenodo.10047653](https://doi.org/10.5281/zenodo.10047653).\r +\r +### Tools\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +You can cite the `nf-core` publication as follows:\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/813?version=1" ; + schema1:isBasedOn "https://github.com/sanger-tol/treeval.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for sanger-tol/treeval v1.1.0 - Ancient Aurora" ; + schema1:sdDatePublished "2024-07-12 13:23:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/813/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2029 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-04-09T09:22:28Z" ; + schema1:dateModified "2024-04-09T09:22:28Z" ; + schema1:description """[![Cite with Zenodo](https://zenodo.org/badge/509096312.svg)](https://zenodo.org/doi/10.5281/zenodo.10047653)\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.10.1-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![Launch on Nextflow Tower](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Nextflow%20Tower-%234256e7)](https://tower.nf/launch?pipeline=https://github.com/sanger-tol/treeval)\r +\r +## Introduction\r +\r +**sanger-tol/treeval [1.1.0 - Ancient Aurora]** is a bioinformatics best-practice analysis pipeline for the generation of data supplemental to the curation of reference quality genomes. This pipeline has been written to generate flat files compatible with [JBrowse2](https://jbrowse.org/jb2/) as well as HiC maps for use in Juicebox, PretextView and HiGlass.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. 
The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +You can also set up and attempt to run the pipeline here: https://gitpod.io/#https://github.com/BGAcademy23/treeval-curation\r +This is a gitpod set up for BGA23 with a version of TreeVal, although for now gitpod will not run a nextflow pipeline die to issues with using singularity. We will be replacing this with an AWS instance soon.\r +\r +The treeval pipeline has a sister pipeline currently named [curationpretext](https://github.com/sanger-tol/curationpretext) which acts to regenerate the pretext maps and accessory files during genomic curation in order to confirm interventions. This pipeline is sufficiently different to the treeval implementation that it is written as it's own pipeline.\r +\r +1. Parse input yaml ( YAML_INPUT )\r +2. Generate my.genome file ( GENERATE_GENOME )\r +3. Generate insilico digests of the input assembly ( INSILICO_DIGEST )\r +4. Generate gene alignments with high quality data against the input assembly ( GENE_ALIGNMENT )\r +5. Generate a repeat density graph ( REPEAT_DENSITY )\r +6. Generate a gap track ( GAP_FINDER )\r +7. Generate a map of self complementary sequence ( SELFCOMP )\r +8. Generate syntenic alignments with a closely related high quality assembly ( SYNTENY )\r +9. Generate a coverage track using PacBio data ( LONGREAD_COVERAGE )\r +10. Generate HiC maps, pretext and higlass using HiC cram files ( HIC_MAPPING )\r +11. Generate a telomere track based on input motif ( TELO_FINDER )\r +12. Run Busco and convert results into bed format ( BUSCO_ANNOTATION )\r +13. 
Ancestral Busco linkage if available for clade ( BUSCO_ANNOTATION:ANCESTRAL_GENE )\r +14. Count KMERs with FastK and plot the spectra using MerquryFK ( KMER )\r +15. Generate a coverge track using KMER data ( KMER_READ_COVERAGE )\r +\r +## Usage\r +\r +> **Note**\r +> If you are new to Nextflow and nf-core, please refer to [this page](https://nf-co.re/docs/usage/installation) on how\r +> to set-up Nextflow. Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline)\r +> with `-profile test` before running the workflow on actual data.\r +\r +Currently, it is advised to run the pipeline with docker or singularity as a small number of major modules do not currently have a conda env associated with them.\r +\r +Now, you can run the pipeline using:\r +\r +```bash\r +# For the FULL pipeline\r +nextflow run main.nf -profile singularity --input treeval.yaml --outdir {OUTDIR}\r +\r +# For the RAPID subset\r +nextflow run main.nf -profile singularity --input treeval.yaml -entry RAPID --outdir {OUTDIR}\r +```\r +\r +An example treeval.yaml can be found [here](assets/local_testing/nxOscDF5033.yaml).\r +\r +Further documentation about the pipeline can be found in the following files: [usage](https://pipelines.tol.sanger.ac.uk/treeval/dev/usage), [parameters](https://pipelines.tol.sanger.ac.uk/treeval/dev/parameters) and [output](https://pipelines.tol.sanger.ac.uk/treeval/dev/output).\r +\r +> **Warning:**\r +> Please provide pipeline parameters via the CLI or Nextflow `-params-file` option. 
Custom config files including those\r +> provided by the `-c` Nextflow option can be used to provide any configuration _**except for parameters**_;\r +> see [docs](https://nf-co.re/usage/configuration#custom-configuration-files).\r +\r +## Credits\r +\r +sanger-tol/treeval has been written by Damon-Lee Pointon (@DLBPointon), Yumi Sims (@yumisims) and William Eagles (@weaglesBio).\r +\r +We thank the following people for their extensive assistance in the development of this pipeline:\r +\r +
    \r +
  • @gq1 - For building the infrastructure around TreeVal and helping with code review
  • \r +
  • @ksenia-krasheninnikova - For help with C code implementation and YAML parsing
  • \r +
  • @mcshane - For guidance on algorithms
  • \r +
  • @muffato - For code reviews and code support
  • \r +
  • @priyanka-surana - For help with the majority of code reviews and code support
  • \r +
\r +\r +## Contributions and Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +## Citations\r +\r +\r +\r +If you use sanger-tol/treeval for your analysis, please cite it using the following doi: [10.5281/zenodo.10047653](https://doi.org/10.5281/zenodo.10047653).\r +\r +### Tools\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +You can cite the `nf-core` publication as follows:\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:keywords "Bioinformatics, Genomics, genome_assembly" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "sanger-tol/treeval v1.1.0 - Ancient Aurora" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/813?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/303?version=1" ; + schema1:isBasedOn "https://github.com/DimitraPanou/scRNAseq-cwl.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for seurat scRNA-seq" ; + schema1:sdDatePublished "2024-07-12 13:35:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/303/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 84591 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2798 ; + schema1:dateCreated "2022-04-04T04:43:31Z" ; + schema1:dateModified "2022-04-14T13:26:32Z" ; + schema1:description "" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/303?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "seurat scRNA-seq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/DimitraPanou/scRNAseq-cwl/blob/master/steps.cwl" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2023-01-16T18:45:24.648437" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "rnaseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.3" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# GSC (Genotype Sparse Compression)\r +Genotype Sparse Compression (GSC) is an advanced tool for lossless compression of VCF files, designed to efficiently store and manage VCF files in a compressed format. It accepts VCF/BCF files as input and utilizes advanced compression techniques to significantly reduce storage requirements while ensuring fast query capabilities. 
In our study, we successfully compressed the VCF files from the 1000 Genomes Project (1000Gpip3), consisting of 2504 samples and 80 million variants, from an uncompressed VCF file of 803.70GB to approximately 1GB.\r +\r +## Requirements \r +### GSC requires:\r +\r +- **Compiler Compatibility**: GSC requires a modern C++14-ready compiler, such as:\r + - g++ version 10.1.0 or higher\r +\r +- **Build System**: Make build system is necessary for compiling GSC.\r +\r +- **Operating System**: GSC supports 64-bit operating systems, including:\r + - Linux (Ubuntu 18.04)\r + \r +## Installation\r +To download, build and install GSC use the following commands.\r +```bash\r +git clone https://github.com/luo-xiaolong/GSC.git\r +cd GSC\r +make\r +```\r +To clean the GSC build use:\r +```bash\r +make clean\r +```\r +## Usage\r +```bash\r +Usage: gsc [option] [arguments] \r +Available options: \r + compress - compress VCF/BCF file\r + decompress - query and decompress to VCF/BCF file\r +```\r +- Compress the input VCF/BCF file\r +```bash\r +Usage of gsc compress:\r +\r + gsc compress [options] [--in [in_file]] [--out [out_file]]\r +\r +Where:\r +\r + [options] Optional flags and parameters for compression.\r + -i, --in [in_file] Specify the input file (default: VCF or VCF.GZ). If omitted, input is taken from standard input (stdin).\r + -o, --out [out_file] Specify the output file. 
If omitted, output is sent to standard output (stdout).\r +\r +Options:\r +\r + -M, --mode_lossly Choose lossy compression mode (lossless by default).\r + -b, --bcf Input is a BCF file (default: VCF or VCF.GZ).\r + -p, --ploidy [X] Set ploidy of samples in input VCF to [X] (default: 2).\r + -t, --threads [X] Set number of threads to [X] (default: 1).\r + -d, --depth [X] Set maximum replication depth to [X] (default: 100, 0 means no matches).\r + -m, --merge [X] Specify files to merge, separated by commas (e.g., -m chr1.vcf,chr2.vcf), or '@' followed by a file containing a list of VCF files (e.g., -m @file_with_IDs.txt). By default, all VCF files are compressed.\r +```\r +- Decompress / Query\r +```bash\r +Usage of gsc decompress and query:\r +\r + gsc decompress [options] --in [in_file] --out [out_file]\r +\r +Where:\r + [options] Optional flags and parameters for compression.\r + -i, --in [in_file] Specify the input file . If omitted, input is taken from standard input (stdin).\r + -o, --out [out_file] Specify the output file (default: VCF). If omitted, output is sent to standard output (stdout).\r +\r +Options:\r +\r + General Options:\r +\r + -M, --mode_lossly Choose lossy compression mode (default: lossless).\r + -b, --bcf Output a BCF file (default: VCF).\r +\r + Filter options (applicable in lossy compression mode only): \r +\r + -r, --range [X] Specify range in format [start],[end] (e.g., -r 4999756,4999852).\r + -s, --samples [X] Samples separated by comms (e.g., -s HG03861,NA18639) OR '@' sign followed by the name of a file with sample name(s) separated by whitespaces (for exaple: -s @file_with_IDs.txt). By default all samples/individuals are decompressed. 
\r + --header-only Output only the header of the VCF/BCF.\r + --no-header Output without the VCF/BCF header (only genotypes).\r + -G, --no-genotype Don't output sample genotypes (only #CHROM, POS, ID, REF, ALT, QUAL, FILTER, and INFO columns).\r + -C, --out-ac-an Write AC/AN to the INFO field.\r + -S, --split Split output into multiple files (one per chromosome).\r + -I, [ID=^] Include only sites with specified ID (e.g., -I "ID=rs6040355").\r + --minAC [X] Include only sites with AC <= X.\r + --maxAC [X] Include only sites with AC >= X.\r + --minAF [X] Include only sites with AF >= X (X: 0 to 1).\r + --maxAF [X] Include only sites with AF <= X (X: 0 to 1).\r + --min-qual [X] Include only sites with QUAL >= X.\r + --max-qual [X] Include only sites with QUAL <= X.\r +```\r +## Example\r +There is an example VCF/VCF.gz/BCF file, `toy.vcf`/`toy.vcf.gz`/`toy.bcf`, in the toy folder, which can be used to test GSC\r +### compress\r +\r +#### lossless compression:\r +The input file format is VCF. You can compress a VCF file in lossless mode using one of the following methods:\r +1. **Explicit input and output file parameters**:\r + \r + Use the `--in` option to specify the input VCF file and the `--out` option for the output compressed file.\r + ```bash\r + ./gsc compress --in toy/toy.vcf --out toy/toy_lossless.gsc\r + ```\r +2. **Input file parameter and output redirection**:\r + \r + Use the `--out` option for the output compressed file and redirect the input VCF file into the command.\r + ```bash\r + ./gsc compress --out toy/toy_lossless.gsc < toy/toy.vcf\r + ```\r +3. **Output file redirection and input file parameter**:\r + \r + Specify the input VCF file with the `--in` option and redirect the output to create the compressed file.\r + ```bash\r + ./gsc compress --in toy/toy.vcf > toy/toy_lossless.gsc\r + ```\r +4. **Input and output redirection**:\r + \r + Use shell redirection for both input and output. 
This method does not use the `--in` and `--out` options.\r + ```bash\r + ./gsc compress < toy/toy.vcf > toy/toy_lossless.gsc\r + ```\r +This will create a file:\r +* `toy_lossless.gsc` - The compressed archive of the entire VCF file.\r +\r +#### lossy compression:\r +\r +The input file format is VCF. The commands are similar to those used for lossless compression, with the addition of the `-M` parameter to enable lossy compression.\r +\r + For example, to compress a VCF file in lossy mode:\r +\r + ```bash\r + ./gsc compress -M --in toy/toy.vcf --out toy/toy_lossy.gsc\r + ```\r + Or using redirection:\r + ```bash\r + ./gsc compress -M --out toy/toy_lossy.gsc < toy/toy.vcf\r + ``` \r + This will create a file:\r + * `toy_lossy.gsc` - The compressed archive of the entire VCF file is implemented with lossy compression. It only retains the 'GT' subfield within the INFO and FORMAT fields, and excludes all other subfields..\r + \r +### Decompress (The commands are similar to those used for compression)\r +lossless decompression:\r +\r +To decompress the compressed toy_lossless.gsc into a VCF file named toy_lossless.vcf:\r +```bash\r +./gsc decompress --in toy/toy_lossless.gsc --out toy/toy_lossless.vcf\r +```\r +lossy decompression:\r +\r +To decompress the compressed toy_lossy.gsc into a VCF file named toy_lossy.vcf:\r +```bash\r +./gsc decompress -M --in toy/toy_lossy.gsc --out toy/toy_lossy.vcf\r +```\r +## Dockerfile\r +Dockerfile can be used to build a Docker image with all necessary dependencies and GSC compressor. The image is based on Ubuntu 18.04. To build a Docker image and run a Docker container, you need Docker Desktop (https://www.docker.com). 
Example commands (run it within a directory with Dockerfile):\r +```bash\r +docker build -t gsc_project .\r +docker run -it gsc_project\r +```\r +## Citations\r +- **bio.tools ID**: `gsc_genotype_sparse_compression`\r +- **Research Resource Identifier (RRID)**: `SCR_025071`\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/885?version=1" ; + schema1:isBasedOn "https://github.com/luo-xiaolong/GSC.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for GSC (Genotype Sparse Compression)" ; + schema1:sdDatePublished "2024-07-12 13:22:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/885/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7413 ; + schema1:dateCreated "2024-05-17T16:51:00Z" ; + schema1:dateModified "2024-05-17T16:51:00Z" ; + schema1:description """# GSC (Genotype Sparse Compression)\r +Genotype Sparse Compression (GSC) is an advanced tool for lossless compression of VCF files, designed to efficiently store and manage VCF files in a compressed format. It accepts VCF/BCF files as input and utilizes advanced compression techniques to significantly reduce storage requirements while ensuring fast query capabilities. 
In our study, we successfully compressed the VCF files from the 1000 Genomes Project (1000Gpip3), consisting of 2504 samples and 80 million variants, from an uncompressed VCF file of 803.70GB to approximately 1GB.\r +\r +## Requirements \r +### GSC requires:\r +\r +- **Compiler Compatibility**: GSC requires a modern C++14-ready compiler, such as:\r + - g++ version 10.1.0 or higher\r +\r +- **Build System**: Make build system is necessary for compiling GSC.\r +\r +- **Operating System**: GSC supports 64-bit operating systems, including:\r + - Linux (Ubuntu 18.04)\r + \r +## Installation\r +To download, build and install GSC use the following commands.\r +```bash\r +git clone https://github.com/luo-xiaolong/GSC.git\r +cd GSC\r +make\r +```\r +To clean the GSC build use:\r +```bash\r +make clean\r +```\r +## Usage\r +```bash\r +Usage: gsc [option] [arguments] \r +Available options: \r + compress - compress VCF/BCF file\r + decompress - query and decompress to VCF/BCF file\r +```\r +- Compress the input VCF/BCF file\r +```bash\r +Usage of gsc compress:\r +\r + gsc compress [options] [--in [in_file]] [--out [out_file]]\r +\r +Where:\r +\r + [options] Optional flags and parameters for compression.\r + -i, --in [in_file] Specify the input file (default: VCF or VCF.GZ). If omitted, input is taken from standard input (stdin).\r + -o, --out [out_file] Specify the output file. 
If omitted, output is sent to standard output (stdout).\r +\r +Options:\r +\r + -M, --mode_lossly Choose lossy compression mode (lossless by default).\r + -b, --bcf Input is a BCF file (default: VCF or VCF.GZ).\r + -p, --ploidy [X] Set ploidy of samples in input VCF to [X] (default: 2).\r + -t, --threads [X] Set number of threads to [X] (default: 1).\r + -d, --depth [X] Set maximum replication depth to [X] (default: 100, 0 means no matches).\r + -m, --merge [X] Specify files to merge, separated by commas (e.g., -m chr1.vcf,chr2.vcf), or '@' followed by a file containing a list of VCF files (e.g., -m @file_with_IDs.txt). By default, all VCF files are compressed.\r +```\r +- Decompress / Query\r +```bash\r +Usage of gsc decompress and query:\r +\r + gsc decompress [options] --in [in_file] --out [out_file]\r +\r +Where:\r + [options] Optional flags and parameters for compression.\r + -i, --in [in_file] Specify the input file . If omitted, input is taken from standard input (stdin).\r + -o, --out [out_file] Specify the output file (default: VCF). If omitted, output is sent to standard output (stdout).\r +\r +Options:\r +\r + General Options:\r +\r + -M, --mode_lossly Choose lossy compression mode (default: lossless).\r + -b, --bcf Output a BCF file (default: VCF).\r +\r + Filter options (applicable in lossy compression mode only): \r +\r + -r, --range [X] Specify range in format [start],[end] (e.g., -r 4999756,4999852).\r + -s, --samples [X] Samples separated by comms (e.g., -s HG03861,NA18639) OR '@' sign followed by the name of a file with sample name(s) separated by whitespaces (for exaple: -s @file_with_IDs.txt). By default all samples/individuals are decompressed. 
\r + --header-only Output only the header of the VCF/BCF.\r + --no-header Output without the VCF/BCF header (only genotypes).\r + -G, --no-genotype Don't output sample genotypes (only #CHROM, POS, ID, REF, ALT, QUAL, FILTER, and INFO columns).\r + -C, --out-ac-an Write AC/AN to the INFO field.\r + -S, --split Split output into multiple files (one per chromosome).\r + -I, [ID=^] Include only sites with specified ID (e.g., -I "ID=rs6040355").\r + --minAC [X] Include only sites with AC <= X.\r + --maxAC [X] Include only sites with AC >= X.\r + --minAF [X] Include only sites with AF >= X (X: 0 to 1).\r + --maxAF [X] Include only sites with AF <= X (X: 0 to 1).\r + --min-qual [X] Include only sites with QUAL >= X.\r + --max-qual [X] Include only sites with QUAL <= X.\r +```\r +## Example\r +There is an example VCF/VCF.gz/BCF file, `toy.vcf`/`toy.vcf.gz`/`toy.bcf`, in the toy folder, which can be used to test GSC\r +### compress\r +\r +#### lossless compression:\r +The input file format is VCF. You can compress a VCF file in lossless mode using one of the following methods:\r +1. **Explicit input and output file parameters**:\r + \r + Use the `--in` option to specify the input VCF file and the `--out` option for the output compressed file.\r + ```bash\r + ./gsc compress --in toy/toy.vcf --out toy/toy_lossless.gsc\r + ```\r +2. **Input file parameter and output redirection**:\r + \r + Use the `--out` option for the output compressed file and redirect the input VCF file into the command.\r + ```bash\r + ./gsc compress --out toy/toy_lossless.gsc < toy/toy.vcf\r + ```\r +3. **Output file redirection and input file parameter**:\r + \r + Specify the input VCF file with the `--in` option and redirect the output to create the compressed file.\r + ```bash\r + ./gsc compress --in toy/toy.vcf > toy/toy_lossless.gsc\r + ```\r +4. **Input and output redirection**:\r + \r + Use shell redirection for both input and output. 
This method does not use the `--in` and `--out` options.\r + ```bash\r + ./gsc compress < toy/toy.vcf > toy/toy_lossless.gsc\r + ```\r +This will create a file:\r +* `toy_lossless.gsc` - The compressed archive of the entire VCF file.\r +\r +#### lossy compression:\r +\r +The input file format is VCF. The commands are similar to those used for lossless compression, with the addition of the `-M` parameter to enable lossy compression.\r +\r + For example, to compress a VCF file in lossy mode:\r +\r + ```bash\r + ./gsc compress -M --in toy/toy.vcf --out toy/toy_lossy.gsc\r + ```\r + Or using redirection:\r + ```bash\r + ./gsc compress -M --out toy/toy_lossy.gsc < toy/toy.vcf\r + ``` \r + This will create a file:\r + * `toy_lossy.gsc` - The compressed archive of the entire VCF file is implemented with lossy compression. It only retains the 'GT' subfield within the INFO and FORMAT fields, and excludes all other subfields..\r + \r +### Decompress (The commands are similar to those used for compression)\r +lossless decompression:\r +\r +To decompress the compressed toy_lossless.gsc into a VCF file named toy_lossless.vcf:\r +```bash\r +./gsc decompress --in toy/toy_lossless.gsc --out toy/toy_lossless.vcf\r +```\r +lossy decompression:\r +\r +To decompress the compressed toy_lossy.gsc into a VCF file named toy_lossy.vcf:\r +```bash\r +./gsc decompress -M --in toy/toy_lossy.gsc --out toy/toy_lossy.vcf\r +```\r +## Dockerfile\r +Dockerfile can be used to build a Docker image with all necessary dependencies and GSC compressor. The image is based on Ubuntu 18.04. To build a Docker image and run a Docker container, you need Docker Desktop (https://www.docker.com). 
Example commands (run it within a directory with Dockerfile):\r +```bash\r +docker build -t gsc_project .\r +docker run -it gsc_project\r +```\r +## Citations\r +- **bio.tools ID**: `gsc_genotype_sparse_compression`\r +- **Research Resource Identifier (RRID)**: `SCR_025071`\r +""" ; + schema1:keywords "Bioinformatics, Genomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "GSC (Genotype Sparse Compression)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/885?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/963?version=14" ; + schema1:isBasedOn "https://github.com/nf-core/airrflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/airrflow" ; + schema1:sdDatePublished "2024-07-12 13:22:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/963/ro_crate?version=14" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14400 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:37Z" ; + schema1:dateModified "2024-06-11T12:54:37Z" ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/963?version=13" ; + schema1:keywords "airr, b-cell, immcantation, immunorepertoire, repseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/airrflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/963?version=14" ; + schema1:version 14 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Structural and functional genome annotation with Funannotate" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/754?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genome annotation with Funannotate" ; + schema1:sdDatePublished "2024-07-12 13:24:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/754/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 46764 ; + schema1:creator ; + schema1:dateCreated "2024-02-15T11:39:50Z" ; + schema1:dateModified "2024-02-15T11:39:50Z" ; + schema1:description "Structural and functional genome annotation with Funannotate" ; + schema1:isPartOf ; + schema1:keywords "genome-annotation" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Genome annotation with Funannotate" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/754?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1023?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/smrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/smrnaseq" ; + schema1:sdDatePublished "2024-07-12 13:18:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1023/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10530 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:18Z" ; + schema1:dateModified "2024-06-11T12:55:18Z" ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1023?version=10" ; + schema1:keywords "small-rna, smrna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/smrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1023?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Summary\r +\r +This tutorial aims to illustrate the process of setting up a simulation system containing a protein, step by step, using the BioExcel Building Blocks library (biobb). The particular example used is the Lysozyme protein (PDB code 1AKI).\r +\r +Workflow engine is a jupyter notebook. It can be run in binder, following the link given, or locally. Auxiliar libraries used are: nb\\_conda_kernels, nglview, ipywidgets, plotly, and simpletraj. Environment can be setup using the included environment.yml file.\r +\r +\r +# Parameters\r +\r +## Inputs\r +\r +Parameters needed to configure the workflow:\r +\r +* **pdbCode**: PDB code of the protein-ligand complex structure (e.g. 
1AKI)\r +\r +## Outputs\r +\r +Output files generated (named according to the input parameters given above):\r +\r +* **output\\_md\\_gro**: final structure of the MD setup protocol\r +\r +* **output\\_md\\_trr**: final trajectory of the MD setup protocol\r +\r +* **output\\_md\\_cpt**: final checkpoint file\r +\r +* **output\\_gppmd\\_tpr**: final tpr file\r +\r +* **output\\_genion\\_top\\_zip**: final topology of the MD system""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.120.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Protein MD Setup tutorial using BioExcel Building Blocks (biobb) (jupyter notebook)" ; + schema1:sdDatePublished "2024-07-12 13:34:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/120/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 53057 ; + schema1:creator , + ; + schema1:dateCreated "2021-05-07T13:51:35Z" ; + schema1:dateModified "2021-05-13T08:14:02Z" ; + schema1:description """# Summary\r +\r +This tutorial aims to illustrate the process of setting up a simulation system containing a protein, step by step, using the BioExcel Building Blocks library (biobb). The particular example used is the Lysozyme protein (PDB code 1AKI).\r +\r +Workflow engine is a jupyter notebook. It can be run in binder, following the link given, or locally. Auxiliar libraries used are: nb\\_conda_kernels, nglview, ipywidgets, plotly, and simpletraj. Environment can be setup using the included environment.yml file.\r +\r +\r +# Parameters\r +\r +## Inputs\r +\r +Parameters needed to configure the workflow:\r +\r +* **pdbCode**: PDB code of the protein-ligand complex structure (e.g. 
1AKI)\r +\r +## Outputs\r +\r +Output files generated (named according to the input parameters given above):\r +\r +* **output\\_md\\_gro**: final structure of the MD setup protocol\r +\r +* **output\\_md\\_trr**: final trajectory of the MD setup protocol\r +\r +* **output\\_md\\_cpt**: final checkpoint file\r +\r +* **output\\_gppmd\\_tpr**: final tpr file\r +\r +* **output\\_genion\\_top\\_zip**: final topology of the MD system""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/120?version=5" ; + schema1:isPartOf , + , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Protein MD Setup tutorial using BioExcel Building Blocks (biobb) (jupyter notebook)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/120?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "NDVI data with OpenEO to time series visualisation with HoloViz" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/759?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Visualizing NDVI time-series data with HoloViz" ; + schema1:sdDatePublished "2024-07-12 13:24:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/759/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3065 ; + schema1:creator ; + schema1:dateCreated "2024-02-15T11:59:36Z" ; + schema1:dateModified "2024-02-15T13:42:10Z" ; + schema1:description "NDVI data with OpenEO to time series visualisation with HoloViz" ; + schema1:isPartOf ; + schema1:keywords "Ecology" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Visualizing NDVI time-series data with HoloViz" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/759?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Abstract CWL Automatically generated from the Galaxy workflow file: GTN 'Pangeo 101 for everyone - Introduction to Xarray'.\r +\r +In this tutorial, we analyze particle matter < 2.5 μm/m3 data from Copernicus Atmosphere Monitoring Service to understand Xarray Galaxy Tools:\r +- Understand how an Xarray dataset is organized;\r +- Get metadata from Xarray dataset such as variable names, units, coordinates (latitude, longitude, level), etc;\r +- Plot an Xarray dataset on a geographical map and learn to customize it;\r +- Select/Subset an Xarray dataset from coordinates values such as time selection or a subset over a geographical area;\r +- Mask an Xarray dataset with a Where statement, for instance to only see PM2.5 > 30 μm/m and highlight on a map regions with "high" values;\r +- Convert an Xarray dataset to Tabular data (pandas dataframe);\r +- Plot tabular data to visualize the forecast PM2.5 over a single point (here Naples) using a scatterplot and/or climate stripes.""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/252?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name 
"Research Object Crate for Pangeo 101 for everyone - introduction to Xarray" ; + schema1:sdDatePublished "2024-07-12 13:36:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/252/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 27957 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 124667 ; + schema1:dateCreated "2021-12-29T07:57:46Z" ; + schema1:dateModified "2023-01-16T13:56:30Z" ; + schema1:description """Abstract CWL Automatically generated from the Galaxy workflow file: GTN 'Pangeo 101 for everyone - Introduction to Xarray'.\r +\r +In this tutorial, we analyze particle matter < 2.5 μm/m3 data from Copernicus Atmosphere Monitoring Service to understand Xarray Galaxy Tools:\r +- Understand how an Xarray dataset is organized;\r +- Get metadata from Xarray dataset such as variable names, units, coordinates (latitude, longitude, level), etc;\r +- Plot an Xarray dataset on a geographical map and learn to customize it;\r +- Select/Subset an Xarray dataset from coordinates values such as time selection or a subset over a geographical area;\r +- Mask an Xarray dataset with a Where statement, for instance to only see PM2.5 > 30 μm/m and highlight on a map regions with "high" values;\r +- Convert an Xarray dataset to Tabular data (pandas dataframe);\r +- Plot tabular data to visualize the forecast PM2.5 over a single point (here Naples) using a scatterplot and/or climate stripes.""" ; + schema1:image ; + schema1:keywords "GTN, Climate, copernicus, pangeo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Pangeo 101 for everyone - introduction to Xarray" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/252?version=1" ; + 
schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 101858 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=19" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-07-12 13:21:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=19" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13111 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:50Z" ; + schema1:dateModified "2024-06-11T12:54:50Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=19" ; + schema1:version 19 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=29" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-07-12 13:21:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=29" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13588 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-07-02T03:02:47Z" ; + schema1:dateModified "2024-07-02T03:02:47Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=29" ; + schema1:version 29 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.830.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Docker Protein-ligand Docking tutorial (Fpocket)" ; + schema1:sdDatePublished "2024-07-12 
13:23:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/830/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 968 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-22T11:19:34Z" ; + schema1:dateModified "2024-05-22T13:44:48Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in 
Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Docker Protein-ligand Docking tutorial (Fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_virtual_screening/docker/Dockerfile" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Alternative splicing analysis using RNA-seq." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1018?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/rnasplice" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnasplice" ; + schema1:sdDatePublished "2024-07-12 13:19:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1018/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14104 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:14Z" ; + schema1:dateModified "2024-06-11T12:55:14Z" ; + schema1:description "Alternative splicing analysis using RNA-seq." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1018?version=4" ; + schema1:keywords "alternative-splicing, rna, rna-seq, splicing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnasplice" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1018?version=3" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# COVID-19 Multiscale Modelling of the Virus and Patients’ Tissue Workflow\r +\r +## Table of Contents\r +\r +- [COVID-19 Multiscale Modelling of the Virus and Patients’ Tissue Workflow](#covid-19-multiscale-modelling-of-the-virus-and-patients-tissue-workflow)\r + - [Table of Contents](#table-of-contents)\r + - [Description](#description)\r + - [Contents](#contents)\r + - [Building Blocks](#building-blocks)\r + - [Workflows](#workflows)\r + - [Resources](#resources)\r + - [Tests](#tests)\r + - [Instructions](#instructions)\r + - [Local machine](#local-machine)\r + - [Requirements](#requirements)\r + - [Usage steps](#usage-steps)\r + - [MareNostrum 4](#marenostrum-4)\r + - [Requirements in MN4](#requirements-in-mn4)\r + - [Usage steps in MN4](#usage-steps-in-mn4)\r + - [Mahti or Puhti](#mahti-or-puhti)\r + - [Requirements](#requirements)\r + - [Steps](#steps)\r + - [License](#license)\r + - [Contact](#contact)\r +\r +## Description\r +\r +Uses multiscale simulations to predict patient-specific SARS‑CoV‑2 severity subtypes\r +(moderate, severe or control), using single-cell RNA-Seq data, MaBoSS and PhysiBoSS.\r +Boolean models are used to determine the behaviour of individual agents as a function\r +of extracellular conditions and the concentration of different substrates, including\r +the number of virions. Predictions of severity subtypes are based on a meta-analysis of\r +personalised model outputs simulating cellular apoptosis regulation in epithelial cells\r +infected by SARS‑CoV‑2.\r +\r +The workflow uses the following building blocks, described in order of execution:\r +\r +1. High-throughput mutant analysis\r +2. Single-cell processing\r +3. Personalise patient\r +4. PhysiBoSS\r +5. 
Analysis of all simulations\r +\r +For details on individual workflow steps, see the user documentation for each building block.\r +\r +[`GitHub repository`]()\r +\r +\r +## Contents\r +\r +### Building Blocks\r +\r +The ``BuildingBlocks`` folder contains the script to install the\r +Building Blocks used in the COVID-19 Workflow.\r +\r +### Workflows\r +\r +The ``Workflow`` folder contains the workflows implementations.\r +\r +Currently contains the implementation using PyCOMPSs and Snakemake (in progress).\r +\r +### Resources\r +\r +The ``Resources`` folder contains dataset files.\r +\r +### Tests\r +\r +The ``Tests`` folder contains the scripts that run each Building Block\r +used in the workflow for the given small dataset.\r +They can be executed individually for testing purposes.\r +\r +## Instructions\r +\r +### Local machine\r +\r +This section explains the requirements and usage for the COVID19 Workflow in a laptop or desktop computer.\r +\r +#### Requirements\r +\r +- [`permedcoe`](https://github.com/PerMedCoE/permedcoe) package\r +- [PyCOMPSs](https://pycompss.readthedocs.io/en/stable/Sections/00_Quickstart.html) / [Snakemake](https://snakemake.readthedocs.io/en/stable/)\r +- [Singularity](https://sylabs.io/guides/3.0/user-guide/installation.html)\r +\r +#### Usage steps\r +\r +1. Clone this repository:\r +\r + ```bash\r + git clone https://github.com/PerMedCoE/covid-19-workflow.git\r + ```\r +\r +2. Install the Building Blocks required for the COVID19 Workflow:\r +\r + ```bash\r + covid-19-workflow/BuildingBlocks/./install_BBs.sh\r + ```\r +\r +3. 
Get the required Building Block images from the project [B2DROP](https://b2drop.bsc.es/index.php/f/444350):\r +\r + - Required images:\r + - MaBoSS.singularity\r + - meta_analysis.singularity\r + - PhysiCell-COVID19.singularity\r + - single_cell.singularity\r +\r + The path where these files are stored **MUST be exported in the `PERMEDCOE_IMAGES`** environment variable.\r +\r + > :warning: **TIP**: These containers can be built manually as follows (be patient since some of them may take some time):\r + 1. Clone the `BuildingBlocks` repository\r + ```bash\r + git clone https://github.com/PerMedCoE/BuildingBlocks.git\r + ```\r + 2. Build the required Building Block images\r + ```bash\r + cd BuildingBlocks/Resources/images\r + sudo singularity build MaBoSS.sif MaBoSS.singularity\r + sudo singularity build meta_analysis.sif meta_analysis.singularity\r + sudo singularity build PhysiCell-COVID19.sif PhysiCell-COVID19.singularity\r + sudo singularity build single_cell.sif single_cell.singularity\r + cd ../../..\r + ```\r +\r +**If using PyCOMPSs in local PC** (make sure that PyCOMPSs in installed):\r +\r +4. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflows/PyCOMPSs\r + ```\r +\r +5. Execute `./run.sh`\r +\r +**If using Snakemake in local PC** (make sure that SnakeMake is installed):\r +\r +4. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflows/SnakeMake\r + ```\r +\r +5. Execute `./run.sh`\r + > **TIP**: If you want to run the workflow with a different dataset, please update the `run.sh` script setting the `dataset` variable to the new dataset folder and their file names.\r +\r +\r +### MareNostrum 4\r +\r +This section explains the requirements and usage for the COVID19 Workflow in the MareNostrum 4 supercomputer.\r +\r +#### Requirements in MN4\r +\r +- Access to MN4\r +\r +All Building Blocks are already installed in MN4, and the COVID19 Workflow available.\r +\r +#### Usage steps in MN4\r +\r +1. 
Load the `COMPSs`, `Singularity` and `permedcoe` modules\r +\r + ```bash\r + export COMPSS_PYTHON_VERSION=3\r + module load COMPSs/3.1\r + module load singularity/3.5.2\r + module use /apps/modules/modulefiles/tools/COMPSs/libraries\r + module load permedcoe\r + ```\r +\r + > **TIP**: Include the loading into your `${HOME}/.bashrc` file to load it automatically on the session start.\r +\r + This commands will load COMPSs and the permedcoe package which provides all necessary dependencies, as well as the path to the singularity container images (`PERMEDCOE_IMAGES` environment variable) and testing dataset (`COVID19WORKFLOW_DATASET` environment variable).\r +\r +2. Get a copy of the pilot workflow into your desired folder\r +\r + ```bash\r + mkdir desired_folder\r + cd desired_folder\r + get_covid19workflow\r + ```\r +\r +3. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflow/PyCOMPSs\r + ```\r +\r +4. Execute `./launch.sh`\r +\r + This command will launch a job into the job queuing system (SLURM) requesting 2 nodes (one node acting half master and half worker, and other full worker node) for 20 minutes, and is prepared to use the singularity images that are already deployed in MN4 (located into the `PERMEDCOE_IMAGES` environment variable). It uses the dataset located into `../../Resources/data` folder.\r +\r + > :warning: **TIP**: If you want to run the workflow with a different dataset, please edit the `launch.sh` script and define the appropriate dataset path.\r +\r + After the execution, a `results` folder will be available with with COVID19 Workflow results.\r +\r +### Mahti or Puhti\r +\r +This section explains how to run the COVID19 workflow on CSC supercomputers using SnakeMake.\r +\r +#### Requirements\r +\r +- Install snakemake (or check if there is a version installed using `module spider snakemake`)\r +- Install workflow, using the same steps as for the local machine. 
With the exception that containers have to be built elsewhere.\r +\r +#### Steps\r +\r +\r +1. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflow/SnakeMake\r + ```\r +\r +2. Edit `launch.sh` with the correct partition, account, and resource specifications. \r +\r +3. Execute `./launch.sh`\r +\r + > :warning: Snakemake provides a `--cluster` flag, but this functionality should be avoided as it's really not suited for HPC systems.\r +\r +## License\r +\r +[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\r +\r +## Contact\r +\r +\r +\r +This software has been developed for the [PerMedCoE project](https://permedcoe.eu/), funded by the European Commission (EU H2020 [951773](https://cordis.europa.eu/project/id/951773)).\r +\r +![](https://permedcoe.eu/wp-content/uploads/2020/11/logo_1.png "PerMedCoE")\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/476?version=1" ; + schema1:isBasedOn "https://github.com/PerMedCoE/covid-19-workflow/" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PerMedCoE Covid19 Pilot workflow (PyCOMPSs)" ; + schema1:sdDatePublished "2024-07-12 13:33:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/476/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1029 ; + schema1:dateCreated "2023-05-23T12:07:56Z" ; + schema1:dateModified "2023-05-23T12:33:23Z" ; + schema1:description """# COVID-19 Multiscale Modelling of the Virus and Patients’ Tissue Workflow\r +\r +## Table of Contents\r +\r +- [COVID-19 Multiscale Modelling of the Virus and Patients’ Tissue Workflow](#covid-19-multiscale-modelling-of-the-virus-and-patients-tissue-workflow)\r + - [Table of Contents](#table-of-contents)\r + - [Description](#description)\r + - [Contents](#contents)\r + - [Building Blocks](#building-blocks)\r + - [Workflows](#workflows)\r + - [Resources](#resources)\r + - [Tests](#tests)\r + - [Instructions](#instructions)\r + - [Local machine](#local-machine)\r + - [Requirements](#requirements)\r + - [Usage steps](#usage-steps)\r + - [MareNostrum 4](#marenostrum-4)\r + - [Requirements in MN4](#requirements-in-mn4)\r + - [Usage steps in MN4](#usage-steps-in-mn4)\r + - [Mahti or Puhti](#mahti-or-puhti)\r + - [Requirements](#requirements)\r + - [Steps](#steps)\r + - [License](#license)\r + - [Contact](#contact)\r +\r +## Description\r +\r +Uses multiscale simulations to predict patient-specific SARS‑CoV‑2 severity subtypes\r +(moderate, severe or control), using single-cell RNA-Seq data, MaBoSS and PhysiBoSS.\r +Boolean models are used to determine the behaviour of individual agents as a function\r +of extracellular conditions and the concentration of different substrates, including\r +the number of virions. Predictions of severity subtypes are based on a meta-analysis of\r +personalised model outputs simulating cellular apoptosis regulation in epithelial cells\r +infected by SARS‑CoV‑2.\r +\r +The workflow uses the following building blocks, described in order of execution:\r +\r +1. High-throughput mutant analysis\r +2. Single-cell processing\r +3. 
Personalise patient\r +4. PhysiBoSS\r +5. Analysis of all simulations\r +\r +For details on individual workflow steps, see the user documentation for each building block.\r +\r +[`GitHub repository`]()\r +\r +\r +## Contents\r +\r +### Building Blocks\r +\r +The ``BuildingBlocks`` folder contains the script to install the\r +Building Blocks used in the COVID-19 Workflow.\r +\r +### Workflows\r +\r +The ``Workflow`` folder contains the workflows implementations.\r +\r +Currently contains the implementation using PyCOMPSs and Snakemake (in progress).\r +\r +### Resources\r +\r +The ``Resources`` folder contains dataset files.\r +\r +### Tests\r +\r +The ``Tests`` folder contains the scripts that run each Building Block\r +used in the workflow for the given small dataset.\r +They can be executed individually for testing purposes.\r +\r +## Instructions\r +\r +### Local machine\r +\r +This section explains the requirements and usage for the COVID19 Workflow in a laptop or desktop computer.\r +\r +#### Requirements\r +\r +- [`permedcoe`](https://github.com/PerMedCoE/permedcoe) package\r +- [PyCOMPSs](https://pycompss.readthedocs.io/en/stable/Sections/00_Quickstart.html) / [Snakemake](https://snakemake.readthedocs.io/en/stable/)\r +- [Singularity](https://sylabs.io/guides/3.0/user-guide/installation.html)\r +\r +#### Usage steps\r +\r +1. Clone this repository:\r +\r + ```bash\r + git clone https://github.com/PerMedCoE/covid-19-workflow.git\r + ```\r +\r +2. Install the Building Blocks required for the COVID19 Workflow:\r +\r + ```bash\r + covid-19-workflow/BuildingBlocks/./install_BBs.sh\r + ```\r +\r +3. 
Get the required Building Block images from the project [B2DROP](https://b2drop.bsc.es/index.php/f/444350):\r +\r + - Required images:\r + - MaBoSS.singularity\r + - meta_analysis.singularity\r + - PhysiCell-COVID19.singularity\r + - single_cell.singularity\r +\r + The path where these files are stored **MUST be exported in the `PERMEDCOE_IMAGES`** environment variable.\r +\r + > :warning: **TIP**: These containers can be built manually as follows (be patient since some of them may take some time):\r + 1. Clone the `BuildingBlocks` repository\r + ```bash\r + git clone https://github.com/PerMedCoE/BuildingBlocks.git\r + ```\r + 2. Build the required Building Block images\r + ```bash\r + cd BuildingBlocks/Resources/images\r + sudo singularity build MaBoSS.sif MaBoSS.singularity\r + sudo singularity build meta_analysis.sif meta_analysis.singularity\r + sudo singularity build PhysiCell-COVID19.sif PhysiCell-COVID19.singularity\r + sudo singularity build single_cell.sif single_cell.singularity\r + cd ../../..\r + ```\r +\r +**If using PyCOMPSs in local PC** (make sure that PyCOMPSs in installed):\r +\r +4. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflows/PyCOMPSs\r + ```\r +\r +5. Execute `./run.sh`\r +\r +**If using Snakemake in local PC** (make sure that SnakeMake is installed):\r +\r +4. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflows/SnakeMake\r + ```\r +\r +5. Execute `./run.sh`\r + > **TIP**: If you want to run the workflow with a different dataset, please update the `run.sh` script setting the `dataset` variable to the new dataset folder and their file names.\r +\r +\r +### MareNostrum 4\r +\r +This section explains the requirements and usage for the COVID19 Workflow in the MareNostrum 4 supercomputer.\r +\r +#### Requirements in MN4\r +\r +- Access to MN4\r +\r +All Building Blocks are already installed in MN4, and the COVID19 Workflow available.\r +\r +#### Usage steps in MN4\r +\r +1. 
Load the `COMPSs`, `Singularity` and `permedcoe` modules\r +\r + ```bash\r + export COMPSS_PYTHON_VERSION=3\r + module load COMPSs/3.1\r + module load singularity/3.5.2\r + module use /apps/modules/modulefiles/tools/COMPSs/libraries\r + module load permedcoe\r + ```\r +\r + > **TIP**: Include the loading into your `${HOME}/.bashrc` file to load it automatically on the session start.\r +\r + This commands will load COMPSs and the permedcoe package which provides all necessary dependencies, as well as the path to the singularity container images (`PERMEDCOE_IMAGES` environment variable) and testing dataset (`COVID19WORKFLOW_DATASET` environment variable).\r +\r +2. Get a copy of the pilot workflow into your desired folder\r +\r + ```bash\r + mkdir desired_folder\r + cd desired_folder\r + get_covid19workflow\r + ```\r +\r +3. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflow/PyCOMPSs\r + ```\r +\r +4. Execute `./launch.sh`\r +\r + This command will launch a job into the job queuing system (SLURM) requesting 2 nodes (one node acting half master and half worker, and other full worker node) for 20 minutes, and is prepared to use the singularity images that are already deployed in MN4 (located into the `PERMEDCOE_IMAGES` environment variable). It uses the dataset located into `../../Resources/data` folder.\r +\r + > :warning: **TIP**: If you want to run the workflow with a different dataset, please edit the `launch.sh` script and define the appropriate dataset path.\r +\r + After the execution, a `results` folder will be available with with COVID19 Workflow results.\r +\r +### Mahti or Puhti\r +\r +This section explains how to run the COVID19 workflow on CSC supercomputers using SnakeMake.\r +\r +#### Requirements\r +\r +- Install snakemake (or check if there is a version installed using `module spider snakemake`)\r +- Install workflow, using the same steps as for the local machine. 
With the exception that containers have to be built elsewhere.\r +\r +#### Steps\r +\r +\r +1. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflow/SnakeMake\r + ```\r +\r +2. Edit `launch.sh` with the correct partition, account, and resource specifications. \r +\r +3. Execute `./launch.sh`\r +\r + > :warning: Snakemake provides a `--cluster` flag, but this functionality should be avoided as it's really not suited for HPC systems.\r +\r +## License\r +\r +[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\r +\r +## Contact\r +\r +\r +\r +This software has been developed for the [PerMedCoE project](https://permedcoe.eu/), funded by the European Commission (EU H2020 [951773](https://cordis.europa.eu/project/id/951773)).\r +\r +![](https://permedcoe.eu/wp-content/uploads/2020/11/logo_1.png "PerMedCoE")\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "PerMedCoE Covid19 Pilot workflow (PyCOMPSs)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/476?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1027?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/viralrecon" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/viralrecon" ; + schema1:sdDatePublished "2024-07-12 13:18:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1027/ro_crate?version=10" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10178 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:21Z" ; + schema1:dateModified "2024-06-11T12:55:21Z" ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1027?version=10" ; + schema1:keywords "Amplicon, ARTIC, Assembly, covid-19, covid19, illumina, long-read-sequencing, Metagenomics, nanopore, ONT, oxford-nanopore, SARS-CoV-2, variant-calling, viral, Virus" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/viralrecon" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1027?version=10" ; + schema1:version 10 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.255.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_ligand_parameterization/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL GMX Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-07-12 13:35:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/255/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 12338 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2368 ; + schema1:creator , + ; + schema1:dateCreated "2022-01-10T10:39:09Z" ; + schema1:dateModified "2023-06-06T12:23:52Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/255?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name 
"CWL GMX Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/255?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + ; + ns1:output , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Performs scaffolding using HiC Data. Part of VGP assembly pipeline. The scaffolding can be performed on long read assembly contigs or on scaffolds (e.g.: Bionano scaffolds)." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/324?version=1" ; + schema1:isBasedOn "https://github.com/Delphine-L/iwc/tree/VGP/workflows/VGP-assembly-v2" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for VGP hybrid scaffolding with HiC data" ; + schema1:sdDatePublished "2024-07-12 13:35:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/324/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9690 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 52589 ; + schema1:creator ; + schema1:dateCreated "2022-04-05T23:33:36Z" ; + schema1:dateModified "2023-01-16T13:59:36Z" ; + schema1:description "Performs scaffolding using HiC Data. Part of VGP assembly pipeline. The scaffolding can be performed on long read assembly contigs or on scaffolds (e.g.: Bionano scaffolds)." 
; + schema1:isPartOf ; + schema1:keywords "vgp, Assembly, Galaxy" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "VGP hybrid scaffolding with HiC data" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/324?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=16" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-07-12 13:19:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=16" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17642 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:16Z" ; + schema1:dateModified "2024-06-11T12:55:16Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=16" ; + schema1:version 16 . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-04-09T15:37:12.177589" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:description "Preprocessing of raw SARS-CoV-2 reads. This workflow contains an alternate starting point to avoid the data to be downloaded from the NCBI SRA. More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/4?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genomics - Read pre-processing without downloading from SRA" ; + schema1:sdDatePublished "2024-07-12 13:37:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/4/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 6333 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 35202 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T12:06:00Z" ; + schema1:dateModified "2023-01-16T13:39:49Z" ; + schema1:description "Preprocessing of raw SARS-CoV-2 reads. This workflow contains an alternate starting point to avoid the data to be downloaded from the NCBI SRA. 
More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Genomics - Read pre-processing without downloading from SRA" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/4?version=1" ; + schema1:version 1 ; + ns1:input , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 10958 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """MGnify (http://www.ebi.ac.uk/metagenomics) provides a free to use platform for the assembly, analysis and archiving of microbiome data derived from sequencing microbial populations that are present in particular environments. Over the past 2 years, MGnify (formerly EBI Metagenomics) has more than doubled the number of publicly available analysed datasets held within the resource. Recently, an updated approach to data analysis has been unveiled (version 5.0), replacing the previous single pipeline with multiple analysis pipelines that are tailored according to the input data, and that are formally described using the Common Workflow Language, enabling greater provenance, reusability, and reproducibility. MGnify's new analysis pipelines offer additional approaches for taxonomic assertions based on ribosomal internal transcribed spacer regions (ITS1/2) and expanded protein functional annotations. Biochemical pathways and systems predictions have also been added for assembled contigs. MGnify's growing focus on the assembly of metagenomic data has also seen the number of datasets it has assembled and analysed increase six-fold. 
The non-redundant protein database constructed from the proteins encoded by these assemblies now exceeds 1 billion sequences. Meanwhile, a newly developed contig viewer provides fine-grained visualisation of the assembled contigs and their enriched annotations.\r +\r +Documentation: https://docs.mgnify.org/en/latest/analysis.html#assembly-analysis-pipeline\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/360?version=2" ; + schema1:isBasedOn "https://github.com/EBI-Metagenomics/pipeline-v5.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for MGnify - assembly analysis pipeline" ; + schema1:sdDatePublished "2024-07-12 13:35:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/360/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 67287 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7995 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2022-06-07T08:03:35Z" ; + schema1:dateModified "2023-04-28T10:09:02Z" ; + schema1:description """MGnify (http://www.ebi.ac.uk/metagenomics) provides a free to use platform for the assembly, analysis and archiving of microbiome data derived from sequencing microbial populations that are present in particular environments. Over the past 2 years, MGnify (formerly EBI Metagenomics) has more than doubled the number of publicly available analysed datasets held within the resource. Recently, an updated approach to data analysis has been unveiled (version 5.0), replacing the previous single pipeline with multiple analysis pipelines that are tailored according to the input data, and that are formally described using the Common Workflow Language, enabling greater provenance, reusability, and reproducibility. 
MGnify's new analysis pipelines offer additional approaches for taxonomic assertions based on ribosomal internal transcribed spacer regions (ITS1/2) and expanded protein functional annotations. Biochemical pathways and systems predictions have also been added for assembled contigs. MGnify's growing focus on the assembly of metagenomic data has also seen the number of datasets it has assembled and analysed increase six-fold. The non-redundant protein database constructed from the proteins encoded by these assemblies now exceeds 1 billion sequences. Meanwhile, a newly developed contig viewer provides fine-grained visualisation of the assembled contigs and their enriched annotations.\r +\r +Documentation: https://docs.mgnify.org/en/latest/analysis.html#assembly-analysis-pipeline\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/360?version=1" ; + schema1:keywords "Metagenomics, Annotation, workflow, CWL" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "MGnify - assembly analysis pipeline" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/360?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """[![GitHub Actions CI Status](https://github.com/plant-food-research-open/assemblyqc/actions/workflows/ci.yml/badge.svg)](https://github.com/plant-food-research-open/assemblyqc/actions/workflows/ci.yml)\r +[![GitHub Actions Linting Status](https://github.com/plant-food-research-open/assemblyqc/actions/workflows/linting.yml/badge.svg)](https://github.com/plant-food-research-open/assemblyqc/actions/workflows/linting.yml)[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.10647870-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.10647870)\r +[![nf-test](https://img.shields.io/badge/unit_tests-nf--test-337ab7.svg)](https://www.nf-test.com)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A523.04.0-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda ❌](http://img.shields.io/badge/run%20with-conda%20❌-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![Launch on Seqera Platform](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Seqera%20Platform-%234256e7)](https://cloud.seqera.io/launch?pipeline=https://github.com/plant-food-research-open/assemblyqc)\r +\r +## Introduction\r +\r +**plant-food-research-open/assemblyqc** is a [NextFlow](https://www.nextflow.io/docs/latest/index.html) pipeline which evaluates assembly quality with multiple QC tools and presents the results in a unified html report. 
The tools are shown in the [Pipeline Flowchart](#pipeline-flowchart) and their references are listed in [CITATIONS.md](./CITATIONS.md).\r +\r +## Pipeline Flowchart\r +\r +```mermaid\r +%%{init: {\r + 'theme': 'base',\r + 'themeVariables': {\r + 'fontSize': '52px",\r + 'primaryColor': '#9A6421',\r + 'primaryTextColor': '#ffffff',\r + 'primaryBorderColor': '#9A6421',\r + 'lineColor': '#B180A8',\r + 'secondaryColor': '#455C58',\r + 'tertiaryColor': '#ffffff'\r + }\r +}}%%\r +flowchart LR\r + forEachTag(Assembly) ==> VALIDATE_FORMAT[VALIDATE FORMAT]\r +\r + VALIDATE_FORMAT ==> ncbiFCS[NCBI FCS\\nADAPTOR]\r + ncbiFCS ==> Check{Check}\r +\r + VALIDATE_FORMAT ==> ncbiGX[NCBI FCS GX]\r + ncbiGX ==> Check\r + Check ==> |Clean|Run(Run)\r +\r + Check ==> |Contamination|Skip(Skip All)\r + Skip ==> REPORT\r +\r + VALIDATE_FORMAT ==> GFF_STATS[GENOMETOOLS GT STAT]\r +\r + Run ==> ASS_STATS[ASSEMBLATHON STATS]\r + Run ==> BUSCO\r + Run ==> TIDK\r + Run ==> LAI\r + Run ==> KRAKEN2\r + Run ==> HIC_CONTACT_MAP[HIC CONTACT MAP]\r + Run ==> MUMMER\r + Run ==> MINIMAP2\r + Run ==> MERQURY\r +\r + MUMMER ==> CIRCOS\r + MUMMER ==> DOTPLOT\r +\r + MINIMAP2 ==> PLOTSR\r +\r + ASS_STATS ==> REPORT\r + GFF_STATS ==> REPORT\r + BUSCO ==> REPORT\r + TIDK ==> REPORT\r + LAI ==> REPORT\r + KRAKEN2 ==> REPORT\r + HIC_CONTACT_MAP ==> REPORT\r + CIRCOS ==> REPORT\r + DOTPLOT ==> REPORT\r + PLOTSR ==> REPORT\r + MERQURY ==> REPORT\r +```\r +\r +- [FASTA VALIDATOR](https://github.com/linsalrob/fasta_validator) + [SEQKIT RMDUP](https://github.com/shenwei356/seqkit): FASTA validation\r +- [GENOMETOOLS GT GFF3VALIDATOR](https://genometools.org/tools/gt_gff3validator.html): GFF3 validation\r +- [ASSEMBLATHON STATS](https://github.com/PlantandFoodResearch/assemblathon2-analysis/blob/a93cba25d847434f7eadc04e63b58c567c46a56d/assemblathon_stats.pl): Assembly statistics\r +- [GENOMETOOLS GT STAT](https://genometools.org/tools/gt_stat.html): Annotation statistics\r +- [NCBI FCS 
ADAPTOR](https://github.com/ncbi/fcs): Adaptor contamination pass/fail\r +- [NCBI FCS GX](https://github.com/ncbi/fcs): Foreign organism contamination pass/fail\r +- [BUSCO](https://gitlab.com/ezlab/busco): Gene-space completeness estimation\r +- [TIDK](https://github.com/tolkit/telomeric-identifier): Telomere repeat identification\r +- [LAI](https://github.com/oushujun/LTR_retriever/blob/master/LAI): Continuity of repetitive sequences\r +- [KRAKEN2](https://github.com/DerrickWood/kraken2): Taxonomy classification\r +- [HIC CONTACT MAP](https://github.com/igvteam/juicebox.js): Alignment and visualisation of HiC data\r +- [MUMMER](https://github.com/mummer4/mummer) → [CIRCOS](http://circos.ca/documentation/) + [DOTPLOT](https://plotly.com) & [MINIMAP2](https://github.com/lh3/minimap2) → [PLOTSR](https://github.com/schneebergerlab/plotsr): Synteny analysis\r +- [MERQURY](https://github.com/marbl/merqury): K-mer completeness, consensus quality and phasing assessment\r +\r +## Usage\r +\r +Refer to [usage](./docs/usage.md), [parameters](./docs/parameters.md) and [output](./docs/output.md) documents for details.\r +\r +> [!NOTE]\r +> If you are new to Nextflow and nf-core, please refer to [this page](https://nf-co.re/docs/usage/installation) on how to set-up Nextflow. Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline) with `-profile test` before running the workflow on actual data.\r +\r +Prepare an `assemblysheet.csv` file with following columns representing target assemblies and associated meta-data.\r +\r +- `tag:` A unique tag which represents the target assembly throughout the pipeline and in the final report\r +- `fasta:` FASTA file\r +\r +Now, you can run the pipeline using:\r +\r +```bash\r +nextflow run plant-food-research-open/assemblyqc \\\r + -profile \\\r + --input assemblysheet.csv \\\r + --outdir \r +```\r +\r +> [!WARNING]\r +> Please provide pipeline parameters via the CLI or Nextflow `-params-file` option. 
Custom config files including those provided by the `-c` Nextflow option can be used to provide any configuration _**except for parameters**_;\r +> see [docs](https://nf-co.re/usage/configuration#custom-configuration-files).\r +\r +### Plant&Food Users\r +\r +Download the pipeline to your `/workspace/$USER` folder. Change the parameters defined in the [pfr/params.json](./pfr/params.json) file. Submit the pipeline to SLURM for execution.\r +\r +```bash\r +sbatch ./pfr_assemblyqc\r +```\r +\r +## Credits\r +\r +plant-food-research-open/assemblyqc was originally written by Usman Rashid ([@gallvp](https://github.com/gallvp)) and Ken Smith ([@hzlnutspread](https://github.com/hzlnutspread)).\r +\r +Ross Crowhurst ([@rosscrowhurst](https://github.com/rosscrowhurst)), Chen Wu ([@christinawu2008](https://github.com/christinawu2008)) and Marcus Davy ([@mdavy86](https://github.com/mdavy86)) generously contributed their QC scripts.\r +\r +Mahesh Binzer-Panchal ([@mahesh-panchal](https://github.com/mahesh-panchal)) helped port the pipeline modules and sub-workflows to [nf-core](https://nf-co.re) schema.\r +\r +We thank the following people for their extensive assistance in the development of this pipeline:\r +\r +- [Cecilia Deng](https://github.com/CeciliaDeng)\r +- [Ignacio Carvajal](https://github.com/ignacio3437)\r +- [Jason Shiller](https://github.com/jasonshiller)\r +- [Sarah Bailey](https://github.com/SarahBailey1998)\r +- [Susan Thomson](https://github.com/cflsjt)\r +- [Ting-Hsuan Chen](https://github.com/ting-hsuan-chen)\r +\r +The pipeline uses nf-core modules contributed by following authors:\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +## Contributions and Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +## Citations\r +\r +If you use plant-food-research-open/assemblyqc for your analysis, please cite 
it as:\r +\r +> Rashid, U., Wu, C., Shiller, J., Smith, K., Crowhurst, R., Davy, M., Chen, T.-H., Thomson, S., & Deng, C. (2024). AssemblyQC: A NextFlow pipeline for evaluating assembly quality (2.0.0). Zenodo. https://doi.org/10.5281/zenodo.10647870\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +This pipeline uses code and infrastructure developed and maintained by the [nf-core](https://nf-co.re) community, reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1058?version=1" ; + schema1:isBasedOn "https://github.com/Plant-Food-Research-Open/assemblyqc" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for AssemblyQC: A NextFlow pipeline for evaluating assembly quality" ; + schema1:sdDatePublished "2024-07-12 13:17:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1058/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3163 ; + schema1:creator , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-25T01:40:30Z" ; + schema1:dateModified "2024-06-25T01:40:30Z" ; + schema1:description """[![GitHub Actions CI Status](https://github.com/plant-food-research-open/assemblyqc/actions/workflows/ci.yml/badge.svg)](https://github.com/plant-food-research-open/assemblyqc/actions/workflows/ci.yml)\r +[![GitHub Actions Linting Status](https://github.com/plant-food-research-open/assemblyqc/actions/workflows/linting.yml/badge.svg)](https://github.com/plant-food-research-open/assemblyqc/actions/workflows/linting.yml)[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.10647870-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.10647870)\r +[![nf-test](https://img.shields.io/badge/unit_tests-nf--test-337ab7.svg)](https://www.nf-test.com)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A523.04.0-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda ❌](http://img.shields.io/badge/run%20with-conda%20❌-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![Launch on Seqera Platform](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Seqera%20Platform-%234256e7)](https://cloud.seqera.io/launch?pipeline=https://github.com/plant-food-research-open/assemblyqc)\r +\r +## Introduction\r +\r +**plant-food-research-open/assemblyqc** is a [NextFlow](https://www.nextflow.io/docs/latest/index.html) pipeline which evaluates assembly quality with multiple QC tools and presents the results in a 
unified html report. The tools are shown in the [Pipeline Flowchart](#pipeline-flowchart) and their references are listed in [CITATIONS.md](./CITATIONS.md).\r +\r +## Pipeline Flowchart\r +\r +```mermaid\r +%%{init: {\r + 'theme': 'base',\r + 'themeVariables': {\r + 'fontSize': '52px",\r + 'primaryColor': '#9A6421',\r + 'primaryTextColor': '#ffffff',\r + 'primaryBorderColor': '#9A6421',\r + 'lineColor': '#B180A8',\r + 'secondaryColor': '#455C58',\r + 'tertiaryColor': '#ffffff'\r + }\r +}}%%\r +flowchart LR\r + forEachTag(Assembly) ==> VALIDATE_FORMAT[VALIDATE FORMAT]\r +\r + VALIDATE_FORMAT ==> ncbiFCS[NCBI FCS\\nADAPTOR]\r + ncbiFCS ==> Check{Check}\r +\r + VALIDATE_FORMAT ==> ncbiGX[NCBI FCS GX]\r + ncbiGX ==> Check\r + Check ==> |Clean|Run(Run)\r +\r + Check ==> |Contamination|Skip(Skip All)\r + Skip ==> REPORT\r +\r + VALIDATE_FORMAT ==> GFF_STATS[GENOMETOOLS GT STAT]\r +\r + Run ==> ASS_STATS[ASSEMBLATHON STATS]\r + Run ==> BUSCO\r + Run ==> TIDK\r + Run ==> LAI\r + Run ==> KRAKEN2\r + Run ==> HIC_CONTACT_MAP[HIC CONTACT MAP]\r + Run ==> MUMMER\r + Run ==> MINIMAP2\r + Run ==> MERQURY\r +\r + MUMMER ==> CIRCOS\r + MUMMER ==> DOTPLOT\r +\r + MINIMAP2 ==> PLOTSR\r +\r + ASS_STATS ==> REPORT\r + GFF_STATS ==> REPORT\r + BUSCO ==> REPORT\r + TIDK ==> REPORT\r + LAI ==> REPORT\r + KRAKEN2 ==> REPORT\r + HIC_CONTACT_MAP ==> REPORT\r + CIRCOS ==> REPORT\r + DOTPLOT ==> REPORT\r + PLOTSR ==> REPORT\r + MERQURY ==> REPORT\r +```\r +\r +- [FASTA VALIDATOR](https://github.com/linsalrob/fasta_validator) + [SEQKIT RMDUP](https://github.com/shenwei356/seqkit): FASTA validation\r +- [GENOMETOOLS GT GFF3VALIDATOR](https://genometools.org/tools/gt_gff3validator.html): GFF3 validation\r +- [ASSEMBLATHON STATS](https://github.com/PlantandFoodResearch/assemblathon2-analysis/blob/a93cba25d847434f7eadc04e63b58c567c46a56d/assemblathon_stats.pl): Assembly statistics\r +- [GENOMETOOLS GT STAT](https://genometools.org/tools/gt_stat.html): Annotation statistics\r +- [NCBI FCS 
ADAPTOR](https://github.com/ncbi/fcs): Adaptor contamination pass/fail\r +- [NCBI FCS GX](https://github.com/ncbi/fcs): Foreign organism contamination pass/fail\r +- [BUSCO](https://gitlab.com/ezlab/busco): Gene-space completeness estimation\r +- [TIDK](https://github.com/tolkit/telomeric-identifier): Telomere repeat identification\r +- [LAI](https://github.com/oushujun/LTR_retriever/blob/master/LAI): Continuity of repetitive sequences\r +- [KRAKEN2](https://github.com/DerrickWood/kraken2): Taxonomy classification\r +- [HIC CONTACT MAP](https://github.com/igvteam/juicebox.js): Alignment and visualisation of HiC data\r +- [MUMMER](https://github.com/mummer4/mummer) → [CIRCOS](http://circos.ca/documentation/) + [DOTPLOT](https://plotly.com) & [MINIMAP2](https://github.com/lh3/minimap2) → [PLOTSR](https://github.com/schneebergerlab/plotsr): Synteny analysis\r +- [MERQURY](https://github.com/marbl/merqury): K-mer completeness, consensus quality and phasing assessment\r +\r +## Usage\r +\r +Refer to [usage](./docs/usage.md), [parameters](./docs/parameters.md) and [output](./docs/output.md) documents for details.\r +\r +> [!NOTE]\r +> If you are new to Nextflow and nf-core, please refer to [this page](https://nf-co.re/docs/usage/installation) on how to set-up Nextflow. Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline) with `-profile test` before running the workflow on actual data.\r +\r +Prepare an `assemblysheet.csv` file with following columns representing target assemblies and associated meta-data.\r +\r +- `tag:` A unique tag which represents the target assembly throughout the pipeline and in the final report\r +- `fasta:` FASTA file\r +\r +Now, you can run the pipeline using:\r +\r +```bash\r +nextflow run plant-food-research-open/assemblyqc \\\r + -profile \\\r + --input assemblysheet.csv \\\r + --outdir \r +```\r +\r +> [!WARNING]\r +> Please provide pipeline parameters via the CLI or Nextflow `-params-file` option. 
Custom config files including those provided by the `-c` Nextflow option can be used to provide any configuration _**except for parameters**_;\r +> see [docs](https://nf-co.re/usage/configuration#custom-configuration-files).\r +\r +### Plant&Food Users\r +\r +Download the pipeline to your `/workspace/$USER` folder. Change the parameters defined in the [pfr/params.json](./pfr/params.json) file. Submit the pipeline to SLURM for execution.\r +\r +```bash\r +sbatch ./pfr_assemblyqc\r +```\r +\r +## Credits\r +\r +plant-food-research-open/assemblyqc was originally written by Usman Rashid ([@gallvp](https://github.com/gallvp)) and Ken Smith ([@hzlnutspread](https://github.com/hzlnutspread)).\r +\r +Ross Crowhurst ([@rosscrowhurst](https://github.com/rosscrowhurst)), Chen Wu ([@christinawu2008](https://github.com/christinawu2008)) and Marcus Davy ([@mdavy86](https://github.com/mdavy86)) generously contributed their QC scripts.\r +\r +Mahesh Binzer-Panchal ([@mahesh-panchal](https://github.com/mahesh-panchal)) helped port the pipeline modules and sub-workflows to [nf-core](https://nf-co.re) schema.\r +\r +We thank the following people for their extensive assistance in the development of this pipeline:\r +\r +- [Cecilia Deng](https://github.com/CeciliaDeng)\r +- [Ignacio Carvajal](https://github.com/ignacio3437)\r +- [Jason Shiller](https://github.com/jasonshiller)\r +- [Sarah Bailey](https://github.com/SarahBailey1998)\r +- [Susan Thomson](https://github.com/cflsjt)\r +- [Ting-Hsuan Chen](https://github.com/ting-hsuan-chen)\r +\r +The pipeline uses nf-core modules contributed by following authors:\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +## Contributions and Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +## Citations\r +\r +If you use plant-food-research-open/assemblyqc for your analysis, please cite 
it as:\r +\r +> Rashid, U., Wu, C., Shiller, J., Smith, K., Crowhurst, R., Davy, M., Chen, T.-H., Thomson, S., & Deng, C. (2024). AssemblyQC: A NextFlow pipeline for evaluating assembly quality (2.0.0). Zenodo. https://doi.org/10.5281/zenodo.10647870\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +This pipeline uses code and infrastructure developed and maintained by the [nf-core](https://nf-co.re) community, reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:keywords "quality control, Statistics, genome, taxonomy, Assembly, repeat, Hi-C, Report, k-mer, synteny, adaptor, fcs, contamination, phasing, BUSCO, telomere, n50, Merqury" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "AssemblyQC: A NextFlow pipeline for evaluating assembly quality" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1058?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2021-12-20T17:04:47.638855" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-se-illumina-wgs-variant-calling" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-se-illumina-wgs-variant-calling/COVID-19-SE-WGS-ILLUMINA" ; + schema1:sdDatePublished "2021-12-21 03:01:00 +0000" ; + schema1:softwareVersion "v0.1.3" . 
+ + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=25" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-07-12 13:22:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=25" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12300 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:13Z" ; + schema1:dateModified "2024-06-11T12:55:13Z" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=25" ; + schema1:version 25 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.128.4" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (PDBe REST API)" ; + schema1:sdDatePublished "2024-07-12 
13:33:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/128/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 36987 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-07-26T09:29:17Z" ; + schema1:dateModified "2023-07-26T09:29:56Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in 
Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/128?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (PDBe REST API)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_virtual-screening/blob/master/biobb_wf_virtual-screening/notebooks/ebi_api/wf_vs_ebi_api.ipynb" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# sqtlseeker2-nf\r +\r +[![nextflow](https://img.shields.io/badge/nextflow-%E2%89%A50.27.0-blue.svg)](http://nextflow.io)\r +[![CI-checks](https://github.com/guigolab/sqtlseeker2-nf/actions/workflows/ci.yaml/badge.svg)](https://github.com/guigolab/sqtlseeker2-nf/actions/workflows/ci.yaml)\r +\r +A pipeline for splicing quantitative trait loci (sQTL) mapping.\r +\r +The pipeline performs the following analysis steps:\r +\r +* Index the genotype file\r +* Preprocess the transcript expression data\r +* Test for association between splicing ratios and genetic variants in *cis* (nominal pass)\r +* Obtain an empirical P-value for each phenotype (permutation pass, optional)\r +* Control for multiple testing \r +\r +For details on each step, please read [sQTLseekeR2](https://github.com/guigolab/sQTLseekeR2) documentation.\r +\r +The pipeline uses [Nextflow](http://www.nextflow.io) as the execution backend. 
Please check [Nextflow documentation](http://www.nextflow.io/docs/latest/index.html) for more information.\r +\r +## Requirements\r +\r +- Unix-like operating system (Linux, MacOS, etc.)\r +- Java 8 or later \r +- [Docker](https://www.docker.com/) (v1.10.0 or later) or [Singularity](http://singularity.lbl.gov) (v2.5.0 or later)\r +\r +## Quickstart (~2 min)\r +\r +1. Install Nextflow:\r + ```\r + curl -fsSL get.nextflow.io | bash\r + ```\r +\r +2. Make a test run:\r + ```\r + ./nextflow run guigolab/sqtlseeker2-nf -with-docker\r + ```\r +\r + **Note**: set `-with-singularity` to use Singularity instead of Docker. \r +\r +## Pipeline usage\r +\r +Launching the pipeline with the `--help` parameter shows the help message:\r +\r +```\r +nextflow run sqtlseeker2-nf --help\r +```\r +\r +```\r +N E X T F L O W ~ version 0.27.2\r +Launching `sqtlseeker2.nf` [admiring_lichterman] - revision: 28c86caf1c\r +\r +sqtlseeker2-nf ~ A pipeline for splicing QTL mapping\r +----------------------------------------------------\r +Run sQTLseekeR2 on a set of data.\r +\r +Usage: \r + sqtlseeker2-nf [options]\r +\r +Options:\r +--genotype GENOTYPE_FILE the genotype file\r +--trexp EXPRESSION_FILE the transcript expression file\r +--metadata METADATA_FILE the metadata file\r +--genes GENES_FILE the gene location file\r +--dir DIRECTORY the output directory\r +--mode MODE the run mode: nominal or permuted (default: nominal)\r +--win WINDOW the cis window in bp (default: 5000)\r +--covariates COVARIATES include covariates in the model (default: false)\r +--fdr FDR false discovery rate level (default: 0.05)\r +--min_md MIN_MD minimum effect size reported (default: 0.05)\r +--svqtl SVQTLS report svQTLs (default: false)\r +\r +Additional parameters for mode = nominal:\r +--ld LD threshold for LD-based variant clustering (default: 0, no clustering)\r +--kn KN number of genes per batch in nominal pass (default: 10)\r +\r +Additional parameters for mode = permuted:\r +--kp KP number of genes per 
batch in permuted pass (default: 10)\r +--max_perm MAX_PERM maximum number of permutations (default: 1000)\r +```\r +\r +## Input files and format\r +\r +`sqtlseeker2-nf` takes as input files the following:\r +\r +* **Genotype file.**\r +Contains the genotype of each sample, coded as follows: 0 for REF/REF, 1 for REF/ALT, 2 for ALT/ALT, -1 for missing value.\r +The first four columns should be: `chr`, `start`, `end` and `snpId`. This file needs to be sorted by coordinate.\r +\r +* **Transcript expression file.**\r +Contains the expression of each transcript in each sample (e.g. read counts, RPKM, TPM).\r +It is not recommended to use transformed (log, quantile, or any non-linear transformation) expression.\r +Columns `trId` and `geneId`, corresponding to the transcript and gene IDs, are required. \r +\r +* **Metadata file.** Contains the covariate information for each sample. \r +In addition, it defines the groups or conditions for which sQTL mapping will be performed.\r +The first columns should be: `indId`, `sampleId`, `group`, followed by the covariates.\r +This file defines which samples will be tested.\r +\r +* **Gene location file.**\r +Contains the location of each gene. Columns `chr`, `start`, `end` and `geneId` are required. \r +This file defines which genes will be tested.\r +\r +Example [data](data) is available for the test run.\r +\r +## Pipeline results\r +\r +sQTL mapping results are saved into the folder specified with the `--dir` parameter. 
By default it is the `result` folder within the current working directory.\r +\r +Output files are organinzed into subfolders corresponding to the different `groups` specified in the metadata file: \r +\r +```\r +result\r +└── groups\r + ├── group1 \r + │   ├── all-tests.nominal.tsv \r + │   ├── all-tests.permuted.tsv \r + │   ├── sqtls-${level}fdr.nominal.tsv \r + │   └── sqtls-${level}fdr.permuted.tsv \r + ├── group2\r + ...\r +```\r +\r +Note: if only a nominal pass was run, files `*.permuted.tsv` will not be present.\r +\r +Output files contain the following information:\r +\r +`all-tests.nominal.tsv`\r +\r +* geneId: gene name \r +* snpId: variant name\r +* F: test statistic\r +* nb.groups: number of genotype groups\r +* md: maximum difference in relative expression between genotype groups (sQTL effect size)\r +* tr.first/tr.second: the transcript IDs of the two transcripts that change the most, in opposite directions\r +* info: number of individuals in each genotype group, including missing values (-1,0,1,2)\r +* pv: nominal P-value\r +\r +if `--svqtl true`\r +* F.svQTL: svQTL test statistic\r +* nb.perms.svQTL: number of permutations for svQTL test\r +* pv.svQTL: svQTL nominal P-value \r +\r +if `--ld ${r2}`\r +* LD: other variants in linkage disequilibrium with snpId above a given r2 threshold > 0\r +\r +`sqtls-${level}fdr.nominal.tsv` (in addition to the previous)\r +\r +* fdr: false discovery rate (computed across all nominal tests)\r +* fdr.svQTL: svQTL FDR\r +\r +`all-tests.permuted.tsv`\r +\r +* geneId: gene name\r +* variants.cis: number of variants tested in *cis*\r +* LD: median linkage disequilibrium in the region (r2)\r +* best.snp: ID of the top variant\r +* best.nominal.pv: P-value of the top variant\r +* shape1: first parameter value of the fitted beta distribution\r +* shape2: second parameter value of the fitted beta distribution (effective number of independent tests in the region)\r +* nb.perm: number of permutations\r +* pv.emp.perm: 
empirical P-value, computed based on permutations\r +* pv.emp.beta: empirical P-value, computed based on the fitted beta distribution\r +* runtime: run time in minutes\r +\r +`sqtls-${level}fdr.nominal.tsv` (in addition to the previous)\r +\r +* fdr: false discovery rate (computed across empirical P-values)\r +* p_tn: gene-level threshold for nominal P-values\r +\r +## Cite sqtlseeker2-nf\r +\r +If you find `sqtlseeker2-nf` useful in your research please cite the related publication:\r +\r +Garrido-Martín, D., Borsari, B., Calvo, M., Reverter, F., Guigó, R. Identification and analysis of splicing quantitative trait loci across multiple tissues in the human genome. *Nat Commun* 12, 727 (2021). [https://doi.org/10.1038/s41467-020-20578-2](https://doi.org/10.1038/s41467-020-20578-2)\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/435?version=1" ; + schema1:isBasedOn "https://github.com/guigolab/sqtlseeker2-nf.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for sqtlseeker2-nf" ; + schema1:sdDatePublished "2024-07-12 13:34:30 +0100" ; + schema1:url "https://workflowhub.eu/workflows/435/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9703 ; + schema1:creator , + ; + schema1:dateCreated "2023-02-15T11:54:54Z" ; + schema1:dateModified "2023-02-15T12:02:21Z" ; + schema1:description """# sqtlseeker2-nf\r +\r +[![nextflow](https://img.shields.io/badge/nextflow-%E2%89%A50.27.0-blue.svg)](http://nextflow.io)\r +[![CI-checks](https://github.com/guigolab/sqtlseeker2-nf/actions/workflows/ci.yaml/badge.svg)](https://github.com/guigolab/sqtlseeker2-nf/actions/workflows/ci.yaml)\r +\r +A pipeline for splicing quantitative trait loci (sQTL) mapping.\r +\r +The pipeline performs the following analysis steps:\r +\r +* Index the genotype file\r +* Preprocess the transcript expression data\r +* Test for association between splicing ratios and genetic variants in *cis* (nominal pass)\r +* Obtain an empirical P-value for each phenotype (permutation pass, optional)\r +* Control for multiple testing \r +\r +For details on each step, please read [sQTLseekeR2](https://github.com/guigolab/sQTLseekeR2) documentation.\r +\r +The pipeline uses [Nextflow](http://www.nextflow.io) as the execution backend. Please check [Nextflow documentation](http://www.nextflow.io/docs/latest/index.html) for more information.\r +\r +## Requirements\r +\r +- Unix-like operating system (Linux, MacOS, etc.)\r +- Java 8 or later \r +- [Docker](https://www.docker.com/) (v1.10.0 or later) or [Singularity](http://singularity.lbl.gov) (v2.5.0 or later)\r +\r +## Quickstart (~2 min)\r +\r +1. Install Nextflow:\r + ```\r + curl -fsSL get.nextflow.io | bash\r + ```\r +\r +2. Make a test run:\r + ```\r + ./nextflow run guigolab/sqtlseeker2-nf -with-docker\r + ```\r +\r + **Note**: set `-with-singularity` to use Singularity instead of Docker. 
\r +\r +## Pipeline usage\r +\r +Launching the pipeline with the `--help` parameter shows the help message:\r +\r +```\r +nextflow run sqtlseeker2-nf --help\r +```\r +\r +```\r +N E X T F L O W ~ version 0.27.2\r +Launching `sqtlseeker2.nf` [admiring_lichterman] - revision: 28c86caf1c\r +\r +sqtlseeker2-nf ~ A pipeline for splicing QTL mapping\r +----------------------------------------------------\r +Run sQTLseekeR2 on a set of data.\r +\r +Usage: \r + sqtlseeker2-nf [options]\r +\r +Options:\r +--genotype GENOTYPE_FILE the genotype file\r +--trexp EXPRESSION_FILE the transcript expression file\r +--metadata METADATA_FILE the metadata file\r +--genes GENES_FILE the gene location file\r +--dir DIRECTORY the output directory\r +--mode MODE the run mode: nominal or permuted (default: nominal)\r +--win WINDOW the cis window in bp (default: 5000)\r +--covariates COVARIATES include covariates in the model (default: false)\r +--fdr FDR false discovery rate level (default: 0.05)\r +--min_md MIN_MD minimum effect size reported (default: 0.05)\r +--svqtl SVQTLS report svQTLs (default: false)\r +\r +Additional parameters for mode = nominal:\r +--ld LD threshold for LD-based variant clustering (default: 0, no clustering)\r +--kn KN number of genes per batch in nominal pass (default: 10)\r +\r +Additional parameters for mode = permuted:\r +--kp KP number of genes per batch in permuted pass (default: 10)\r +--max_perm MAX_PERM maximum number of permutations (default: 1000)\r +```\r +\r +## Input files and format\r +\r +`sqtlseeker2-nf` takes as input files the following:\r +\r +* **Genotype file.**\r +Contains the genotype of each sample, coded as follows: 0 for REF/REF, 1 for REF/ALT, 2 for ALT/ALT, -1 for missing value.\r +The first four columns should be: `chr`, `start`, `end` and `snpId`. This file needs to be sorted by coordinate.\r +\r +* **Transcript expression file.**\r +Contains the expression of each transcript in each sample (e.g. 
read counts, RPKM, TPM).\r +It is not recommended to use transformed (log, quantile, or any non-linear transformation) expression.\r +Columns `trId` and `geneId`, corresponding to the transcript and gene IDs, are required. \r +\r +* **Metadata file.** Contains the covariate information for each sample. \r +In addition, it defines the groups or conditions for which sQTL mapping will be performed.\r +The first columns should be: `indId`, `sampleId`, `group`, followed by the covariates.\r +This file defines which samples will be tested.\r +\r +* **Gene location file.**\r +Contains the location of each gene. Columns `chr`, `start`, `end` and `geneId` are required. \r +This file defines which genes will be tested.\r +\r +Example [data](data) is available for the test run.\r +\r +## Pipeline results\r +\r +sQTL mapping results are saved into the folder specified with the `--dir` parameter. By default it is the `result` folder within the current working directory.\r +\r +Output files are organinzed into subfolders corresponding to the different `groups` specified in the metadata file: \r +\r +```\r +result\r +└── groups\r + ├── group1 \r + │   ├── all-tests.nominal.tsv \r + │   ├── all-tests.permuted.tsv \r + │   ├── sqtls-${level}fdr.nominal.tsv \r + │   └── sqtls-${level}fdr.permuted.tsv \r + ├── group2\r + ...\r +```\r +\r +Note: if only a nominal pass was run, files `*.permuted.tsv` will not be present.\r +\r +Output files contain the following information:\r +\r +`all-tests.nominal.tsv`\r +\r +* geneId: gene name \r +* snpId: variant name\r +* F: test statistic\r +* nb.groups: number of genotype groups\r +* md: maximum difference in relative expression between genotype groups (sQTL effect size)\r +* tr.first/tr.second: the transcript IDs of the two transcripts that change the most, in opposite directions\r +* info: number of individuals in each genotype group, including missing values (-1,0,1,2)\r +* pv: nominal P-value\r +\r +if `--svqtl true`\r +* F.svQTL: svQTL 
test statistic\r +* nb.perms.svQTL: number of permutations for svQTL test\r +* pv.svQTL: svQTL nominal P-value \r +\r +if `--ld ${r2}`\r +* LD: other variants in linkage disequilibrium with snpId above a given r2 threshold > 0\r +\r +`sqtls-${level}fdr.nominal.tsv` (in addition to the previous)\r +\r +* fdr: false discovery rate (computed across all nominal tests)\r +* fdr.svQTL: svQTL FDR\r +\r +`all-tests.permuted.tsv`\r +\r +* geneId: gene name\r +* variants.cis: number of variants tested in *cis*\r +* LD: median linkage disequilibrium in the region (r2)\r +* best.snp: ID of the top variant\r +* best.nominal.pv: P-value of the top variant\r +* shape1: first parameter value of the fitted beta distribution\r +* shape2: second parameter value of the fitted beta distribution (effective number of independent tests in the region)\r +* nb.perm: number of permutations\r +* pv.emp.perm: empirical P-value, computed based on permutations\r +* pv.emp.beta: empirical P-value, computed based on the fitted beta distribution\r +* runtime: run time in minutes\r +\r +`sqtls-${level}fdr.nominal.tsv` (in addition to the previous)\r +\r +* fdr: false discovery rate (computed across empirical P-values)\r +* p_tn: gene-level threshold for nominal P-values\r +\r +## Cite sqtlseeker2-nf\r +\r +If you find `sqtlseeker2-nf` useful in your research please cite the related publication:\r +\r +Garrido-Martín, D., Borsari, B., Calvo, M., Reverter, F., Guigó, R. Identification and analysis of splicing quantitative trait loci across multiple tissues in the human genome. *Nat Commun* 12, 727 (2021). 
[https://doi.org/10.1038/s41467-020-20578-2](https://doi.org/10.1038/s41467-020-20578-2)\r +\r +""" ; + schema1:keywords "QTL mapping, rna-seq, SNPs, Nextflow, Alternative splicing" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "sqtlseeker2-nf" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/435?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Genes and transcripts annotation with Isoseq using uLTRA and TAMA" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/993?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/isoseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/isoseq" ; + schema1:sdDatePublished "2024-07-12 13:21:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/993/ro_crate?version=7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9995 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:58Z" ; + schema1:dateModified "2024-06-11T12:54:58Z" ; + schema1:description "Genes and transcripts annotation with Isoseq using uLTRA and TAMA" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/993?version=6" ; + schema1:keywords "isoseq, isoseq-3, rna, tama, ultra" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/isoseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/993?version=7" ; + schema1:version 7 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=12" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-07-12 13:20:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=12" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5836 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:03Z" ; + schema1:dateModified "2024-06-11T12:55:03Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=12" ; + schema1:version 12 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """SINGLE-END workflow. \r +Align reads on fasta reference/assembly using bwa mem, get a consensus, variants, mutation explanations. \r +\r +IMPORTANT: \r +* For "bcftools call" consensus step, the --ploidy file is in "Données partagées" (Shared Data) and must be imported in your history to use the worflow by providing this file (tells bcftools to consider haploid variant calling). 
\r +* SELECT the mot ADAPTED VADR MODEL for annotation (see vadr parameters).""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/517?version=1" ; + schema1:isBasedOn "https://github.com/ANSES-Ploufragan/vvv2_display" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for VVV2_align_SE" ; + schema1:sdDatePublished "2024-07-12 13:27:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/517/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 30291 ; + schema1:creator ; + schema1:dateCreated "2023-06-27T14:41:53Z" ; + schema1:dateModified "2023-10-16T12:03:17Z" ; + schema1:description """SINGLE-END workflow. \r +Align reads on fasta reference/assembly using bwa mem, get a consensus, variants, mutation explanations. \r +\r +IMPORTANT: \r +* For "bcftools call" consensus step, the --ploidy file is in "Données partagées" (Shared Data) and must be imported in your history to use the worflow by providing this file (tells bcftools to consider haploid variant calling). \r +* SELECT the mot ADAPTED VADR MODEL for annotation (see vadr parameters).""" ; + schema1:image ; + schema1:keywords "single-end, Annotation, variant, Virus, variant_calling, high-throughput_sequencing_analysis, Galaxy, Bioinformatics, SNPs, variant calling" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "VVV2_align_SE" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/517?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 222967 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Common Workflow Language example that illustrate the process of setting up a\r +simulation system containing a protein, step by step, using the BioExcel\r +Building Blocks library (biobb). The particular example used is the Lysozyme\r +protein (PDB code 1AKI).\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.29.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb-wf-md-setup-protein-cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Example of setting up a simulation system" ; + schema1:sdDatePublished "2024-07-12 13:36:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/29/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11233 ; + schema1:creator ; + schema1:dateCreated "2021-05-07T15:27:34Z" ; + schema1:dateModified "2021-05-07T15:28:30Z" ; + schema1:description """Common Workflow Language example that illustrate the process of setting up a\r +simulation system containing a protein, step by step, using the BioExcel\r +Building Blocks library (biobb). 
The particular example used is the Lysozyme\r +protein (PDB code 1AKI).\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/29?version=2" ; + schema1:isPartOf , + ; + schema1:keywords "molecular dynamics, trajectories, protein" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Example of setting up a simulation system" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/29?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 53141 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "16S rRNA amplicon sequencing analysis workflow using QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-07-12 13:18:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4808 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:38Z" ; + schema1:dateModified "2024-06-11T12:54:38Z" ; + schema1:description "16S rRNA amplicon sequencing analysis workflow using QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in 
Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.131.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/131/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 77840 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T08:35:44Z" ; + schema1:dateModified "2023-04-14T08:37:57Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 
[Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/131?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_amber_md_setup/master/biobb_wf_amber_md_setup/notebooks/mdsetup_lig/biobb_amber_complex_setup_notebook.ipynb" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """Galaxy version of pre-processing of reads from COVID-19 samples. \r +QC + human read cleaning\r +Based on https://github.com/Finn-Lab/Metagen-FastQC/blob/master/metagen-fastqc.sh""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/99?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for COVID-19: read pre-processing" ; + schema1:sdDatePublished "2024-07-12 13:37:18 +0100" ; + schema1:url "https://workflowhub.eu/workflows/99/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13217 ; + schema1:dateCreated "2021-02-02T19:06:59Z" ; + schema1:dateModified "2023-02-13T14:06:45Z" ; + schema1:description """Galaxy version of pre-processing of reads from COVID-19 samples. 
\r +QC + human read cleaning\r +Based on https://github.com/Finn-Lab/Metagen-FastQC/blob/master/metagen-fastqc.sh""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "COVID-19: read pre-processing" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/99?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """Assembly polishing subworkflow: Racon polishing with long reads\r +\r +Inputs: long reads and assembly contigs\r +\r +Workflow steps:\r +* minimap2 : long reads are mapped to assembly => overlaps.paf. \r +* overaps, long reads, assembly => Racon => polished assembly 1\r +* using polished assembly 1 as input; repeat minimap2 + racon => polished assembly 2\r +* using polished assembly 2 as input, repeat minimap2 + racon => polished assembly 3\r +* using polished assembly 3 as input, repeat minimap2 + racon => polished assembly 4\r +* Racon long-read polished assembly => Fasta statistics\r +* Note: The Racon tool panel can be a bit confusing and is under review for improvement. Presently it requires sequences (= long reads), overlaps (= the paf file created by minimap2), and target sequences (= the contigs to be polished) as per "usage" described here https://github.com/isovic/racon/blob/master/README.md\r +* Note: Racon: the default setting for "output unpolished target sequences?" is No. This has been changed to Yes for all Racon steps in these polishing workflows. This means that even if no polishes are made in some contigs, they will be part of the output fasta file. \r +* Note: the contigs output by Racon have new tags in their headers. 
For more on this see https://github.com/isovic/racon/issues/85.\r +\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.227.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Racon polish with long reads, x4" ; + schema1:sdDatePublished "2024-07-12 13:36:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/227/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 26968 ; + schema1:creator ; + schema1:dateCreated "2021-11-08T05:45:09Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """Assembly polishing subworkflow: Racon polishing with long reads\r +\r +Inputs: long reads and assembly contigs\r +\r +Workflow steps:\r +* minimap2 : long reads are mapped to assembly => overlaps.paf. \r +* overaps, long reads, assembly => Racon => polished assembly 1\r +* using polished assembly 1 as input; repeat minimap2 + racon => polished assembly 2\r +* using polished assembly 2 as input, repeat minimap2 + racon => polished assembly 3\r +* using polished assembly 3 as input, repeat minimap2 + racon => polished assembly 4\r +* Racon long-read polished assembly => Fasta statistics\r +* Note: The Racon tool panel can be a bit confusing and is under review for improvement. Presently it requires sequences (= long reads), overlaps (= the paf file created by minimap2), and target sequences (= the contigs to be polished) as per "usage" described here https://github.com/isovic/racon/blob/master/README.md\r +* Note: Racon: the default setting for "output unpolished target sequences?" is No. This has been changed to Yes for all Racon steps in these polishing workflows. 
This means that even if no polishes are made in some contigs, they will be part of the output fasta file. \r +* Note: the contigs output by Racon have new tags in their headers. For more on this see https://github.com/isovic/racon/issues/85.\r +\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "Large-genome-assembly" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Racon polish with long reads, x4" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/227?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 344676 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-23T13:33:07.965408" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/kmer-profiling-hifi-trio-VGP2" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "kmer-profiling-hifi-trio-VGP2/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Rbbt implementation of the Covid-19 pilot workflow from the Personalized Medicine Center of Excellence.\r +\r +This workflow processes single cell data to personalize boolean models that are then used in a multi-scale cellular simulation using PhysiBoSS.""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/469?version=1" ; + schema1:isBasedOn "https://github.com/Rbbt-Workflows/Covid19.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PerMedCoE Covid19 Pilot workflow (Rbbt)" ; + schema1:sdDatePublished "2024-07-12 13:33:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/469/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5368 ; + schema1:dateCreated "2023-05-09T09:43:39Z" ; + schema1:dateModified "2023-05-23T12:33:53Z" ; + schema1:description """Rbbt implementation of the Covid-19 pilot workflow from the Personalized Medicine Center of Excellence.\r +\r +This workflow processes single cell data to personalize boolean models that are then used in a multi-scale cellular simulation using PhysiBoSS.""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "PerMedCoE Covid19 Pilot workflow (Rbbt)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/469?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.54.5" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_ligand_parameterization" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter GMX Notebook Automatic Ligand Parameterization tutorial" ; + 
schema1:sdDatePublished "2024-07-12 13:24:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/54/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15271 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-07-26T09:20:04Z" ; + schema1:dateModified "2023-07-26T09:21:07Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/54?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter GMX Notebook Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_ligand_parameterization/blob/master/biobb_wf_ligand_parameterization/notebooks/biobb_ligand_parameterization_tutorial.ipynb" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Simple bacterial assembly and annotation" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/966?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/bacass" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bacass" ; + schema1:sdDatePublished "2024-07-12 13:22:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/966/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7151 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:43Z" ; + schema1:dateModified "2024-06-11T12:54:43Z" ; + schema1:description "Simple bacterial assembly and annotation" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/966?version=7" ; + schema1:keywords "Assembly, bacterial-genomes, denovo, denovo-assembly, genome-assembly, hybrid-assembly, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bacass" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/966?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/383?version=3" ; + schema1:isBasedOn "https://gitlab.bsc.es/lrodrig1/structuralvariants_poc/-/tree/1.1.3/structuralvariants/nextflow" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNV_pipeline" ; + schema1:sdDatePublished "2024-07-12 13:35:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/383/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4731 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-10-11T10:58:35Z" ; + schema1:dateModified "2023-01-16T14:02:25Z" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/383?version=2" ; + schema1:keywords "CODEX2, TransBioNet, ExomeDepth, variant calling, cancer, manta, GRIDS, structural variants" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CNV_pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/383?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Integration of viral sequences in genomic data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1026?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/viralintegration" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/viralintegration" ; + schema1:sdDatePublished "2024-07-12 13:18:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1026/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8264 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:20Z" ; + schema1:dateModified "2024-06-11T12:55:20Z" ; + schema1:description "Integration of viral sequences in genomic data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1026?version=1" ; + schema1:keywords "chimeric-alignment, ctat, viral-integration, Virus, virusintegrationfinder" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/viralintegration" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1026?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# BatchConvert\r +\r +A command line tool for converting image data into either of the standard file formats OME-TIFF or OME-Zarr. \r +\r +The tool wraps the dedicated file converters bfconvert and bioformats2raw to convert into OME-TIFF or OME-Zarr,\r +respectively. The workflow management system NextFlow is used to perform conversion in parallel for batches of images. \r +\r +The tool also wraps s3 and Aspera clients (go-mc and aspera-cli, respectively). Therefore, input and output locations can \r +be specified as local or remote storage and file transfer will be performed automatically. The conversion can be run on \r +HPC with Slurm. 
\r +\r +![](figures/diagram.png)\r +\r +## Installation & Dependencies\r +\r +**Important** note: The package has been so far only tested on Ubuntu 20.04.\r +\r +The minimal dependency to run the tool is NextFlow, which should be installed and made accessible from the command line.\r +\r +If conda exists on your system, you can install BatchConvert together with NextFlow using the following script:\r +```\r +git clone https://github.com/Euro-BioImaging/BatchConvert.git && \\ \r +source BatchConvert/installation/install_with_nextflow.sh\r +```\r +\r +\r +If you already have NextFlow installed and accessible from the command line (or if you prefer to install it manually \r +e.g., as shown [here](https://www.nextflow.io/docs/latest/getstarted.html)), you can also install BatchConvert alone, using the following script:\r +```\r +git clone https://github.com/Euro-BioImaging/BatchConvert.git && \\ \r +source BatchConvert/installation/install.sh\r +```\r +\r +\r +Other dependencies (which will be **automatically** installed):\r +- bioformats2raw (entrypoint bioformats2raw)\r +- bftools (entrypoint bfconvert)\r +- go-mc (entrypoint mc)\r +- aspera-cli (entrypoint ascp)\r +\r +These dependencies will be pulled and cached automatically at the first execution of the conversion command. \r +The mode of dependency management can be specified by using the command line option ``--profile`` or `-pf`. Depending \r +on how this option is specified, the dependencies will be acquired / run either via conda or via docker/singularity containers. \r +\r +Specifying ``--profile conda`` (default) will install the dependencies to an \r +environment at ``./.condaCache`` and use this environment to run the workflow. This option \r +requires that miniconda/anaconda is installed on your system. 
\r +\r +Alternatively, specifying ``--profile docker`` or ``--profile singularity`` will pull a docker or \r +singularity image with the dependencies, respectively, and use this image to run the workflow.\r +These options assume that the respective container runtime (docker or singularity) is available on \r +your system. If singularity is being used, a cache directory will be created at the path \r +``./.singularityCache`` where the singularity image is stored. \r +\r +Finally, you can still choose to install the dependencies manually and use your own installations to run\r +the workflow. In this case, you should specify ``--profile standard`` and make sure the entrypoints\r +specified above are recognised by your shell. \r +\r +\r +## Configuration\r +\r +BatchConvert can be configured to have default options for file conversion and transfer. Probably, the most important sets of parameters\r +to be configured include credentials for the remote ends. The easiest way to configure remote stores is by running the interactive \r +configuration command as indicated below.\r +\r +### Configuration of the s3 object store\r +\r +Run the interactive configuration command: \r +\r +`batchconvert configure_s3_remote`\r +\r +This will start a sequence of requests for s3 credentials such as name, url, access, etc. Provide each requested credential and click\r +enter. Continue this cycle until the process is finished. 
Upon completing the configuration, the sequence of commands should roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_s3_remote\r +enter remote name (for example s3)\r +s3\r +enter url:\r +https://s3.embl.de\r +enter access key:\r +\r +enter secret key:\r +\r +enter bucket name:\r +\r +Configuration of the default s3 credentials is complete\r +```\r +\r +\r +### Configuration of the BioStudies user space\r +\r +Run the interactive configuration command: \r +\r +`batchconvert configure_bia_remote`\r +\r +This will prompt a request for the secret directory to connect to. Enter the secret directory for your user space and click enter. \r +Upon completing the configuration, the sequence of commands should roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_bia_remote\r +enter the secret directory for BioImage Archive user space:\r +\r +configuration of the default bia credentials is complete\r +```\r +\r +### Configuration of the slurm options\r +\r +BatchConvert can also run on slurm clusters. In order to configure the slurm parameters, run the interactive configuration command: \r +\r +`batchconvert configure_slurm`\r +\r +This will start a sequence of requests for slurm options. Provide each requested option and click enter. \r +Continue this cycle until the process is finished. 
Upon completing the configuration, the sequence of commands should \r +roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_slurm\r +Please enter value for queue_size\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´50´\r +s\r +Please enter value for submit_rate_limit\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´10/2min´\r +s\r +Please enter value for cluster_options\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´--mem-per-cpu=3140 --cpus-per-task=16´\r +s\r +Please enter value for time\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´6h´\r +s\r +configuration of the default slurm parameters is complete\r +```\r +\r +### Configuration of the default conversion parameters\r +\r +While all conversion parameters can be specified as command line arguments, it can\r +be useful for the users to set their own default parameters to avoid re-entering those\r +parameters for subsequent executions. BatchConvert allows for interactive configuration of \r +conversion in the same way as configuration of the remote stores described above.\r +\r +To configure the conversion into OME-TIFF, run the following command:\r +\r +`batchconvert configure_ometiff`\r +\r +This will prompt the user to enter a series of parameters, which will then be saved as the \r +default parameters to be passed to the `batchconvert ometiff` command. 
Upon completing the \r +configuration, the sequence of commands should look similar to:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_ometiff\r +Please enter value for noflat\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for series\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for timepoint\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +...\r +...\r +...\r +...\r +...\r +...\r +Configuration of the default parameters for 'bfconvert' is complete\r +```\r +\r +\r +To configure the conversion into OME-Zarr, run the following command:\r +\r +`batchconvert configure_omezarr`\r +\r +Similarly, this will prompt the user to enter a series of parameters, which will then be saved as the \r +default parameters to be passed to the `batchconvert omezarr` command. 
Upon completing the configuration, \r +the sequence of commands should look similar to:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_omezarr\r +Please enter value for resolutions_zarr\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for chunk_h\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for chunk_w\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +...\r +...\r +...\r +...\r +...\r +...\r +Configuration of the default parameters for 'bioformats2raw' is complete\r +```\r +\r +It is important to note that the initial defaults for the conversion parameters are the same as the defaults\r +of the backend tools bfconvert and bioformats2raw, as noted in the prompt excerpt above. Through interactive configuration, \r +the user is overriding these initial defaults and setting their own defaults. It is possible to reset the initial\r +defaults by running the following command.\r +\r +`batchconvert reset_defaults`\r +\r +Another important point is that any of these configured parameters can be overridden by passing a value to that\r +parameter in the commandline. For instance, in the following command, the value of 20 will be assigned to `chunk_h` parameter \r +even if the value for the same parameter might be different in the configuration file. \r +\r +`batchconvert omezarr --chunk_h 20 `\r +\r +\r +## Examples\r +\r +### Local conversion\r +\r +#### Parallel conversion of files to separate OME-TIFFs / OME-Zarrs:\r +Convert a batch of images on your local storage into OME-TIFF format. 
\r +Note that the `input_path` in the command given below is typically a \r +directory with multiple image files but a single image file can also be passed:\\\r +`batchconvert ometiff -pf conda ` \r +\r +Note that if this is your first conversion with the profile `conda`, \r +it will take a while for a conda environment with the dependencies to be\r +created. All the subsequent conversion commands with the profile `conda`,\r +however, will use this environment, and thus show no such delay.\r +\r +Since conda is the default profile, it does not have to be \r +explicitly included in the command line. Thus, the command can be shortened to:\\\r +`batchconvert ometiff `\r +\r +Convert only the first channel of the images:\\\r +`batchconvert ometiff -chn 0 `\r +\r +Crop the images being converted along x and y axis by 150 pixels:\\\r +`batchconvert ometiff -cr 0,0,150,150 `\r +\r +Convert into OME-Zarr instead:\\\r +`batchconvert omezarr `\r +\r +Convert into OME-Zarr with 3 resolution levels:\\\r +`batchconvert omezarr -rz 3 `\r +\r +Select a subset of images with a matching string such as "mutation":\\\r +`batchconvert omezarr -p mutation `\r +\r +Select a subset of images using wildcards. Note that the use of "" around \r +the input path is necessary when using wildcards:\\\r +`batchconvert omezarr "/*D3*.oir" `\r +\r +Convert by using a singularity container instead of conda environment (requires\r +singularity to be installed on your system):\\\r +`batchconvert omezarr -pf singularity "/*D3*.oir" `\r +\r +Convert by using a docker container instead of conda environment (requires docker\r +to be installed on your system):\\\r +`batchconvert omezarr -pf docker "/*D3*.oir" `\r +\r +Note that similarly to the case with the profile `conda`, the first execution of\r +a conversion with the profile `singularity` or `docker` will take a while for the\r +container image to be pulled. 
All the subsequent conversion commands using a \r +container option will use this image, and thus show no such delay. \r +\r +Convert local data and upload the output to an s3 bucket. Note that the output \r +path is created relative to the bucket specified in your s3 configuration:\\\r +`batchconvert omezarr -dt s3 `\r +\r +Receive input files from an s3 bucket, convert locally and upload the output to \r +the same bucket. Note that wildcards cannot be used when the input is from s3. \r +Use pattern matching option `-p` for selecting a subset of input files:\\\r +`batchconvert omezarr -p mutation -st s3 -dt s3 `\r +\r +Receive input files from your private BioStudies user space and convert them locally.\r +Use pattern matching option `-p` for selecting a subset of input files:\\\r +`batchconvert omezarr -p mutation -st bia `\r +\r +Receive an input from an s3 bucket, convert locally and upload the output to your \r +private BioStudies user space. Use pattern matching option `-p` for selecting a subset \r +of input files:\\\r +`batchconvert omezarr -p mutation -st s3 -dt bia `\r +\r +Note that in all the examples shown above, BatchConvert treats each input file as separate,\r +standalone data point, disregarding the possibility that some of the input files might belong to \r +the same multidimensional array. Thus, each input file is converted to an independent \r +OME-TIFF / OME-Zarr and the number of outputs will thus equal the number of selected input files.\r +An alternative scenario is discussed below.\r +\r +#### Parallel conversion of file groups by stacking multiple files into single OME-TIFFs / OME-Zarrs:\r +\r +When the flag `--merge_files` is specified, BatchConvert tries to detect which input files might \r +belong to the same multidimensional array based on the patterns in the filenames. 
Then a "grouped conversion" \r +is performed, meaning that the files belonging to the same dataset will be incorporated into \r +a single OME-TIFF / OME-Zarr series, in that files will be concatenated along specific dimension(s) \r +during the conversion. Multiple file groups in the input directory can be detected and converted \r +in parallel. \r +\r +This feature uses Bio-Formats's pattern files as described [here](https://docs.openmicroscopy.org/bio-formats/6.6.0/formats/pattern-file.html).\r +However, BatchConvert generates pattern files automatically, allowing the user to directly use the \r +input directory in the conversion command. BatchConvert also has the option of specifying the \r +concatenation axes in the command line, which is especially useful in cases where the filenames \r +may not contain dimension information. \r +\r +To be able to use the `--merge files` flag, the input file names must obey certain rules:\r +1. File names in the same group must be uniform, except for one or more **numeric field(s)**, which\r +should show incremental change across the files. These so-called **variable fields** \r +will be detected and used as the dimension(s) of concatenation.\r +2. The length of variable fields must be uniform within the group. For instance, if the\r +variable field has values reaching multi-digit numbers, leading "0"s should be included where needed \r +in the file names to make the variable field length uniform within the group.\r +3. Typically, each variable field should follow a dimension specifier. What patterns can be used as \r +dimension specifiers are explained [here](https://docs.openmicroscopy.org/bio-formats/6.6.0/formats/pattern-file.html).\r +However, BatchConvert also has the option `--concatenation_order`, which allows the user to\r +specify from the command line, the dimension(s), along which the files must be concatenated.\r +4. 
File names that are unique and cannot be associated with any group will be assumed as\r +standalone images and converted accordingly. \r +\r +Below are some examples of grouped conversion commands in the context of different possible use-case scenarios:\r +\r +**Example 1:**\r +\r +This is an example of a folder with non-uniform filename lengths:\r +```\r +time-series/test_img_T2\r +time-series/test_img_T4\r +time-series/test_img_T6\r +time-series/test_img_T8\r +time-series/test_img_T10\r +time-series/test_img_T12\r +```\r +In this example, leading zeroes are missing in the variable fields of some filenames. \r +A typical command to convert this folder to a single OME-TIFF would look like: \\\r +`batchconvert --ometiff --merge_files /time-series `\r +\r +However, this command would fail to create a single OME-Zarr folder due to the non-uniform \r +lengths of the filenames. Instead, the files would be split into two groups based on the\r +filename length, leading to two separate OME-Zarrs with names:\r +\r +`test_img_TRange{2-8-2}.ome.zarr` and `test_img_TRange{10-12-2}.ome.zarr`\r +\r +Here is the corrected version of the folder for the above example-\r +```\r +time-series/test_img_T02\r +time-series/test_img_T04\r +time-series/test_img_T06\r +time-series/test_img_T08\r +time-series/test_img_T10\r +time-series/test_img_T12\r +```\r +\r +Executing the same command on this folder would result in a single OME-Zarr with the name:\r +`test_img_TRange{02-12-2}.ome.zarr`\r +\r +**Example 2**- \r +\r +In this example, the filename lengths are uniform but the incrementation within the variable field is not.\r +```\r +time-series/test_img_T2\r +time-series/test_img_T4\r +time-series/test_img_T5\r +time-series/test_img_T7\r +```\r +\r +A typical command to convert this folder to a single OME-Zarr would look like: \\\r +`batchconvert --omezarr --merge_files /time-series `\r +\r +However, the command would fail to assume these files as a single group due to the\r +non-uniform 
incrementation in the variable field of the filenames. Instead, the dataset \r +would be split into two groups, leading to two separate OME-Zarrs with the following names:\r +`test_img_TRange{2-4-2}.ome.zarr` and `test_img_TRange{5-7-2}.ome.zarr` \r +\r +\r +**Example 3**\r +\r +This is an example of a case where the conversion attempts to concatenate files along two\r +dimensions, channel and time.\r +```\r +multichannel_time-series/test_img_C1-T1\r +multichannel_time-series/test_img_C1-T2\r +multichannel_time-series/test_img_C1-T3\r +multichannel_time-series/test_img_C2-T1\r +multichannel_time-series/test_img_C2-T2\r +```\r +To convert this folder to a single OME-Zarr, one could try the following command: \\\r +`batchconvert --omezarr --merge_files /multichannel_time-series `\r +\r +However, since the channel-2 does not have the same number of timeframes as the channel-1, \r +BatchConvert will fail to assume these two channels as part of the same series and\r +will instead split the two channels into two separate OME-Zarrs. 
\r +\r +The output would look like: \\\r +`test_img_C1-TRange{1-3-1}.ome.zarr` \\\r +`test_img_C2-TRange{1-2-1}.ome.zarr`\r +\r +To be able to really incorporate all files into a single OME-Zarr, the folder should have equal\r +number of images corresponding to both channels, as shown below:\r +```\r +multichannel_time-series/test_img_C1-T1\r +multichannel_time-series/test_img_C1-T2\r +multichannel_time-series/test_img_C1-T3\r +multichannel_time-series/test_img_C2-T1\r +multichannel_time-series/test_img_C2-T2\r +multichannel_time-series/test_img_C2-T3\r +```\r +The same conversion command on this version of the input folder would result in a single \r +OME-Zarr with the name: \\\r +`test_img_CRange{1-2-1}-TRange{1-3-1}.ome.zarr`\r +\r +\r +**Example 4**\r +\r +This is another example of a case, where there are multiple filename patterns in the input folder.\r +\r +```\r +folder_with_multiple_groups/test_img_C1-T1\r +folder_with_multiple_groups/test_img_C1-T2\r +folder_with_multiple_groups/test_img_C2-T1\r +folder_with_multiple_groups/test_img_C2-T2\r +folder_with_multiple_groups/test_img_T1-Z1\r +folder_with_multiple_groups/test_img_T1-Z2\r +folder_with_multiple_groups/test_img_T1-Z3\r +folder_with_multiple_groups/test_img_T2-Z1\r +folder_with_multiple_groups/test_img_T2-Z2\r +folder_with_multiple_groups/test_img_T2-Z3\r +```\r +\r +One can convert this folder with- \\\r +`batchconvert --omezarr --merge_files /folder_with_multiple_groups `\r + \r +BatchConvert will detect the two patterns in this folder and perform two grouped conversions. \r +The output folders will be named as `test_img_CRange{1-2-1}-TRange{1-2-1}.ome.zarr` and \r +`test_img_TRange{1-2-1}-ZRange{1-3-1}.ome.zarr`. 
\r +\r +\r +**Example 5**\r +\r +Now imagine that we have the same files as in the example 4 but the filenames of the\r +first group lack any dimension specifier, so we have the following folder:\r +\r +```\r +folder_with_multiple_groups/test_img_1-1\r +folder_with_multiple_groups/test_img_1-2\r +folder_with_multiple_groups/test_img_2-1\r +folder_with_multiple_groups/test_img_2-2\r +folder_with_multiple_groups/test_img_T1-Z1\r +folder_with_multiple_groups/test_img_T1-Z2\r +folder_with_multiple_groups/test_img_T1-Z3\r +folder_with_multiple_groups/test_img_T2-Z1\r +folder_with_multiple_groups/test_img_T2-Z2\r +folder_with_multiple_groups/test_img_T2-Z3\r +```\r +\r +In such a scenario, BatchConvert allows the user to specify the concatenation axes \r +via `--concatenation_order` option. This option expects comma-separated strings of dimensions \r +for each group. In this example, the user must provide a string of 2 characters, such as `ct` for \r +channel and time, for group 1, since there are two variable fields for this group. Since group 2 \r +already has dimension specifiers (T and Z as specified in the filenames preceding the variable fields),\r +the user does not need to specify anything for this group, and can enter `auto` or `aa` for automatic\r +detection of the specifiers. \r +\r +So the following line can be used to convert this folder: \\\r +`batchconvert --omezarr --merge_files --concatenation_order ct,aa /folder_with_multiple_groups `\r +\r +The resulting OME-Zarrs will have the names:\r +`test_img_CRange{1-2-1}-TRange{1-2-1}.ome.zarr` and\r +`test_img_TRange{1-2-1}-ZRange{1-3-1}.ome.zarr`\r +\r +Note that `--concatenation_order` will override any dimension specifiers already\r +existing in the filenames.\r +\r +\r +**Example 6**\r +\r +There can be scenarios where the user may want to have further control over the axes along \r +which to concatenate the images. 
For example, the filenames might contain the data acquisition\r +date, which can be recognised by BatchConvert as a concatenation axis in the automatic \r +detection mode. An example of such a fileset might look like:\r +\r +```\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T1\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T2\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T3\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T1\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T2\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T3\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T1\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T2\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T3\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T1\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T2\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T3\r +```\r +\r +One may try the following command to convert this folder:\r +\r +`batchconvert --omezarr --merge_files /filenames_with_dates `\r +\r +Since the concatenation axes are not specified, this command would try to create\r +a single OME-Zarr with name: `test_data_dateRange{03-04-1}.03.2023_imageZRange{1-2-1}-TRange{1-3-1}`.\r +\r +In order to force BatchConvert to ignore the date field, the user can restrict the concatenation \r +axes to the last two numeric fields. This can be done by using a command such as: \\\r +`batchconvert --omezarr --merge_files --concatenation_order aa /filenames_with_dates ` \\\r +This command will avoid concatenation along the date field, and therefore, there will be two\r +OME-Zarrs corresponding to the two dates. The number of characters being passed to the \r +`--concatenation_order` option specifies the number of numeric fields (starting from the right \r +end of the filename) that are recognised by the BatchConvert as valid concatenation axes. 
\r +Passing `aa`, therefore, means that the last two numeric fields must be recognised as \r +concatenation axes and the dimension type should be automatically detected (`a` for automatic). \r +In the same logic, one could, for example, convert each Z section into a separate OME-Zarr by \r +specifying `--concatenation_order a`.\r +\r +\r +\r +### Conversion on slurm\r +\r +All the examples given above can also be run on slurm by specifying `-pf cluster` option. \r +Note that this option automatically uses the singularity profile:\\\r +`batchconvert omezarr -pf cluster -p .oir `\r +\r +\r +\r +\r +\r +\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/453?version=1" ; + schema1:isBasedOn "https://github.com/Euro-BioImaging/BatchConvert.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for BatchConvert" ; + schema1:sdDatePublished "2024-07-12 13:32:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/453/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 89 ; + schema1:creator ; + schema1:dateCreated "2023-04-10T10:40:10Z" ; + schema1:dateModified "2023-04-27T10:51:07Z" ; + schema1:description """# BatchConvert\r +\r +A command line tool for converting image data into either of the standard file formats OME-TIFF or OME-Zarr. \r +\r +The tool wraps the dedicated file converters bfconvert and bioformats2raw to convert into OME-TIFF or OME-Zarr,\r +respectively. The workflow management system NextFlow is used to perform conversion in parallel for batches of images. \r +\r +The tool also wraps s3 and Aspera clients (go-mc and aspera-cli, respectively). Therefore, input and output locations can \r +be specified as local or remote storage and file transfer will be performed automatically. The conversion can be run on \r +HPC with Slurm. 
\r +\r +![](figures/diagram.png)\r +\r +## Installation & Dependencies\r +\r +**Important** note: The package has been so far only tested on Ubuntu 20.04.\r +\r +The minimal dependency to run the tool is NextFlow, which should be installed and made accessible from the command line.\r +\r +If conda exists on your system, you can install BatchConvert together with NextFlow using the following script:\r +```\r +git clone https://github.com/Euro-BioImaging/BatchConvert.git && \\ \r +source BatchConvert/installation/install_with_nextflow.sh\r +```\r +\r +\r +If you already have NextFlow installed and accessible from the command line (or if you prefer to install it manually \r +e.g., as shown [here](https://www.nextflow.io/docs/latest/getstarted.html)), you can also install BatchConvert alone, using the following script:\r +```\r +git clone https://github.com/Euro-BioImaging/BatchConvert.git && \\ \r +source BatchConvert/installation/install.sh\r +```\r +\r +\r +Other dependencies (which will be **automatically** installed):\r +- bioformats2raw (entrypoint bioformats2raw)\r +- bftools (entrypoint bfconvert)\r +- go-mc (entrypoint mc)\r +- aspera-cli (entrypoint ascp)\r +\r +These dependencies will be pulled and cached automatically at the first execution of the conversion command. \r +The mode of dependency management can be specified by using the command line option ``--profile`` or `-pf`. Depending \r +on how this option is specified, the dependencies will be acquired / run either via conda or via docker/singularity containers. \r +\r +Specifying ``--profile conda`` (default) will install the dependencies to an \r +environment at ``./.condaCache`` and use this environment to run the workflow. This option \r +requires that miniconda/anaconda is installed on your system. 
\r +\r +Alternatively, specifying ``--profile docker`` or ``--profile singularity`` will pull a docker or \r +singularity image with the dependencies, respectively, and use this image to run the workflow.\r +These options assume that the respective container runtime (docker or singularity) is available on \r +your system. If singularity is being used, a cache directory will be created at the path \r +``./.singularityCache`` where the singularity image is stored. \r +\r +Finally, you can still choose to install the dependencies manually and use your own installations to run\r +the workflow. In this case, you should specify ``--profile standard`` and make sure the entrypoints\r +specified above are recognised by your shell. \r +\r +\r +## Configuration\r +\r +BatchConvert can be configured to have default options for file conversion and transfer. Probably, the most important sets of parameters\r +to be configured include credentials for the remote ends. The easiest way to configure remote stores is by running the interactive \r +configuration command as indicated below.\r +\r +### Configuration of the s3 object store\r +\r +Run the interactive configuration command: \r +\r +`batchconvert configure_s3_remote`\r +\r +This will start a sequence of requests for s3 credentials such as name, url, access, etc. Provide each requested credential and click\r +enter. Continue this cycle until the process is finished. 
Upon completing the configuration, the sequence of commands should roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_s3_remote\r +enter remote name (for example s3)\r +s3\r +enter url:\r +https://s3.embl.de\r +enter access key:\r +\r +enter secret key:\r +\r +enter bucket name:\r +\r +Configuration of the default s3 credentials is complete\r +```\r +\r +\r +### Configuration of the BioStudies user space\r +\r +Run the interactive configuration command: \r +\r +`batchconvert configure_bia_remote`\r +\r +This will prompt a request for the secret directory to connect to. Enter the secret directory for your user space and click enter. \r +Upon completing the configuration, the sequence of commands should roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_bia_remote\r +enter the secret directory for BioImage Archive user space:\r +\r +configuration of the default bia credentials is complete\r +```\r +\r +### Configuration of the slurm options\r +\r +BatchConvert can also run on slurm clusters. In order to configure the slurm parameters, run the interactive configuration command: \r +\r +`batchconvert configure_slurm`\r +\r +This will start a sequence of requests for slurm options. Provide each requested option and click enter. \r +Continue this cycle until the process is finished. 
Upon completing the configuration, the sequence of commands should \r +roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_slurm\r +Please enter value for queue_size\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´50´\r +s\r +Please enter value for submit_rate_limit\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´10/2min´\r +s\r +Please enter value for cluster_options\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´--mem-per-cpu=3140 --cpus-per-task=16´\r +s\r +Please enter value for time\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´6h´\r +s\r +configuration of the default slurm parameters is complete\r +```\r +\r +### Configuration of the default conversion parameters\r +\r +While all conversion parameters can be specified as command line arguments, it can\r +be useful for the users to set their own default parameters to avoid re-entering those\r +parameters for subsequent executions. BatchConvert allows for interactive configuration of \r +conversion in the same way as configuration of the remote stores described above.\r +\r +To configure the conversion into OME-TIFF, run the following command:\r +\r +`batchconvert configure_ometiff`\r +\r +This will prompt the user to enter a series of parameters, which will then be saved as the \r +default parameters to be passed to the `batchconvert ometiff` command. 
Upon completing the \r +configuration, the sequence of commands should look similar to:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_ometiff\r +Please enter value for noflat\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for series\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for timepoint\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +...\r +...\r +...\r +...\r +...\r +...\r +Configuration of the default parameters for 'bfconvert' is complete\r +```\r +\r +\r +To configure the conversion into OME-Zarr, run the following command:\r +\r +`batchconvert configure_omezarr`\r +\r +Similarly, this will prompt the user to enter a series of parameters, which will then be saved as the \r +default parameters to be passed to the `batchconvert omezarr` command. 
Upon completing the configuration, \r +the sequence of commands should look similar to:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_omezarr\r +Please enter value for resolutions_zarr\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for chunk_h\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for chunk_w\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +...\r +...\r +...\r +...\r +...\r +...\r +Configuration of the default parameters for 'bioformats2raw' is complete\r +```\r +\r +It is important to note that the initial defaults for the conversion parameters are the same as the defaults\r +of the backend tools bfconvert and bioformats2raw, as noted in the prompt excerpt above. Through interactive configuration, \r +the user is overriding these initial defaults and setting their own defaults. It is possible to reset the initial\r +defaults by running the following command.\r +\r +`batchconvert reset_defaults`\r +\r +Another important point is that any of these configured parameters can be overridden by passing a value to that\r +parameter in the commandline. For instance, in the following command, the value of 20 will be assigned to `chunk_h` parameter \r +even if the value for the same parameter might be different in the configuration file. \r +\r +`batchconvert omezarr --chunk_h 20 `\r +\r +\r +## Examples\r +\r +### Local conversion\r +\r +#### Parallel conversion of files to separate OME-TIFFs / OME-Zarrs:\r +Convert a batch of images on your local storage into OME-TIFF format. 
\r +Note that the `input_path` in the command given below is typically a \r +directory with multiple image files but a single image file can also be passed:\\\r +`batchconvert ometiff -pf conda ` \r +\r +Note that if this is your first conversion with the profile `conda`, \r +it will take a while for a conda environment with the dependencies to be\r +created. All the subsequent conversion commands with the profile `conda`,\r +however, will use this environment, and thus show no such delay.\r +\r +Since conda is the default profile, it does not have to be \r +explicitly included in the command line. Thus, the command can be shortened to:\\\r +`batchconvert ometiff `\r +\r +Convert only the first channel of the images:\\\r +`batchconvert ometiff -chn 0 `\r +\r +Crop the images being converted along x and y axis by 150 pixels:\\\r +`batchconvert ometiff -cr 0,0,150,150 `\r +\r +Convert into OME-Zarr instead:\\\r +`batchconvert omezarr `\r +\r +Convert into OME-Zarr with 3 resolution levels:\\\r +`batchconvert omezarr -rz 3 `\r +\r +Select a subset of images with a matching string such as "mutation":\\\r +`batchconvert omezarr -p mutation `\r +\r +Select a subset of images using wildcards. Note that the use of "" around \r +the input path is necessary when using wildcards:\\\r +`batchconvert omezarr "/*D3*.oir" `\r +\r +Convert by using a singularity container instead of conda environment (requires\r +singularity to be installed on your system):\\\r +`batchconvert omezarr -pf singularity "/*D3*.oir" `\r +\r +Convert by using a docker container instead of conda environment (requires docker\r +to be installed on your system):\\\r +`batchconvert omezarr -pf docker "/*D3*.oir" `\r +\r +Note that similarly to the case with the profile `conda`, the first execution of\r +a conversion with the profile `singularity` or `docker` will take a while for the\r +container image to be pulled. 
All the subsequent conversion commands using a \r +container option will use this image, and thus show no such delay. \r +\r +Convert local data and upload the output to an s3 bucket. Note that the output \r +path is created relative to the bucket specified in your s3 configuration:\\\r +`batchconvert omezarr -dt s3 `\r +\r +Receive input files from an s3 bucket, convert locally and upload the output to \r +the same bucket. Note that wildcards cannot be used when the input is from s3. \r +Use pattern matching option `-p` for selecting a subset of input files:\\\r +`batchconvert omezarr -p mutation -st s3 -dt s3 `\r +\r +Receive input files from your private BioStudies user space and convert them locally.\r +Use pattern matching option `-p` for selecting a subset of input files:\\\r +`batchconvert omezarr -p mutation -st bia `\r +\r +Receive an input from an s3 bucket, convert locally and upload the output to your \r +private BioStudies user space. Use pattern matching option `-p` for selecting a subset \r +of input files:\\\r +`batchconvert omezarr -p mutation -st s3 -dt bia `\r +\r +Note that in all the examples shown above, BatchConvert treats each input file as separate,\r +standalone data point, disregarding the possibility that some of the input files might belong to \r +the same multidimensional array. Thus, each input file is converted to an independent \r +OME-TIFF / OME-Zarr and the number of outputs will thus equal the number of selected input files.\r +An alternative scenario is discussed below.\r +\r +#### Parallel conversion of file groups by stacking multiple files into single OME-TIFFs / OME-Zarrs:\r +\r +When the flag `--merge_files` is specified, BatchConvert tries to detect which input files might \r +belong to the same multidimensional array based on the patterns in the filenames. 
Then a "grouped conversion" \r +is performed, meaning that the files belonging to the same dataset will be incorporated into \r +a single OME-TIFF / OME-Zarr series, in that files will be concatenated along specific dimension(s) \r +during the conversion. Multiple file groups in the input directory can be detected and converted \r +in parallel. \r +\r +This feature uses Bio-Formats's pattern files as described [here](https://docs.openmicroscopy.org/bio-formats/6.6.0/formats/pattern-file.html).\r +However, BatchConvert generates pattern files automatically, allowing the user to directly use the \r +input directory in the conversion command. BatchConvert also has the option of specifying the \r +concatenation axes in the command line, which is especially useful in cases where the filenames \r +may not contain dimension information. \r +\r +To be able to use the `--merge files` flag, the input file names must obey certain rules:\r +1. File names in the same group must be uniform, except for one or more **numeric field(s)**, which\r +should show incremental change across the files. These so-called **variable fields** \r +will be detected and used as the dimension(s) of concatenation.\r +2. The length of variable fields must be uniform within the group. For instance, if the\r +variable field has values reaching multi-digit numbers, leading "0"s should be included where needed \r +in the file names to make the variable field length uniform within the group.\r +3. Typically, each variable field should follow a dimension specifier. What patterns can be used as \r +dimension specifiers are explained [here](https://docs.openmicroscopy.org/bio-formats/6.6.0/formats/pattern-file.html).\r +However, BatchConvert also has the option `--concatenation_order`, which allows the user to\r +specify from the command line, the dimension(s), along which the files must be concatenated.\r +4. 
File names that are unique and cannot be associated with any group will be assumed as\r +standalone images and converted accordingly. \r +\r +Below are some examples of grouped conversion commands in the context of different possible use-case scenarios:\r +\r +**Example 1:**\r +\r +This is an example of a folder with non-uniform filename lengths:\r +```\r +time-series/test_img_T2\r +time-series/test_img_T4\r +time-series/test_img_T6\r +time-series/test_img_T8\r +time-series/test_img_T10\r +time-series/test_img_T12\r +```\r +In this example, leading zeroes are missing in the variable fields of some filenames. \r +A typical command to convert this folder to a single OME-Zarr would look like: \\\r +`batchconvert --omezarr --merge_files /time-series `\r +\r +However, this command would fail to create a single OME-Zarr folder due to the non-uniform \r +lengths of the filenames. Instead, the files would be split into two groups based on the\r +filename length, leading to two separate OME-Zarrs with names:\r +\r +`test_img_TRange{2-8-2}.ome.zarr` and `test_img_TRange{10-12-2}.ome.zarr`\r +\r +Here is the corrected version of the folder for the above example-\r +```\r +time-series/test_img_T02\r +time-series/test_img_T04\r +time-series/test_img_T06\r +time-series/test_img_T08\r +time-series/test_img_T10\r +time-series/test_img_T12\r +```\r +\r +Executing the same command on this folder would result in a single OME-Zarr with the name:\r +`test_img_TRange{02-12-2}.ome.zarr`\r +\r +**Example 2**\r +\r +In this example, the filename lengths are uniform but the incrementation within the variable field is not.\r +```\r +time-series/test_img_T2\r +time-series/test_img_T4\r +time-series/test_img_T5\r +time-series/test_img_T7\r +```\r +\r +A typical command to convert this folder to a single OME-Zarr would look like: \\\r +`batchconvert --omezarr --merge_files /time-series `\r +\r +However, the command would fail to assume these files as a single group due to the\r +non-uniform 
incrementation in the variable field of the filenames. Instead, the dataset \r +would be split into two groups, leading to two separate OME-Zarrs with the following names:\r +`test_img_TRange{2-4-2}.ome.zarr` and `test_img_TRange{5-7-2}.ome.zarr` \r +\r +\r +**Example 3**\r +\r +This is an example of a case where the conversion attempts to concatenate files along two\r +dimensions, channel and time.\r +```\r +multichannel_time-series/test_img_C1-T1\r +multichannel_time-series/test_img_C1-T2\r +multichannel_time-series/test_img_C1-T3\r +multichannel_time-series/test_img_C2-T1\r +multichannel_time-series/test_img_C2-T2\r +```\r +To convert this folder to a single OME-Zarr, one could try the following command: \\\r +`batchconvert --omezarr --merge_files /multichannel_time-series `\r +\r +However, since the channel-2 does not have the same number of timeframes as the channel-1, \r +BatchConvert will fail to assume these two channels as part of the same series and\r +will instead split the two channels into two separate OME-Zarrs. 
\r +\r +The output would look like: \\\r +`test_img_C1-TRange{1-3-1}.ome.zarr` \\\r +`test_img_C2-TRange{1-2-1}.ome.zarr`\r +\r +To be able to really incorporate all files into a single OME-Zarr, the folder should have equal\r +number of images corresponding to both channels, as shown below:\r +```\r +multichannel_time-series/test_img_C1-T1\r +multichannel_time-series/test_img_C1-T2\r +multichannel_time-series/test_img_C1-T3\r +multichannel_time-series/test_img_C2-T1\r +multichannel_time-series/test_img_C2-T2\r +multichannel_time-series/test_img_C2-T3\r +```\r +The same conversion command on this version of the input folder would result in a single \r +OME-Zarr with the name: \\\r +`test_img_CRange{1-2-1}-TRange{1-3-1}.ome.zarr`\r +\r +\r +**Example 4**\r +\r +This is another example of a case, where there are multiple filename patterns in the input folder.\r +\r +```\r +folder_with_multiple_groups/test_img_C1-T1\r +folder_with_multiple_groups/test_img_C1-T2\r +folder_with_multiple_groups/test_img_C2-T1\r +folder_with_multiple_groups/test_img_C2-T2\r +folder_with_multiple_groups/test_img_T1-Z1\r +folder_with_multiple_groups/test_img_T1-Z2\r +folder_with_multiple_groups/test_img_T1-Z3\r +folder_with_multiple_groups/test_img_T2-Z1\r +folder_with_multiple_groups/test_img_T2-Z2\r +folder_with_multiple_groups/test_img_T2-Z3\r +```\r +\r +One can convert this folder with- \\\r +`batchconvert --omezarr --merge_files /folder_with_multiple_groups `\r + \r +BatchConvert will detect the two patterns in this folder and perform two grouped conversions. \r +The output folders will be named as `test_img_CRange{1-2-1}-TRange{1-2-1}.ome.zarr` and \r +`test_img_TRange{1-2-1}-ZRange{1-3-1}.ome.zarr`. 
\r +\r +\r +**Example 5**\r +\r +Now imagine that we have the same files as in the example 4 but the filenames of the\r +first group lack any dimension specifier, so we have the following folder:\r +\r +```\r +folder_with_multiple_groups/test_img_1-1\r +folder_with_multiple_groups/test_img_1-2\r +folder_with_multiple_groups/test_img_2-1\r +folder_with_multiple_groups/test_img_2-2\r +folder_with_multiple_groups/test_img_T1-Z1\r +folder_with_multiple_groups/test_img_T1-Z2\r +folder_with_multiple_groups/test_img_T1-Z3\r +folder_with_multiple_groups/test_img_T2-Z1\r +folder_with_multiple_groups/test_img_T2-Z2\r +folder_with_multiple_groups/test_img_T2-Z3\r +```\r +\r +In such a scenario, BatchConvert allows the user to specify the concatenation axes \r +via `--concatenation_order` option. This option expects comma-separated strings of dimensions \r +for each group. In this example, the user must provide a string of 2 characters, such as `ct` for \r +channel and time, for group 1, since there are two variable fields for this group. Since group 2 \r +already has dimension specifiers (T and Z as specified in the filenames preceding the variable fields),\r +the user does not need to specify anything for this group, and can enter `auto` or `aa` for automatic\r +detection of the specifiers. \r +\r +So the following line can be used to convert this folder: \\\r +`batchconvert --omezarr --merge_files --concatenation_order ct,aa /folder_with_multiple_groups `\r +\r +The resulting OME-Zarrs will have the names:\r +`test_img_CRange{1-2-1}-TRange{1-2-1}.ome.zarr` and\r +`test_img_TRange{1-2-1}-ZRange{1-3-1}.ome.zarr`\r +\r +Note that `--concatenation_order` will override any dimension specifiers already\r +existing in the filenames.\r +\r +\r +**Example 6**\r +\r +There can be scenarios where the user may want to have further control over the axes along \r +which to concatenate the images. 
For example, the filenames might contain the data acquisition\r +date, which can be recognised by BatchConvert as a concatenation axis in the automatic \r +detection mode. An example of such a fileset might look like:\r +\r +```\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T1\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T2\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T3\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T1\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T2\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T3\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T1\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T2\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T3\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T1\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T2\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T3\r +```\r +\r +One may try the following command to convert this folder:\r +\r +`batchconvert --omezarr --merge_files /filenames_with_dates `\r +\r +Since the concatenation axes are not specified, this command would try to create\r +a single OME-Zarr with name: `test_data_dateRange{03-04-1}.03.2023_imageZRange{1-2-1}-TRange{1-3-1}`.\r +\r +In order to force BatchConvert to ignore the date field, the user can restrict the concatenation \r +axes to the last two numeric fields. This can be done by using a command such as: \\\r +`batchconvert --omezarr --merge_files --concatenation_order aa /filenames_with_dates ` \\\r +This command will avoid concatenation along the date field, and therefore, there will be two\r +OME-Zarrs corresponding to the two dates. The number of characters being passed to the \r +`--concatenation_order` option specifies the number of numeric fields (starting from the right \r +end of the filename) that are recognised by the BatchConvert as valid concatenation axes. 
\r +Passing `aa`, therefore, means that the last two numeric fields must be recognised as \r +concatenation axes and the dimension type should be automatically detected (`a` for automatic). \r +In the same logic, one could, for example, convert each Z section into a separate OME-Zarr by \r +specifying `--concatenation_order a`.\r +\r +\r +\r +### Conversion on slurm\r +\r +All the examples given above can also be run on slurm by specifying `-pf cluster` option. \r +Note that this option automatically uses the singularity profile:\\\r +`batchconvert omezarr -pf cluster -p .oir `\r +\r +\r +\r +\r +\r +\r +\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/453?version=2" ; + schema1:keywords "Nextflow, bash, Python, NGFF, OME-Zarr, Conversion, imaging, bioimaging, image file format, file conversion, OME-TIFF, S3, BioStudies, bioformats, bioformats2raw" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "BatchConvert" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/453?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 151518 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Workflow converts one or multiple bam/cram files to fastq format" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/968?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/bamtofastq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bamtofastq" ; + schema1:sdDatePublished "2024-07-12 13:22:08 +0100" ; + schema1:url "https://workflowhub.eu/workflows/968/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9637 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:44Z" ; + schema1:dateModified "2024-06-11T12:54:44Z" ; + schema1:description "Workflow converts one or multiple bam/cram files to fastq format" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/968?version=5" ; + schema1:keywords "bamtofastq, Conversion, cramtofastq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bamtofastq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/968?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + schema1:datePublished "2024-07-09T15:31:05.900075" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-Trio-phasing-VGP5/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "The pangenome graph construction pipeline renders a collection of sequences into a pangenome graph. Its goal is to build a graph that is locally directed and acyclic while preserving large-scale variation. 
Maintaining local linearity is important for interpretation, visualization, mapping, comparative genomics, and reuse of pangenome graphs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1007?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/pangenome" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/pangenome" ; + schema1:sdDatePublished "2024-07-12 13:20:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1007/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11517 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:07Z" ; + schema1:dateModified "2024-06-11T12:55:07Z" ; + schema1:description "The pangenome graph construction pipeline renders a collection of sequences into a pangenome graph. Its goal is to build a graph that is locally directed and acyclic while preserving large-scale variation. Maintaining local linearity is important for interpretation, visualization, mapping, comparative genomics, and reuse of pangenome graphs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1007?version=3" ; + schema1:keywords "pangenome" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/pangenome" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1007?version=3" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "call and score variants from WGS/WES of rare disease patients" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1014?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/raredisease" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/raredisease" ; + schema1:sdDatePublished "2024-07-12 13:19:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1014/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13482 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:08Z" ; + schema1:dateModified "2024-06-11T12:55:08Z" ; + schema1:description "call and score variants from WGS/WES of rare disease patients" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1014?version=5" ; + schema1:keywords "diagnostics, rare-disease, snv, structural-variants, variant-annotation, variant-calling, wes, WGS" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/raredisease" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1014?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.282.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_virtual_screening/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein-ligand Docking tutorial (Fpocket)" ; + 
schema1:sdDatePublished "2024-07-12 13:33:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/282/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2666 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-22T10:10:56Z" ; + schema1:dateModified "2023-01-16T13:58:31Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for 
Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/282?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein-ligand Docking tutorial (Fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_virtual_screening/python/workflow.py" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1021?version=12" ; + schema1:isBasedOn "https://github.com/nf-core/scrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/scrnaseq" ; + schema1:sdDatePublished "2024-07-12 13:19:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1021/ro_crate?version=12" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10731 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:18Z" ; + schema1:dateModified "2024-06-11T12:55:18Z" ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:keywords "10x-genomics, 10xgenomics, alevin, bustools, Cellranger, kallisto, rna-seq, single-cell, star-solo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/scrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1021?version=12" ; + schema1:version 12 . + + a schema1:Dataset ; + schema1:datePublished "2023-10-30T19:54:20.930054" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Scaffolding-HiC-VGP8/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.15" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. 
Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/abcix-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.299.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_abc_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy ABC MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/299/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 107223 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-29T10:43:15Z" ; + schema1:dateModified "2022-11-23T09:03:34Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/abcix-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/299?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy ABC MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + 
schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_abc_md_setup/galaxy/biobb_wf_amber_abc_md_setup.ga" ; + schema1:version 1 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """ASPICov was developed to provide a rapid, reliable and complete analysis of NGS SARS-Cov2 samples to the biologist. This broad application tool allows to process samples from either capture or amplicon strategy and Illumina or Ion Torrent technology. To ensure FAIR data analysis, this Nextflow pipeline follows nf-core guidelines and use Singularity containers. \r +\r +Availability and Implementation: https://gitlab.com/vtilloy/aspicov\r +\r +Citation: Valentin Tilloy, Pierre Cuzin, Laura Leroi, Emilie Guérin, Patrick Durand, Sophie Alain\r + ASPICov: An automated pipeline for identification of SARS-Cov2 nucleotidic variants\r + PLoS One 2022 Jan 26;17(1):e0262953: https://pubmed.ncbi.nlm.nih.gov/35081137/""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/192?version=1" ; + schema1:isBasedOn "https://gitlab.com/vtilloy/aspicov/-/blob/master/main.nf" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ASPICov" ; + schema1:sdDatePublished "2024-07-12 13:36:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/192/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 140575 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 91310 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2021-09-24T14:22:51Z" ; + schema1:dateModified "2023-01-16T13:52:59Z" ; + schema1:description """ASPICov was developed to provide a rapid, reliable and complete analysis of NGS SARS-Cov2 samples to the biologist. This broad application tool allows to process samples from either capture or amplicon strategy and Illumina or Ion Torrent technology. To ensure FAIR data analysis, this Nextflow pipeline follows nf-core guidelines and use Singularity containers. \r +\r +Availability and Implementation: https://gitlab.com/vtilloy/aspicov\r +\r +Citation: Valentin Tilloy, Pierre Cuzin, Laura Leroi, Emilie Guérin, Patrick Durand, Sophie Alain\r + ASPICov: An automated pipeline for identification of SARS-Cov2 nucleotidic variants\r + PLoS One 2022 Jan 26;17(1):e0262953: https://pubmed.ncbi.nlm.nih.gov/35081137/""" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "ASPICov" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/192?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-06-25T09:57:05.227636" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/nanopore-pre-processing" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "nanopore-pre-processing/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "The tutorial for this workflow can be found on [Galaxy Training Network](https://training.galaxyproject.org/training-material/topics/climate/tutorials/climate-101/tutorial.html)" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/42?version=1" ; + schema1:isBasedOn "https://climate.usegalaxy.eu/u/annefou/w/workflow-constructed-from-history-climate-101" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Climate - Climate 101" ; + schema1:sdDatePublished "2024-07-12 13:37:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/42/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 5136 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 24703 ; + schema1:creator ; + schema1:dateCreated "2020-06-29T14:00:25Z" ; + schema1:dateModified "2023-01-16T13:43:36Z" ; + schema1:description "The tutorial for this workflow can be found on [Galaxy Training Network](https://training.galaxyproject.org/training-material/topics/climate/tutorials/climate-101/tutorial.html)" ; + schema1:image ; + schema1:keywords "GTN, Climate" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Climate - Climate 101" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/42?version=1" ; + schema1:version 1 ; + ns1:input , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 12807 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-07-12 13:20:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10326 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:58Z" ; + schema1:dateModified "2024-06-11T12:54:58Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + schema1:datePublished "2024-03-13T21:38:33.354139" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-decontamination-VGP9" ; + schema1:license "BSD-3-Clause" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-decontamination-VGP9/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "The workflow takes a HiFi reads collection, runs FastQC and SeqKit, filters with Cutadapt, and creates a MultiQC report. The main outputs are a collection of filtred reads, a report with raw and filtered reads stats, and a table with raw reads stats." ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.602.1" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/galaxy" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ERGA DataQC HiFi v2309 (WF0)" ; + schema1:sdDatePublished "2024-07-12 13:27:17 +0100" ; + schema1:url "https://workflowhub.eu/workflows/602/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15463 ; + schema1:creator , + ; + schema1:dateCreated "2023-10-06T13:17:36Z" ; + schema1:dateModified "2024-03-13T09:04:37Z" ; + schema1:description "The workflow takes a HiFi reads collection, runs FastQC and SeqKit, filters with Cutadapt, and creates a MultiQC report. The main outputs are a collection of filtred reads, a report with raw and filtered reads stats, and a table with raw reads stats." ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "ERGA, DataQC, HiFi" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ERGA DataQC HiFi v2309 (WF0)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/0.Data_QC/Galaxy-Workflow-ERGA_DataQC_HiFi_v2309_(WF0).ga" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 164392 ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/0.Data_QC/pics/QC_hifi_2309.png" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 
2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.259.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_virtual_screening/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Protein-ligand Docking tutorial (Fpocket)" ; + schema1:sdDatePublished "2024-07-12 13:36:13 +0100" ; + schema1:url "https://workflowhub.eu/workflows/259/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 28572 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6352 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-06T14:57:00Z" ; + schema1:dateModified "2023-06-06T15:02:00Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/259?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Protein-ligand Docking tutorial (Fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_virtual_screening/cwl/workflow.cwl" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Differential abundance analysis" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/981?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/differentialabundance" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/differentialabundance" ; + schema1:sdDatePublished "2024-07-12 13:21:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/981/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13413 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "Differential abundance analysis" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/981?version=8" ; + schema1:keywords "ATAC-seq, ChIP-seq, deseq2, differential-abundance, differential-expression, gsea, limma, microarray, rna-seq, shiny" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/differentialabundance" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/981?version=5" ; + schema1:version 5 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """CWL workflow for NMR spectra Peak Picking\r +The workflow takes as input a series of 2D 1H 15N HSQC NMR spectra and uses nmrpipe tools to convert the spectra in nmrpipe format and performs an automatic peak picking.\r +This test uses a protein MDM2 with different ligands and peptide and generates a peak list with 1H and 15N chemical shift values for each spectrum. The difference among these peak lists can be used to characterize the ligand binding site on the protein.""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/43?version=1" ; + schema1:isBasedOn "https://github.com/andreagia/CWL_dem1_NMR_Peak_Picking" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for NMR pipe" ; + schema1:sdDatePublished "2024-07-12 13:37:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/43/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1268 ; + schema1:dateCreated "2020-07-22T10:49:00Z" ; + schema1:dateModified "2023-01-16T13:43:41Z" ; + schema1:description """CWL workflow for NMR spectra Peak Picking\r +The workflow takes as input a series of 2D 1H 15N HSQC NMR spectra and uses nmrpipe tools to convert the spectra in nmrpipe format and performs an automatic peak picking.\r +This test uses a protein MDM2 with different ligands and peptide and generates a peak list with 1H and 15N chemical shift values for each spectrum. 
The difference among these peak lists can be used to characterize the ligand binding site on the protein.""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "NMR pipe" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/43?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 6650 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """### Workflow for Illumina Quality Control and Filtering\r +_Multiple paired datasets will be merged into single paired dataset._\r +\r +**Summary:**\r +- FastQC on raw data files
\r +- fastp for read quality trimming
\r +- BBduk for phiX and (optional) rRNA filtering
\r +- Kraken2 for taxonomic classification of reads (optional)
\r +- BBmap for (contamination) filtering using given references (optional)
\r +- FastQC on filtered (merged) data
\r +\r +Other UNLOCK workflows on WorkflowHub: https://workflowhub.eu/projects/16/workflows?view=default

\r +\r +**All tool CWL files and other workflows can be found here:**
\r +https://gitlab.com/m-unlock/cwl\r +\r +**How to setup and use an UNLOCK workflow:**
\r +https://m-unlock.gitlab.io/docs/setup/setup.html
\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/336?version=1" ; + schema1:isBasedOn "https://gitlab.com/m-unlock/cwl/-/blob/master/cwl/workflows/workflow_illumina_quality.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Workflow for Illumina Quality Control and Filtering" ; + schema1:sdDatePublished "2024-07-12 13:34:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/336/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 85490 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16671 ; + schema1:creator , + ; + schema1:dateCreated "2022-04-21T13:00:34Z" ; + schema1:dateModified "2023-04-07T14:04:28Z" ; + schema1:description """### Workflow for Illumina Quality Control and Filtering\r +_Multiple paired datasets will be merged into single paired dataset._\r +\r +**Summary:**\r +- FastQC on raw data files
\r +- fastp for read quality trimming
\r +- BBduk for phiX and (optional) rRNA filtering
\r +- Kraken2 for taxonomic classification of reads (optional)
\r +- BBmap for (contamination) filtering using given references (optional)
\r +- FastQC on filtered (merged) data
\r +\r +Other UNLOCK workflows on WorkflowHub: https://workflowhub.eu/projects/16/workflows?view=default

\r +\r +**All tool CWL files and other workflows can be found here:**
\r +https://gitlab.com/m-unlock/cwl\r +\r +**How to setup and use an UNLOCK workflow:**
\r +https://m-unlock.gitlab.io/docs/setup/setup.html
\r +""" ; + schema1:image ; + schema1:keywords "illumina, Genomics, Transcriptomics, quality, filtering, Classification" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Workflow for Illumina Quality Control and Filtering" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/336?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for the analysis of crispr data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/975?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/crisprseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/crisprseq" ; + schema1:sdDatePublished "2024-07-12 13:18:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/975/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7872 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:45Z" ; + schema1:dateModified "2024-06-11T12:54:46Z" ; + schema1:description "Pipeline for the analysis of crispr data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/975?version=4" ; + schema1:keywords "crispr, crispr-analysis, crispr-cas, NGS" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/crisprseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/975?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-protein-ligand-complex-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.295.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_protein_complex_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name 
"Research Object Crate for Galaxy Protein Ligand Complex MD Setup" ; + schema1:sdDatePublished "2024-07-12 13:35:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/295/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 97199 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-24T14:47:53Z" ; + schema1:dateModified "2022-11-22T09:57:03Z" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-protein-ligand-complex-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). 
\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/295?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Protein Ligand Complex MD Setup" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_protein_complex_md_setup/galaxy/biobb_wf_protein_complex_md_setup.ga" ; + schema1:version 1 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for processing of 10xGenomics single cell rnaseq data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1021?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/scrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/scrnaseq" ; + schema1:sdDatePublished "2024-07-12 13:18:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1021/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7959 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:17Z" ; + schema1:dateModified "2024-06-11T12:55:17Z" ; + schema1:description "Pipeline for processing of 10xGenomics single cell rnaseq data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:keywords "10x-genomics, 10xgenomics, alevin, bustools, Cellranger, kallisto, rna-seq, single-cell, star-solo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/scrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1021?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "The workflow takes trimmed HiC forward and reverse reads, and one assembly (e.g.: Hap1 or Pri or Collapsed) to produce a scaffolded assembly using YaHS. It also runs all the QC analyses (gfastats, BUSCO, and Merqury). 
" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/702?version=1" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/galaxy" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ERGA HiC Collapsed Scaffolding+QC YaHS v2311 (WF4)" ; + schema1:sdDatePublished "2024-07-12 13:25:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/702/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 46334 ; + schema1:creator , + ; + schema1:dateCreated "2024-01-09T11:00:47Z" ; + schema1:dateModified "2024-01-09T11:00:47Z" ; + schema1:description "The workflow takes trimmed HiC forward and reverse reads, and one assembly (e.g.: Hap1 or Pri or Collapsed) to produce a scaffolded assembly using YaHS. It also runs all the QC analyses (gfastats, BUSCO, and Merqury). " ; + schema1:image ; + schema1:isPartOf , + ; + schema1:keywords "name:ASSEMBLY+QC, ERGA, HiC" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ERGA HiC Collapsed Scaffolding+QC YaHS v2311 (WF4)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/4.Scaffolding/Galaxy-Workflow-ERGA_HiC_Collapsed_Scaffolding_QC_YaHS_v2311_(WF4).ga" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + ; + ns1:output , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 172042 ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/4.Scaffolding/pics/Scaf_yahs_pri_2311.png" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """Assess genome quality; can run alone or as part of a combined workflow for large genome assembly. 
\r +\r +* What it does: Assesses the quality of the genome assembly: generate some statistics and determine if expected genes are present; align contigs to a reference genome.\r +* Inputs: polished assembly; reference_genome.fasta (e.g. of a closely-related species, if available). \r +* Outputs: Busco table of genes found; Quast HTML report, and link to Icarus contigs browser, showing contigs aligned to a reference genome\r +* Tools used: Busco, Quast\r +* Input parameters: None required\r +\r +Workflow steps: \r +\r +Polished assembly => Busco\r +* First: predict genes in the assembly: using Metaeuk\r +* Second: compare the set of predicted genes to the set of expected genes in a particular lineage. Default setting for lineage: Eukaryota\r +\r +Polished assembly and a reference genome => Quast\r +* Contigs/scaffolds file: polished assembly\r +* Type of assembly: Genome\r +* Use a reference genome: Yes\r +* Reference genome: Arabidopsis genome\r +* Is the genome large (> 100Mbp)? Yes. \r +* All other settings as defaults, except second last setting: Distinguish contigs with more than 50% unaligned bases as a separate group of contigs?: change to No\r +\r +Options\r +\r +Gene prediction: \r +* Change tool used by Busco to predict genes in the assembly: instead of Metaeuk, use Augustus. \r +* To do this: select: Use Augustus; Use another predefined species model; then choose from the drop down list.\r +* Select from a database of trained species models. list here: https://github.com/Gaius-Augustus/Augustus/tree/master/config/species\r +* Note: if using Augustus: it may fail if the input assembly is too small (e.g. a test-size data assembly). It can't do the training part properly. \r +\r +Compare genes found to other lineage: \r +* Busco has databases of lineages and their expected genes. Option to change lineage. \r +* Not all lineages are available - there is a mix of broader and narrower lineages. 
- list of lineages here: https://busco.ezlab.org/list_of_lineages.html. \r +* To see the groups in taxonomic hierarchies: Eukaryotes: https://busco.ezlab.org/frames/euka.htm\r +* For example, if you have a plant species from Fabales, you could set that as the lineage. \r +* The narrower the taxonomic group, the more total genes are expected. \r +\r +\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.229.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Assess genome quality" ; + schema1:sdDatePublished "2024-07-12 13:36:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/229/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 159462 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10556 ; + schema1:creator ; + schema1:dateCreated "2021-11-08T06:03:05Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """Assess genome quality; can run alone or as part of a combined workflow for large genome assembly. \r +\r +* What it does: Assesses the quality of the genome assembly: generate some statistics and determine if expected genes are present; align contigs to a reference genome.\r +* Inputs: polished assembly; reference_genome.fasta (e.g. of a closely-related species, if available). \r +* Outputs: Busco table of genes found; Quast HTML report, and link to Icarus contigs browser, showing contigs aligned to a reference genome\r +* Tools used: Busco, Quast\r +* Input parameters: None required\r +\r +Workflow steps: \r +\r +Polished assembly => Busco\r +* First: predict genes in the assembly: using Metaeuk\r +* Second: compare the set of predicted genes to the set of expected genes in a particular lineage. 
Default setting for lineage: Eukaryota\r +\r +Polished assembly and a reference genome => Quast\r +* Contigs/scaffolds file: polished assembly\r +* Type of assembly: Genome\r +* Use a reference genome: Yes\r +* Reference genome: Arabidopsis genome\r +* Is the genome large (> 100Mbp)? Yes. \r +* All other settings as defaults, except second last setting: Distinguish contigs with more than 50% unaligned bases as a separate group of contigs?: change to No\r +\r +Options\r +\r +Gene prediction: \r +* Change tool used by Busco to predict genes in the assembly: instead of Metaeuk, use Augustus. \r +* To do this: select: Use Augustus; Use another predefined species model; then choose from the drop down list.\r +* Select from a database of trained species models. list here: https://github.com/Gaius-Augustus/Augustus/tree/master/config/species\r +* Note: if using Augustus: it may fail if the input assembly is too small (e.g. a test-size data assembly). It can't do the training part properly. \r +\r +Compare genes found to other lineage: \r +* Busco has databases of lineages and their expected genes. Option to change lineage. \r +* Not all lineages are available - there is a mix of broader and narrower lineages. - list of lineages here: https://busco.ezlab.org/list_of_lineages.html. \r +* To see the groups in taxonomic hierarchies: Eukaryotes: https://busco.ezlab.org/frames/euka.htm\r +* For example, if you have a plant species from Fabales, you could set that as the lineage. \r +* The narrower the taxonomic group, the more total genes are expected. 
\r +\r +\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)\r +""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "Large-genome-assembly" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Assess genome quality" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/229?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """Workflow for tracking objects in Cell Profiler:\r +https://training.galaxyproject.org/training-material/topics/imaging/tutorials/object-tracking-using-cell-profiler/tutorial.html""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/115?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Object tracking using CellProfiler" ; + schema1:sdDatePublished "2024-07-12 13:37:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/115/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 162671 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 30831 ; + schema1:dateCreated "2021-03-21T18:12:10Z" ; + schema1:dateModified "2023-07-03T10:16:04Z" ; + schema1:description """Workflow for tracking objects in Cell Profiler:\r +https://training.galaxyproject.org/training-material/topics/imaging/tutorials/object-tracking-using-cell-profiler/tutorial.html""" ; + schema1:image ; + schema1:keywords "CellProfiler, imaging, Galaxy, image processing" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Object tracking using CellProfiler" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/115?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Molecular Structure Checking using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **checking** a **molecular structure** before using it as an input for a **Molecular Dynamics** simulation. The workflow uses the **BioExcel Building Blocks library (biobb)**. The particular structure used is the crystal structure of **human Adenylate Kinase 1A (AK1A)**, in complex with the **AP5A inhibitor** (PDB code [1Z83](https://www.rcsb.org/structure/1z83)). \r +\r +**Structure checking** is a key step before setting up a protein system for **simulations**. 
A number of **common issues** found in structures at **Protein Data Bank** may compromise the success of the **simulation**, or may suggest that longer **equilibration** procedures are necessary.\r +\r +The **workflow** shows how to:\r +\r +- Run **basic manipulations on structures** (selection of models, chains, alternative locations\r +- Detect and fix **amide assignments** and **wrong chiralities**\r +- Detect and fix **protein backbone** issues (missing fragments, and atoms, capping)\r +- Detect and fix **missing side-chain atoms**\r +- **Add hydrogen atoms** according to several criteria\r +- Detect and classify **atomic clashes**\r +- Detect possible **disulfide bonds (SS)**\r +\r +An implementation of this workflow in a **web-based Graphical User Interface (GUI)** can be found in the [https://mmb.irbbarcelona.org/biobb-wfs/](https://mmb.irbbarcelona.org/biobb-wfs/) server (see [https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check](https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check)).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.777.1" ; + schema1:isBasedOn 
"https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_structure_checking/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Molecular Structure Checking" ; + schema1:sdDatePublished "2024-07-12 13:24:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/777/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5246 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-05T08:52:36Z" ; + schema1:dateModified "2024-03-05T08:55:07Z" ; + schema1:description """# Molecular Structure Checking using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **checking** a **molecular structure** before using it as an input for a **Molecular Dynamics** simulation. The workflow uses the **BioExcel Building Blocks library (biobb)**. The particular structure used is the crystal structure of **human Adenylate Kinase 1A (AK1A)**, in complex with the **AP5A inhibitor** (PDB code [1Z83](https://www.rcsb.org/structure/1z83)). \r +\r +**Structure checking** is a key step before setting up a protein system for **simulations**. 
A number of **common issues** found in structures at **Protein Data Bank** may compromise the success of the **simulation**, or may suggest that longer **equilibration** procedures are necessary.\r +\r +The **workflow** shows how to:\r +\r +- Run **basic manipulations on structures** (selection of models, chains, alternative locations\r +- Detect and fix **amide assignments** and **wrong chiralities**\r +- Detect and fix **protein backbone** issues (missing fragments, and atoms, capping)\r +- Detect and fix **missing side-chain atoms**\r +- **Add hydrogen atoms** according to several criteria\r +- Detect and classify **atomic clashes**\r +- Detect possible **disulfide bonds (SS)**\r +\r +An implementation of this workflow in a **web-based Graphical User Interface (GUI)** can be found in the [https://mmb.irbbarcelona.org/biobb-wfs/](https://mmb.irbbarcelona.org/biobb-wfs/) server (see [https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check](https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check)).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Molecular Structure Checking" ; + 
schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_structure_checking/python/workflow.py" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """The Flashlite-Supernova pipeline runs Supernova to generate phased whole-genome de novo assemblies from a Chromium prepared library on [University of Queensland's HPC, Flashlite](https://rcc.uq.edu.au/flashlite). \r +\r +Infrastructure\\_deployment\\_metadata: FlashLite (QRISCloud)""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.151.1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/Flashlite-Supernova" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Flashlite-Supernova" ; + schema1:sdDatePublished "2024-07-12 13:36:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/151/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2606 ; + schema1:dateCreated "2021-08-18T23:21:08Z" ; + schema1:dateModified "2023-01-16T13:51:45Z" ; + schema1:description """The Flashlite-Supernova pipeline runs Supernova to generate phased whole-genome de novo assemblies from a Chromium prepared library on [University of Queensland's HPC, Flashlite](https://rcc.uq.edu.au/flashlite). \r +\r +Infrastructure\\_deployment\\_metadata: FlashLite (QRISCloud)""" ; + schema1:isPartOf ; + schema1:keywords "Flashlite, Supernova, 10X, TELLSeq" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Flashlite-Supernova" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/151?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +****\r +\r +About this workflow:\r +\r +* Run this workflow per tissue. \r +* Inputs: masked_genome.fasta and the trimmed RNAseq reads (R1 and R2) from one type of tissue. \r +* Index genome and align reads to genome with HISAT2, with default settings except for: Advanced options: spliced alignment options: specify options: Transcriptome assembly reporting: selected option: Report alignments tailored for transcript assemblers including StringTie (equivalent to -dta flag). \r +* Runs samtools sort to sort bam by coordinate. \r +* Runs StringTie to generate gtf from sorted bam. \r +* Output: transcripts.gtf from a single tissue.""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.877.1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Find transcripts - TSI" ; + schema1:sdDatePublished "2024-07-12 13:22:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/877/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11408 ; + schema1:creator , + ; + schema1:dateCreated "2024-05-08T06:51:41Z" ; + schema1:dateModified "2024-05-09T04:05:20Z" ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. 
\r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +****\r +\r +About this workflow:\r +\r +* Run this workflow per tissue. \r +* Inputs: masked_genome.fasta and the trimmed RNAseq reads (R1 and R2) from one type of tissue. \r +* Index genome and align reads to genome with HISAT2, with default settings except for: Advanced options: spliced alignment options: specify options: Transcriptome assembly reporting: selected option: Report alignments tailored for transcript assemblers including StringTie (equivalent to -dta flag). \r +* Runs samtools sort to sort bam by coordinate. \r +* Runs StringTie to generate gtf from sorted bam. \r +* Output: transcripts.gtf from a single tissue.""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "TSI-annotation" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Find transcripts - TSI" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/877?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 299849 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for analysis of Molecular Pixelation assays" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1010?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/pixelator" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/pixelator" ; + schema1:sdDatePublished "2024-07-12 13:20:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1010/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11496 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:07Z" ; + schema1:dateModified "2024-06-11T12:55:07Z" ; + schema1:description "Pipeline for analysis of Molecular Pixelation assays" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1010?version=5" ; + schema1:keywords "molecular-pixelation, pixelator, pixelgen-technologies, proteins, single-cell, single-cell-omics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/pixelator" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1010?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-07-12 13:22:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5800 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:11Z" ; + schema1:dateModified "2024-06-11T12:55:11Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + schema1:datePublished "2023-10-30T19:49:48.921572" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/kmer-profiling-hifi-trio-VGP2" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "kmer-profiling-hifi-trio-VGP2/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.15" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Genome-wide alternative splicing analysis v.2" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/482?version=7" ; + schema1:license "CC-BY-NC-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genome-wide alternative splicing analysis v.2" ; + schema1:sdDatePublished "2024-07-12 13:33:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/482/ro_crate?version=7" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 214419 . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 30115 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 109284 ; + schema1:creator ; + schema1:dateCreated "2023-06-11T19:17:35Z" ; + schema1:dateModified "2023-06-11T19:17:56Z" ; + schema1:description "Genome-wide alternative splicing analysis v.2" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/482?version=6" ; + schema1:keywords "Transcriptomics, Alternative splicing, isoform switching" ; + schema1:license "https://spdx.org/licenses/CC-BY-NC-4.0" ; + schema1:name "Genome-wide alternative splicing analysis v.2" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/482?version=7" ; + schema1:version 7 ; + ns1:input , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/807?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for A workflow demonstrating the 'Run interpolation based on IDW' tool" ; + schema1:sdDatePublished "2024-07-12 13:23:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/807/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10504 ; + schema1:dateCreated "2024-03-28T13:42:44Z" ; + schema1:dateModified "2024-03-28T13:46:06Z" ; + schema1:description "" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "A workflow demonstrating the 'Run interpolation based on IDW' tool" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/AquaINFRA/galaxy-workflow-idw/main/galaxy_workflow.ga" ; + schema1:version 1 ; + ns1:input , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Fastq-to-BAM @ NCI-Gadi is a genome alignment workflow that takes raw FASTQ files, aligns them to a reference genome and outputs analysis ready BAM files. This workflow is designed for the National Computational Infrastructure's (NCI) Gadi supercompter, leveraging multiple nodes on NCI Gadi to run all stages of the workflow in parallel, either massively parallel using the scatter-gather approach or parallel by sample. It consists of a number of stages and follows the BROAD Institute's best practice recommendations. \r +\r +Infrastructure\\_deployment\\_metadata: Gadi (NCI)""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.146.1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/Fastq-to-BAM/blob/fastq-to-bam-v2/README.md" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Fastq-to-bam @ NCI-Gadi" ; + schema1:sdDatePublished "2024-07-12 13:35:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/146/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 1189980 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 30768 ; + schema1:creator , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2021-08-17T04:45:57Z" ; + schema1:dateModified "2023-01-16T13:51:45Z" ; + schema1:description """Fastq-to-BAM @ NCI-Gadi is a genome alignment workflow that takes raw FASTQ files, aligns them to a reference genome and outputs analysis ready BAM files. This workflow is designed for the National Computational Infrastructure's (NCI) Gadi supercompter, leveraging multiple nodes on NCI Gadi to run all stages of the workflow in parallel, either massively parallel using the scatter-gather approach or parallel by sample. It consists of a number of stages and follows the BROAD Institute's best practice recommendations. \r +\r +Infrastructure\\_deployment\\_metadata: Gadi (NCI)""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "Genomics, Alignment, BROAD, WGS, BWA-mem, scalable, NCI, Gadi, PBS, genome, DNA, mapping" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Fastq-to-bam @ NCI-Gadi" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/146?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1025?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/taxprofiler" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/taxprofiler" ; + schema1:sdDatePublished "2024-07-12 13:18:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1025/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15607 ; + schema1:creator , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:19Z" ; + schema1:dateModified "2024-06-11T12:55:19Z" ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1025?version=10" ; + schema1:keywords "Classification, illumina, long-reads, Metagenomics, microbiome, nanopore, pathogen, Profiling, shotgun, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/taxprofiler" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1025?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + schema1:datePublished "2024-01-26T14:52:42.979858" ; + schema1:hasPart , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/velocyto" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + ; + schema1:name "velocyto/Velocyto-on10X-from-bundled" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.19" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "velocyto/Velocyto-on10X-filtered-barcodes" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/velocyto" ; + schema1:version "0.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.19" . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-11-23T13:33:07.987369" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "cutandrun/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """### Workflow for Metagenomics binning from assembly\r +\r +**Minimal inputs are: Identifier, assembly (fasta) and a associated sorted BAM file**\r +\r +**Summary**\r + - MetaBAT2 (binning)\r + - MaxBin2 (binning)\r + - SemiBin (binning)\r + - DAS Tool (bin merging)\r + - EukRep (eukaryotic classification)\r + - CheckM (bin completeness and contamination)\r + - BUSCO (bin completeness)\r + - GTDB-Tk (bin taxonomic classification)\r +\r +Other UNLOCK workflows on WorkflowHub: https://workflowhub.eu/projects/16/workflows?view=default

\r +\r +**All tool CWL files and other workflows can be found here:**
\r + Tools: https://gitlab.com/m-unlock/cwl
\r + Workflows: https://gitlab.com/m-unlock/cwl/workflows
\r +\r +**How to setup and use an UNLOCK workflow:**
\r +https://m-unlock.gitlab.io/docs/setup/setup.html
\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/64?version=11" ; + schema1:isBasedOn "https://gitlab.com/m-unlock/cwl/-/blob/master/cwl/workflows/workflow_metagenomics_binning.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Metagenomic Binning from Assembly" ; + schema1:sdDatePublished "2024-07-12 13:34:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/64/ro_crate?version=11" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 82211 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 18745 ; + schema1:creator , + ; + schema1:dateCreated "2021-10-18T09:49:33Z" ; + schema1:dateModified "2023-02-02T15:15:38Z" ; + schema1:description """### Workflow for Metagenomics binning from assembly\r +\r +**Minimal inputs are: Identifier, assembly (fasta) and a associated sorted BAM file**\r +\r +**Summary**\r + - MetaBAT2 (binning)\r + - MaxBin2 (binning)\r + - SemiBin (binning)\r + - DAS Tool (bin merging)\r + - EukRep (eukaryotic classification)\r + - CheckM (bin completeness and contamination)\r + - BUSCO (bin completeness)\r + - GTDB-Tk (bin taxonomic classification)\r +\r +Other UNLOCK workflows on WorkflowHub: https://workflowhub.eu/projects/16/workflows?view=default

\r +\r +**All tool CWL files and other workflows can be found here:**
\r + Tools: https://gitlab.com/m-unlock/cwl
\r + Workflows: https://gitlab.com/m-unlock/cwl/workflows
\r +\r +**How to setup and use an UNLOCK workflow:**
\r +https://m-unlock.gitlab.io/docs/setup/setup.html
\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/64?version=10" ; + schema1:keywords "Metagenomics, microbial, metagenome, binning" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Metagenomic Binning from Assembly" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/64?version=11" ; + schema1:version 11 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=21" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-07-12 13:21:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=21" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13448 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:50Z" ; + schema1:dateModified "2024-06-11T12:54:50Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=21" ; + schema1:version 21 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "A workflow querying on an endpoint of a graph database by a file containing a SPARQL query." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/122?version=1" ; + schema1:isBasedOn "https://github.com/longmanplus/EOSC-Life_demos" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for SPARQL query on graph database" ; + schema1:sdDatePublished "2024-07-12 13:37:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/122/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 283 ; + schema1:dateCreated "2021-05-23T16:14:17Z" ; + schema1:dateModified "2021-05-26T10:43:23Z" ; + schema1:description "A workflow querying on an endpoint of a graph database by a file containing a SPARQL query." ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/122?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "SPARQL query on graph database" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/122?version=1" ; + schema1:version 1 ; + ns1:input , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 1556 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A pipeline to demultiplex, QC and map Nanopore data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1002?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/nanoseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nanoseq" ; + schema1:sdDatePublished "2024-07-12 13:20:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1002/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9753 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:05Z" ; + schema1:dateModified "2024-06-11T12:55:05Z" ; + schema1:description "A pipeline to demultiplex, QC and map Nanopore data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1002?version=5" ; + schema1:keywords "Alignment, demultiplexing, nanopore, QC" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nanoseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1002?version=6" ; + schema1:version 6 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 320859 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 769 ; + schema1:dateModified "2024-03-21T11:54:38+00:00" ; + schema1:name "matmul_case1.csv" ; + schema1:sdDatePublished "2024-03-22T11:39:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 765 ; + schema1:dateModified "2023-11-28T00:25:59+00:00" ; + schema1:name "matmul_case2.csv" ; + schema1:sdDatePublished "2024-03-22T11:39:26+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 717 ; + schema1:dateModified "2023-11-28T19:49:11+00:00" ; + schema1:name "matmul_case3.csv" ; + schema1:sdDatePublished "2024-03-22T11:39:26+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/984?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/epitopeprediction" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/epitopeprediction" ; + schema1:sdDatePublished "2024-07-12 13:21:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/984/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9453 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:52Z" ; + schema1:dateModified "2024-06-11T12:54:52Z" ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/984?version=7" ; + schema1:keywords "epitope, epitope-prediction, mhc-binding-prediction" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/epitopeprediction" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/984?version=6" ; + schema1:version 6 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, related to the Galaxy training tutorial "[Biodiversity data exploration](https://training.galaxyproject.org/training-material/topics/ecology/tutorials/biodiversity-data-exploration/tutorial.html)"\r +\r +This workflow allows to explore biodiversity data looking at homoscedasticity, normality or collinearity of presences-absence or abundance data and at comparing beta diversity taking into account space, time and species components""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/656?version=1" ; + schema1:isBasedOn "https://ecology.usegalaxy.eu/u/ylebras/w/copy-of-workflow-biodiversity-data-exploration-tuto" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Biodiversity data exploration tutorial" ; + schema1:sdDatePublished "2024-07-12 13:26:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/656/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14766 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-11-09T12:47:49Z" ; + schema1:dateModified "2023-11-09T21:02:04Z" ; + schema1:description """Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, related to the Galaxy training tutorial "[Biodiversity data exploration](https://training.galaxyproject.org/training-material/topics/ecology/tutorials/biodiversity-data-exploration/tutorial.html)"\r +\r +This workflow allows to explore biodiversity data looking at homoscedasticity, normality or collinearity of presences-absence or abundance data and at comparing beta diversity taking into account space, time and species components""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Biodiversity data exploration tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/656?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2023-11-23T13:33:07.954489" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-only-VGP3/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.130.4" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Amber Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:24:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/130/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 66118 ; + schema1:creator , + ; + schema1:dateCreated "2023-07-26T09:36:49Z" ; + schema1:dateModified "2023-07-26T09:37:20Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/130?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Amber Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://raw.githubusercontent.com/bioexcel/biobb_wf_amber_md_setup/master/biobb_wf_amber_md_setup/notebooks/mdsetup/biobb_amber_setup_notebook.ipynb" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=14" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-07-12 13:22:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=14" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9304 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:12Z" ; + schema1:dateModified "2024-06-11T12:55:12Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=14" ; + schema1:version 14 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/278?version=7" ; + schema1:isBasedOn "https://gitlab.bsc.es/lrodrig1/structuralvariants_poc/-/tree/1.1.3/structuralvariants/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNV_pipeline" ; + schema1:sdDatePublished "2024-07-12 13:35:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/278/ro_crate?version=7" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9858 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9607 ; + schema1:creator , + ; + schema1:dateCreated "2022-06-01T17:26:39Z" ; + schema1:dateModified "2022-06-01T17:26:39Z" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/278?version=10" ; + schema1:keywords "cancer, CODEX2, ExomeDepth, manta, TransBioNet, variant calling, GRIDSS, structural variants" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CNV_pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/278?version=7" ; + schema1:version 7 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 74039 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Differential abundance analysis" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/981?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/differentialabundance" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/differentialabundance" ; + schema1:sdDatePublished "2024-07-12 13:21:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/981/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15641 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "Differential abundance analysis" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/981?version=8" ; + schema1:keywords "ATAC-seq, ChIP-seq, deseq2, differential-abundance, differential-expression, gsea, limma, microarray, rna-seq, shiny" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/differentialabundance" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/981?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """The second-level complexity workflow is one among a collection of workflows designed to address tasks up to CTF estimation. In addition to the functionalities provided by the layer 0 workflow, this workflow aims to enhance the quality of acquisition images using quality protocols.\r +\r +**Quality control protocols**\r +\r +* **Movie max shift**: automatic reject those movies whose frames move more than a given threshold. 
\r +\r +* **Tilt analysis**: quality score based in the Power Spectrum Density (astigmatism and tilt) \r +\r +* **CTF consensus**: acts as a filter discarding micrographs based on their CTF (limit resolution, defocus, astigmatism, etc.).\r +\r +**Advantages:** \r +\r +* More control of the acquisition quality\r +\r +* Reduce unnecessary processing time and storage""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/599?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CEITEC layer 1 workflow" ; + schema1:sdDatePublished "2024-07-12 13:17:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/599/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 53166 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7675 ; + schema1:dateCreated "2023-10-04T13:01:57Z" ; + schema1:dateModified "2024-07-10T14:16:39Z" ; + schema1:description """The second-level complexity workflow is one among a collection of workflows designed to address tasks up to CTF estimation. In addition to the functionalities provided by the layer 0 workflow, this workflow aims to enhance the quality of acquisition images using quality protocols.\r +\r +**Quality control protocols**\r +\r +* **Movie max shift**: automatic reject those movies whose frames move more than a given threshold. 
\r +\r +* **Tilt analysis**: quality score based in the Power Spectrum Density (astigmatism and tilt) \r +\r +* **CTF consensus**: acts as a filter discarding micrographs based on their CTF (limit resolution, defocus, astigmatism, etc.).\r +\r +**Advantages:** \r +\r +* More control of the acquisition quality\r +\r +* Reduce unnecessary processing time and storage""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/599?version=1" ; + schema1:isPartOf ; + schema1:keywords "image processing, cryoem, spa, scipion" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "CEITEC layer 1 workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/599?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=16" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-07-12 13:20:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=16" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8751 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:04Z" ; + schema1:dateModified "2024-06-11T12:55:04Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=16" ; + schema1:version 16 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "call and score variants from WGS/WES of rare disease patients" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1014?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/raredisease" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/raredisease" ; + schema1:sdDatePublished "2024-07-12 13:19:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1014/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13482 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:08Z" ; + schema1:dateModified "2024-06-11T12:55:08Z" ; + schema1:description "call and score variants from WGS/WES of rare disease patients" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1014?version=5" ; + schema1:keywords "diagnostics, rare-disease, snv, structural-variants, variant-annotation, variant-calling, wes, WGS" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/raredisease" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1014?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Abstract CWL Automatically generated from the Galaxy workflow file: CLM-FATES_ ALP1 simulation (5 years)" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/65?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CLM-FATES_ALP1_simulation_5years" ; + schema1:sdDatePublished "2024-07-12 13:37:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/65/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 3407 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 20149 ; + schema1:dateCreated "2020-10-27T12:14:22Z" ; + schema1:dateModified "2023-01-16T13:45:44Z" ; + schema1:description "Abstract CWL Automatically generated from the Galaxy workflow file: CLM-FATES_ ALP1 simulation (5 years)" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CLM-FATES_ALP1_simulation_5years" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/65?version=1" ; + schema1:version 1 ; + ns1:input , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 8510 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/963?version=12" ; + schema1:isBasedOn "https://github.com/nf-core/airrflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/airrflow" ; + schema1:sdDatePublished "2024-07-12 13:22:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/963/ro_crate?version=12" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14129 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:37Z" ; + schema1:dateModified "2024-06-11T12:54:37Z" ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/963?version=13" ; + schema1:keywords "airr, b-cell, immcantation, immunorepertoire, repseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/airrflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/963?version=12" ; + schema1:version 12 . + + a schema1:Dataset ; + schema1:datePublished "2024-06-24T13:49:03.518903" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/bacterial-genome-assembly" ; + schema1:license "GPL-3.0-or-later" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "bacterial-genome-assembly/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An automated processing pipeline for mammalian bulk calling cards experiments" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/970?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/callingcards" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/callingcards" ; + schema1:sdDatePublished "2024-07-12 13:22:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/970/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10916 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:44Z" ; + schema1:dateModified "2024-06-11T12:54:44Z" ; + schema1:description "An automated processing pipeline for mammalian bulk calling cards experiments" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/callingcards" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/970?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Virus genome assembly with Unicycler and Spades,\r +The 2 assemblers works in parallel. The graph visualization is made with Bandage.\r +workflow git repository : https://github.com/fjrmoreews/cwl-workflow-SARS-CoV-2/blob/master/Assembly/workflow/assembly-wf-virus.cwl\r +Based on https://github.com/galaxyproject/SARS-CoV-2/blob/master/genomics/2-Assembly/as_wf.png\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/3?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Virus genome assembly with Unicycler and Spades." ; + schema1:sdDatePublished "2024-07-12 13:37:30 +0100" ; + schema1:url "https://workflowhub.eu/workflows/3/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8602 ; + schema1:dateCreated "2020-04-10T10:45:00Z" ; + schema1:dateModified "2023-01-16T13:39:45Z" ; + schema1:description """Virus genome assembly with Unicycler and Spades,\r +The 2 assemblers works in parallel. 
The graph visualization is made with Bandage.\r +workflow git repository : https://github.com/fjrmoreews/cwl-workflow-SARS-CoV-2/blob/master/Assembly/workflow/assembly-wf-virus.cwl\r +Based on https://github.com/galaxyproject/SARS-CoV-2/blob/master/genomics/2-Assembly/as_wf.png\r +""" ; + schema1:image ; + schema1:keywords "covid-19, Assembly" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Virus genome assembly with Unicycler and Spades." ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/3?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 34311 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Genome-wide alternative splicing analysis v.2" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/482?version=4" ; + schema1:license "CC-BY-NC-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genome-wide alternative splicing analysis v.2" ; + schema1:sdDatePublished "2024-07-12 13:33:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/482/ro_crate?version=4" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 214419 . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 30115 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 119329 ; + schema1:creator ; + schema1:dateCreated "2023-06-11T11:57:01Z" ; + schema1:dateModified "2023-06-11T11:57:19Z" ; + schema1:description "Genome-wide alternative splicing analysis v.2" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/482?version=6" ; + schema1:keywords "Transcriptomics, Alternative splicing, isoform switching" ; + schema1:license "https://spdx.org/licenses/CC-BY-NC-4.0" ; + schema1:name "Genome-wide alternative splicing analysis v.2" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/482?version=4" ; + schema1:version 4 ; + ns1:input , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/278?version=9" ; + schema1:isBasedOn "https://gitlab.bsc.es/lrodrig1/structuralvariants_poc/-/tree/1.1.3/structuralvariants/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNV_pipeline" ; + schema1:sdDatePublished "2024-07-12 13:35:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/278/ro_crate?version=9" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9694 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9435 ; + schema1:creator , + ; + schema1:dateCreated "2022-06-30T13:07:39Z" ; + schema1:dateModified "2022-06-30T13:07:39Z" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/278?version=10" ; + schema1:keywords "cancer, CODEX2, ExomeDepth, manta, TransBioNet, variant calling, GRIDSS, structural variants" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CNV_pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/278?version=9" ; + schema1:version 9 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 72914 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# The Polygenic Score Catalog Calculator (`pgsc_calc`)\r +\r +[![Documentation Status](https://readthedocs.org/projects/pgsc-calc/badge/?version=latest)](https://pgsc-calc.readthedocs.io/en/latest/?badge=latest)\r +[![pgscatalog/pgsc_calc CI](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml/badge.svg)](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.5970794.svg)](https://doi.org/10.5281/zenodo.5970794)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-≥22.10.0-23aa62.svg?labelColor=000000)](https://www.nextflow.io/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +\r +## Introduction\r +\r +`pgsc_calc` is a bioinformatics best-practice analysis pipeline for calculating\r +polygenic [risk] scores on samples with imputed genotypes using existing scoring\r +files from the [Polygenic Score (PGS) Catalog](https://www.pgscatalog.org/)\r +and/or user-defined PGS/PRS.\r +\r +## Pipeline summary\r +\r +

\r + \r +

\r +\r +The workflow performs the following steps:\r +\r +* Downloading scoring files using the PGS Catalog API in a specified genome build (GRCh37 and GRCh38).\r +* Reading custom scoring files (and performing a liftover if genotyping data is in a different build).\r +* Automatically combines and creates scoring files for efficient parallel computation of multiple PGS\r + - Matching variants in the scoring files against variants in the target dataset (in plink bfile/pfile or VCF format)\r +* Calculates PGS for all samples (linear sum of weights and dosages)\r +* Creates a summary report to visualize score distributions and pipeline metadata (variant matching QC)\r +\r +And optionally:\r +\r +- Genetic Ancestry: calculate similarity of target samples to populations in a\r + reference dataset ([1000 Genomes (1000G)](http://www.nature.com/nature/journal/v526/n7571/full/nature15393.html)), using principal components analysis (PCA)\r +- PGS Normalization: Using reference population data and/or PCA projections to report\r + individual-level PGS predictions (e.g. percentiles, z-scores) that account for genetic ancestry\r +\r +See documentation for a list of planned [features under development](https://pgsc-calc.readthedocs.io/en/latest/index.html#Features-under-development).\r +\r +## Quick start\r +\r +1. Install\r +[`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation)\r +(`>=22.10.0`)\r +\r +2. Install [`Docker`](https://docs.docker.com/engine/installation/) or\r +[`Singularity (v3.8.3 minimum)`](https://www.sylabs.io/guides/3.0/user-guide/)\r +(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort)\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile test,\r + ```\r +\r +4. 
Start running your own analysis!\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile --input samplesheet.csv --pgs_id PGS001229\r + ```\r +\r +See [getting\r +started](https://pgsc-calc.readthedocs.io/en/latest/getting-started.html) for more\r +details.\r +\r +## Documentation\r +\r +[Full documentation is available on Read the Docs](https://pgsc-calc.readthedocs.io/)\r +\r +## Credits\r +\r +pgscatalog/pgsc_calc is developed as part of the PGS Catalog project, a\r +collaboration between the University of Cambridge’s Department of Public Health\r +and Primary Care (Michael Inouye, Samuel Lambert) and the European\r +Bioinformatics Institute (Helen Parkinson, Laura Harris).\r +\r +The pipeline seeks to provide a standardized workflow for PGS calculation and\r +ancestry inference implemented in nextflow derived from an existing set of\r +tools/scripts developed by Inouye lab (Rodrigo Canovas, Scott Ritchie, Jingqin\r +Wu) and PGS Catalog teams (Samuel Lambert, Laurent Gil).\r +\r +The adaptation of the codebase, nextflow implementation, and PGS Catalog features\r +are written by Benjamin Wingfield, Samuel Lambert, Laurent Gil with additional input\r +from Aoife McMahon (EBI). Development of new features, testing, and code review\r +is ongoing including Inouye lab members (Rodrigo Canovas, Scott Ritchie) and others. A\r +manuscript describing the tool is *in preparation*. In the meantime if you use the\r +tool we ask you to cite the repo and the paper describing the PGS Catalog\r +resource:\r +\r +- >PGS Catalog Calculator _(in preparation)_. PGS Catalog\r + Team. [https://github.com/PGScatalog/pgsc_calc](https://github.com/PGScatalog/pgsc_calc)\r +- >Lambert _et al._ (2021) The Polygenic Score Catalog as an open database for\r +reproducibility and systematic evaluation. Nature Genetics. 
53:420–425\r +doi:[10.1038/s41588-021-00783-5](https://doi.org/10.1038/s41588-021-00783-5).\r +\r +This pipeline is distrubuted under an [Apache License](LICENSE) amd uses code and \r +infrastructure developed and maintained by the [nf-core](https://nf-co.re) community \r +(Ewels *et al. Nature Biotech* (2020) doi:[10.1038/s41587-020-0439-x](https://doi.org/10.1038/s41587-020-0439-x)), \r +reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +Additional references of open-source tools and data used in this pipeline are described in\r +[`CITATIONS.md`](CITATIONS.md).\r +\r +This work has received funding from EMBL-EBI core funds, the Baker Institute,\r +the University of Cambridge, Health Data Research UK (HDRUK), and the European\r +Union’s Horizon 2020 research and innovation programme under grant agreement No\r +101016775 INTERVENE.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/556?version=1" ; + schema1:isBasedOn "https://github.com/PGScatalog/pgsc_calc" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for The Polygenic Score Catalog Calculator" ; + schema1:sdDatePublished "2024-07-12 13:27:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/556/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1673 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-08-10T09:01:48Z" ; + schema1:dateModified "2023-08-10T09:01:48Z" ; + schema1:description """# The Polygenic Score Catalog Calculator (`pgsc_calc`)\r +\r +[![Documentation Status](https://readthedocs.org/projects/pgsc-calc/badge/?version=latest)](https://pgsc-calc.readthedocs.io/en/latest/?badge=latest)\r +[![pgscatalog/pgsc_calc CI](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml/badge.svg)](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.5970794.svg)](https://doi.org/10.5281/zenodo.5970794)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-≥22.10.0-23aa62.svg?labelColor=000000)](https://www.nextflow.io/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +\r +## Introduction\r +\r +`pgsc_calc` is a bioinformatics best-practice analysis pipeline for calculating\r +polygenic [risk] scores on samples with imputed genotypes using existing scoring\r +files from the [Polygenic Score (PGS) Catalog](https://www.pgscatalog.org/)\r +and/or user-defined PGS/PRS.\r +\r +## Pipeline summary\r +\r +

\r + \r +

\r +\r +The workflow performs the following steps:\r +\r +* Downloading scoring files using the PGS Catalog API in a specified genome build (GRCh37 and GRCh38).\r +* Reading custom scoring files (and performing a liftover if genotyping data is in a different build).\r +* Automatically combines and creates scoring files for efficient parallel computation of multiple PGS\r + - Matching variants in the scoring files against variants in the target dataset (in plink bfile/pfile or VCF format)\r +* Calculates PGS for all samples (linear sum of weights and dosages)\r +* Creates a summary report to visualize score distributions and pipeline metadata (variant matching QC)\r +\r +And optionally:\r +\r +- Genetic Ancestry: calculate similarity of target samples to populations in a\r + reference dataset ([1000 Genomes (1000G)](http://www.nature.com/nature/journal/v526/n7571/full/nature15393.html)), using principal components analysis (PCA)\r +- PGS Normalization: Using reference population data and/or PCA projections to report\r + individual-level PGS predictions (e.g. percentiles, z-scores) that account for genetic ancestry\r +\r +See documentation for a list of planned [features under development](https://pgsc-calc.readthedocs.io/en/latest/index.html#Features-under-development).\r +\r +## Quick start\r +\r +1. Install\r +[`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation)\r +(`>=22.10.0`)\r +\r +2. Install [`Docker`](https://docs.docker.com/engine/installation/) or\r +[`Singularity (v3.8.3 minimum)`](https://www.sylabs.io/guides/3.0/user-guide/)\r +(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort)\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile test,\r + ```\r +\r +4. 
Start running your own analysis!\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile --input samplesheet.csv --pgs_id PGS001229\r + ```\r +\r +See [getting\r +started](https://pgsc-calc.readthedocs.io/en/latest/getting-started.html) for more\r +details.\r +\r +## Documentation\r +\r +[Full documentation is available on Read the Docs](https://pgsc-calc.readthedocs.io/)\r +\r +## Credits\r +\r +pgscatalog/pgsc_calc is developed as part of the PGS Catalog project, a\r +collaboration between the University of Cambridge’s Department of Public Health\r +and Primary Care (Michael Inouye, Samuel Lambert) and the European\r +Bioinformatics Institute (Helen Parkinson, Laura Harris).\r +\r +The pipeline seeks to provide a standardized workflow for PGS calculation and\r +ancestry inference implemented in nextflow derived from an existing set of\r +tools/scripts developed by Inouye lab (Rodrigo Canovas, Scott Ritchie, Jingqin\r +Wu) and PGS Catalog teams (Samuel Lambert, Laurent Gil).\r +\r +The adaptation of the codebase, nextflow implementation, and PGS Catalog features\r +are written by Benjamin Wingfield, Samuel Lambert, Laurent Gil with additional input\r +from Aoife McMahon (EBI). Development of new features, testing, and code review\r +is ongoing including Inouye lab members (Rodrigo Canovas, Scott Ritchie) and others. A\r +manuscript describing the tool is *in preparation*. In the meantime if you use the\r +tool we ask you to cite the repo and the paper describing the PGS Catalog\r +resource:\r +\r +- >PGS Catalog Calculator _(in preparation)_. PGS Catalog\r + Team. [https://github.com/PGScatalog/pgsc_calc](https://github.com/PGScatalog/pgsc_calc)\r +- >Lambert _et al._ (2021) The Polygenic Score Catalog as an open database for\r +reproducibility and systematic evaluation. Nature Genetics. 
53:420–425\r +doi:[10.1038/s41588-021-00783-5](https://doi.org/10.1038/s41588-021-00783-5).\r +\r +This pipeline is distrubuted under an [Apache License](LICENSE) amd uses code and \r +infrastructure developed and maintained by the [nf-core](https://nf-co.re) community \r +(Ewels *et al. Nature Biotech* (2020) doi:[10.1038/s41587-020-0439-x](https://doi.org/10.1038/s41587-020-0439-x)), \r +reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +Additional references of open-source tools and data used in this pipeline are described in\r +[`CITATIONS.md`](CITATIONS.md).\r +\r +This work has received funding from EMBL-EBI core funds, the Baker Institute,\r +the University of Cambridge, Health Data Research UK (HDRUK), and the European\r +Union’s Horizon 2020 research and innovation programme under grant agreement No\r +101016775 INTERVENE.\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/556?version=3" ; + schema1:keywords "Nextflow, Workflows, polygenic risk score, polygenic score, prediction, GWAS, genomic ancestry" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "The Polygenic Score Catalog Calculator" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/556?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/963?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/airrflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/airrflow" ; + schema1:sdDatePublished "2024-07-12 13:22:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/963/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8464 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:36Z" ; + schema1:dateModified "2024-06-11T12:54:36Z" ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/963?version=13" ; + schema1:keywords "airr, b-cell, immcantation, immunorepertoire, repseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/airrflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/963?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling" ; + schema1:mainEntity ; + schema1:name "COVID-19-PE-ARTIC-ILLUMINA (v0.1)" ; + schema1:sdDatePublished "2021-03-12 13:41:23 +0000" ; + schema1:softwareVersion "v0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 56967 ; + schema1:name "COVID-19-PE-ARTIC-ILLUMINA" ; + schema1:programmingLanguage . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "The workflow takes ONT reads collection, runs SeqKit and Nanoplot. The main outputs are a table and plots of raw reads stats." 
; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/697?version=1" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/galaxy" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ERGA DataQC ONT v2311 (WF0)" ; + schema1:sdDatePublished "2024-07-12 13:25:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/697/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8849 ; + schema1:creator , + ; + schema1:dateCreated "2024-01-08T15:25:44Z" ; + schema1:dateModified "2024-01-08T15:57:26Z" ; + schema1:description "The workflow takes ONT reads collection, runs SeqKit and Nanoplot. The main outputs are a table and plots of raw reads stats." ; + schema1:image ; + schema1:isPartOf , + ; + schema1:keywords "ONT, ERGA, DataQC" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ERGA DataQC ONT v2311 (WF0)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/0.Data_QC/Galaxy-Workflow-ERGA_DataQC_ONT_v2311_(WF0).ga" ; + schema1:version 1 ; + ns1:input . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 147798 ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/0.Data_QC/pics/QC_ont_2311.png" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 167040 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3277 ; + schema1:dateModified "2024-01-17T10:54:05+00:00" ; + schema1:name "dnn_cnn_5epochs.csv" ; + schema1:sdDatePublished "2024-03-25T11:02:59+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.280.2" ; + schema1:isBasedOn 
"https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_ligand_parameterization/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python GMX Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-07-12 13:34:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/280/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1728 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-24T14:17:18Z" ; + schema1:dateModified "2022-04-11T09:29:42Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/280?version=3" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python GMX Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_ligand_parameterization/python/workflow.py" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=11" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-07-12 13:19:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=11" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14236 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:15Z" ; + schema1:dateModified "2024-06-11T12:55:15Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=11" ; + schema1:version 11 . + + a schema1:Dataset ; + schema1:datePublished "2024-07-03T12:31:48.614143" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/allele-based-pathogen-identification" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "allele-based-pathogen-identification/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1023?version=11" ; + schema1:isBasedOn "https://github.com/nf-core/smrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/smrnaseq" ; + schema1:sdDatePublished "2024-07-12 13:18:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1023/ro_crate?version=11" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12489 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:19Z" ; + schema1:dateModified "2024-06-11T12:55:19Z" ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1023?version=10" ; + schema1:keywords "small-rna, smrna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/smrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1023?version=11" ; + schema1:version 11 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# PacBio HiFi genome assembly using hifiasm v2.1\r +\r +## General usage recommendations\r +Please see the [Genome assembly with hifiasm on Galaxy Australia](https://australianbiocommons.github.io/how-to-guides/genome_assembly/hifi_assembly) guide.\r +\r +## See [change log](./change_log.md)\r +\r +## Acknowledgements\r +\r +The workflow & the [doc_guidelines template used](https://github.com/AustralianBioCommons/doc_guidelines) are \r +supported by the Australian BioCommons via Bioplatforms Australia funding, the Australian Research Data Commons \r +(https://doi.org/10.47486/PL105) and the Queensland Government RICF programme. 
Bioplatforms Australia and the \r +Australian Research Data Commons are enabled by the National Collaborative Research Infrastructure Strategy (NCRIS).\r +\r +\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.221.3" ; + schema1:isBasedOn "https://github.com/AustralianBioCommons/PacBio-HiFi-genome-assembly-using-hifiasm" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PacBio HiFi genome assembly using hifiasm v2.1" ; + schema1:sdDatePublished "2024-07-12 13:34:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/221/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 27208 ; + schema1:creator , + ; + schema1:dateCreated "2022-10-21T05:13:12Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """# PacBio HiFi genome assembly using hifiasm v2.1\r +\r +## General usage recommendations\r +Please see the [Genome assembly with hifiasm on Galaxy Australia](https://australianbiocommons.github.io/how-to-guides/genome_assembly/hifi_assembly) guide.\r +\r +## See [change log](./change_log.md)\r +\r +## Acknowledgements\r +\r +The workflow & the [doc_guidelines template used](https://github.com/AustralianBioCommons/doc_guidelines) are \r +supported by the Australian BioCommons via Bioplatforms Australia funding, the Australian Research Data Commons \r +(https://doi.org/10.47486/PL105) and the Queensland Government RICF programme. 
Bioplatforms Australia and the \r +Australian Research Data Commons are enabled by the National Collaborative Research Infrastructure Strategy (NCRIS).\r +\r +\r +\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/221?version=2" ; + schema1:isPartOf , + ; + schema1:keywords "FASTQ, hifiasm, HiFi, genome_assembly" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "PacBio HiFi genome assembly using hifiasm v2.1" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/221?version=3" ; + schema1:version 3 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Takes fastqs and reference data, to produce a single cell counts matrix into and save in annData format - adding a column called sample with the sample name. " ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/513?version=3" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq: Count and Load with starSOLO" ; + schema1:sdDatePublished "2024-07-12 13:22:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/513/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 28457 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-05-30T05:47:35Z" ; + schema1:dateModified "2024-05-30T05:47:35Z" ; + schema1:description "Takes fastqs and reference data, to produce a single cell counts matrix into and save in annData format - adding a column called sample with the sample name. 
" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/513?version=2" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "scRNAseq: Count and Load with starSOLO" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/513?version=3" ; + schema1:version 3 ; + ns1:input , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-07-12 13:18:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4606 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:09Z" ; + schema1:dateModified "2024-06-11T12:55:09Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=3" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Differential abundance analysis" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/981?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/differentialabundance" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/differentialabundance" ; + schema1:sdDatePublished "2024-07-12 13:21:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/981/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12719 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "Differential abundance analysis" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/981?version=8" ; + schema1:keywords "ATAC-seq, ChIP-seq, deseq2, differential-abundance, differential-expression, gsea, limma, microarray, rna-seq, shiny" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/differentialabundance" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/981?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# workflow-denovo-stacks\r +\r +These workflows are part of a set designed to work for RAD-seq data on the Galaxy platform, using the tools from the Stacks program. \r +\r +Galaxy Australia: https://usegalaxy.org.au/\r +\r +Stacks: http://catchenlab.life.illinois.edu/stacks/\r +\r +## Inputs\r +* demultiplexed reads in fastq format, may be output from the QC workflow. Files are in a collection. \r +* population map in text format\r +\r +\r +## Steps and outputs\r +\r +ustacks:\r +* input reads go to ustacks. 
\r +* ustacks assembles the reads into matching stacks (hypothetical alleles). \r +* The outputs are in a collection called something like: Stacks2: ustacks on data 21, data 20, and others Loci and polymorphism. Click on this to see the files:\r +* for each sample, assembled loci (tsv format), named e.g. sample_CAAC.tags\r +* for each sample, model calls from each locus (tsv format), named e.g. sample_CAAC.snps\r +* for each sample, haplotypes/alleles recorded from each locus (tsv format), named e.g. sample_CAAC.alleles\r +* Please see sections 6.1 to 6.4 in https://catchenlab.life.illinois.edu/stacks/manual/#ufiles for a full description. \r +\r +cstacks:\r +* cstacks will merge stacks into a catalog of consensus loci. \r +* The outputs are in a collection called something like Stacks2: cstacks on data 3, data 71, and others Catalog of loci. Click on this to see the three files, each in tsv format:\r +catalog.tags\r +catalog.snps\r +catalog.alleles\r +\r +\r +sstacks:\r +* sstacks will compare each sample to the loci in the catalog. \r +* The outputs are in a collection called something like Stacks2: sstacks on data 3, data 76, and others Matches to the catalog.Click on this to see the files:\r +There is one file for each sample, named e.g. sample_CAAC.matches, in tsv format. \r +\r +tsv2bam:\r +* Conversion to BAM format\r +* Reads from each sample are now aligned to each locus, and the tsv2bam tool will convert this into a bam file for each sample. \r +* The outputs are in a collection called something like Stacks2: tsv2bam on data 3, data 94, and others Matches to the catalog.Click on this to see the files:\r +There is one file for each sample, named e.g sample_CAAC.matches, in BAM format. 
\r +\r +gstacks:\r +* Catalog of loci in fasta format\r +* Variant calls in VCF format\r +\r +populations:\r +* Locus consensus sequences in fasta format\r +* Snp calls, in VCF format\r +* Haplotypes, in VCF format\r +* Summary statistics\r +\r +![denovo](wf-denovo.png)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/348?version=1" ; + schema1:isBasedOn "https://github.com/AnnaSyme/workflow-denovo-stacks.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Stacks RAD-seq de novo workflow" ; + schema1:sdDatePublished "2024-07-12 13:35:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/348/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 25118 ; + schema1:creator ; + schema1:dateCreated "2022-05-31T07:39:10Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """# workflow-denovo-stacks\r +\r +These workflows are part of a set designed to work for RAD-seq data on the Galaxy platform, using the tools from the Stacks program. \r +\r +Galaxy Australia: https://usegalaxy.org.au/\r +\r +Stacks: http://catchenlab.life.illinois.edu/stacks/\r +\r +## Inputs\r +* demultiplexed reads in fastq format, may be output from the QC workflow. Files are in a collection. \r +* population map in text format\r +\r +\r +## Steps and outputs\r +\r +ustacks:\r +* input reads go to ustacks. \r +* ustacks assembles the reads into matching stacks (hypothetical alleles). \r +* The outputs are in a collection called something like: Stacks2: ustacks on data 21, data 20, and others Loci and polymorphism. Click on this to see the files:\r +* for each sample, assembled loci (tsv format), named e.g. sample_CAAC.tags\r +* for each sample, model calls from each locus (tsv format), named e.g. 
sample_CAAC.snps\r +* for each sample, haplotypes/alleles recorded from each locus (tsv format), named e.g. sample_CAAC.alleles\r +* Please see sections 6.1 to 6.4 in https://catchenlab.life.illinois.edu/stacks/manual/#ufiles for a full description. \r +\r +cstacks:\r +* cstacks will merge stacks into a catalog of consensus loci. \r +* The outputs are in a collection called something like Stacks2: cstacks on data 3, data 71, and others Catalog of loci. Click on this to see the three files, each in tsv format:\r +catalog.tags\r +catalog.snps\r +catalog.alleles\r +\r +\r +sstacks:\r +* sstacks will compare each sample to the loci in the catalog. \r +* The outputs are in a collection called something like Stacks2: sstacks on data 3, data 76, and others Matches to the catalog.Click on this to see the files:\r +There is one file for each sample, named e.g. sample_CAAC.matches, in tsv format. \r +\r +tsv2bam:\r +* Conversion to BAM format\r +* Reads from each sample are now aligned to each locus, and the tsv2bam tool will convert this into a bam file for each sample. \r +* The outputs are in a collection called something like Stacks2: tsv2bam on data 3, data 94, and others Matches to the catalog.Click on this to see the files:\r +There is one file for each sample, named e.g sample_CAAC.matches, in BAM format. \r +\r +gstacks:\r +* Catalog of loci in fasta format\r +* Variant calls in VCF format\r +\r +populations:\r +* Locus consensus sequences in fasta format\r +* Snp calls, in VCF format\r +* Haplotypes, in VCF format\r +* Summary statistics\r +\r +![denovo](wf-denovo.png)\r +""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Stacks RAD-seq de novo workflow" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/348?version=1" ; + schema1:version 1 . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 350220 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "call and score variants from WGS/WES of rare disease patients" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1014?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/raredisease" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/raredisease" ; + schema1:sdDatePublished "2024-07-12 13:19:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1014/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14007 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:08Z" ; + schema1:dateModified "2024-06-11T12:55:08Z" ; + schema1:description "call and score variants from WGS/WES of rare disease patients" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1014?version=5" ; + schema1:keywords "diagnostics, rare-disease, snv, structural-variants, variant-annotation, variant-calling, wes, WGS" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/raredisease" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1014?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + schema1:datePublished "2023-09-12T13:46:41.022242" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "parallel-accession-download/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.10" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=13" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-07-12 13:22:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=13" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9077 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:12Z" ; + schema1:dateModified "2024-06-11T12:55:12Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=13" ; + schema1:version 13 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/963?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/airrflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/airrflow" ; + schema1:sdDatePublished "2024-07-12 13:22:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/963/ro_crate?version=7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9444 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:37Z" ; + schema1:dateModified "2024-06-11T12:54:37Z" ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/963?version=13" ; + schema1:keywords "airr, b-cell, immcantation, immunorepertoire, repseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/airrflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/963?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + schema1:datePublished "2024-06-19T07:34:48.016693" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/brew3r" ; + schema1:license "GPL-3.0-or-later" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "brew3r/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Alignment, assembly and annotation of generated transcripts from RNASEQ reads." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/40?version=1" ; + schema1:isBasedOn "https://usegalaxy.eu/u/ambarishk/w/covid-19-assembly-using-tophat-and-annotation" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Assembly using Tophat2 and annotation" ; + schema1:sdDatePublished "2024-07-12 13:37:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/40/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 46216 ; + schema1:dateCreated "2020-06-18T23:17:39Z" ; + schema1:dateModified "2023-02-13T14:06:46Z" ; + schema1:description "Alignment, assembly and annotation of generated transcripts from RNASEQ reads." ; + schema1:image ; + schema1:keywords "Alignment, Assembly, Annotation, Tophat2, RNASEQ, covid-19" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Assembly using Tophat2 and annotation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/40?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 280925 . + + a schema1:Dataset ; + schema1:datePublished "2024-03-06T19:15:00.868427" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Scaffolding-Bionano-VGP7" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Scaffolding-Bionano-VGP7/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# HiC scaffolding pipeline\r +\r +Snakemake pipeline for scaffolding of a genome using HiC reads using yahs.\r +\r +## Prerequisites\r +\r +This pipeine has been tested using `Snakemake v7.32.4` and requires conda for installation of required tools. 
To run the pipline use the command:\r +\r +`snakemake --use-conda --cores N`\r +\r +where N is number of cores to use. There are provided a set of configuration and running scripts for exectution on a slurm queueing system. After configuring the `cluster.json` file run:\r +\r +`./run_cluster`\r +\r +## Before starting\r +\r +You need to create a temporary folder and specify the path in the `config.yaml` file. This should be able to hold the temporary files created when sorting the `.pairsam` file (100s of GB or even many TBs)\r +\r +The path to the genome assemly must be given in the `config.yaml`.\r +\r +The HiC reads should be paired and named as follows: `Library_1.fastq.gz Library_2.fastq.gz`. The pipeline can accept any number of paired HiC read files, but the naming must be consistent. The folder containing these files must be provided in the `config.yaml`.""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.796.1" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/snakemake/4.Scaffolding/yahs" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for HiC scaffolding pipeline" ; + schema1:sdDatePublished "2024-07-12 13:23:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/796/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4471 ; + schema1:creator ; + schema1:dateCreated "2024-03-16T09:01:33Z" ; + schema1:dateModified "2024-06-21T09:41:21Z" ; + schema1:description """# HiC scaffolding pipeline\r +\r +Snakemake pipeline for scaffolding of a genome using HiC reads using yahs.\r +\r +## Prerequisites\r +\r +This pipeine has been tested using `Snakemake v7.32.4` and requires conda for installation of required tools. 
To run the pipline use the command:\r +\r +`snakemake --use-conda --cores N`\r +\r +where N is number of cores to use. There are provided a set of configuration and running scripts for exectution on a slurm queueing system. After configuring the `cluster.json` file run:\r +\r +`./run_cluster`\r +\r +## Before starting\r +\r +You need to create a temporary folder and specify the path in the `config.yaml` file. This should be able to hold the temporary files created when sorting the `.pairsam` file (100s of GB or even many TBs)\r +\r +The path to the genome assemly must be given in the `config.yaml`.\r +\r +The HiC reads should be paired and named as follows: `Library_1.fastq.gz Library_2.fastq.gz`. The pipeline can accept any number of paired HiC read files, but the naming must be consistent. The folder containing these files must be provided in the `config.yaml`.""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/796?version=1" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "HiC scaffolding pipeline" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/796?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.284.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_complex_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/284/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7128 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-17T10:37:49Z" ; + schema1:dateModified "2022-04-11T09:29:43Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/284?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_complex_md_setup/python/workflow.py" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/amber-protein-ligand-complex-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/298?version=2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_complex_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:36 +0100" ; + schema1:url 
"https://workflowhub.eu/workflows/298/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 102255 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-23T08:43:48Z" ; + schema1:dateModified "2023-01-16T13:58:55Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/amber-protein-ligand-complex-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/298?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Amber Protein Ligand Complex 
MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_complex_md_setup/galaxy/biobb_wf_amber_complex_md_setup.ga" ; + schema1:version 2 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2023-10-23T13:04:32.570375" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.14" . + + a schema1:Dataset ; + schema1:datePublished "2024-06-17T16:32:31.401725" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/bacterial-genome-assembly" ; + schema1:license "GPL-3.0-or-later" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "bacterial-genome-assembly/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A workflow to simulate reads" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1015?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/readsimulator" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/readsimulator" ; + schema1:sdDatePublished "2024-07-12 13:19:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1015/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11759 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:09Z" ; + schema1:dateModified "2024-06-11T12:55:09Z" ; + schema1:description "A workflow to simulate reads" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1015?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/readsimulator" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1015?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """## Summary\r +\r +This pipeline has as major goal provide a tool for protein interactions (PPI) prediction data formalization and standardization using the [OntoPPI](https://link.springer.com/chapter/10.1007/978-3-030-36599-8_23) ontology. This pipeline is splitted in two parts: (i) a part to prepare data from three main sources of PPI data ([HINT](http://hint.yulab.org/), [STRING](https://string-db.org/) and [PredPrin](https://github.com/YasCoMa/PredPrin.git)) and create the standard files to be processed by the next part; (ii) the second part uses the data prepared before to semantically describe using ontologies related to the concepts of this domain. It describes the provenance information of PPI prediction experiments, datasets characteristics, functional annotations of proteins involved in the PPIs, description of the PPI detection methods (also named as evidence) used in the experiment, and the prediction score obtained by each PPI detection method for the PPIs. 
This pipeline also execute data fusion to map the same protein pairs from different data sources and, finally, it creates a database of all these information in the [alegro](https://allegrograph.com/) graph triplestore.\r +\r +## Requirements:\r +* Python packages needed:\r + - pip3 install numpy\r + - pip3 install rdflib\r + - pip3 install uuid\r + - pip3 install SPARQLWrapper\r + - alegro graph tools (pip3 install agraph-python)
\r + Go to this [site](https://franz.com/agraph/support/documentation/current/python/install.html) for the installation tutorial\r +\r +## Usage Instructions\r +### Preparation:\r +1. ````git clone https://github.com/YasCoMa/ppintegrator.git````\r +2. ````cd ppintegrator````\r +3. `pip3 install -r requirements.txt`\r +**Allegrograph is a triple store, which is a database to maintain semantic descriptions. This database's server provides a web application with a user interface to run, edit and manage queries, visualize results and manipulate the data without writing codes other than SPARQL query language. The use of the Allegregraph option is not mandatory, but if you want to export and use it, you have to install the server and the client.**\r +4. if you want to use the Allegrograph server option (this triple store has free license up to 5,000,000 triples), install allegrograph server in your machine (configure a user and password): Server - https://franz.com/agraph/support/documentation/current/server-installation.html; Client - https://franz.com/agraph/support/documentation/current/python/install.html\r +5. Export the following environment variables to configure Allegrograph server\r +\r +````\r +export AGRAPH_HOST=127.0.0.1\r +export AGRAPH_PORT=10035\r +export AGRAPH_USER=chosen_user\r +export AGRAPH_PASSWORD=chosen_password\r +````\r +5. Start allegrograph: ````path/to/allegrograph/bin/agraph-control --config path/to/allegrograph/lib/agraph.cfg start````\r +6. Read the file data_requirements.txt to understand which files are needed for the process\r +\r +### Data preparation (first part) - File ````prepare_data_triplification.py```` :\r +* Pipeline parameters:\r + - __-rt__ or __--running_type__
\r + Use to indicate from which source you want to prepare PPI data, as follows:
\r + 1 - Prepare data for PredPrin
\r + 2 - Prepare data for String
\r + 3 - Prepare data for HINT\r + - __-fec__ or __--file_experiment_config__
\r + File with the experiment configuration in json format
\r + \r + Examples are in these files (all the metadata are required): params_hint.json, params_predrep_5k.json e params_string.json\r +\r + - __-org__ or __--organism__
\r + Prepare data only for one organism of interest (example: homo_sapiens)
\r +\r + This parameter is optional. If you do not specify, it will automatically use the organisms described in the experiment configuration file above\r +\r +\r +* Running modes examples:\r + 1. Running for PPI data generated by PredPrin:
\r + ````python3 prepare_data_triplification.py -rt 1 -fec params_predrep_5k.json````\r +\r + 2. Running for HINT database:
\r + ````python3 prepare_data_triplification.py -rt 3 -fec params_hint.json````\r +\r + 3. Running for STRING database:
\r + ````python3 prepare_data_triplification.py -rt 2 -fec params_string.json````\r +\r + In the file ````auxiliar_data_preparation.py```` you can run it for all the examples provided automatically, as follows:
\r + ````python3 auxiliar_data_preparation.py````\r +\r +\r +### PPI data triplification (second part) - File ````triplification_ppi_data.py````:\r +\r +* Pipeline parameters:\r + - __-rt__ or __--running_type__
\r + Use to indicate which execution step you want to run (it is desirable following the order showed):
\r + 0 - Generate the descriptions for all the protein interaction steps of an experiment (run steps 1, 2 and 3)
\r + 1 - Generate triples just about data provenance
\r + 2 - Generate triples just for protein functional annotations
\r + 3 - Generate triples just for the score results of each evidence
\r + 4 - Execute data fusion
\r + 5 - Generate descriptions and execute data fusion (run steps 1, 2, 3 and 4)
\r + 6 - Export to allegrograph server\r +\r + - __-fec__ or __--file_experiment_config__
\r + File with the experiment configuration in json format
\r + \r + Examples are in these files (all the metadata are required): params_hint.json, params_predrep_5k.json e params_string.json\r +\r + - __-fev__ or __--file_evidence_info__
\r + File with the PPI detection methods information in json format
\r + \r + Examples are in these files (all the metadata are required): evidences_information.json, evidences_information_hint.json e evidences_information_string.json\r +\r + - __-fcv__ or __--file_config_evidence__
\r + File with the experiment and evidence methods files addresses in tsv format
\r + \r + Example of this file: config_evidence_file.tsv\r +\r +* Running modes examples:\r + 1. Running to generate all semantic descriptions for PredPrin:
\r + ````python3 triplification_ppi_data.py -rt 0 -fec params_predrep_5k.json -fev evidences_information.json````\r +\r + 2. Running to generate only triples of data provenance:
\r + ````python3 triplification_ppi_data.py -rt 1 -fec params_hint.json -fev evidences_information_hint.json````\r +\r + 3. Running to generate only triples of PPI scores for each evidence:
\r + ````python3 triplification_ppi_data.py -rt 3 -fec params_hint.json -fev evidences_information_hint.json````\r +\r + 4. Running to generate only triples of protein functional annotations (only PredPrin exports these annotations):
\r + ````python3 triplification_ppi_data.py -rt 2 -fec params_predrep_5k.json -fev evidences_information.json````\r +\r + 5. Running to generate all semantic descrptions for STRING:
\r + ````python3 triplification_ppi_data.py -rt 0 -fec params_string.json -fev evidences_information_string.json````\r + \r + **For the next options (4, 5 and 6), it is mandatory running at least mode 1 and 3 for HINT, STRING and PredPrin**\r + \r + 6. Running to execute data fusion of different sources:
\r + ````python3 triplification_ppi_data.py -rt 4 -fcv config_evidence_file.tsv````\r +\r + 7. Running to generate all semantic descriptions and execute data fusion of different sources (combines mode 0 and 4):
\r + ````python3 triplification_ppi_data.py -rt 5 -fcv config_evidence_file.tsv````\r +\r + 8. Export semantic data to allegrograph server:
\r + ````python3 triplification_ppi_data.py -rt 6 -fcv config_evidence_file.tsv````\r +\r +## Query Scenarios for analysis\r +Supposing you ran all the steps showed in the section above, you can run the following options to analyse the data stored alegro graph triple store.
\r +File to use for this section: ````query_analysis_ppitriplificator.py````
\r +\r +* Parameter:\r + - __-q__ or __--query_option__
\r + Use to indicate which query you want to perform:
\r + 1 - Get all the different organisms whose interactions are stored in the database
\r + 2 - Get the interactions that have scientific papers associated and the list of these papers
\r + 3 - Get a list of the most frequent biological processes annotated for the interactions of Escherichia coli bacteria
\r + 4 - Get only the interactions belonging to a specific biological process (regulation of transcription, DNA-templated) in Escherichia coli bacteria
\r + 5 - Get the scores of interactions belonging to a specific biological process (regulation of transcription, DNA-templated) in Escherichia coli bacteria
\r + 6 - Get a list of the most frequent biological processes annotated for the interactions of human organism
\r + 7 - Get only the interactions belonging to a specific biological process (positive regulation of transcription by RNA polymerase II) in human organism
\r + 8 - Get the scores of interactions belonging to a specific biological process (positive regulation of transcription by RNA polymerase II) in human organism\r +\r +* Running modes examples:\r + 1. Running queries:
\r + ````python3 query_analysis_ppitriplificator.py -q 1 ````
\r + Change number 1 to the respective number of the query you want to perform\r +\r +## Reference\r +Martins, Y. C., Ziviani, A., Cerqueira e Costa, M. D. O., Cavalcanti, M. C. R., Nicolás, M. F., & de Vasconcelos, A. T. R. (2023). PPIntegrator: semantic integrative system for protein–protein interaction and application for host–pathogen datasets. Bioinformatics Advances, 3(1), vbad067.\r +\r +## Bug Report\r +Please, use the [Issues](https://github.com/YasCoMa/ppintegrator/issues) tab to report any bug.""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/618?version=1" ; + schema1:isBasedOn "https://github.com/YasCoMa/ppintegrator" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PPIntegrator - PPI Triplification Process" ; + schema1:sdDatePublished "2024-07-12 13:27:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/618/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 81252 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 28124 ; + schema1:creator ; + schema1:dateCreated "2023-10-21T23:56:39Z" ; + schema1:dateModified "2023-10-21T23:56:39Z" ; + schema1:description """## Summary\r +\r +This pipeline has as major goal provide a tool for protein interactions (PPI) prediction data formalization and standardization using the [OntoPPI](https://link.springer.com/chapter/10.1007/978-3-030-36599-8_23) ontology. 
This pipeline is splitted in two parts: (i) a part to prepare data from three main sources of PPI data ([HINT](http://hint.yulab.org/), [STRING](https://string-db.org/) and [PredPrin](https://github.com/YasCoMa/PredPrin.git)) and create the standard files to be processed by the next part; (ii) the second part uses the data prepared before to semantically describe using ontologies related to the concepts of this domain. It describes the provenance information of PPI prediction experiments, datasets characteristics, functional annotations of proteins involved in the PPIs, description of the PPI detection methods (also named as evidence) used in the experiment, and the prediction score obtained by each PPI detection method for the PPIs. This pipeline also execute data fusion to map the same protein pairs from different data sources and, finally, it creates a database of all these information in the [alegro](https://allegrograph.com/) graph triplestore.\r +\r +## Requirements:\r +* Python packages needed:\r + - pip3 install numpy\r + - pip3 install rdflib\r + - pip3 install uuid\r + - pip3 install SPARQLWrapper\r + - alegro graph tools (pip3 install agraph-python)
\r + Go to this [site](https://franz.com/agraph/support/documentation/current/python/install.html) for the installation tutorial\r +\r +## Usage Instructions\r +### Preparation:\r +1. ````git clone https://github.com/YasCoMa/ppintegrator.git````\r +2. ````cd ppintegrator````\r +3. `pip3 install -r requirements.txt`\r +**Allegrograph is a triple store, which is a database to maintain semantic descriptions. This database's server provides a web application with a user interface to run, edit and manage queries, visualize results and manipulate the data without writing codes other than SPARQL query language. The use of the Allegregraph option is not mandatory, but if you want to export and use it, you have to install the server and the client.**\r +4. if you want to use the Allegrograph server option (this triple store has free license up to 5,000,000 triples), install allegrograph server in your machine (configure a user and password): Server - https://franz.com/agraph/support/documentation/current/server-installation.html; Client - https://franz.com/agraph/support/documentation/current/python/install.html\r +5. Export the following environment variables to configure Allegrograph server\r +\r +````\r +export AGRAPH_HOST=127.0.0.1\r +export AGRAPH_PORT=10035\r +export AGRAPH_USER=chosen_user\r +export AGRAPH_PASSWORD=chosen_password\r +````\r +5. Start allegrograph: ````path/to/allegrograph/bin/agraph-control --config path/to/allegrograph/lib/agraph.cfg start````\r +6. Read the file data_requirements.txt to understand which files are needed for the process\r +\r +### Data preparation (first part) - File ````prepare_data_triplification.py```` :\r +* Pipeline parameters:\r + - __-rt__ or __--running_type__
\r + Use to indicate from which source you want to prepare PPI data, as follows:
\r + 1 - Prepare data for PredPrin
\r + 2 - Prepare data for String
\r + 3 - Prepare data for HINT\r + - __-fec__ or __--file_experiment_config__
\r + File with the experiment configuration in json format
\r + \r + Examples are in these files (all the metadata are required): params_hint.json, params_predrep_5k.json e params_string.json\r +\r + - __-org__ or __--organism__
\r + Prepare data only for one organism of interest (example: homo_sapiens)
\r +\r + This parameter is optional. If you do not specify, it will automatically use the organisms described in the experiment configuration file above\r +\r +\r +* Running modes examples:\r + 1. Running for PPI data generated by PredPrin:
\r + ````python3 prepare_data_triplification.py -rt 1 -fec params_predrep_5k.json````\r +\r + 2. Running for HINT database:
\r + ````python3 prepare_data_triplification.py -rt 3 -fec params_hint.json````\r +\r + 3. Running for STRING database:
\r + ````python3 prepare_data_triplification.py -rt 2 -fec params_string.json````\r +\r + In the file ````auxiliar_data_preparation.py```` you can run it for all the examples provided automatically, as follows:
\r + ````python3 auxiliar_data_preparation.py````\r +\r +\r +### PPI data triplification (second part) - File ````triplification_ppi_data.py````:\r +\r +* Pipeline parameters:\r + - __-rt__ or __--running_type__
\r + Use to indicate which execution step you want to run (it is desirable following the order showed):
\r + 0 - Generate the descriptions for all the protein interaction steps of an experiment (run steps 1, 2 and 3)
\r + 1 - Generate triples just about data provenance
\r + 2 - Generate triples just for protein functional annotations
\r + 3 - Generate triples just for the score results of each evidence
\r + 4 - Execute data fusion
\r + 5 - Generate descriptions and execute data fusion (run steps 1, 2, 3 and 4)
\r + 6 - Export to allegrograph server\r +\r + - __-fec__ or __--file_experiment_config__
\r + File with the experiment configuration in json format
\r + \r + Examples are in these files (all the metadata are required): params_hint.json, params_predrep_5k.json e params_string.json\r +\r + - __-fev__ or __--file_evidence_info__
\r + File with the PPI detection methods information in json format
\r + \r + Examples are in these files (all the metadata are required): evidences_information.json, evidences_information_hint.json e evidences_information_string.json\r +\r + - __-fcv__ or __--file_config_evidence__
\r + File with the experiment and evidence methods files addresses in tsv format
\r + \r + Example of this file: config_evidence_file.tsv\r +\r +* Running modes examples:\r + 1. Running to generate all semantic descriptions for PredPrin:
\r + ````python3 triplification_ppi_data.py -rt 0 -fec params_predrep_5k.json -fev evidences_information.json````\r +\r + 2. Running to generate only triples of data provenance:
\r + ````python3 triplification_ppi_data.py -rt 1 -fec params_hint.json -fev evidences_information_hint.json````\r +\r + 3. Running to generate only triples of PPI scores for each evidence:
\r + ````python3 triplification_ppi_data.py -rt 3 -fec params_hint.json -fev evidences_information_hint.json````\r +\r + 4. Running to generate only triples of protein functional annotations (only PredPrin exports these annotations):
\r + ````python3 triplification_ppi_data.py -rt 2 -fec params_predrep_5k.json -fev evidences_information.json````\r +\r + 5. Running to generate all semantic descrptions for STRING:
\r + ````python3 triplification_ppi_data.py -rt 0 -fec params_string.json -fev evidences_information_string.json````\r + \r + **For the next options (4, 5 and 6), it is mandatory running at least mode 1 and 3 for HINT, STRING and PredPrin**\r + \r + 6. Running to execute data fusion of different sources:
\r + ````python3 triplification_ppi_data.py -rt 4 -fcv config_evidence_file.tsv````\r +\r + 7. Running to generate all semantic descriptions and execute data fusion of different sources (combines mode 0 and 4):
\r + ````python3 triplification_ppi_data.py -rt 5 -fcv config_evidence_file.tsv````\r +\r + 8. Export semantic data to allegrograph server:
\r + ````python3 triplification_ppi_data.py -rt 6 -fcv config_evidence_file.tsv````\r +\r +## Query Scenarios for analysis\r +Supposing you ran all the steps showed in the section above, you can run the following options to analyse the data stored alegro graph triple store.
\r +File to use for this section: ````query_analysis_ppitriplificator.py````
\r +\r +* Parameter:\r + - __-q__ or __--query_option__
\r + Use to indicate which query you want to perform:
\r + 1 - Get all the different organisms whose interactions are stored in the database
\r + 2 - Get the interactions that have scientific papers associated and the list of these papers
\r + 3 - Get a list of the most frequent biological processes annotated for the interactions of Escherichia coli bacteria
\r + 4 - Get only the interactions belonging to a specific biological process (regulation of transcription, DNA-templated) in Escherichia coli bacteria
\r + 5 - Get the scores of interactions belonging to a specific biological process (regulation of transcription, DNA-templated) in Escherichia coli bacteria
\r + 6 - Get a list of the most frequent biological processes annotated for the interactions of human organism
\r + 7 - Get only the interactions belonging to a specific biological process (positive regulation of transcription by RNA polymerase II) in human organism
\r + 8 - Get the scores of interactions belonging to a specific biological process (positive regulation of transcription by RNA polymerase II) in human organism\r +\r +* Running modes examples:\r + 1. Running queries:
\r + ````python3 query_analysis_ppitriplificator.py -q 1 ````
\r + Change number 1 to the respective number of the query you want to perform\r +\r +## Reference\r +Martins, Y. C., Ziviani, A., Cerqueira e Costa, M. D. O., Cavalcanti, M. C. R., Nicolás, M. F., & de Vasconcelos, A. T. R. (2023). PPIntegrator: semantic integrative system for protein–protein interaction and application for host–pathogen datasets. Bioinformatics Advances, 3(1), vbad067.\r +\r +## Bug Report\r +Please, use the [Issues](https://github.com/YasCoMa/ppintegrator/issues) tab to report any bug.""" ; + schema1:image ; + schema1:keywords "protein interactin data triplification, protein interactions database integration, data fusion, data annotation" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "PPIntegrator - PPI Triplification Process" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/618?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2023-10-17T19:52:22.592351" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "atacseq/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.12" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. 
Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-protein-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.277.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/277/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 55159 ; + schema1:creator , + ; + schema1:dateCreated "2023-05-03T13:41:05Z" ; + schema1:dateModified "2023-05-03T13:43:16Z" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-protein-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/277?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage 
; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_md_setup/galaxy/biobb_wf_md_setup.ga" ; + schema1:version 3 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/972?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/circdna" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/circdna" ; + schema1:sdDatePublished "2024-07-12 13:22:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/972/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10166 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:45Z" ; + schema1:dateModified "2024-06-11T12:54:45Z" ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/972?version=7" ; + schema1:keywords "ampliconarchitect, ampliconsuite, circle-seq, circular, DNA, eccdna, ecdna, extrachromosomal-circular-dna, Genomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/circdna" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/972?version=6" ; + schema1:version 6 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.54.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_ligand_parameterization" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter GMX Notebook Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished 
"2024-07-12 13:24:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/54/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15271 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-09-15T10:59:36Z" ; + schema1:dateModified "2023-01-16T13:44:50Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/54?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter GMX Notebook Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_ligand_parameterization/master/biobb_wf_ligand_parameterization/notebooks/biobb_ligand_parameterization_tutorial.ipynb" ; + schema1:version 3 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-27T09:48:18.733544" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "The pangenome graph construction pipeline renders a collection of sequences into a pangenome graph. Its goal is to build a graph that is locally directed and acyclic while preserving large-scale variation. Maintaining local linearity is important for interpretation, visualization, mapping, comparative genomics, and reuse of pangenome graphs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1007?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/pangenome" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/pangenome" ; + schema1:sdDatePublished "2024-07-12 13:20:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1007/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11520 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:07Z" ; + schema1:dateModified "2024-06-11T12:55:07Z" ; + schema1:description "The pangenome graph construction pipeline renders a collection of sequences into a pangenome graph. Its goal is to build a graph that is locally directed and acyclic while preserving large-scale variation. Maintaining local linearity is important for interpretation, visualization, mapping, comparative genomics, and reuse of pangenome graphs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1007?version=3" ; + schema1:keywords "pangenome" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/pangenome" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1007?version=4" ; + schema1:version 4 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-07-12 13:18:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6562 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:02Z" ; + schema1:dateModified "2024-06-11T12:55:02Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# BatchConvert\r +\r +A command line tool for converting image data into either of the standard file formats OME-TIFF or OME-Zarr. \r +\r +The tool wraps the dedicated file converters bfconvert and bioformats2raw to convert into OME-TIFF or OME-Zarr,\r +respectively. The workflow management system NextFlow is used to perform conversion in parallel for batches of images. 
\r +\r +The tool also wraps s3 and Aspera clients (go-mc and aspera-cli, respectively). Therefore, input and output locations can \r +be specified as local or remote storage and file transfer will be performed automatically. The conversion can be run on \r +HPC with Slurm. \r +\r +![](figures/diagram.png)\r +\r +## Installation & Dependencies\r +\r +**Important** note: The package has been so far only tested on Ubuntu 20.04.\r +\r +The minimal dependency to run the tool is NextFlow, which should be installed and made accessible from the command line.\r +\r +If conda exists on your system, you can install BatchConvert together with NextFlow using the following script:\r +```\r +git clone https://github.com/Euro-BioImaging/BatchConvert.git && \\ \r +source BatchConvert/installation/install_with_nextflow.sh\r +```\r +\r +\r +If you already have NextFlow installed and accessible from the command line (or if you prefer to install it manually \r +e.g., as shown [here](https://www.nextflow.io/docs/latest/getstarted.html)), you can also install BatchConvert alone, using the following script:\r +```\r +git clone https://github.com/Euro-BioImaging/BatchConvert.git && \\ \r +source BatchConvert/installation/install.sh\r +```\r +\r +\r +Other dependencies (which will be **automatically** installed):\r +- bioformats2raw (entrypoint bioformats2raw)\r +- bftools (entrypoint bfconvert)\r +- go-mc (entrypoint mc)\r +- aspera-cli (entrypoint ascp)\r +\r +These dependencies will be pulled and cached automatically at the first execution of the conversion command. \r +The mode of dependency management can be specified by using the command line option ``--profile`` or `-pf`. Depending \r +on how this option is specified, the dependencies will be acquired / run either via conda or via docker/singularity containers. \r +\r +Specifying ``--profile conda`` (default) will install the dependencies to an \r +environment at ``./.condaCache`` and use this environment to run the workflow. 
This option \r +requires that miniconda/anaconda is installed on your system. \r +\r +Alternatively, specifying ``--profile docker`` or ``--profile singularity`` will pull a docker or \r +singularity image with the dependencies, respectively, and use this image to run the workflow.\r +These options assume that the respective container runtime (docker or singularity) is available on \r +your system. If singularity is being used, a cache directory will be created at the path \r +``./.singularityCache`` where the singularity image is stored. \r +\r +Finally, you can still choose to install the dependencies manually and use your own installations to run\r +the workflow. In this case, you should specify ``--profile standard`` and make sure the entrypoints\r +specified above are recognised by your shell. \r +\r +\r +## Configuration\r +\r +BatchConvert can be configured to have default options for file conversion and transfer. Probably, the most important sets of parameters\r +to be configured include credentials for the remote ends. The easiest way to configure remote stores is by running the interactive \r +configuration command as indicated below.\r +\r +### Configuration of the s3 object store\r +\r +Run the interactive configuration command: \r +\r +`batchconvert configure_s3_remote`\r +\r +This will start a sequence of requests for s3 credentials such as name, url, access, etc. Provide each requested credential and click\r +enter. Continue this cycle until the process is finished. 
Upon completing the configuration, the sequence of commands should roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_s3_remote\r +enter remote name (for example s3)\r +s3\r +enter url:\r +https://s3.embl.de\r +enter access key:\r +\r +enter secret key:\r +\r +enter bucket name:\r +\r +Configuration of the default s3 credentials is complete\r +```\r +\r +\r +### Configuration of the BioStudies user space\r +\r +Run the interactive configuration command: \r +\r +`batchconvert configure_bia_remote`\r +\r +This will prompt a request for the secret directory to connect to. Enter the secret directory for your user space and click enter. \r +Upon completing the configuration, the sequence of commands should roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_bia_remote\r +enter the secret directory for BioImage Archive user space:\r +\r +configuration of the default bia credentials is complete\r +```\r +\r +### Configuration of the slurm options\r +\r +BatchConvert can also run on slurm clusters. In order to configure the slurm parameters, run the interactive configuration command: \r +\r +`batchconvert configure_slurm`\r +\r +This will start a sequence of requests for slurm options. Provide each requested option and click enter. \r +Continue this cycle until the process is finished. 
Upon completing the configuration, the sequence of commands should \r +roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_slurm\r +Please enter value for queue_size\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´50´\r +s\r +Please enter value for submit_rate_limit\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´10/2min´\r +s\r +Please enter value for cluster_options\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´--mem-per-cpu=3140 --cpus-per-task=16´\r +s\r +Please enter value for time\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´6h´\r +s\r +configuration of the default slurm parameters is complete\r +```\r +\r +### Configuration of the default conversion parameters\r +\r +While all conversion parameters can be specified as command line arguments, it can\r +be useful for the users to set their own default parameters to avoid re-entering those\r +parameters for subsequent executions. BatchConvert allows for interactive configuration of \r +conversion in the same way as configuration of the remote stores described above.\r +\r +To configure the conversion into OME-TIFF, run the following command:\r +\r +`batchconvert configure_ometiff`\r +\r +This will prompt the user to enter a series of parameters, which will then be saved as the \r +default parameters to be passed to the `batchconvert ometiff` command. 
Upon completing the \r +configuration, the sequence of commands should look similar to:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_ometiff\r +Please enter value for noflat\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for series\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for timepoint\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +...\r +...\r +...\r +...\r +...\r +...\r +Configuration of the default parameters for 'bfconvert' is complete\r +```\r +\r +\r +To configure the conversion into OME-Zarr, run the following command:\r +\r +`batchconvert configure_omezarr`\r +\r +Similarly, this will prompt the user to enter a series of parameters, which will then be saved as the \r +default parameters to be passed to the `batchconvert omezarr` command. 
Upon completing the configuration, \r +the sequence of commands should look similar to:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_omezarr\r +Please enter value for resolutions_zarr\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for chunk_h\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for chunk_w\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +...\r +...\r +...\r +...\r +...\r +...\r +Configuration of the default parameters for 'bioformats2raw' is complete\r +```\r +\r +It is important to note that the initial defaults for the conversion parameters are the same as the defaults\r +of the backend tools bfconvert and bioformats2raw, as noted in the prompt excerpt above. Through interactive configuration, \r +the user is overriding these initial defaults and setting their own defaults. It is possible to reset the initial\r +defaults by running the following command.\r +\r +`batchconvert reset_defaults`\r +\r +Another important point is that any of these configured parameters can be overridden by passing a value to that\r +parameter in the commandline. For instance, in the following command, the value of 20 will be assigned to `chunk_h` parameter \r +even if the value for the same parameter might be different in the configuration file. \r +\r +`batchconvert omezarr --chunk_h 20 `\r +\r +\r +## Examples\r +\r +### Local conversion\r +\r +#### Parallel conversion of files to separate OME-TIFFs / OME-Zarrs:\r +Convert a batch of images on your local storage into OME-TIFF format. 
\r +Note that the `input_path` in the command given below is typically a \r +directory with multiple image files but a single image file can also be passed:\\\r +`batchconvert ometiff -pf conda ` \r +\r +Note that if this is your first conversion with the profile `conda`, \r +it will take a while for a conda environment with the dependencies to be\r +created. All the subsequent conversion commands with the profile `conda`,\r +however, will use this environment, and thus show no such delay.\r +\r +Since conda is the default profile, it does not have to be \r +explicitly included in the command line. Thus, the command can be shortened to:\\\r +`batchconvert ometiff `\r +\r +Convert only the first channel of the images:\\\r +`batchconvert ometiff -chn 0 `\r +\r +Crop the images being converted along x and y axis by 150 pixels:\\\r +`batchconvert ometiff -cr 0,0,150,150 `\r +\r +Convert into OME-Zarr instead:\\\r +`batchconvert omezarr `\r +\r +Convert into OME-Zarr with 3 resolution levels:\\\r +`batchconvert omezarr -rz 3 `\r +\r +Select a subset of images with a matching string such as "mutation":\\\r +`batchconvert omezarr -p mutation `\r +\r +Select a subset of images using wildcards. Note that the use of "" around \r +the input path is necessary when using wildcards:\\\r +`batchconvert omezarr "/*D3*.oir" `\r +\r +Convert by using a singularity container instead of conda environment (requires\r +singularity to be installed on your system):\\\r +`batchconvert omezarr -pf singularity "/*D3*.oir" `\r +\r +Convert by using a docker container instead of conda environment (requires docker\r +to be installed on your system):\\\r +`batchconvert omezarr -pf docker "/*D3*.oir" `\r +\r +Note that similarly to the case with the profile `conda`, the first execution of\r +a conversion with the profile `singularity` or `docker` will take a while for the\r +container image to be pulled. 
All the subsequent conversion commands using a \r +container option will use this image, and thus show no such delay. \r +\r +Convert local data and upload the output to an s3 bucket. Note that the output \r +path is created relative to the bucket specified in your s3 configuration:\\\r +`batchconvert omezarr -dt s3 `\r +\r +Receive input files from an s3 bucket, convert locally and upload the output to \r +the same bucket. Note that wildcards cannot be used when the input is from s3. \r +Use pattern matching option `-p` for selecting a subset of input files:\\\r +`batchconvert omezarr -p mutation -st s3 -dt s3 `\r +\r +Receive input files from your private BioStudies user space and convert them locally.\r +Use pattern matching option `-p` for selecting a subset of input files:\\\r +`batchconvert omezarr -p mutation -st bia `\r +\r +Receive an input from an s3 bucket, convert locally and upload the output to your \r +private BioStudies user space. Use pattern matching option `-p` for selecting a subset \r +of input files:\\\r +`batchconvert omezarr -p mutation -st s3 -dt bia `\r +\r +Note that in all the examples shown above, BatchConvert treats each input file as separate,\r +standalone data point, disregarding the possibility that some of the input files might belong to \r +the same multidimensional array. Thus, each input file is converted to an independent \r +OME-TIFF / OME-Zarr and the number of outputs will thus equal the number of selected input files.\r +An alternative scenario is discussed below.\r +\r +#### Parallel conversion of file groups by stacking multiple files into single OME-TIFFs / OME-Zarrs:\r +\r +When the flag `--merge_files` is specified, BatchConvert tries to detect which input files might \r +belong to the same multidimensional array based on the patterns in the filenames. 
Then a "grouped conversion" \r +is performed, meaning that the files belonging to the same dataset will be incorporated into \r +a single OME-TIFF / OME-Zarr series, in that files will be concatenated along specific dimension(s) \r +during the conversion. Multiple file groups in the input directory can be detected and converted \r +in parallel. \r +\r +This feature uses Bio-Formats's pattern files as described [here](https://docs.openmicroscopy.org/bio-formats/6.6.0/formats/pattern-file.html).\r +However, BatchConvert generates pattern files automatically, allowing the user to directly use the \r +input directory in the conversion command. BatchConvert also has the option of specifying the \r +concatenation axes in the command line, which is especially useful in cases where the filenames \r +may not contain dimension information. \r +\r +To be able to use the `--merge files` flag, the input file names must obey certain rules:\r +1. File names in the same group must be uniform, except for one or more **numeric field(s)**, which\r +should show incremental change across the files. These so-called **variable fields** \r +will be detected and used as the dimension(s) of concatenation.\r +2. The length of variable fields must be uniform within the group. For instance, if the\r +variable field has values reaching multi-digit numbers, leading "0"s should be included where needed \r +in the file names to make the variable field length uniform within the group.\r +3. Typically, each variable field should follow a dimension specifier. What patterns can be used as \r +dimension specifiers are explained [here](https://docs.openmicroscopy.org/bio-formats/6.6.0/formats/pattern-file.html).\r +However, BatchConvert also has the option `--concatenation_order`, which allows the user to\r +specify from the command line, the dimension(s), along which the files must be concatenated.\r +4. 
File names that are unique and cannot be associated with any group will be assumed as\r +standalone images and converted accordingly. \r +\r +Below are some examples of grouped conversion commands in the context of different possible use-case scenarios:\r +\r +**Example 1:**\r +\r +This is an example of a folder with non-uniform filename lengths:\r +```\r +time-series/test_img_T2\r +time-series/test_img_T4\r +time-series/test_img_T6\r +time-series/test_img_T8\r +time-series/test_img_T10\r +time-series/test_img_T12\r +```\r +In this example, leading zeroes are missing in the variable fields of some filenames. \r +A typical command to convert this folder to a single OME-TIFF would look like: \\\r +`batchconvert --ometiff --merge_files /time-series `\r +\r +However, this command would fail to create a single OME-Zarr folder due to the non-uniform \r +lengths of the filenames. Instead, the files would be split into two groups based on the\r +filename length, leading to two separate OME-Zarrs with names:\r +\r +`test_img_TRange{2-8-2}.ome.zarr` and `test_img_TRange{10-12-2}.ome.zarr`\r +\r +Here is the corrected version of the folder for the above example-\r +```\r +time-series/test_img_T02\r +time-series/test_img_T04\r +time-series/test_img_T06\r +time-series/test_img_T08\r +time-series/test_img_T10\r +time-series/test_img_T12\r +```\r +\r +Executing the same command on this folder would result in a single OME-Zarr with the name:\r +`test_img_TRange{02-12-2}.ome.zarr`\r +\r +**Example 2**- \r +\r +In this example, the filename lengths are uniform but the incrementation within the variable field is not.\r +```\r +time-series/test_img_T2\r +time-series/test_img_T4\r +time-series/test_img_T5\r +time-series/test_img_T7\r +```\r +\r +A typical command to convert this folder to a single OME-Zarr would look like: \\\r +`batchconvert --omezarr --merge_files /time-series `\r +\r +However, the command would fail to assume these files as a single group due to the\r +non-uniform 
incrementation in the variable field of the filenames. Instead, the dataset \r +would be split into two groups, leading to two separate OME-Zarrs with the following names:\r +`test_img_TRange{2-4-2}.ome.zarr` and `test_img_TRange{5-7-2}.ome.zarr` \r +\r +\r +**Example 3**\r +\r +This is an example of a case where the conversion attempts to concatenate files along two\r +dimensions, channel and time.\r +```\r +multichannel_time-series/test_img_C1-T1\r +multichannel_time-series/test_img_C1-T2\r +multichannel_time-series/test_img_C1-T3\r +multichannel_time-series/test_img_C2-T1\r +multichannel_time-series/test_img_C2-T2\r +```\r +To convert this folder to a single OME-Zarr, one could try the following command: \\\r +`batchconvert --omezarr --merge_files /multichannel_time-series `\r +\r +However, since the channel-2 does not have the same number of timeframes as the channel-1, \r +BatchConvert will fail to assume these two channels as part of the same series and\r +will instead split the two channels into two separate OME-Zarrs. 
\r +\r +The output would look like: \\\r +`test_img_C1-TRange{1-3-1}.ome.zarr` \\\r +`test_img_C2-TRange{1-2-1}.ome.zarr`\r +\r +To be able to really incorporate all files into a single OME-Zarr, the folder should have equal\r +number of images corresponding to both channels, as shown below:\r +```\r +multichannel_time-series/test_img_C1-T1\r +multichannel_time-series/test_img_C1-T2\r +multichannel_time-series/test_img_C1-T3\r +multichannel_time-series/test_img_C2-T1\r +multichannel_time-series/test_img_C2-T2\r +multichannel_time-series/test_img_C2-T3\r +```\r +The same conversion command on this version of the input folder would result in a single \r +OME-Zarr with the name: \\\r +`test_img_CRange{1-2-1}-TRange{1-3-1}.ome.zarr`\r +\r +\r +**Example 4**\r +\r +This is another example of a case, where there are multiple filename patterns in the input folder.\r +\r +```\r +folder_with_multiple_groups/test_img_C1-T1\r +folder_with_multiple_groups/test_img_C1-T2\r +folder_with_multiple_groups/test_img_C2-T1\r +folder_with_multiple_groups/test_img_C2-T2\r +folder_with_multiple_groups/test_img_T1-Z1\r +folder_with_multiple_groups/test_img_T1-Z2\r +folder_with_multiple_groups/test_img_T1-Z3\r +folder_with_multiple_groups/test_img_T2-Z1\r +folder_with_multiple_groups/test_img_T2-Z2\r +folder_with_multiple_groups/test_img_T2-Z3\r +```\r +\r +One can convert this folder with- \\\r +`batchconvert --omezarr --merge_files /folder_with_multiple_groups `\r + \r +BatchConvert will detect the two patterns in this folder and perform two grouped conversions. \r +The output folders will be named as `test_img_CRange{1-2-1}-TRange{1-2-1}.ome.zarr` and \r +`test_img_TRange{1-2-1}-ZRange{1-3-1}.ome.zarr`. 
\r +\r +\r +**Example 5**\r +\r +Now imagine that we have the same files as in the example 4 but the filenames of the\r +first group lack any dimension specifier, so we have the following folder:\r +\r +```\r +folder_with_multiple_groups/test_img_1-1\r +folder_with_multiple_groups/test_img_1-2\r +folder_with_multiple_groups/test_img_2-1\r +folder_with_multiple_groups/test_img_2-2\r +folder_with_multiple_groups/test_img_T1-Z1\r +folder_with_multiple_groups/test_img_T1-Z2\r +folder_with_multiple_groups/test_img_T1-Z3\r +folder_with_multiple_groups/test_img_T2-Z1\r +folder_with_multiple_groups/test_img_T2-Z2\r +folder_with_multiple_groups/test_img_T2-Z3\r +```\r +\r +In such a scenario, BatchConvert allows the user to specify the concatenation axes \r +via `--concatenation_order` option. This option expects comma-separated strings of dimensions \r +for each group. In this example, the user must provide a string of 2 characters, such as `ct` for \r +channel and time, for group 1, since there are two variable fields for this group. Since group 2 \r +already has dimension specifiers (T and Z as specified in the filenames preceding the variable fields),\r +the user does not need to specify anything for this group, and can enter `auto` or `aa` for automatic\r +detection of the specifiers. \r +\r +So the following line can be used to convert this folder: \\\r +`batchconvert --omezarr --merge_files --concatenation_order ct,aa /folder_with_multiple_groups `\r +\r +The resulting OME-Zarrs will have the names:\r +`test_img_CRange{1-2-1}-TRange{1-2-1}.ome.zarr` and\r +`test_img_TRange{1-2-1}-ZRange{1-3-1}.ome.zarr`\r +\r +Note that `--concatenation_order` will override any dimension specifiers already\r +existing in the filenames.\r +\r +\r +**Example 6**\r +\r +There can be scenarios where the user may want to have further control over the axes along \r +which to concatenate the images. 
For example, the filenames might contain the data acquisition\r +date, which can be recognised by BatchConvert as a concatenation axis in the automatic \r +detection mode. An example of such a fileset might look like:\r +\r +```\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T1\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T2\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T3\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T1\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T2\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T3\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T1\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T2\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T3\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T1\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T2\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T3\r +```\r +\r +One may try the following command to convert this folder:\r +\r +`batchconvert --omezarr --merge_files /filenames_with_dates `\r +\r +Since the concatenation axes are not specified, this command would try to create\r +a single OME-Zarr with name: `test_data_dateRange{03-04-1}.03.2023_imageZRange{1-2-1}-TRange{1-3-1}`.\r +\r +In order to force BatchConvert to ignore the date field, the user can restrict the concatenation \r +axes to the last two numeric fields. This can be done by using a command such as: \\\r +`batchconvert --omezarr --merge_files --concatenation_order aa /filenames_with_dates ` \\\r +This command will avoid concatenation along the date field, and therefore, there will be two\r +OME-Zarrs corresponding to the two dates. The number of characters being passed to the \r +`--concatenation_order` option specifies the number of numeric fields (starting from the right \r +end of the filename) that are recognised by the BatchConvert as valid concatenation axes. 
\r +Passing `aa`, therefore, means that the last two numeric fields must be recognised as \r +concatenation axes and the dimension type should be automatically detected (`a` for automatic). \r +In the same logic, one could, for example, convert each Z section into a separate OME-Zarr by \r +specifying `--concatenation_order a`.\r +\r +\r +\r +### Conversion on slurm\r +\r +All the examples given above can also be run on slurm by specifying `-pf cluster` option. \r +Note that this option automatically uses the singularity profile:\\\r +`batchconvert omezarr -pf cluster -p .oir `\r +\r +\r +\r +\r +\r +\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.453.2" ; + schema1:isBasedOn "https://github.com/Euro-BioImaging/BatchConvert.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for BatchConvert" ; + schema1:sdDatePublished "2024-07-12 13:32:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/453/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 89 ; + schema1:creator ; + schema1:dateCreated "2023-04-27T11:21:54Z" ; + schema1:dateModified "2023-07-03T11:04:19Z" ; + schema1:description """# BatchConvert\r +\r +A command line tool for converting image data into either of the standard file formats OME-TIFF or OME-Zarr. \r +\r +The tool wraps the dedicated file converters bfconvert and bioformats2raw to convert into OME-TIFF or OME-Zarr,\r +respectively. The workflow management system NextFlow is used to perform conversion in parallel for batches of images. \r +\r +The tool also wraps s3 and Aspera clients (go-mc and aspera-cli, respectively). Therefore, input and output locations can \r +be specified as local or remote storage and file transfer will be performed automatically. The conversion can be run on \r +HPC with Slurm. 
\r +\r +![](figures/diagram.png)\r +\r +## Installation & Dependencies\r +\r +**Important** note: The package has been so far only tested on Ubuntu 20.04.\r +\r +The minimal dependency to run the tool is NextFlow, which should be installed and made accessible from the command line.\r +\r +If conda exists on your system, you can install BatchConvert together with NextFlow using the following script:\r +```\r +git clone https://github.com/Euro-BioImaging/BatchConvert.git && \\ \r +source BatchConvert/installation/install_with_nextflow.sh\r +```\r +\r +\r +If you already have NextFlow installed and accessible from the command line (or if you prefer to install it manually \r +e.g., as shown [here](https://www.nextflow.io/docs/latest/getstarted.html)), you can also install BatchConvert alone, using the following script:\r +```\r +git clone https://github.com/Euro-BioImaging/BatchConvert.git && \\ \r +source BatchConvert/installation/install.sh\r +```\r +\r +\r +Other dependencies (which will be **automatically** installed):\r +- bioformats2raw (entrypoint bioformats2raw)\r +- bftools (entrypoint bfconvert)\r +- go-mc (entrypoint mc)\r +- aspera-cli (entrypoint ascp)\r +\r +These dependencies will be pulled and cached automatically at the first execution of the conversion command. \r +The mode of dependency management can be specified by using the command line option ``--profile`` or `-pf`. Depending \r +on how this option is specified, the dependencies will be acquired / run either via conda or via docker/singularity containers. \r +\r +Specifying ``--profile conda`` (default) will install the dependencies to an \r +environment at ``./.condaCache`` and use this environment to run the workflow. This option \r +requires that miniconda/anaconda is installed on your system. 
\r +\r +Alternatively, specifying ``--profile docker`` or ``--profile singularity`` will pull a docker or \r +singularity image with the dependencies, respectively, and use this image to run the workflow.\r +These options assume that the respective container runtime (docker or singularity) is available on \r +your system. If singularity is being used, a cache directory will be created at the path \r +``./.singularityCache`` where the singularity image is stored. \r +\r +Finally, you can still choose to install the dependencies manually and use your own installations to run\r +the workflow. In this case, you should specify ``--profile standard`` and make sure the entrypoints\r +specified above are recognised by your shell. \r +\r +\r +## Configuration\r +\r +BatchConvert can be configured to have default options for file conversion and transfer. Probably, the most important sets of parameters\r +to be configured include credentials for the remote ends. The easiest way to configure remote stores is by running the interactive \r +configuration command as indicated below.\r +\r +### Configuration of the s3 object store\r +\r +Run the interactive configuration command: \r +\r +`batchconvert configure_s3_remote`\r +\r +This will start a sequence of requests for s3 credentials such as name, url, access, etc. Provide each requested credential and click\r +enter. Continue this cycle until the process is finished. 
Upon completing the configuration, the sequence of commands should roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_s3_remote\r +enter remote name (for example s3)\r +s3\r +enter url:\r +https://s3.embl.de\r +enter access key:\r +\r +enter secret key:\r +\r +enter bucket name:\r +\r +Configuration of the default s3 credentials is complete\r +```\r +\r +\r +### Configuration of the BioStudies user space\r +\r +Run the interactive configuration command: \r +\r +`batchconvert configure_bia_remote`\r +\r +This will prompt a request for the secret directory to connect to. Enter the secret directory for your user space and click enter. \r +Upon completing the configuration, the sequence of commands should roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_bia_remote\r +enter the secret directory for BioImage Archive user space:\r +\r +configuration of the default bia credentials is complete\r +```\r +\r +### Configuration of the slurm options\r +\r +BatchConvert can also run on slurm clusters. In order to configure the slurm parameters, run the interactive configuration command: \r +\r +`batchconvert configure_slurm`\r +\r +This will start a sequence of requests for slurm options. Provide each requested option and click enter. \r +Continue this cycle until the process is finished. 
Upon completing the configuration, the sequence of commands should \r +roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_slurm\r +Please enter value for queue_size\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´50´\r +s\r +Please enter value for submit_rate_limit\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´10/2min´\r +s\r +Please enter value for cluster_options\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´--mem-per-cpu=3140 --cpus-per-task=16´\r +s\r +Please enter value for time\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´6h´\r +s\r +configuration of the default slurm parameters is complete\r +```\r +\r +### Configuration of the default conversion parameters\r +\r +While all conversion parameters can be specified as command line arguments, it can\r +be useful for the users to set their own default parameters to avoid re-entering those\r +parameters for subsequent executions. BatchConvert allows for interactive configuration of \r +conversion in the same way as configuration of the remote stores described above.\r +\r +To configure the conversion into OME-TIFF, run the following command:\r +\r +`batchconvert configure_ometiff`\r +\r +This will prompt the user to enter a series of parameters, which will then be saved as the \r +default parameters to be passed to the `batchconvert ometiff` command. 
Upon completing the \r +configuration, the sequence of commands should look similar to:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_ometiff\r +Please enter value for noflat\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for series\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for timepoint\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +...\r +...\r +...\r +...\r +...\r +...\r +Configuration of the default parameters for 'bfconvert' is complete\r +```\r +\r +\r +To configure the conversion into OME-Zarr, run the following command:\r +\r +`batchconvert configure_omezarr`\r +\r +Similarly, this will prompt the user to enter a series of parameters, which will then be saved as the \r +default parameters to be passed to the `batchconvert omezarr` command. 
Upon completing the configuration, \r +the sequence of commands should look similar to:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_omezarr\r +Please enter value for resolutions_zarr\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for chunk_h\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for chunk_w\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +...\r +...\r +...\r +...\r +...\r +...\r +Configuration of the default parameters for 'bioformats2raw' is complete\r +```\r +\r +It is important to note that the initial defaults for the conversion parameters are the same as the defaults\r +of the backend tools bfconvert and bioformats2raw, as noted in the prompt excerpt above. Through interactive configuration, \r +the user is overriding these initial defaults and setting their own defaults. It is possible to reset the initial\r +defaults by running the following command.\r +\r +`batchconvert reset_defaults`\r +\r +Another important point is that any of these configured parameters can be overridden by passing a value to that\r +parameter in the commandline. For instance, in the following command, the value of 20 will be assigned to `chunk_h` parameter \r +even if the value for the same parameter might be different in the configuration file. \r +\r +`batchconvert omezarr --chunk_h 20 `\r +\r +\r +## Examples\r +\r +### Local conversion\r +\r +#### Parallel conversion of files to separate OME-TIFFs / OME-Zarrs:\r +Convert a batch of images on your local storage into OME-TIFF format. 
\r +Note that the `input_path` in the command given below is typically a \r +directory with multiple image files but a single image file can also be passed:\\\r +`batchconvert ometiff -pf conda ` \r +\r +Note that if this is your first conversion with the profile `conda`, \r +it will take a while for a conda environment with the dependencies to be\r +created. All the subsequent conversion commands with the profile `conda`,\r +however, will use this environment, and thus show no such delay.\r +\r +Since conda is the default profile, it does not have to be \r +explicitly included in the command line. Thus, the command can be shortened to:\\\r +`batchconvert ometiff `\r +\r +Convert only the first channel of the images:\\\r +`batchconvert ometiff -chn 0 `\r +\r +Crop the images being converted along x and y axis by 150 pixels:\\\r +`batchconvert ometiff -cr 0,0,150,150 `\r +\r +Convert into OME-Zarr instead:\\\r +`batchconvert omezarr `\r +\r +Convert into OME-Zarr with 3 resolution levels:\\\r +`batchconvert omezarr -rz 3 `\r +\r +Select a subset of images with a matching string such as "mutation":\\\r +`batchconvert omezarr -p mutation `\r +\r +Select a subset of images using wildcards. Note that the use of "" around \r +the input path is necessary when using wildcards:\\\r +`batchconvert omezarr "/*D3*.oir" `\r +\r +Convert by using a singularity container instead of conda environment (requires\r +singularity to be installed on your system):\\\r +`batchconvert omezarr -pf singularity "/*D3*.oir" `\r +\r +Convert by using a docker container instead of conda environment (requires docker\r +to be installed on your system):\\\r +`batchconvert omezarr -pf docker "/*D3*.oir" `\r +\r +Note that similarly to the case with the profile `conda`, the first execution of\r +a conversion with the profile `singularity` or `docker` will take a while for the\r +container image to be pulled. 
All the subsequent conversion commands using a \r +container option will use this image, and thus show no such delay. \r +\r +Convert local data and upload the output to an s3 bucket. Note that the output \r +path is created relative to the bucket specified in your s3 configuration:\\\r +`batchconvert omezarr -dt s3 `\r +\r +Receive input files from an s3 bucket, convert locally and upload the output to \r +the same bucket. Note that wildcards cannot be used when the input is from s3. \r +Use pattern matching option `-p` for selecting a subset of input files:\\\r +`batchconvert omezarr -p mutation -st s3 -dt s3 `\r +\r +Receive input files from your private BioStudies user space and convert them locally.\r +Use pattern matching option `-p` for selecting a subset of input files:\\\r +`batchconvert omezarr -p mutation -st bia `\r +\r +Receive an input from an s3 bucket, convert locally and upload the output to your \r +private BioStudies user space. Use pattern matching option `-p` for selecting a subset \r +of input files:\\\r +`batchconvert omezarr -p mutation -st s3 -dt bia `\r +\r +Note that in all the examples shown above, BatchConvert treats each input file as separate,\r +standalone data point, disregarding the possibility that some of the input files might belong to \r +the same multidimensional array. Thus, each input file is converted to an independent \r +OME-TIFF / OME-Zarr and the number of outputs will thus equal the number of selected input files.\r +An alternative scenario is discussed below.\r +\r +#### Parallel conversion of file groups by stacking multiple files into single OME-TIFFs / OME-Zarrs:\r +\r +When the flag `--merge_files` is specified, BatchConvert tries to detect which input files might \r +belong to the same multidimensional array based on the patterns in the filenames. 
Then a "grouped conversion" \r +is performed, meaning that the files belonging to the same dataset will be incorporated into \r +a single OME-TIFF / OME-Zarr series, in that files will be concatenated along specific dimension(s) \r +during the conversion. Multiple file groups in the input directory can be detected and converted \r +in parallel. \r +\r +This feature uses Bio-Formats's pattern files as described [here](https://docs.openmicroscopy.org/bio-formats/6.6.0/formats/pattern-file.html).\r +However, BatchConvert generates pattern files automatically, allowing the user to directly use the \r +input directory in the conversion command. BatchConvert also has the option of specifying the \r +concatenation axes in the command line, which is especially useful in cases where the filenames \r +may not contain dimension information. \r +\r +To be able to use the `--merge files` flag, the input file names must obey certain rules:\r +1. File names in the same group must be uniform, except for one or more **numeric field(s)**, which\r +should show incremental change across the files. These so-called **variable fields** \r +will be detected and used as the dimension(s) of concatenation.\r +2. The length of variable fields must be uniform within the group. For instance, if the\r +variable field has values reaching multi-digit numbers, leading "0"s should be included where needed \r +in the file names to make the variable field length uniform within the group.\r +3. Typically, each variable field should follow a dimension specifier. What patterns can be used as \r +dimension specifiers are explained [here](https://docs.openmicroscopy.org/bio-formats/6.6.0/formats/pattern-file.html).\r +However, BatchConvert also has the option `--concatenation_order`, which allows the user to\r +specify from the command line, the dimension(s), along which the files must be concatenated.\r +4. 
File names that are unique and cannot be associated with any group will be assumed as\r +standalone images and converted accordingly. \r +\r +Below are some examples of grouped conversion commands in the context of different possible use-case scenarios:\r +\r +**Example 1:**\r +\r +This is an example of a folder with non-uniform filename lengths:\r +```\r +time-series/test_img_T2\r +time-series/test_img_T4\r +time-series/test_img_T6\r +time-series/test_img_T8\r +time-series/test_img_T10\r +time-series/test_img_T12\r +```\r +In this example, leading zeroes are missing in the variable fields of some filenames. \r +A typical command to convert this folder to a single OME-TIFF would look like: \\\r +`batchconvert --ometiff --merge_files /time-series `\r +\r +However, this command would fail to create a single OME-Zarr folder due to the non-uniform \r +lengths of the filenames. Instead, the files would be split into two groups based on the\r +filename length, leading to two separate OME-Zarrs with names:\r +\r +`test_img_TRange{2-8-2}.ome.zarr` and `test_img_TRange{10-12-2}.ome.zarr`\r +\r +Here is the corrected version of the folder for the above example-\r +```\r +time-series/test_img_T02\r +time-series/test_img_T04\r +time-series/test_img_T06\r +time-series/test_img_T08\r +time-series/test_img_T10\r +time-series/test_img_T12\r +```\r +\r +Executing the same command on this folder would result in a single OME-Zarr with the name:\r +`test_img_TRange{02-12-2}.ome.zarr`\r +\r +**Example 2**- \r +\r +In this example, the filename lengths are uniform but the incrementation within the variable field is not.\r +```\r +time-series/test_img_T2\r +time-series/test_img_T4\r +time-series/test_img_T5\r +time-series/test_img_T7\r +```\r +\r +A typical command to convert this folder to a single OME-Zarr would look like: \\\r +`batchconvert --omezarr --merge_files /time-series `\r +\r +However, the command would fail to assume these files as a single group due to the\r +non-uniform 
incrementation in the variable field of the filenames. Instead, the dataset \r +would be split into two groups, leading to two separate OME-Zarrs with the following names:\r +`test_img_TRange{2-4-2}.ome.zarr` and `test_img_TRange{5-7-2}.ome.zarr` \r +\r +\r +**Example 3**\r +\r +This is an example of a case where the conversion attempts to concatenate files along two\r +dimensions, channel and time.\r +```\r +multichannel_time-series/test_img_C1-T1\r +multichannel_time-series/test_img_C1-T2\r +multichannel_time-series/test_img_C1-T3\r +multichannel_time-series/test_img_C2-T1\r +multichannel_time-series/test_img_C2-T2\r +```\r +To convert this folder to a single OME-Zarr, one could try the following command: \\\r +`batchconvert --omezarr --merge_files /multichannel_time-series `\r +\r +However, since the channel-2 does not have the same number of timeframes as the channel-1, \r +BatchConvert will fail to assume these two channels as part of the same series and\r +will instead split the two channels into two separate OME-Zarrs. 
\r +\r +The output would look like: \\\r +`test_img_C1-TRange{1-3-1}.ome.zarr` \\\r +`test_img_C2-TRange{1-2-1}.ome.zarr`\r +\r +To be able to really incorporate all files into a single OME-Zarr, the folder should have equal\r +number of images corresponding to both channels, as shown below:\r +```\r +multichannel_time-series/test_img_C1-T1\r +multichannel_time-series/test_img_C1-T2\r +multichannel_time-series/test_img_C1-T3\r +multichannel_time-series/test_img_C2-T1\r +multichannel_time-series/test_img_C2-T2\r +multichannel_time-series/test_img_C2-T3\r +```\r +The same conversion command on this version of the input folder would result in a single \r +OME-Zarr with the name: \\\r +`test_img_CRange{1-2-1}-TRange{1-3-1}.ome.zarr`\r +\r +\r +**Example 4**\r +\r +This is another example of a case, where there are multiple filename patterns in the input folder.\r +\r +```\r +folder_with_multiple_groups/test_img_C1-T1\r +folder_with_multiple_groups/test_img_C1-T2\r +folder_with_multiple_groups/test_img_C2-T1\r +folder_with_multiple_groups/test_img_C2-T2\r +folder_with_multiple_groups/test_img_T1-Z1\r +folder_with_multiple_groups/test_img_T1-Z2\r +folder_with_multiple_groups/test_img_T1-Z3\r +folder_with_multiple_groups/test_img_T2-Z1\r +folder_with_multiple_groups/test_img_T2-Z2\r +folder_with_multiple_groups/test_img_T2-Z3\r +```\r +\r +One can convert this folder with- \\\r +`batchconvert --omezarr --merge_files /folder_with_multiple_groups `\r + \r +BatchConvert will detect the two patterns in this folder and perform two grouped conversions. \r +The output folders will be named as `test_img_CRange{1-2-1}-TRange{1-2-1}.ome.zarr` and \r +`test_img_TRange{1-2-1}-ZRange{1-3-1}.ome.zarr`. 
\r +\r +\r +**Example 5**\r +\r +Now imagine that we have the same files as in the example 4 but the filenames of the\r +first group lack any dimension specifier, so we have the following folder:\r +\r +```\r +folder_with_multiple_groups/test_img_1-1\r +folder_with_multiple_groups/test_img_1-2\r +folder_with_multiple_groups/test_img_2-1\r +folder_with_multiple_groups/test_img_2-2\r +folder_with_multiple_groups/test_img_T1-Z1\r +folder_with_multiple_groups/test_img_T1-Z2\r +folder_with_multiple_groups/test_img_T1-Z3\r +folder_with_multiple_groups/test_img_T2-Z1\r +folder_with_multiple_groups/test_img_T2-Z2\r +folder_with_multiple_groups/test_img_T2-Z3\r +```\r +\r +In such a scenario, BatchConvert allows the user to specify the concatenation axes \r +via `--concatenation_order` option. This option expects comma-separated strings of dimensions \r +for each group. In this example, the user must provide a string of 2 characters, such as `ct` for \r +channel and time, for group 1, since there are two variable fields for this group. Since group 2 \r +already has dimension specifiers (T and Z as specified in the filenames preceding the variable fields),\r +the user does not need to specify anything for this group, and can enter `auto` or `aa` for automatic\r +detection of the specifiers. \r +\r +So the following line can be used to convert this folder: \\\r +`batchconvert --omezarr --merge_files --concatenation_order ct,aa /folder_with_multiple_groups `\r +\r +The resulting OME-Zarrs will have the names:\r +`test_img_CRange{1-2-1}-TRange{1-2-1}.ome.zarr` and\r +`test_img_TRange{1-2-1}-ZRange{1-3-1}.ome.zarr`\r +\r +Note that `--concatenation_order` will override any dimension specifiers already\r +existing in the filenames.\r +\r +\r +**Example 6**\r +\r +There can be scenarios where the user may want to have further control over the axes along \r +which to concatenate the images. 
For example, the filenames might contain the data acquisition\r +date, which can be recognised by BatchConvert as a concatenation axis in the automatic \r +detection mode. An example of such a fileset might look like:\r +\r +```\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T1\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T2\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T3\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T1\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T2\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T3\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T1\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T2\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T3\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T1\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T2\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T3\r +```\r +\r +One may try the following command to convert this folder:\r +\r +`batchconvert --omezarr --merge_files /filenames_with_dates `\r +\r +Since the concatenation axes are not specified, this command would try to create\r +a single OME-Zarr with name: `test_data_dateRange{03-04-1}.03.2023_imageZRange{1-2-1}-TRange{1-3-1}`.\r +\r +In order to force BatchConvert to ignore the date field, the user can restrict the concatenation \r +axes to the last two numeric fields. This can be done by using a command such as: \\\r +`batchconvert --omezarr --merge_files --concatenation_order aa /filenames_with_dates ` \\\r +This command will avoid concatenation along the date field, and therefore, there will be two\r +OME-Zarrs corresponding to the two dates. The number of characters being passed to the \r +`--concatenation_order` option specifies the number of numeric fields (starting from the right \r +end of the filename) that are recognised by the BatchConvert as valid concatenation axes. 
\r +Passing `aa`, therefore, means that the last two numeric fields must be recognised as \r +concatenation axes and the dimension type should be automatically detected (`a` for automatic). \r +In the same logic, one could, for example, convert each Z section into a separate OME-Zarr by \r +specifying `--concatenation_order a`.\r +\r +\r +\r +### Conversion on slurm\r +\r +All the examples given above can also be run on slurm by specifying `-pf cluster` option. \r +Note that this option automatically uses the singularity profile:\\\r +`batchconvert omezarr -pf cluster -p .oir `\r +\r +\r +\r +\r +\r +\r +\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/453?version=2" ; + schema1:keywords "Nextflow, bash, Python, NGFF, OME-Zarr, Conversion, imaging, bioimaging, image file format, file conversion, OME-TIFF, S3, BioStudies, bioformats, bioformats2raw" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "BatchConvert" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/453?version=2" ; + schema1:version 2 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 151518 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """A CWL-based pipeline for processing ChIP-Seq data (FASTQ format) and performing: \r +\r +- Peak calling\r +- Consensus peak count table generation\r +- Detection of super-enhancer regions\r +- Differential binding analysis\r +\r +On the respective GitHub folder are available:\r +\r +- The CWL wrappers for the workflow\r +- A pre-configured YAML template, based on validation analysis of publicly available HTS data\r +- Tables of metadata (``EZH2_metadata_CLL.csv`` and ``H3K27me3_metadata_CLL.csv``), based on the same validation analysis, to serve as input examples for the design of comparisons during differential binding analysis\r +- A list of ChIP-Seq blacklisted regions (human genome version 38; hg38) from the ENCODE project, which can be used as input for the workflow, is provided in BED format (``hg38-blacklist.v2.bed``)\r +\r +Briefly, the workflow performs the following steps:\r +\r +1. Quality control of short reads (FastQC)\r +2. Trimming of the reads (e.g., removal of adapter and/or low quality sequences) (Trimmomatic)\r +3. Mapping to reference genome (HISAT2)\r +5. Conversion of mapped reads from SAM (Sequence Alignment Map) to BAM (Binary Alignment Map) format (samtools)\r +6. Sorting mapped reads based on chromosomal coordinates (samtools)\r +7. Adding information regarding paired end reads (e.g., CIGAR field information) (samtools)\r +8. Re-sorting based on chromosomal coordinates (samtools)\r +9. Removal of duplicate reads (samtools)\r +10. Index creation for coordinate-sorted BAM files to enable fast random access (samtools)\r +11. Production of quality metrics and files for the inspection of the mapped ChIP-Seq reads, taking into consideration the experimental design (deeptools2):\r + - Read coverages for genomic regions of two or more BAM files are computed (multiBamSummary). 
The results are produced in compressed numpy array (NPZ) format and are used to calculate and visualize pairwise correlation values between the read coverages (plotCorrelation). \r + - Estimation of sequencing depth, through genomic position (base pair) sampling, and visualization is performed for multiple BAM files (plotCoverage).\r + - Cumulative read coverages for each indexed BAM file are plotted by counting and sorting all reads overlapping a “window” of specified length (plotFingerprint).\r + - Production of coverage track files (bigWig), with the coverage calculated as the number of reads per consecutive windows of predefined size (bamCoverage), and normalized through various available methods (e.g., Reads Per Kilobase per Million mapped reads; RPKM). The coverage track files are used to calculate scores per selected genomic regions (computeMatrix), typically genes, and a heatmap, based on the scores associated with these genomic regions, is produced (plotHeatmap).\r +12. Calling potential binding positions (peaks) to the genome (peak calling) (MACS2)\r +13. Generation of consensus peak count table for the application of custom analyses on MACS2 peak calling results (bedtools)\r +14. Detection of super-enhancer regions (Rank Ordering of Super-Enhancers; ROSE)\r +15. Differential binding analyses (DiffBind) for:\r + - MACS2 peak calling results\r + - ROSE-detected super-enhancer regions \r + """ ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.525.1" ; + schema1:isBasedOn "https://github.com/BiodataAnalysisGroup/CWL_HTS_pipelines/tree/main/ChIP_Seq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL-based ChIP-Seq workflow" ; + schema1:sdDatePublished "2024-07-12 13:32:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/525/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 42324 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-07-05T09:39:05Z" ; + schema1:dateModified "2023-07-05T09:39:32Z" ; + schema1:description """A CWL-based pipeline for processing ChIP-Seq data (FASTQ format) and performing: \r +\r +- Peak calling\r +- Consensus peak count table generation\r +- Detection of super-enhancer regions\r +- Differential binding analysis\r +\r +On the respective GitHub folder are available:\r +\r +- The CWL wrappers for the workflow\r +- A pre-configured YAML template, based on validation analysis of publicly available HTS data\r +- Tables of metadata (``EZH2_metadata_CLL.csv`` and ``H3K27me3_metadata_CLL.csv``), based on the same validation analysis, to serve as input examples for the design of comparisons during differential binding analysis\r +- A list of ChIP-Seq blacklisted regions (human genome version 38; hg38) from the ENCODE project, which is can be used as input for the workflow, is provided in BED format (``hg38-blacklist.v2.bed``)\r +\r +Briefly, the workflow performs the following steps:\r +\r +1. Quality control of short reads (FastQC)\r +2. Trimming of the reads (e.g., removal of adapter and/or low quality sequences) (Trimmomatic)\r +3. Mapping to reference genome (HISAT2)\r +5. Convertion of mapped reads from SAM (Sequence Alignment Map) to BAM (Binary Alignment Map) format (samtools)\r +6. Sorting mapped reads based on chromosomal coordinates (samtools)\r +7. Adding information regarding paired end reads (e.g., CIGAR field information) (samtools)\r +8. Re-sorting based on chromosomal coordinates (samtools)\r +9. Removal of duplicate reads (samtools)\r +10. Index creation for coordinate-sorted BAM files to enable fast random access (samtools)\r +11. 
Production of quality metrics and files for the inspection of the mapped ChIP-Seq reads, taking into consideration the experimental design (deeptools2):\r + - Read coverages for genomic regions of two or more BAM files are computed (multiBamSummary). The results are produced in compressed numpy array (NPZ) format and are used to calculate and visualize pairwise correlation values between the read coverages (plotCorrelation). \r + - Estimation of sequencing depth, through genomic position (base pair) sampling, and visualization is performed for multiple BAM files (plotCoverage).\r + - Cumulative read coverages for each indexed BAM file are plotted by counting and sorting all reads overlapping a “window” of specified length (plotFingerprint).\r + - Production of coverage track files (bigWig), with the coverage calculated as the number of reads per consecutive windows of predefined size (bamCoverage), and normalized through various available methods (e.g., Reads Per Kilobase per Million mapped reads; RPKM). The coverage track files are used to calculate scores per selected genomic regions (computeMatrix), typically genes, and a heatmap, based on the scores associated with these genomic regions, is produced (plotHeatmap).\r +12. Calling potential binding positions (peaks) to the genome (peak calling) (MACS2)\r +13. Generation of consensus peak count table for the application of custom analyses on MACS2 peak calling results (bedtools)\r +14. Detection of super-enhancer regions (Rank Ordering of Super-Enhancers; ROSE)\r +15. 
Differential binding analyses (DiffBind) for:\r + - MACS2 peak calling results\r + - ROSE-detected super-enhancer regions \r + """ ; + schema1:image ; + schema1:keywords "CWL, workflow, ChIP-seq, Epigenomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "CWL-based ChIP-Seq workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/525?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 271851 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 167040 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 3277 ; + schema1:dateModified "2024-01-17T10:54:05+00:00" ; + schema1:name "dnn_cnn_5epochs.csv" ; + schema1:sdDatePublished "2024-03-25T10:49:09+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-07-12 13:18:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3714 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:09Z" ; + schema1:dateModified "2024-06-11T12:55:09Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). 
\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.258.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_protein_complex_md_setup/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:36:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/258/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 203702 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 31213 ; + schema1:creator , + ; + schema1:dateCreated "2022-01-10T11:52:43Z" ; + schema1:dateModified "2023-06-07T10:54:32Z" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/258?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + 
schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_protein_complex_md_setup/cwl/workflow.cwl" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """16S Microbial Analysis with mothur (short)\r +\r +The workflows in this collection are from the '16S Microbial Analysis with mothur' tutorial for analysis of 16S data (Saskia Hiltemann, Bérénice Batut, Dave Clements), adapted for piepline use on galaxy australia (Ahmed Mehdi). The workflows developed in galaxy use mothur software package developed by Schloss et al https://pubmed.ncbi.nlm.nih.gov/19801464/. 
\r +\r +Please also refer to the 16S tutorials available at Galaxy https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html and [https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop/tutorial.html\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/648?version=1" ; + schema1:isBasedOn "https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Workflow 1: Further Quality Control [16S Microbial Analysis With Mothur]" ; + schema1:sdDatePublished "2024-07-12 13:26:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/648/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17182 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2023-11-09T05:04:17Z" ; + schema1:dateModified "2023-11-09T05:13:16Z" ; + schema1:description """16S Microbial Analysis with mothur (short)\r +\r +The workflows in this collection are from the '16S Microbial Analysis with mothur' tutorial for analysis of 16S data (Saskia Hiltemann, Bérénice Batut, Dave Clements), adapted for piepline use on galaxy australia (Ahmed Mehdi). The workflows developed in galaxy use mothur software package developed by Schloss et al https://pubmed.ncbi.nlm.nih.gov/19801464/. 
\r +\r +Please also refer to the 16S tutorials available at Galaxy https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html and [https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop/tutorial.html\r +""" ; + schema1:isPartOf ; + schema1:keywords "Metagenomics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Workflow 1: Further Quality Control [16S Microbial Analysis With Mothur]" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/648?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2023-11-08T00:19:30.565058" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-decontamination-VGP9" ; + schema1:license "BSD-3-Clause" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-decontamination-VGP9/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.17" . + + a schema1:Dataset ; + schema1:datePublished "2021-09-15T17:26:39.020087" ; + schema1:description "Cryo-EM processing workflow" ; + schema1:hasPart , + , + ; + schema1:image "workflow.svg" ; + schema1:keywords "cryoem" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "entryTitleTest" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:image ; + schema1:name "workflow" ; + schema1:programmingLanguage . + + a schema1:MediaObject . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-06-25T09:57:05.231229" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/taxonomy-profiling-and-visualization-with-krona" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "taxonomy-profiling-and-visualization-with-krona/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + schema1:datePublished "2024-06-21T15:04:44.017897" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/pox-virus-amplicon" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "pox-virus-amplicon/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "pox-virus-amplicon/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/pox-virus-amplicon" ; + schema1:version "0.2" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "# StructuralVariants Workflow" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/278?version=1" ; + schema1:isBasedOn "https://gitlab.bsc.es/lrodrig1/structuralvariants_poc/-/tree/1.1.3/structuralvariants/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNV_pipeline" ; + schema1:sdDatePublished "2024-07-12 13:35:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/278/ro_crate?version=1" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 8862 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8665 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-14T12:07:59Z" ; + schema1:dateModified "2022-03-14T12:10:41Z" ; + schema1:description "# StructuralVariants Workflow" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/278?version=10" ; + schema1:keywords "cancer, CODEX2, ExomeDepth, manta, TransBioNet, variant calling, GRIDSS, structural variants" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CNV_pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/278?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 66395 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Macromolecular Coarse-Grained Flexibility (FlexServ) tutorial using BioExcel Building Blocks (biobb)\r +\r +This tutorial aims to illustrate the process of generating protein conformational ensembles from 3D structures and analysing its molecular flexibility, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.551.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_flexserv" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Macromolecular Coarse-Grained Flexibility tutorial" ; + schema1:sdDatePublished "2024-07-12 13:32:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/551/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 109577 ; + schema1:creator , + ; + schema1:dateCreated "2023-08-02T10:39:18Z" ; + schema1:dateModified "2023-08-02T11:20:30Z" ; + schema1:description """# Macromolecular Coarse-Grained Flexibility (FlexServ) tutorial using BioExcel Building Blocks (biobb)\r +\r +This tutorial aims to illustrate the process of generating protein conformational ensembles from 3D structures and analysing its molecular flexibility, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/551?version=1" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Macromolecular Coarse-Grained Flexibility tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_flexserv/blob/main/biobb_wf_flexserv/notebooks/biobb_wf_flexserv.ipynb" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.120.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:34:13 +0100" ; + schema1:url "https://workflowhub.eu/workflows/120/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 57824 ; + schema1:creator , + ; + schema1:dateCreated "2022-09-15T09:24:04Z" ; + schema1:dateModified "2023-01-16T13:49:50Z" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/120?version=5" ; + schema1:isPartOf , + , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://raw.githubusercontent.com/bioexcel/biobb_wf_md_setup/77a78a8345c385a0bc8588b21153fc1151a2ede2/biobb_wf_md_setup/notebooks/biobb_MDsetup_tutorial.ipynb" ; + schema1:version 3 . + + a schema1:Dataset ; + schema1:datePublished "2023-10-19T15:11:18.085303" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "cutandrun/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.14" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """This workflow provides a calculaiton of the power spectrum of Stochastic Gravitational Wave Backgorund (SGWB) from a first-order cosmological phase transition based on the parameterisations of Roper Pol et al. (2023). The power spectrum includes two components: from the sound waves excited by collisions of bubbles of the new phase and from the turbulence that is induced by these collisions.\r +\r +The cosmological epoch of the phase transition is described by the temperature, T_star and by the number(s) of relativistic degrees of freedom, g_star that should be specified as parameters.\r +\r +The phase transition itself is characterised by phenomenological parameters, alpha, beta_H and epsilon_turb, the latent heat, the ratio of the Hubble radius to the bubble size at percolation and the fraction of the energy otuput of the phase transition that goes into turbulence.\r +\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.831.1" ; + schema1:isBasedOn "https://gitlab.renkulab.io/astronomy/mmoda/sgwb" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for SGWB model spectrum" ; + schema1:sdDatePublished "2024-07-12 13:23:05 +0100" ; + schema1:url 
"https://workflowhub.eu/workflows/831/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1968 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-23T15:35:10Z" ; + schema1:dateModified "2024-04-23T15:36:41Z" ; + schema1:description """This workflow provides a calculaiton of the power spectrum of Stochastic Gravitational Wave Backgorund (SGWB) from a first-order cosmological phase transition based on the parameterisations of Roper Pol et al. (2023). The power spectrum includes two components: from the sound waves excited by collisions of bubbles of the new phase and from the turbulence that is induced by these collisions.\r +\r +The cosmological epoch of the phase transition is described by the temperature, T_star and by the number(s) of relativistic degrees of freedom, g_star that should be specified as parameters.\r +\r +The phase transition itself is characterised by phenomenological parameters, alpha, beta_H and epsilon_turb, the latent heat, the ratio of the Hubble radius to the bubble size at percolation and the fraction of the energy otuput of the phase transition that goes into turbulence.\r +\r +\r +""" ; + schema1:keywords "astronomy" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "SGWB model spectrum" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/831?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.56.6" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_protein-complex_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:34:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/56/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 156554 ; + schema1:creator , + ; + schema1:dateCreated "2023-07-26T09:21:56Z" ; + schema1:dateModified "2023-07-26T09:22:49Z" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/56?version=6" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + 
schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_protein-complex_md_setup/master/biobb_wf_protein-complex_md_setup/notebooks/biobb_Protein-Complex_MDsetup_tutorial.ipynb" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """Workflow for Creating a large disease network from various datasets and databases for IBM, and applying the active subnetwork identification method MOGAMUN.\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/681?version=5" ; + schema1:isBasedOn "https://github.com/jdwijnbergen/IBM_ASI_workflow.git" ; + schema1:license "notspecified" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Inclusion Body Myositis Active Subnetwork Identification Workflow" ; + schema1:sdDatePublished "2024-07-12 13:24:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/681/ro_crate?version=5" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 27177 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6953 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-27T16:04:32Z" ; + schema1:dateModified "2023-11-27T16:04:32Z" ; + schema1:description """Workflow for Creating a large disease network from various datasets and databases for IBM, and applying the active subnetwork identification method MOGAMUN.\r +\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/681?version=6" ; + schema1:keywords "Bioinformatics, CWL, Genomics, Transcriptomics, Protein-Protein Interaction" ; + schema1:license "https://choosealicense.com/no-permission/" ; + schema1:name "Inclusion Body Myositis Active Subnetwork Identification Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/681?version=5" ; + schema1:version 5 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# Single drug prediction Workflow\r +## Table of Contents\r +\r +- [Single drug prediction Workflow](#single-drug-prediction-workflow)\r + - [Table of Contents](#table-of-contents)\r + - [Description](#description)\r + - [Contents](#contents)\r + - [Building Blocks](#building-blocks)\r + - [Workflows](#workflows)\r + - [Resources](#resources)\r + - [Tests](#tests)\r + - [Instructions](#instructions)\r + - [Local machine](#local-machine)\r + - [Requirements](#requirements)\r + - [Usage steps](#usage-steps)\r + - [MareNostrum 4](#marenostrum-4)\r + - [Requirements in MN4](#requirements-in-mn4)\r + - [Usage steps in MN4](#usage-steps-in-mn4)\r + - [License](#license)\r + - [Contact](#contact)\r +\r +## Description\r +\r +Complementarily, the workflow supports single drug response predictions to provide a baseline prediction in cases where drug response information for a given drug and cell line is not available. As an input, the workflow needs basal gene expression data for a cell, the drug targets (they need to be known for untested drugs) and optionally CARNIVAL features (sub-network activity predicted with CARNIVAL building block) and predicts log(IC50) values. This workflow uses a custom matrix factorization approach built with Google JAX and trained with gradient descent. The workflow can be used both for training a model, and for predicting new drug responses.\r +\r +The workflow uses the following building blocks in order of execution (for training a model):\r +\r +1. Carnival_gex_preprocess\r + - Preprocessed the basal gene expression data from GDSC. The input is a matrix of Gene x Sample expression data.\r +2. Progeny\r + - Using the preprocessed data, it estimates pathway activities for each column in the data (for each sample). It returns a matrix of Pathways x Samples with activity values for 11 pathways.\r +3. Omnipath\r + - It downloads latest Prior Knowledge Network of signalling. 
This building block can be ommited if there exists already a csv file with the network.\r +4. TF Enrichment\r + - For each sample, transcription factor activities are estimated using Dorothea.\r +5. CarnivalPy\r + - Using the TF activities estimated before, it runs Carnival to obtain a sub-network consistent with the TF activities (for each sample).\r +6. Carnival_feature_merger\r + - Preselect a set of genes by the user (if specified) and merge the features with the basal gene expression data.\r +7. ML Jax Drug Prediction\r + - Trains a model using the combined features to predict IC50 values from GDSC.\r +\r +For details on individual workflow steps, please check the scripts that use each individual building block in the workflow [`GitHub repository`]()\r +\r +## Contents\r +\r +### Building Blocks\r +\r +The ``BuildingBlocks`` folder contains the script to install the\r +Building Blocks used in the Single Drug Prediction Workflow.\r +\r +### Workflows\r +\r +The ``Workflow`` folder contains the workflows implementations.\r +\r +Currently contains the implementation using PyCOMPSs.\r +\r +### Resources\r +\r +The ``Resources`` folder contains a small dataset for testing purposes.\r +\r +### Tests\r +\r +The ``Tests`` folder contains the scripts that run each Building Block\r +used in the workflow for a small dataset.\r +They can be executed individually *without PyCOMPSs installed* for testing\r +purposes.\r +\r +## Instructions\r +\r +### Local machine\r +\r +This section explains the requirements and usage for the Single Drug Prediction Workflow in a laptop or desktop computer.\r +\r +#### Requirements\r +\r +- [`permedcoe`](https://github.com/PerMedCoE/permedcoe) package\r +- [PyCOMPSs](https://pycompss.readthedocs.io/en/stable/Sections/00_Quickstart.html)\r +- [Singularity](https://sylabs.io/guides/3.0/user-guide/installation.html)\r +\r +#### Usage steps\r +\r +1. 
Clone this repository:\r +\r + ```bash\r + git clone https://github.com/PerMedCoE/single-drug-prediction-workflow.git\r + ```\r +\r +2. Install the Building Blocks required for the COVID19 Workflow:\r +\r + ```bash\r + single-drug-prediction-workflow/BuildingBlocks/./install_BBs.sh\r + ```\r +\r +3. Get the required Building Block images from the project [B2DROP](https://b2drop.bsc.es/index.php/f/444350):\r +\r + - Required images:\r + - toolset.singularity\r + - carnivalpy.singularity\r + - ml-jax.singularity\r +\r + The path where these files are stored **MUST be exported in the `PERMEDCOE_IMAGES`** environment variable.\r +\r + > :warning: **TIP**: These containers can be built manually as follows (be patient since some of them may take some time):\r + 1. Clone the `BuildingBlocks` repository\r + ```bash\r + git clone https://github.com/PerMedCoE/BuildingBlocks.git\r + ```\r + 2. Build the required Building Block images\r + ```bash\r + cd BuildingBlocks/Resources/images\r + ## Download new BB singularity files\r + wget https://github.com/saezlab/permedcoe/archive/refs/heads/master.zip\r + unzip master.zip\r + cd permedcoe-master/containers\r + ## Build containers\r + cd toolset\r + sudo /usr/local/bin/singularity build toolset.sif toolset.singularity\r + mv toolset.sif ../../../\r + cd ..\r + cd carnivalpy\r + sudo /usr/local/bin/singularity build carnivalpy.sif carnivalpy.singularity\r + mv carnivalpy.sif ../../../\r + cd ..\r + cd ml-jax\r + sudo /usr/local/bin/singularity build ml-jax.sif ml-jax.singularity\r + mv ml-jax.sif ../../../tf-jax.sif\r + cd ..\r + cd ../..\r + ## Cleanup\r + rm -rf permedcoe-master\r + rm master.zip\r + cd ../../..\r + ```\r +\r + > :warning: **TIP**: The singularity containers **can to be downloaded** from: https://cloud.sylabs.io/library/pablormier\r +\r +\r +**If using PyCOMPSs in local PC** (make sure that PyCOMPSs in installed):\r +\r +4. 
Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflows/PyCOMPSs\r + ```\r +\r +5. Execute `./run.sh`\r +\r + The execution is prepared to use the singularity images that **MUST** be placed into `BuildingBlocks/Resources/images` folder. If they are located in any other folder, please update the `run.sh` script setting the `PERMEDCOE_IMAGES` to the images folder.\r +\r + > **TIP**: If you want to run the workflow with a different dataset, please update the `run.sh` script setting the `dataset` variable to the new dataset folder and their file names.\r +\r +### MareNostrum 4\r +\r +This section explains the requirements and usage for the Single Drug Prediction Workflow in the MareNostrum 4 supercomputer.\r +\r +#### Requirements in MN4\r +\r +- Access to MN4\r +\r +All Building Blocks are already installed in MN4, and the Single Drug Prediction Workflow available.\r +\r +#### Usage steps in MN4\r +\r +1. Load the `COMPSs`, `Singularity` and `permedcoe` modules\r +\r + ```bash\r + export COMPSS_PYTHON_VERSION=3\r + module load COMPSs/3.1\r + module load singularity/3.5.2\r + module use /apps/modules/modulefiles/tools/COMPSs/libraries\r + module load permedcoe\r + ```\r +\r + > **TIP**: Include the loading into your `${HOME}/.bashrc` file to load it automatically on the session start.\r +\r + This commands will load COMPSs and the permedcoe package which provides all necessary dependencies, as well as the path to the singularity container images (`PERMEDCOE_IMAGES` environment variable) and testing dataset (`SINGLE_DRUG_PREDICTION_WORKFLOW_DATASET` environment variable).\r +\r +2. Get a copy of the pilot workflow into your desired folder\r +\r + ```bash\r + mkdir desired_folder\r + cd desired_folder\r + get_single_drug_prediction_workflow\r + ```\r +\r +3. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflow/PyCOMPSs\r + ```\r +\r +4. 
Execute `./launch.sh`\r +\r + This command will launch a job into the job queuing system (SLURM) requesting 2 nodes (one node acting half master and half worker, and other full worker node) for 20 minutes, and is prepared to use the singularity images that are already deployed in MN4 (located into the `PERMEDCOE_IMAGES` environment variable). It uses the dataset located into `../../Resources/data` folder.\r +\r + > :warning: **TIP**: If you want to run the workflow with a different dataset, please edit the `launch.sh` script and define the appropriate dataset path.\r +\r + After the execution, a `results` folder will be available with with Single Drug Prediction Workflow results.\r +\r +## License\r +\r +[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\r +\r +## Contact\r +\r +\r +\r +This software has been developed for the [PerMedCoE project](https://permedcoe.eu/), funded by the European Commission (EU H2020 [951773](https://cordis.europa.eu/project/id/951773)).\r +\r +![](https://permedcoe.eu/wp-content/uploads/2020/11/logo_1.png "PerMedCoE")\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/478?version=1" ; + schema1:isBasedOn "https://github.com/PerMedCoE/single-drug-prediction-workflow" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PerMedCoE Single Drug Prediction" ; + schema1:sdDatePublished "2024-07-12 13:33:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/478/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1243 ; + schema1:dateCreated "2023-05-23T12:15:44Z" ; + schema1:dateModified "2023-05-23T12:32:48Z" ; + schema1:description """# Single drug prediction Workflow\r +## Table of Contents\r +\r +- [Single drug prediction Workflow](#single-drug-prediction-workflow)\r + - [Table of Contents](#table-of-contents)\r + - [Description](#description)\r + - [Contents](#contents)\r + - [Building Blocks](#building-blocks)\r + - [Workflows](#workflows)\r + - [Resources](#resources)\r + - [Tests](#tests)\r + - [Instructions](#instructions)\r + - [Local machine](#local-machine)\r + - [Requirements](#requirements)\r + - [Usage steps](#usage-steps)\r + - [MareNostrum 4](#marenostrum-4)\r + - [Requirements in MN4](#requirements-in-mn4)\r + - [Usage steps in MN4](#usage-steps-in-mn4)\r + - [License](#license)\r + - [Contact](#contact)\r +\r +## Description\r +\r +Complementarily, the workflow supports single drug response predictions to provide a baseline prediction in cases where drug response information for a given drug and cell line is not available. As an input, the workflow needs basal gene expression data for a cell, the drug targets (they need to be known for untested drugs) and optionally CARNIVAL features (sub-network activity predicted with CARNIVAL building block) and predicts log(IC50) values. This workflow uses a custom matrix factorization approach built with Google JAX and trained with gradient descent. The workflow can be used both for training a model, and for predicting new drug responses.\r +\r +The workflow uses the following building blocks in order of execution (for training a model):\r +\r +1. Carnival_gex_preprocess\r + - Preprocessed the basal gene expression data from GDSC. The input is a matrix of Gene x Sample expression data.\r +2. 
Progeny\r + - Using the preprocessed data, it estimates pathway activities for each column in the data (for each sample). It returns a matrix of Pathways x Samples with activity values for 11 pathways.\r +3. Omnipath\r + - It downloads latest Prior Knowledge Network of signalling. This building block can be ommited if there exists already a csv file with the network.\r +4. TF Enrichment\r + - For each sample, transcription factor activities are estimated using Dorothea.\r +5. CarnivalPy\r + - Using the TF activities estimated before, it runs Carnival to obtain a sub-network consistent with the TF activities (for each sample).\r +6. Carnival_feature_merger\r + - Preselect a set of genes by the user (if specified) and merge the features with the basal gene expression data.\r +7. ML Jax Drug Prediction\r + - Trains a model using the combined features to predict IC50 values from GDSC.\r +\r +For details on individual workflow steps, please check the scripts that use each individual building block in the workflow [`GitHub repository`]()\r +\r +## Contents\r +\r +### Building Blocks\r +\r +The ``BuildingBlocks`` folder contains the script to install the\r +Building Blocks used in the Single Drug Prediction Workflow.\r +\r +### Workflows\r +\r +The ``Workflow`` folder contains the workflows implementations.\r +\r +Currently contains the implementation using PyCOMPSs.\r +\r +### Resources\r +\r +The ``Resources`` folder contains a small dataset for testing purposes.\r +\r +### Tests\r +\r +The ``Tests`` folder contains the scripts that run each Building Block\r +used in the workflow for a small dataset.\r +They can be executed individually *without PyCOMPSs installed* for testing\r +purposes.\r +\r +## Instructions\r +\r +### Local machine\r +\r +This section explains the requirements and usage for the Single Drug Prediction Workflow in a laptop or desktop computer.\r +\r +#### Requirements\r +\r +- [`permedcoe`](https://github.com/PerMedCoE/permedcoe) package\r +- 
[PyCOMPSs](https://pycompss.readthedocs.io/en/stable/Sections/00_Quickstart.html)\r +- [Singularity](https://sylabs.io/guides/3.0/user-guide/installation.html)\r +\r +#### Usage steps\r +\r +1. Clone this repository:\r +\r + ```bash\r + git clone https://github.com/PerMedCoE/single-drug-prediction-workflow.git\r + ```\r +\r +2. Install the Building Blocks required for the COVID19 Workflow:\r +\r + ```bash\r + single-drug-prediction-workflow/BuildingBlocks/./install_BBs.sh\r + ```\r +\r +3. Get the required Building Block images from the project [B2DROP](https://b2drop.bsc.es/index.php/f/444350):\r +\r + - Required images:\r + - toolset.singularity\r + - carnivalpy.singularity\r + - ml-jax.singularity\r +\r + The path where these files are stored **MUST be exported in the `PERMEDCOE_IMAGES`** environment variable.\r +\r + > :warning: **TIP**: These containers can be built manually as follows (be patient since some of them may take some time):\r + 1. Clone the `BuildingBlocks` repository\r + ```bash\r + git clone https://github.com/PerMedCoE/BuildingBlocks.git\r + ```\r + 2. 
Build the required Building Block images\r + ```bash\r + cd BuildingBlocks/Resources/images\r + ## Download new BB singularity files\r + wget https://github.com/saezlab/permedcoe/archive/refs/heads/master.zip\r + unzip master.zip\r + cd permedcoe-master/containers\r + ## Build containers\r + cd toolset\r + sudo /usr/local/bin/singularity build toolset.sif toolset.singularity\r + mv toolset.sif ../../../\r + cd ..\r + cd carnivalpy\r + sudo /usr/local/bin/singularity build carnivalpy.sif carnivalpy.singularity\r + mv carnivalpy.sif ../../../\r + cd ..\r + cd ml-jax\r + sudo /usr/local/bin/singularity build ml-jax.sif ml-jax.singularity\r + mv ml-jax.sif ../../../tf-jax.sif\r + cd ..\r + cd ../..\r + ## Cleanup\r + rm -rf permedcoe-master\r + rm master.zip\r + cd ../../..\r + ```\r +\r + > :warning: **TIP**: The singularity containers **can to be downloaded** from: https://cloud.sylabs.io/library/pablormier\r +\r +\r +**If using PyCOMPSs in local PC** (make sure that PyCOMPSs in installed):\r +\r +4. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflows/PyCOMPSs\r + ```\r +\r +5. Execute `./run.sh`\r +\r + The execution is prepared to use the singularity images that **MUST** be placed into `BuildingBlocks/Resources/images` folder. If they are located in any other folder, please update the `run.sh` script setting the `PERMEDCOE_IMAGES` to the images folder.\r +\r + > **TIP**: If you want to run the workflow with a different dataset, please update the `run.sh` script setting the `dataset` variable to the new dataset folder and their file names.\r +\r +### MareNostrum 4\r +\r +This section explains the requirements and usage for the Single Drug Prediction Workflow in the MareNostrum 4 supercomputer.\r +\r +#### Requirements in MN4\r +\r +- Access to MN4\r +\r +All Building Blocks are already installed in MN4, and the Single Drug Prediction Workflow available.\r +\r +#### Usage steps in MN4\r +\r +1. 
Load the `COMPSs`, `Singularity` and `permedcoe` modules\r +\r + ```bash\r + export COMPSS_PYTHON_VERSION=3\r + module load COMPSs/3.1\r + module load singularity/3.5.2\r + module use /apps/modules/modulefiles/tools/COMPSs/libraries\r + module load permedcoe\r + ```\r +\r + > **TIP**: Include the loading into your `${HOME}/.bashrc` file to load it automatically on the session start.\r +\r + This commands will load COMPSs and the permedcoe package which provides all necessary dependencies, as well as the path to the singularity container images (`PERMEDCOE_IMAGES` environment variable) and testing dataset (`SINGLE_DRUG_PREDICTION_WORKFLOW_DATASET` environment variable).\r +\r +2. Get a copy of the pilot workflow into your desired folder\r +\r + ```bash\r + mkdir desired_folder\r + cd desired_folder\r + get_single_drug_prediction_workflow\r + ```\r +\r +3. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflow/PyCOMPSs\r + ```\r +\r +4. Execute `./launch.sh`\r +\r + This command will launch a job into the job queuing system (SLURM) requesting 2 nodes (one node acting half master and half worker, and other full worker node) for 20 minutes, and is prepared to use the singularity images that are already deployed in MN4 (located into the `PERMEDCOE_IMAGES` environment variable). 
It uses the dataset located into `../../Resources/data` folder.\r +\r + > :warning: **TIP**: If you want to run the workflow with a different dataset, please edit the `launch.sh` script and define the appropriate dataset path.\r +\r + After the execution, a `results` folder will be available with with Single Drug Prediction Workflow results.\r +\r +## License\r +\r +[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\r +\r +## Contact\r +\r +\r +\r +This software has been developed for the [PerMedCoE project](https://permedcoe.eu/), funded by the European Commission (EU H2020 [951773](https://cordis.europa.eu/project/id/951773)).\r +\r +![](https://permedcoe.eu/wp-content/uploads/2020/11/logo_1.png "PerMedCoE")\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "PerMedCoE Single Drug Prediction" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/478?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-01-11T16:25:21.762853" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-pe-illumina-artic-variant-calling/COVID-19-PE-ARTIC-ILLUMINA" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.19" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "This workflow correspond to the Genome-wide alternative splicing analysis training. It allows to analyze isoform switching by making use of IsoformSwitchAnalyzeR." 
; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/472?version=3" ; + schema1:license "CC-BY-NC-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genome-wide alternative splicing analysis" ; + schema1:sdDatePublished "2024-07-12 13:33:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/472/ro_crate?version=3" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 17028 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 81080 ; + schema1:creator ; + schema1:dateCreated "2023-05-25T21:39:41Z" ; + schema1:dateModified "2023-05-25T21:41:36Z" ; + schema1:description "This workflow correspond to the Genome-wide alternative splicing analysis training. It allows to analyze isoform switching by making use of IsoformSwitchAnalyzeR." ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/472?version=3" ; + schema1:keywords "Transcriptomics, alternative-splicing, GTN" ; + schema1:license "https://spdx.org/licenses/CC-BY-NC-4.0" ; + schema1:name "Genome-wide alternative splicing analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/472?version=3" ; + schema1:version 3 ; + ns1:input , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 46064 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# polya_liftover - sc/snRNAseq Snakemake Workflow\r +\r +A [Snakemake][sm] workflow for using PolyA_DB and UCSC Liftover with Cellranger.\r +\r +Some genes are not accurately annotated in the reference genome.\r +Here,\r +we use information provide by the [PolyA_DB v3.2][polya] to update the coordinates,\r +then the [USCS Liftover][liftover] tool to update to a more recent genome.\r +Next,\r +we use [Cellranger][cr] to create the reference and count matrix.\r +Finally,\r +by taking advantage of the integrated [Conda][conda] and [Singularity][sing] support,\r +we can run the whole thing in an isolated environment.\r +\r +Please see our [README][readme] for the full details!\r +\r +\r +[sm]: https://snakemake.readthedocs.io/en/stable/index.html "Snakemake"\r +[polya]: https://exon.apps.wistar.org/polya_db/v3/index.php "PolyA_DB"\r +[liftover]: https://genome.ucsc.edu/cgi-bin/hgLiftOver "Liftover"\r +[cr]: https://github.com/alexdobin/STAR "Cellranger"\r +[conda]: https://docs.conda.io/en/latest/ "Conda"\r +[sing]: https://sylabs.io/singularity/ "Singularity"\r +[readme]: https://github.com/IMS-Bio2Core-Facility/polya_liftover/blob/main/README.md""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/263?version=1" ; + schema1:isBasedOn "https://github.com/IMS-Bio2Core-Facility/polya_liftover" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for polya_liftover" ; + schema1:sdDatePublished "2024-07-12 13:36:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/263/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 451 ; + schema1:creator ; + schema1:dateCreated "2022-01-17T10:44:28Z" ; + schema1:dateModified "2023-01-16T13:57:28Z" ; + schema1:description """# polya_liftover - sc/snRNAseq Snakemake Workflow\r +\r +A [Snakemake][sm] workflow for using PolyA_DB and UCSC Liftover with Cellranger.\r +\r +Some genes are not accurately annotated in the reference genome.\r +Here,\r +we use information provide by the [PolyA_DB v3.2][polya] to update the coordinates,\r +then the [USCS Liftover][liftover] tool to update to a more recent genome.\r +Next,\r +we use [Cellranger][cr] to create the reference and count matrix.\r +Finally,\r +by taking advantage of the integrated [Conda][conda] and [Singularity][sing] support,\r +we can run the whole thing in an isolated environment.\r +\r +Please see our [README][readme] for the full details!\r +\r +\r +[sm]: https://snakemake.readthedocs.io/en/stable/index.html "Snakemake"\r +[polya]: https://exon.apps.wistar.org/polya_db/v3/index.php "PolyA_DB"\r +[liftover]: https://genome.ucsc.edu/cgi-bin/hgLiftOver "Liftover"\r +[cr]: https://github.com/alexdobin/STAR "Cellranger"\r +[conda]: https://docs.conda.io/en/latest/ "Conda"\r +[sing]: https://sylabs.io/singularity/ "Singularity"\r +[readme]: https://github.com/IMS-Bio2Core-Facility/polya_liftover/blob/main/README.md""" ; + schema1:image ; + schema1:keywords "Transcriptomics, scRNA-seq, Snakemake, FAIR workflows, FastQC, MultiQC, Cellranger, LiftOver" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "polya_liftover" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/263?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 14760 . 
+ + a schema1:Dataset ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-consensus-from-variation" ; + schema1:mainEntity ; + schema1:name "COVID-19-CONSENSUS-CONSTRUCTION (v0.2)" ; + schema1:sdDatePublished "2021-07-23 10:18:30 +0100" ; + schema1:softwareVersion "v0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 61638 ; + schema1:name "COVID-19-CONSENSUS-CONSTRUCTION" ; + schema1:programmingLanguage . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "This is the workflow for the recreation potential component of the cultural ecosystems digital twin" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/811?version=1" ; + schema1:isBasedOn "https://biodt.eu/" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for BioDT Cultural Ecosystem Services prototype Digital Twin - Recreation Potential Component" ; + schema1:sdDatePublished "2024-07-12 13:22:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/811/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 501 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-04-02T15:46:26Z" ; + schema1:dateModified "2024-05-17T13:14:09Z" ; + schema1:description "This is the workflow for the recreation potential component of the cultural ecosystems digital twin" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "BioDT Cultural Ecosystem Services prototype Digital Twin - Recreation Potential Component" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/811?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + schema1:datePublished "2021-12-20T17:04:47.622964" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "parallel-accession-download/main" ; + schema1:sdDatePublished "2021-12-21 03:01:01 +0000" ; + schema1:softwareVersion "v0.1.2" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=19" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-07-12 13:21:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=19" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17866 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:59Z" ; + schema1:dateModified "2024-06-11T12:54:59Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=19" ; + schema1:version 19 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Conformational Transitions calculations tutorial using BioExcel Building Blocks (biobb) and GOdMD\r +\r +This tutorial aims to illustrate the process of computing a conformational transition between two known structural conformations of a protein, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.558.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_godmd/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Protein Conformational Transitions calculations tutorial" ; + schema1:sdDatePublished "2024-07-12 13:27:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/558/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16355 ; + schema1:creator , + ; + schema1:dateCreated "2023-08-11T08:36:46Z" ; + schema1:dateModified "2023-08-11T08:38:31Z" ; + schema1:description """# Protein Conformational Transitions calculations tutorial using BioExcel Building Blocks (biobb) and GOdMD\r +\r +This tutorial aims to illustrate the process of computing a conformational transition between two known structural conformations of a protein, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Protein Conformational Transitions calculations tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_godmd/galaxy/biobb_wf_godmd.ga" ; + schema1:version 1 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "This is the workflow for the biodiversity component of the cultural ecosystems digital twin" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/810?version=1" ; + schema1:isBasedOn "https://biodt.eu/" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for BioDT Cultural Ecosystem Services prototype Digital Twin - Biodiversity Component" ; + schema1:sdDatePublished "2024-07-12 13:22:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/810/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 672 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-04-02T15:45:06Z" ; + schema1:dateModified "2024-05-17T13:15:34Z" ; + schema1:description "This is the workflow for the biodiversity component of the cultural ecosystems digital twin" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "BioDT Cultural Ecosystem Services prototype Digital Twin - Biodiversity Component" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/810?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-06-28T13:49:10.541149" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/rnaseq-sr" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "rnaseq-sr/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-11-15T16:55:13.540629" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Scaffolding-HiC-VGP8/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.17" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "MARS-seq v2 preprocessing pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/996?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/marsseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/marsseq" ; + schema1:sdDatePublished "2024-07-12 13:20:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/996/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9096 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:00Z" ; + schema1:dateModified "2024-06-11T12:55:00Z" ; + schema1:description "MARS-seq v2 preprocessing pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/996?version=4" ; + schema1:keywords "facs-sorting, MARS-seq, single-cell, single-cell-rna-seq, star-solo, transcriptional-dynamics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/marsseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/996?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-07-10T16:11:49.131170" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/quality-and-contamination-control" ; + schema1:license "GPL-3.0-or-later" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "quality-and-contamination-control/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . + + a schema1:Dataset ; + schema1:datePublished "2021-12-20T17:04:47.614081" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/gromacs-dctmd" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "gromacs-dctmd/main" ; + schema1:sdDatePublished "2021-12-21 03:01:01 +0000" ; + schema1:softwareVersion "v0.1.2" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=22" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-07-12 13:22:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=22" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10903 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:13Z" ; + schema1:dateModified "2024-06-11T12:55:13Z" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=22" ; + schema1:version 22 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Summary\r +\r +This tutorial aims to illustrate the process of protein-ligand docking, step by step, using the BioExcel Building Blocks library (biobb). The particular example used is the Mitogen-activated protein kinase 14 (p38-α) protein (PDB code 3HEC), a well-known Protein Kinase enzyme, in complex with the FDA-approved Imatinib, (PDB Ligand code STI, DrugBank Ligand Code DB00619), a small molecule kinase inhibitor used to treat certain types of cancer.\r +\r +Workflow engine is a jupyter notebook. It can be run in binder, following the link given, or locally. Auxiliar libraries used are: nb\\_conda_kernels, nglview, ipywidgets, plotly, and simpletraj. Environment can be setup using the included environment.yml file.\r +\r +\r +# Parameters\r +\r +## Inputs\r +\r +Parameters needed to configure the workflow:\r +\r +* **pdb_code**: PDB code of the experimental complex structure (if exists).\r +* **ligand_code**: Ligand PDB code (3-letter code) for the small molecule (e.g. 
STI).\r +\r +## Outputs\r +\r +Output files generated (named according to the input parameters given above):\r +\r +* **output\\_structure**: generated protein-ligand complex""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.127.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Protein-ligand Docking tutorial using BioExcel Building Blocks (biobb) (jupyter notebook)" ; + schema1:sdDatePublished "2024-07-12 13:33:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/127/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 39034 ; + schema1:creator , + , + ; + schema1:dateCreated "2021-06-28T10:30:46Z" ; + schema1:dateModified "2021-06-28T14:21:23Z" ; + schema1:description """# Summary\r +\r +This tutorial aims to illustrate the process of protein-ligand docking, step by step, using the BioExcel Building Blocks library (biobb). The particular example used is the Mitogen-activated protein kinase 14 (p38-α) protein (PDB code 3HEC), a well-known Protein Kinase enzyme, in complex with the FDA-approved Imatinib, (PDB Ligand code STI, DrugBank Ligand Code DB00619), a small molecule kinase inhibitor used to treat certain types of cancer.\r +\r +Workflow engine is a jupyter notebook. It can be run in binder, following the link given, or locally. Auxiliar libraries used are: nb\\_conda_kernels, nglview, ipywidgets, plotly, and simpletraj. Environment can be setup using the included environment.yml file.\r +\r +\r +# Parameters\r +\r +## Inputs\r +\r +Parameters needed to configure the workflow:\r +\r +* **pdb_code**: PDB code of the experimental complex structure (if exists).\r +* **ligand_code**: Ligand PDB code (3-letter code) for the small molecule (e.g. 
STI).\r +\r +## Outputs\r +\r +Output files generated (named according to the input parameters given above):\r +\r +* **output\\_structure**: generated protein-ligand complex""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/127?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Protein-ligand Docking tutorial using BioExcel Building Blocks (biobb) (jupyter notebook)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/127?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """\r +\r +![Logo](https://cbg-ethz.github.io/V-pipe/img/logo.svg)\r +\r +[![bio.tools](https://img.shields.io/badge/bio-tools-blue.svg)](https://bio.tools/V-Pipe)\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥6.8.1-blue.svg)](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe)\r +[![Deploy Docker image](https://github.com/cbg-ethz/V-pipe/actions/workflows/deploy-docker.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe)\r +[![Tests](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml)\r +[![Mega-Linter](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml)\r +[![License: Apache-2.0](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\r +\r +V-pipe is a workflow designed for the analysis of next generation sequencing (NGS) data from viral pathogens. 
It produces a number of results in a curated format (e.g., consensus sequences, SNV calls, local/global haplotypes).\r +V-pipe is written using the Snakemake workflow management system.\r +\r +## Usage\r +\r +Different ways of initializing V-pipe are presented below. We strongly encourage you to deploy it [using the quick install script](#using-quick-install-script), as this is our preferred method.\r +\r +To configure V-pipe refer to the documentation present in [config/README.md](config/README.md).\r +\r +V-pipe expects the input samples to be organized in a [two-level](config/README.md#samples) directory hierarchy,\r +and the sequencing reads must be provided in a sub-folder named `raw_data`. Further details can be found on the [website](https://cbg-ethz.github.io/V-pipe/usage/).\r +Check the utils subdirectory for [mass-importers tools](utils/README.md#samples-mass-importers) that can assist you in generating this hierarchy.\r +\r +We provide [virus-specific base configuration files](config/README.md#virus-base-config) which contain handy defaults for, e.g., HIV and SARS-CoV-2. 
Set the virus in the general section of the configuration file:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +```\r +\r +Also see [snakemake's documentation](https://snakemake.readthedocs.io/en/stable/executing/cli.html) to learn more about the command-line options available when executing the workflow.\r +\r +### Using quick install script\r +\r +To deploy V-pipe, use the [installation script](utils/README.md#quick-installer) with the following parameters:\r +\r +```bash\r +curl -O 'https://raw.githubusercontent.com/cbg-ethz/V-pipe/master/utils/quick_install.sh'\r +./quick_install.sh -w work\r +```\r +\r +This script will download and install miniconda, checkout the V-pipe git repository (use `-b` to specify which branch/tag) and setup a work directory (specified with `-w`) with an executable script that will execute the workflow:\r +\r +```bash\r +cd work\r +# edit config.yaml and provide samples/ directory\r +./vpipe --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Docker\r +\r +Note: the [docker image](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe) is only setup with components to run the workflow for HIV and SARS-CoV-2 virus base configurations.\r +Using V-pipe with other viruses or configurations might require internet connectivity for additional software components.\r +\r +Create `config.yaml` or `vpipe.config` and then populate the `samples/` directory.\r +For example, the following config file could be used:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +\r +output:\r + snv: true\r + local: true\r + global: false\r + visualization: true\r + QA: true\r +```\r +\r +Then execute:\r +\r +```bash\r +docker run --rm -it -v $PWD:/work ghcr.io/cbg-ethz/v-pipe:master --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Snakedeploy\r +\r +First install [mamba](https://github.com/conda-forge/miniforge#mambaforge), then create and activate an environment with Snakemake and Snakedeploy:\r +\r +```bash\r +mamba create -c 
bioconda -c conda-forge --name snakemake snakemake snakedeploy\r +conda activate snakemake\r +```\r +\r +Snakemake's [official workflow installer Snakedeploy](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe) can now be used:\r +\r +```bash\r +snakedeploy deploy-workflow https://github.com/cbg-ethz/V-pipe --tag master .\r +# edit config/config.yaml and provide samples/ directory\r +snakemake --use-conda --jobs 4 --printshellcmds --dry-run\r +```\r +\r +## Dependencies\r +\r +- **[Conda](https://conda.io/docs/index.html)**\r +\r + Conda is a cross-platform package management system and an environment manager application. Snakemake uses mamba as a package manager.\r +\r +- **[Snakemake](https://snakemake.readthedocs.io/)**\r +\r + Snakemake is the central workflow and dependency manager of V-pipe. It determines the order in which individual tools are invoked and checks that programs do not exit unexpectedly.\r +\r +- **[VICUNA](https://www.broadinstitute.org/viral-genomics/vicuna)**\r +\r + VICUNA is a *de novo* assembly software designed for populations with high mutation rates. It is used to build an initial reference for mapping reads with ngshmmalign aligner when a `references/cohort_consensus.fasta` file is not provided. Further details can be found in the [wiki](https://github.com/cbg-ethz/V-pipe/wiki/getting-started#input-files) pages.\r +\r +### Computational tools\r +\r +Other dependencies are managed by using isolated conda environments per rule, and below we list some of the computational tools integrated in V-pipe:\r +\r +- **[FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)**\r +\r + FastQC gives an overview of the raw sequencing data. Flowcells that have been overloaded or otherwise fail during sequencing can easily be determined with FastQC.\r +\r +- **[PRINSEQ](http://prinseq.sourceforge.net/)**\r +\r + Trimming and clipping of reads is performed by PRINSEQ. 
It is currently the most versatile raw read processor with many customization options.\r +\r +- **[ngshmmalign](https://github.com/cbg-ethz/ngshmmalign)**\r +\r + We perform the alignment of the curated NGS data using our custom ngshmmalign that takes structural variants into account. It produces multiple consensus sequences that include either majority bases or ambiguous bases.\r +\r +- **[bwa](https://github.com/lh3/bwa)**\r +\r + In order to detect specific cross-contaminations with other probes, the Burrows-Wheeler aligner is used. It quickly yields estimates for foreign genomic material in an experiment.\r + Additionally, It can be used as an alternative aligner to ngshmmalign.\r +\r +- **[MAFFT](http://mafft.cbrc.jp/alignment/software/)**\r +\r + To standardise multiple samples to the same reference genome (say HXB2 for HIV-1), the multiple sequence aligner MAFFT is employed. The multiple sequence alignment helps in determining regions of low conservation and thus makes standardisation of alignments more robust.\r +\r +- **[Samtools and bcftools](https://www.htslib.org/)**\r +\r + The Swiss Army knife of alignment postprocessing and diagnostics. 
bcftools is also used to generate consensus sequence with indels.\r +\r +- **[SmallGenomeUtilities](https://github.com/cbg-ethz/smallgenomeutilities)**\r +\r + We perform genomic liftovers to standardised reference genomes using our in-house developed python library of utilities for rewriting alignments.\r +\r +- **[ShoRAH](https://github.com/cbg-ethz/shorah)**\r +\r + ShoRAh performs SNV calling and local haplotype reconstruction by using bayesian clustering.\r +\r +- **[LoFreq](https://csb5.github.io/lofreq/)**\r +\r + LoFreq (version 2) is SNVs and indels caller from next-generation sequencing data, and can be used as an alternative engine for SNV calling.\r +\r +- **[SAVAGE](https://bitbucket.org/jbaaijens/savage) and [Haploclique](https://github.com/cbg-ethz/haploclique)**\r +\r + We use HaploClique or SAVAGE to perform global haplotype reconstruction for heterogeneous viral populations by using an overlap graph.\r +\r +## Citation\r +\r +If you use this software in your research, please cite:\r +\r +Posada-Céspedes S., Seifert D., Topolsky I., Jablonski K.P., Metzner K.J., and Beerenwinkel N. 2021.\r +"V-pipe: a computational pipeline for assessing viral genetic diversity from high-throughput sequencing data."\r +_Bioinformatics_, January. 
doi:[10.1093/bioinformatics/btab015](https://doi.org/10.1093/bioinformatics/btab015).\r +\r +## Contributions\r +\r +- [Ivan Topolsky\\* ![orcid]](https://orcid.org/0000-0002-7561-0810), [![github]](https://github.com/dryak)\r +- [Kim Philipp Jablonski ![orcid]](https://orcid.org/0000-0002-4166-4343), [![github]](https://github.com/kpj)\r +- [Lara Fuhrmann ![orcid]](https://orcid.org/0000-0001-6405-0654), [![github]](https://github.com/LaraFuhrmann)\r +- [Uwe Schmitt ![orcid]](https://orcid.org/0000-0002-4658-0616), [![github]](https://github.com/uweschmitt)\r +- [Monica Dragan ![orcid]](https://orcid.org/0000-0002-7719-5892), [![github]](https://github.com/monicadragan)\r +- [Susana Posada Céspedes ![orcid]](https://orcid.org/0000-0002-7459-8186), [![github]](https://github.com/sposadac)\r +- [David Seifert ![orcid]](https://orcid.org/0000-0003-4739-5110), [![github]](https://github.com/SoapZA)\r +- Tobias Marschall\r +- [Niko Beerenwinkel\\*\\* ![orcid]](https://orcid.org/0000-0002-0573-6119)\r +\r +\\* software maintainer ;\r +\\** group leader\r +\r +[github]: https://cbg-ethz.github.io/V-pipe/img/mark-github.svg\r +[orcid]: https://cbg-ethz.github.io/V-pipe/img/ORCIDiD_iconvector.svg\r +\r +## Contact\r +\r +We encourage users to use the [issue tracker](https://github.com/cbg-ethz/V-pipe/issues). For further enquiries, you can also contact the V-pipe Dev Team .\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/301?version=2" ; + schema1:isBasedOn "https://github.com/cbg-ethz/V-pipe.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for V-pipe (main multi-virus version)" ; + schema1:sdDatePublished "2024-07-12 13:18:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/301/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1308 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-30T16:51:13Z" ; + schema1:dateModified "2022-04-11T09:29:47Z" ; + schema1:description """\r +\r +![Logo](https://cbg-ethz.github.io/V-pipe/img/logo.svg)\r +\r +[![bio.tools](https://img.shields.io/badge/bio-tools-blue.svg)](https://bio.tools/V-Pipe)\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥6.8.1-blue.svg)](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe)\r +[![Deploy Docker image](https://github.com/cbg-ethz/V-pipe/actions/workflows/deploy-docker.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe)\r +[![Tests](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml)\r +[![Mega-Linter](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml)\r +[![License: Apache-2.0](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\r +\r +V-pipe is a workflow designed for the analysis of next generation sequencing (NGS) data from viral pathogens. It produces a number of results in a curated format (e.g., consensus sequences, SNV calls, local/global haplotypes).\r +V-pipe is written using the Snakemake workflow management system.\r +\r +## Usage\r +\r +Different ways of initializing V-pipe are presented below. 
We strongly encourage you to deploy it [using the quick install script](#using-quick-install-script), as this is our preferred method.\r +\r +To configure V-pipe refer to the documentation present in [config/README.md](config/README.md).\r +\r +V-pipe expects the input samples to be organized in a [two-level](config/README.md#samples) directory hierarchy,\r +and the sequencing reads must be provided in a sub-folder named `raw_data`. Further details can be found on the [website](https://cbg-ethz.github.io/V-pipe/usage/).\r +Check the utils subdirectory for [mass-importers tools](utils/README.md#samples-mass-importers) that can assist you in generating this hierarchy.\r +\r +We provide [virus-specific base configuration files](config/README.md#virus-base-config) which contain handy defaults for, e.g., HIV and SARS-CoV-2. Set the virus in the general section of the configuration file:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +```\r +\r +Also see [snakemake's documentation](https://snakemake.readthedocs.io/en/stable/executing/cli.html) to learn more about the command-line options available when executing the workflow.\r +\r +### Using quick install script\r +\r +To deploy V-pipe, use the [installation script](utils/README.md#quick-installer) with the following parameters:\r +\r +```bash\r +curl -O 'https://raw.githubusercontent.com/cbg-ethz/V-pipe/master/utils/quick_install.sh'\r +./quick_install.sh -w work\r +```\r +\r +This script will download and install miniconda, checkout the V-pipe git repository (use `-b` to specify which branch/tag) and setup a work directory (specified with `-w`) with an executable script that will execute the workflow:\r +\r +```bash\r +cd work\r +# edit config.yaml and provide samples/ directory\r +./vpipe --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Docker\r +\r +Note: the [docker image](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe) is only setup with components to run the workflow for HIV and 
SARS-CoV-2 virus base configurations.\r +Using V-pipe with other viruses or configurations might require internet connectivity for additional software components.\r +\r +Create `config.yaml` or `vpipe.config` and then populate the `samples/` directory.\r +For example, the following config file could be used:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +\r +output:\r + snv: true\r + local: true\r + global: false\r + visualization: true\r + QA: true\r +```\r +\r +Then execute:\r +\r +```bash\r +docker run --rm -it -v $PWD:/work ghcr.io/cbg-ethz/v-pipe:master --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Snakedeploy\r +\r +First install [mamba](https://github.com/conda-forge/miniforge#mambaforge), then create and activate an environment with Snakemake and Snakedeploy:\r +\r +```bash\r +mamba create -c bioconda -c conda-forge --name snakemake snakemake snakedeploy\r +conda activate snakemake\r +```\r +\r +Snakemake's [official workflow installer Snakedeploy](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe) can now be used:\r +\r +```bash\r +snakedeploy deploy-workflow https://github.com/cbg-ethz/V-pipe --tag master .\r +# edit config/config.yaml and provide samples/ directory\r +snakemake --use-conda --jobs 4 --printshellcmds --dry-run\r +```\r +\r +## Dependencies\r +\r +- **[Conda](https://conda.io/docs/index.html)**\r +\r + Conda is a cross-platform package management system and an environment manager application. Snakemake uses mamba as a package manager.\r +\r +- **[Snakemake](https://snakemake.readthedocs.io/)**\r +\r + Snakemake is the central workflow and dependency manager of V-pipe. It determines the order in which individual tools are invoked and checks that programs do not exit unexpectedly.\r +\r +- **[VICUNA](https://www.broadinstitute.org/viral-genomics/vicuna)**\r +\r + VICUNA is a *de novo* assembly software designed for populations with high mutation rates. 
It is used to build an initial reference for mapping reads with ngshmmalign aligner when a `references/cohort_consensus.fasta` file is not provided. Further details can be found in the [wiki](https://github.com/cbg-ethz/V-pipe/wiki/getting-started#input-files) pages.\r +\r +### Computational tools\r +\r +Other dependencies are managed by using isolated conda environments per rule, and below we list some of the computational tools integrated in V-pipe:\r +\r +- **[FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)**\r +\r + FastQC gives an overview of the raw sequencing data. Flowcells that have been overloaded or otherwise fail during sequencing can easily be determined with FastQC.\r +\r +- **[PRINSEQ](http://prinseq.sourceforge.net/)**\r +\r + Trimming and clipping of reads is performed by PRINSEQ. It is currently the most versatile raw read processor with many customization options.\r +\r +- **[ngshmmalign](https://github.com/cbg-ethz/ngshmmalign)**\r +\r + We perform the alignment of the curated NGS data using our custom ngshmmalign that takes structural variants into account. It produces multiple consensus sequences that include either majority bases or ambiguous bases.\r +\r +- **[bwa](https://github.com/lh3/bwa)**\r +\r + In order to detect specific cross-contaminations with other probes, the Burrows-Wheeler aligner is used. It quickly yields estimates for foreign genomic material in an experiment.\r + Additionally, It can be used as an alternative aligner to ngshmmalign.\r +\r +- **[MAFFT](http://mafft.cbrc.jp/alignment/software/)**\r +\r + To standardise multiple samples to the same reference genome (say HXB2 for HIV-1), the multiple sequence aligner MAFFT is employed. The multiple sequence alignment helps in determining regions of low conservation and thus makes standardisation of alignments more robust.\r +\r +- **[Samtools and bcftools](https://www.htslib.org/)**\r +\r + The Swiss Army knife of alignment postprocessing and diagnostics. 
bcftools is also used to generate consensus sequence with indels.\r +\r +- **[SmallGenomeUtilities](https://github.com/cbg-ethz/smallgenomeutilities)**\r +\r + We perform genomic liftovers to standardised reference genomes using our in-house developed python library of utilities for rewriting alignments.\r +\r +- **[ShoRAH](https://github.com/cbg-ethz/shorah)**\r +\r + ShoRAh performs SNV calling and local haplotype reconstruction by using bayesian clustering.\r +\r +- **[LoFreq](https://csb5.github.io/lofreq/)**\r +\r + LoFreq (version 2) is SNVs and indels caller from next-generation sequencing data, and can be used as an alternative engine for SNV calling.\r +\r +- **[SAVAGE](https://bitbucket.org/jbaaijens/savage) and [Haploclique](https://github.com/cbg-ethz/haploclique)**\r +\r + We use HaploClique or SAVAGE to perform global haplotype reconstruction for heterogeneous viral populations by using an overlap graph.\r +\r +## Citation\r +\r +If you use this software in your research, please cite:\r +\r +Posada-Céspedes S., Seifert D., Topolsky I., Jablonski K.P., Metzner K.J., and Beerenwinkel N. 2021.\r +"V-pipe: a computational pipeline for assessing viral genetic diversity from high-throughput sequencing data."\r +_Bioinformatics_, January. 
doi:[10.1093/bioinformatics/btab015](https://doi.org/10.1093/bioinformatics/btab015).\r +\r +## Contributions\r +\r +- [Ivan Topolsky\\* ![orcid]](https://orcid.org/0000-0002-7561-0810), [![github]](https://github.com/dryak)\r +- [Kim Philipp Jablonski ![orcid]](https://orcid.org/0000-0002-4166-4343), [![github]](https://github.com/kpj)\r +- [Lara Fuhrmann ![orcid]](https://orcid.org/0000-0001-6405-0654), [![github]](https://github.com/LaraFuhrmann)\r +- [Uwe Schmitt ![orcid]](https://orcid.org/0000-0002-4658-0616), [![github]](https://github.com/uweschmitt)\r +- [Monica Dragan ![orcid]](https://orcid.org/0000-0002-7719-5892), [![github]](https://github.com/monicadragan)\r +- [Susana Posada Céspedes ![orcid]](https://orcid.org/0000-0002-7459-8186), [![github]](https://github.com/sposadac)\r +- [David Seifert ![orcid]](https://orcid.org/0000-0003-4739-5110), [![github]](https://github.com/SoapZA)\r +- Tobias Marschall\r +- [Niko Beerenwinkel\\*\\* ![orcid]](https://orcid.org/0000-0002-0573-6119)\r +\r +\\* software maintainer ;\r +\\** group leader\r +\r +[github]: https://cbg-ethz.github.io/V-pipe/img/mark-github.svg\r +[orcid]: https://cbg-ethz.github.io/V-pipe/img/ORCIDiD_iconvector.svg\r +\r +## Contact\r +\r +We encourage users to use the [issue tracker](https://github.com/cbg-ethz/V-pipe/issues). For further enquiries, you can also contact the V-pipe Dev Team .\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/301?version=4" ; + schema1:keywords "Alignment, Assembly, covid-19, Genomics, INDELs, rna, SNPs, variant_calling, workflow" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "V-pipe (main multi-virus version)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/301?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-11-23T13:33:07.949422" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Differential abundance analysis" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/981?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/differentialabundance" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/differentialabundance" ; + schema1:sdDatePublished "2024-07-12 13:21:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/981/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12719 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "Differential abundance analysis" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/981?version=8" ; + schema1:keywords "ATAC-seq, ChIP-seq, deseq2, differential-abundance, differential-expression, gsea, limma, microarray, rna-seq, shiny" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/differentialabundance" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/981?version=4" ; + schema1:version 4 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/383?version=1" ; + schema1:isBasedOn "https://gitlab.bsc.es/lrodrig1/structuralvariants_poc/-/tree/1.1.3/structuralvariants/nextflow" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNV_pipeline" ; + schema1:sdDatePublished "2024-07-12 13:35:13 +0100" ; + schema1:url "https://workflowhub.eu/workflows/383/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4361 ; + schema1:creator , + ; + schema1:dateCreated "2022-09-09T10:38:14Z" ; + schema1:dateModified "2022-09-09T10:39:53Z" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/383?version=2" ; + schema1:keywords "CODEX2, TransBioNet, ExomeDepth, variant calling, cancer, manta, GRIDS, structural variants" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CNV_pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/383?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "CLIP analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/973?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/clipseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/clipseq" ; + schema1:sdDatePublished "2024-07-12 13:22:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/973/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6003 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:45Z" ; + schema1:dateModified "2024-06-11T12:54:45Z" ; + schema1:description "CLIP analysis pipeline" ; + schema1:keywords "clip, clip-seq, peak-calling, rna-rbp-interactions" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/clipseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/973?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-07-12 13:20:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7157 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:58Z" ; + schema1:dateModified "2024-06-11T12:54:58Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=28" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-07-12 13:22:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=28" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12883 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:13Z" ; + schema1:dateModified "2024-06-11T12:55:13Z" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=28" ; + schema1:version 28 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=16" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-07-12 13:18:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=16" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10659 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:39Z" ; + schema1:dateModified "2024-06-11T12:54:39Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=16" ; + schema1:version 16 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# MGnify genomes catalogue pipeline\r +\r +[MGnify](https://www.ebi.ac.uk/metagenomics/) A pipeline to perform taxonomic and functional annotation and to generate a catalogue from a set of isolate and/or metagenome-assembled genomes (MAGs) using the workflow described in the following publication:\r +\r +Gurbich TA, Almeida A, Beracochea M, Burdett T, Burgin J, Cochrane G, Raj S, Richardson L, Rogers AB, Sakharova E, Salazar GA and Finn RD. (2023) [MGnify Genomes: A Resource for Biome-specific Microbial Genome Catalogues.](https://www.sciencedirect.com/science/article/pii/S0022283623000724) J Mol Biol. doi: https://doi.org/10.1016/j.jmb.2023.168016\r +\r +Detailed information about existing MGnify catalogues: https://docs.mgnify.org/src/docs/genome-viewer.html\r +\r +### Tools used in the pipeline\r +| Tool/Database | Version | Purpose |\r +| ----------- | ----------- |----------- |\r +| CheckM | 1.1.3 | Determining genome quality |\r +| dRep | 3.2.2 | Genome clustering |\r +| Mash | 2.3 | Sketch for the catalogue; placement of genomes into clusters (update only); strain tree |\r +| GUNC | 1.0.3 | Quality control |\r +| GTDB-Tk | 2.1.0 | Assigning taxonomy; generating alignments |\r +| GTDB | r207_v2 | Database for GTDB-Tk |\r +| Prokka | 1.14.6 | Protein annotation |\r +| IQ-TREE 2 | 2.2.0.3 | Generating a phylogenetic tree |\r +| Kraken 2 | 2.1.2 | Generating a kraken database |\r +| Bracken | 2.6.2 | Generating a bracken database |\r +| MMseqs2 | 13.45111 | Generating a protein catalogue |\r +| eggNOG-mapper | 2.1.3 | Protein annotation (eggNOG, KEGG, COG, CAZy) |\r +| InterProScan | 5.57-90.0 | Protein annotation (InterPro, Pfam) |\r +| CRISPRCasFinder | 4.3.2 | Annotation of CRISPR arrays |\r +| AMRFinderPlus | 3.11.4 | Antimicrobial resistance gene annotation; virulence factors, biocide, heat, acid, and metal resistance 
gene annotation |\r +| AMRFinderPlus DB | 3.11 2023-02-23.1 | Database for AMRFinderPlus |\r +| SanntiS | 0.9.3.2 | Biosynthetic gene cluster annotation |\r +| Infernal | 1.1.4 | RNA predictions |\r +| tRNAscan-SE | 2.0.9 | tRNA predictions |\r +| Rfam | 14.6 | Identification of SSU/LSU rRNA and other ncRNAs |\r +| Panaroo | 1.3.2 | Pan-genome computation |\r +| Seqtk | 1.3 | Generating a gene catalogue |\r +| VIRify | - | Viral sequence annotation |\r +| MoMofy | 1.0.0 | Mobilome annotation |\r +| samtools | 1.15 | FASTA indexing |\r +\r +## Setup\r +\r +### Environment\r +\r +The pipeline is implemented in [Nextflow](https://www.nextflow.io/).\r +\r +Requirements:\r +- [singulairty](https://sylabs.io/docs/) or [docker](https://www.docker.com/)\r +\r +#### Reference databases\r +\r +The pipeline needs the following reference databases and configuration files (roughtly ~150G):\r +\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/gunc_db_2.0.4.dmnd.gz\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/eggnog_db.tgz\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/rfams_cms/\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/kegg_classes.tsv\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/ncrna/\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/continent_countries.csv\r +- https://data.gtdb.ecogenomic.org/releases/release207/207.0/auxillary_files/gtdbtk_r207_v2_data.tar.gz\r +\r +### Containers\r +\r +This pipeline requires [singularity](https://sylabs.io/docs/) or [docker](https://www.docker.com/) as the container engine to run pipeline.\r +\r +The containers are hosted in [biocontainers](https://biocontainers.pro/) and [quay.io/microbiome-informatics](https://quay.io/organization/microbiome-informatics) repository.\r +\r +It's possible to build the containers from scratch using the following script:\r +\r +```bash\r +cd containers && bash build.sh\r 
+```\r +\r +## Running the pipeline\r +\r +## Data preparation\r +\r +1. You need to pre-download your data to directories and make sure that genomes are uncompressed. Scripts to fetch genomes from ENA ([fetch_ena.py](https://github.com/EBI-Metagenomics/genomes-pipeline/blob/master/containers/genomes-catalog-update/scripts/fetch_ena.py)) and NCBI ([fetch_ncbi.py](https://github.com/EBI-Metagenomics/genomes-pipeline/blob/master/containers/genomes-catalog-update/scripts/fetch_ncbi.py)) are provided and need to be executed separately from the pipeline. If you have downloaded genomes from both ENA and NCBI, put them into separate folders.\r +\r +2. When genomes are fetched from ENA using the `fetch_ena.py` script, a CSV file with contamination and completeness statistics is also created in the same directory where genomes are saved to. If you are downloading genomes using a different approach, a CSV file needs to be created manually (each line should be genome accession, % completeness, % contamination). The ENA fetching script also pre-filters genomes to satisfy the QS50 cut-off (QS = % completeness - 5 * % contamination). \r +\r +3. You will need the following information to run the pipeline:\r + - catalogue name (for example, zebrafish-faecal)\r + - catalogue version (for example, 1.0)\r + - catalogue biome (for example, root:Host-associated:Human:Digestive system:Large intestine:Fecal)\r + - min and max accession number to be assigned to the genomes (only MGnify specific). 
Max - Min = #total number of genomes (NCBI+ENA)\r +\r +### Execution\r +\r +The pipeline is built in [Nextflow](https://www.nextflow.io), and utilized containers to run the software (we don't support conda ATM).\r +In order to run the pipeline it's required that the user creates a profile that suits their needs, there is an `ebi` profile in `nexflow.config` that can be used as template.\r +\r +After downloading the databases and adjusting the config file:\r +\r +```bash\r +nextflow run EBI-Metagenomics/genomes-pipeline -c -profile \\\r +--genome-prefix=MGYG \\\r +--biome="root:Host-associated:Fish:Digestive system" \\\r +--ena_genomes= \\\r +--ena_genomes_checkm= \\\r +--mgyg_start=0 \\\r +--mgyg_end=10 \\\r +--catalogue_name=zebrafish-faecal \\\r +--catalogue_version="1.0" \\\r +--ftp_name="zebrafish-faecal" \\\r +--ftp_version="v1.0" \\\r +--outdir=""\r +```\r +\r +### Development\r +\r +Install development tools (including pre-commit hooks to run Black code formatting).\r +\r +```bash\r +pip install -r requirements-dev.txt\r +pre-commit install\r +```\r +\r +#### Code style\r +\r +Use Black, this tool is configured if you install the pre-commit tools as above.\r +\r +To manually run them: black .\r +\r +### Testing\r +\r +This repo has 2 set of tests, python unit tests for some of the most critical python scripts and [nf-test](https://github.com/askimed/nf-test) scripts for the nextflow code.\r +\r +To run the python tests\r +\r +```bash\r +pip install -r requirements-test.txt\r +pytest\r +```\r +\r +To run the nextflow ones the databases have to downloaded manually, we are working to improve this.\r +\r +```bash\r +nf-test test tests/*\r +```\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/462?version=1" ; + schema1:isBasedOn "https://github.com/EBI-Metagenomics/genomes-pipeline.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for MGnify genomes catalogue pipeline" ; + 
schema1:sdDatePublished "2024-07-12 13:22:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/462/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 129 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-04-28T09:36:34Z" ; + schema1:dateModified "2024-05-23T11:20:39Z" ; + schema1:description """# MGnify genomes catalogue pipeline\r +\r +[MGnify](https://www.ebi.ac.uk/metagenomics/) A pipeline to perform taxonomic and functional annotation and to generate a catalogue from a set of isolate and/or metagenome-assembled genomes (MAGs) using the workflow described in the following publication:\r +\r +Gurbich TA, Almeida A, Beracochea M, Burdett T, Burgin J, Cochrane G, Raj S, Richardson L, Rogers AB, Sakharova E, Salazar GA and Finn RD. (2023) [MGnify Genomes: A Resource for Biome-specific Microbial Genome Catalogues.](https://www.sciencedirect.com/science/article/pii/S0022283623000724) J Mol Biol. 
doi: https://doi.org/10.1016/j.jmb.2023.168016\r +\r +Detailed information about existing MGnify catalogues: https://docs.mgnify.org/src/docs/genome-viewer.html\r +\r +### Tools used in the pipeline\r +| Tool/Database | Version | Purpose |\r +| ----------- | ----------- |----------- |\r +| CheckM | 1.1.3 | Determining genome quality |\r +| dRep | 3.2.2 | Genome clustering |\r +| Mash | 2.3 | Sketch for the catalogue; placement of genomes into clusters (update only); strain tree |\r +| GUNC | 1.0.3 | Quality control |\r +| GTDB-Tk | 2.1.0 | Assigning taxonomy; generating alignments |\r +| GTDB | r207_v2 | Database for GTDB-Tk |\r +| Prokka | 1.14.6 | Protein annotation |\r +| IQ-TREE 2 | 2.2.0.3 | Generating a phylogenetic tree |\r +| Kraken 2 | 2.1.2 | Generating a kraken database |\r +| Bracken | 2.6.2 | Generating a bracken database |\r +| MMseqs2 | 13.45111 | Generating a protein catalogue |\r +| eggNOG-mapper | 2.1.3 | Protein annotation (eggNOG, KEGG, COG, CAZy) |\r +| InterProScan | 5.57-90.0 | Protein annotation (InterPro, Pfam) |\r +| CRISPRCasFinder | 4.3.2 | Annotation of CRISPR arrays |\r +| AMRFinderPlus | 3.11.4 | Antimicrobial resistance gene annotation; virulence factors, biocide, heat, acid, and metal resistance gene annotation |\r +| AMRFinderPlus DB | 3.11 2023-02-23.1 | Database for AMRFinderPlus |\r +| SanntiS | 0.9.3.2 | Biosynthetic gene cluster annotation |\r +| Infernal | 1.1.4 | RNA predictions |\r +| tRNAscan-SE | 2.0.9 | tRNA predictions |\r +| Rfam | 14.6 | Identification of SSU/LSU rRNA and other ncRNAs |\r +| Panaroo | 1.3.2 | Pan-genome computation |\r +| Seqtk | 1.3 | Generating a gene catalogue |\r +| VIRify | - | Viral sequence annotation |\r +| MoMofy | 1.0.0 | Mobilome annotation |\r +| samtools | 1.15 | FASTA indexing |\r +\r +## Setup\r +\r +### Environment\r +\r +The pipeline is implemented in [Nextflow](https://www.nextflow.io/).\r +\r +Requirements:\r +- [singulairty](https://sylabs.io/docs/) or 
[docker](https://www.docker.com/)\r +\r +#### Reference databases\r +\r +The pipeline needs the following reference databases and configuration files (roughtly ~150G):\r +\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/gunc_db_2.0.4.dmnd.gz\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/eggnog_db.tgz\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/rfams_cms/\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/kegg_classes.tsv\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/ncrna/\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/continent_countries.csv\r +- https://data.gtdb.ecogenomic.org/releases/release207/207.0/auxillary_files/gtdbtk_r207_v2_data.tar.gz\r +\r +### Containers\r +\r +This pipeline requires [singularity](https://sylabs.io/docs/) or [docker](https://www.docker.com/) as the container engine to run pipeline.\r +\r +The containers are hosted in [biocontainers](https://biocontainers.pro/) and [quay.io/microbiome-informatics](https://quay.io/organization/microbiome-informatics) repository.\r +\r +It's possible to build the containers from scratch using the following script:\r +\r +```bash\r +cd containers && bash build.sh\r +```\r +\r +## Running the pipeline\r +\r +## Data preparation\r +\r +1. You need to pre-download your data to directories and make sure that genomes are uncompressed. Scripts to fetch genomes from ENA ([fetch_ena.py](https://github.com/EBI-Metagenomics/genomes-pipeline/blob/master/containers/genomes-catalog-update/scripts/fetch_ena.py)) and NCBI ([fetch_ncbi.py](https://github.com/EBI-Metagenomics/genomes-pipeline/blob/master/containers/genomes-catalog-update/scripts/fetch_ncbi.py)) are provided and need to be executed separately from the pipeline. If you have downloaded genomes from both ENA and NCBI, put them into separate folders.\r +\r +2. 
When genomes are fetched from ENA using the `fetch_ena.py` script, a CSV file with contamination and completeness statistics is also created in the same directory where genomes are saved to. If you are downloading genomes using a different approach, a CSV file needs to be created manually (each line should be genome accession, % completeness, % contamination). The ENA fetching script also pre-filters genomes to satisfy the QS50 cut-off (QS = % completeness - 5 * % contamination). \r +\r +3. You will need the following information to run the pipeline:\r + - catalogue name (for example, zebrafish-faecal)\r + - catalogue version (for example, 1.0)\r + - catalogue biome (for example, root:Host-associated:Human:Digestive system:Large intestine:Fecal)\r + - min and max accession number to be assigned to the genomes (only MGnify specific). Max - Min = #total number of genomes (NCBI+ENA)\r +\r +### Execution\r +\r +The pipeline is built in [Nextflow](https://www.nextflow.io), and utilized containers to run the software (we don't support conda ATM).\r +In order to run the pipeline it's required that the user creates a profile that suits their needs, there is an `ebi` profile in `nexflow.config` that can be used as template.\r +\r +After downloading the databases and adjusting the config file:\r +\r +```bash\r +nextflow run EBI-Metagenomics/genomes-pipeline -c -profile \\\r +--genome-prefix=MGYG \\\r +--biome="root:Host-associated:Fish:Digestive system" \\\r +--ena_genomes= \\\r +--ena_genomes_checkm= \\\r +--mgyg_start=0 \\\r +--mgyg_end=10 \\\r +--catalogue_name=zebrafish-faecal \\\r +--catalogue_version="1.0" \\\r +--ftp_name="zebrafish-faecal" \\\r +--ftp_version="v1.0" \\\r +--outdir=""\r +```\r +\r +### Development\r +\r +Install development tools (including pre-commit hooks to run Black code formatting).\r +\r +```bash\r +pip install -r requirements-dev.txt\r +pre-commit install\r +```\r +\r +#### Code style\r +\r +Use Black, this tool is configured if you install the 
pre-commit tools as above.\r +\r +To manually run them: black .\r +\r +### Testing\r +\r +This repo has 2 set of tests, python unit tests for some of the most critical python scripts and [nf-test](https://github.com/askimed/nf-test) scripts for the nextflow code.\r +\r +To run the python tests\r +\r +```bash\r +pip install -r requirements-test.txt\r +pytest\r +```\r +\r +To run the nextflow ones the databases have to downloaded manually, we are working to improve this.\r +\r +```bash\r +nf-test test tests/*\r +```\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/462?version=1" ; + schema1:keywords "Metagenomics, Nextflow, Bioinformatics" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "MGnify genomes catalogue pipeline" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/462?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=13" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-07-12 13:20:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=13" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14588 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:59Z" ; + schema1:dateModified "2024-06-11T12:54:59Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=13" ; + schema1:version 13 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=17" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-07-12 13:20:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=17" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9238 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:04Z" ; + schema1:dateModified "2024-06-11T12:55:04Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=17" ; + schema1:version 17 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.881.1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Fgenesh annotation -TSI" ; + schema1:sdDatePublished "2024-07-12 13:18:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/881/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17572 ; + schema1:creator ; + schema1:dateCreated "2024-05-08T07:28:08Z" ; + schema1:dateModified "2024-05-09T04:10:32Z" ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/881?version=3" ; + schema1:isPartOf ; + schema1:keywords "TSI-annotation" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Fgenesh annotation -TSI" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/881?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 1040040 . + + a schema1:Dataset ; + schema1:datePublished "2024-05-30T07:28:39.831046" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-sr/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# Snakemake workflow: Reconstructing raw tomography data\r +\r +A Snakemake worfklow for tomographically reconstructing raw data using [tomopy](https://tomopy.readthedocs.io/en/stable/).\r +\r +## Installation\r +\r +First download this repo and navigate to it\r +```bash\r +git clone https://codebase.helmholtz.cloud/gernha62/reconstructing-raw-tomography-data.git\r +```\r +```bash\r +cd /path/to/repo\r +```\r +(Optional) Download the example folder with:\r +```bash\r +wget -m -np https://doi2.psi.ch/datasets/das/work/p15/p15869/compression/MI04_02/tif\r +```\r +Create a virtual environment and install all necessary packages (requires conda): \r +```bash\r +conda env create --name reconstr_env --file workflow/envs/reconstr.yml\r +```\r +Activate the new virtual environment: \r +```bash\r +conda activate reconstr_env\r +```\r +\r +## Configuration\r +\r +To configure the workflow, adapt the config file found at `config/config.yaml` . The config looks as follows:\r +```yaml\r +number_of_darks: 50\r +number_of_flats: 100\r +number_of_projections: 501\r +rotation_center: 508.77\r +raw_data:\r + MI04_02: doi2.psi.ch/datasets/das/work/p15/p15869/compression/MI04_02/tif\r +```\r + In the config, adjust `number_of_darks`, `number_of_flats`, `number_of_projections` and `rotation_center` to the number of darks, flats, projections and the rotation center of your dataset. The necessary information can usually be found in the .log file of the folder that contains the raw data. \r +\r +`MI04_02: doi2.psi.ch/datasets/das/work/p15/p15869/compression/MI04_02/tif` denotes the path to the example folder used for reconstruction and the keyword `MI04_02` will be used to name the output (e.g. in this case the output folder will be named `recon_dir_MI04_02`). Replace the examle path with the path to the dataset you want to reconstruct. 
Additionally, if you want the name of the output folder to have a different suffix, replace the keyword `MI04_02` with a name you prefer.\r +\r +## Run the workflow\r +\r +If the .tif files contain a numerical prefix that is not separated from the actual image index, it is best to first rename the files. The files will be renamed to `00001.tif`, `00002.tif` and so on. If the renaming is needed, run:\r +\r +```bash\r +snakemake --cores 1 'logs/renamefile_MI04_02.log'\r +```\r +If you replaced the keyword `MI04_02` in the config file then adjust the command accordingly (e.g. if you replaced the keyword with `Tomo_dataset` then the command should be `snakemake --cores 1 'logs/renamefile_Tomo_dataset.log'`).\r +\r +Before trying to compute the reconstructions, make sure you have enough memory available (ideally more than 60 GB).\r +To compute the reconstructions using one core, use the command:\r +```bash\r +snakemake --cores 1\r +```\r +If you want to use all available cores instead, use:\r +```bash\r +snakemake --cores all\r +```\r +This creates a folder in `results` with the reconstructed data.\r +\r +## Credit\r +The example dataset used in this project (MI04_02 evolving magma, Mattia Pistone, University of Georgia) was taken from: https://doi.psi.ch/detail/10.16907/05a50450-767f-421d-9832-342b57c201af\r +\r +The script used for reconstruction (`scripts/reconstructs_tomo_datasets.py`) was provided by Alain Studer, PSI.""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.432.1" ; + schema1:isBasedOn "https://codebase.helmholtz.cloud/gernha62/reconstructing-raw-tomography-data.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Reconstructing raw tomography data" ; + schema1:sdDatePublished "2024-07-12 13:34:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/432/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1061 ; + schema1:creator ; + schema1:dateCreated "2023-02-02T13:46:59Z" ; + schema1:dateModified "2023-02-02T13:50:16Z" ; + schema1:description """# Snakemake workflow: Reconstructing raw tomography data\r +\r +A Snakemake worfklow for tomographically reconstructing raw data using [tomopy](https://tomopy.readthedocs.io/en/stable/).\r +\r +## Installation\r +\r +First download this repo and navigate to it\r +```bash\r +git clone https://codebase.helmholtz.cloud/gernha62/reconstructing-raw-tomography-data.git\r +```\r +```bash\r +cd /path/to/repo\r +```\r +(Optional) Download the example folder with:\r +```bash\r +wget -m -np https://doi2.psi.ch/datasets/das/work/p15/p15869/compression/MI04_02/tif\r +```\r +Create a virtual environment and install all necessary packages (requires conda): \r +```bash\r +conda env create --name reconstr_env --file workflow/envs/reconstr.yml\r +```\r +Activate the new virtual environment: \r +```bash\r +conda activate reconstr_env\r +```\r +\r +## Configuration\r +\r +To configure the workflow, adapt the config file found at `config/config.yaml` . The config looks as follows:\r +```yaml\r +number_of_darks: 50\r +number_of_flats: 100\r +number_of_projections: 501\r +rotation_center: 508.77\r +raw_data:\r + MI04_02: doi2.psi.ch/datasets/das/work/p15/p15869/compression/MI04_02/tif\r +```\r + In the config, adjust `number_of_darks`, `number_of_flats`, `number_of_projections` and `rotation_center` to the number of darks, flats, projections and the rotation center of your dataset. The necessary information can usually be found in the .log file of the folder that contains the raw data. 
\r +\r +`MI04_02: doi2.psi.ch/datasets/das/work/p15/p15869/compression/MI04_02/tif` denotes the path to the example folder used for reconstruction and the keyword `MI04_02` will be used to name the output (e.g. in this case the output folder will be named `recon_dir_MI04_02`). Replace the examle path with the path to the dataset you want to reconstruct. Additionally, if you want the name of the output folder to have a different suffix, replace the keyword `MI04_02` with a name you prefer.\r +\r +## Run the workflow\r +\r +If the .tif files contain a numerical prefix that is not separated from the actual image index, it is best to first rename the files. The files will be renamed to `00001.tif`, `00002.tif` and so on. If the renaming is needed, run:\r +\r +```bash\r +snakemake --cores 1 'logs/renamefile_MI04_02.log'\r +```\r +If you replaced the keyword `MI04_02` in the config file then adjust the command accordingly (e.g. if you replaced the keyword with `Tomo_dataset` then the command should be `snakemake --cores 1 'logs/renamefile_Tomo_dataset.log'`).\r +\r +Before trying to compute the reconstructions, make sure you have enough memory available (ideally more than 60 GB).\r +To compute the reconstructions using one core, use the command:\r +```bash\r +snakemake --cores 1\r +```\r +If you want to use all available cores instead, use:\r +```bash\r +snakemake --cores all\r +```\r +This creates a folder in `results` with the reconstructed data.\r +\r +## Credit\r +The example dataset used in this project (MI04_02 evolving magma, Mattia Pistone, University of Georgia) was taken from: https://doi.psi.ch/detail/10.16907/05a50450-767f-421d-9832-342b57c201af\r +\r +The script used for reconstruction (`scripts/reconstructs_tomo_datasets.py`) was provided by Alain Studer, PSI.""" ; + schema1:keywords "Reconstruction, Tomography" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Reconstructing raw tomography data" ; + schema1:producer ; + 
schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/432?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-15T08:48:07.467425" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Scaffolding-Bionano-VGP7" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Scaffolding-Bionano-VGP7/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.17" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# gene2phylo\r +\r +**gene2phylo** is a snakemake pipeline for batch phylogenetic analysis of a given set of input genes. \r +\r +## Contents\r + - [Setup](#setup)\r + - [Example data](#example-data)\r + - [Input](#input)\r + - [Output](#output)\r + - [Running your own data](#running-your-own-data)\r + - [Getting help](#getting-help)\r + - [Citations](#citations)\r +\r +## Setup\r +\r +The pipeline is written in Snakemake and uses conda to install the necessary tools.\r +\r +It is *strongly recommended* to install conda using Mambaforge. See details here https://snakemake.readthedocs.io/en/stable/getting_started/installation.html\r +\r +Once conda is installed, you can pull the github repo and set up the base conda environment.\r +\r +```\r +# get github repo\r +git clone https://github.com/o-william-white/gene2phylo\r +\r +# change dir\r +cd gene2phylo\r +\r +# setup conda env\r +conda env create -n snakemake -f workflow/envs/conda_env.yaml\r +```\r +\r +
\r +\r +
\r +\r +## Example data\r +\r +Before you run your own data, it is recommended to run the example datasets provided . This will confirm there are no user-specific issues with the setup and it also installs all the dependencies. The example data includes mitochondrial and ribosomal genes from 25 different butterfly species. \r +\r +To run the example data, use the code below. The first time you run the pipeline, it will take some time to install each of the conda environments, so it is a good time to take a tea break :).\r +```\r +conda activate snakemake\r +\r +snakemake \\\r + --cores 4 \\\r + --use-conda\r +```\r +\r +
\r +\r +
\r +\r +## Input\r +\r +Snakemake requires a `config.yaml` to define input parameters. \r +\r +For the example data provided, the config file is located here `config/config.yaml` and it looks like this:\r +```\r +# name of input directory containg genes\r +input_dir: .test\r +\r +# realign (True or False)\r +realign: True\r +\r +# alignment missing data threshold for alignment (0.0 - 1.0), only required if realign == True\r +missing_threshold: 0.5\r +\r +# alignment trimming method to use (gblocks or clipkit), only required if realign == True\r +alignment_trim: gblocks\r +\r +# name of outgroup sample (optional)\r +# use "NA" if there is no obvious outgroup\r +# if more than one outgroup use a comma separated list i.e. "sampleA,sampleB"\r +outgroup: Eurema_blanda\r +\r +# plot dimensions (cm)\r +plot_height: 20\r +plot_width: 20\r +```\r +\r +
\r +\r +
\r +\r +## Output\r +\r +All output files are saved to the `results` direcotry. Below is a table summarising all of the output files generated by the pipeline.\r +\r +| Directory | Description |\r +|---------------------------|---------------------------|\r +| mafft | Optional: Mafft aligned fasta files of all genes |\r +| mafft_filtered | Optional: Mafft aligned fasta files after the removal of sequences based on a missing data threshold |\r +| alignment_trim | Optional: Ambiguous parts of alignment removed using either gblocks or clipkit |\r +| iqtree | Iqtree phylogenetic analysis for each gene |\r +| iqtree_plots | Plots of Iqtree phylogenetic tree for each gene |\r +| concatenate_alignments | Partitioned alignment of all genes |\r +| iqtree_partitioned | Iqtree partitioned phylogenetic analysis |\r +| iqtree_partitioned_plot | Plot of Iqtree partitioned tree |\r +| astral | Astral phylogenetic analysis of all gene trees |\r +| astral_plot | Plot of Astral tree |\r +\r +
\r +\r +
\r +\r +## Running your own data\r +\r +For the pipeline to function properly, the input gene alignments must be: \r +- in a single directory \r +- end with ".fasta"\r +- named after the aligned gene (e.g. "cox1.fasta" or "28S.fasta")\r +- share identical sample names across alignments (e.g. all genes from sample A share the same name)\r +\r +Please see the example data in the `.test/` directory as an example. \r +\r +Then you need to generate your own config.yaml file, using the example template provided.\r +\r +## Getting help\r +\r +If you have any questions, please do get in touch in the issues or by email o.william.white@gmail.com\r +\r +
\r +\r +
\r +\r +## Citations\r +\r +If you use the pipeline, please cite our bioarxiv preprint: https://doi.org/10.1101/2023.08.11.552985\r +\r +Since the pipeline is a wrapper for several other bioinformatic tools we also ask that you cite the tools used by the pipeline:\r + - Gblocks (default) https://doi.org/10.1093/oxfordjournals.molbev.a026334\r + - Clipkit (optional) https://doi.org/10.1371/journal.pbio.3001007\r + - Mafft (optional) https://doi.org/10.1093/molbev/mst010\r + - Iqtree https://doi.org/10.1093/molbev/msu300\r + - Ete3 https://doi.org/10.1093/molbev/msw046\r + - Ggtree https://doi.org/10.1111/2041-210X.12628\r + - Astral https://doi.org/10.1186/s12859-018-2129-y\r +\r +
\r +\r +
\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/793?version=1" ; + schema1:isBasedOn "https://github.com/o-william-white/gene2phylo.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for gene2phylo" ; + schema1:sdDatePublished "2024-07-12 13:23:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/793/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 929 ; + schema1:dateCreated "2024-03-13T13:36:20Z" ; + schema1:dateModified "2024-03-21T13:56:02Z" ; + schema1:description """# gene2phylo\r +\r +**gene2phylo** is a snakemake pipeline for batch phylogenetic analysis of a given set of input genes. \r +\r +## Contents\r + - [Setup](#setup)\r + - [Example data](#example-data)\r + - [Input](#input)\r + - [Output](#output)\r + - [Running your own data](#running-your-own-data)\r + - [Getting help](#getting-help)\r + - [Citations](#citations)\r +\r +## Setup\r +\r +The pipeline is written in Snakemake and uses conda to install the necessary tools.\r +\r +It is *strongly recommended* to install conda using Mambaforge. See details here https://snakemake.readthedocs.io/en/stable/getting_started/installation.html\r +\r +Once conda is installed, you can pull the github repo and set up the base conda environment.\r +\r +```\r +# get github repo\r +git clone https://github.com/o-william-white/gene2phylo\r +\r +# change dir\r +cd gene2phylo\r +\r +# setup conda env\r +conda env create -n snakemake -f workflow/envs/conda_env.yaml\r +```\r +\r +
\r +\r +
\r +\r +## Example data\r +\r +Before you run your own data, it is recommended to run the example datasets provided . This will confirm there are no user-specific issues with the setup and it also installs all the dependencies. The example data includes mitochondrial and ribosomal genes from 25 different butterfly species. \r +\r +To run the example data, use the code below. The first time you run the pipeline, it will take some time to install each of the conda environments, so it is a good time to take a tea break :).\r +```\r +conda activate snakemake\r +\r +snakemake \\\r + --cores 4 \\\r + --use-conda\r +```\r +\r +
\r +\r +
\r +\r +## Input\r +\r +Snakemake requires a `config.yaml` to define input parameters. \r +\r +For the example data provided, the config file is located here `config/config.yaml` and it looks like this:\r +```\r +# name of input directory containg genes\r +input_dir: .test\r +\r +# realign (True or False)\r +realign: True\r +\r +# alignment missing data threshold for alignment (0.0 - 1.0), only required if realign == True\r +missing_threshold: 0.5\r +\r +# alignment trimming method to use (gblocks or clipkit), only required if realign == True\r +alignment_trim: gblocks\r +\r +# name of outgroup sample (optional)\r +# use "NA" if there is no obvious outgroup\r +# if more than one outgroup use a comma separated list i.e. "sampleA,sampleB"\r +outgroup: Eurema_blanda\r +\r +# plot dimensions (cm)\r +plot_height: 20\r +plot_width: 20\r +```\r +\r +
\r +\r +
\r +\r +## Output\r +\r +All output files are saved to the `results` direcotry. Below is a table summarising all of the output files generated by the pipeline.\r +\r +| Directory | Description |\r +|---------------------------|---------------------------|\r +| mafft | Optional: Mafft aligned fasta files of all genes |\r +| mafft_filtered | Optional: Mafft aligned fasta files after the removal of sequences based on a missing data threshold |\r +| alignment_trim | Optional: Ambiguous parts of alignment removed using either gblocks or clipkit |\r +| iqtree | Iqtree phylogenetic analysis for each gene |\r +| iqtree_plots | Plots of Iqtree phylogenetic tree for each gene |\r +| concatenate_alignments | Partitioned alignment of all genes |\r +| iqtree_partitioned | Iqtree partitioned phylogenetic analysis |\r +| iqtree_partitioned_plot | Plot of Iqtree partitioned tree |\r +| astral | Astral phylogenetic analysis of all gene trees |\r +| astral_plot | Plot of Astral tree |\r +\r +
\r +\r +
\r +\r +## Running your own data\r +\r +For the pipeline to function properly, the input gene alignments must be: \r +- in a single directory \r +- end with ".fasta"\r +- named after the aligned gene (e.g. "cox1.fasta" or "28S.fasta")\r +- share identical sample names across alignments (e.g. all genes from sample A share the same name)\r +\r +Please see the example data in the `.test/` directory as an example. \r +\r +Then you need to generate your own config.yaml file, using the example template provided.\r +\r +## Getting help\r +\r +If you have any questions, please do get in touch in the issues or by email o.william.white@gmail.com\r +\r +
\r +\r +
\r +\r +## Citations\r +\r +If you use the pipeline, please cite our bioarxiv preprint: https://doi.org/10.1101/2023.08.11.552985\r +\r +Since the pipeline is a wrapper for several other bioinformatic tools we also ask that you cite the tools used by the pipeline:\r + - Gblocks (default) https://doi.org/10.1093/oxfordjournals.molbev.a026334\r + - Clipkit (optional) https://doi.org/10.1371/journal.pbio.3001007\r + - Mafft (optional) https://doi.org/10.1093/molbev/mst010\r + - Iqtree https://doi.org/10.1093/molbev/msu300\r + - Ete3 https://doi.org/10.1093/molbev/msw046\r + - Ggtree https://doi.org/10.1111/2041-210X.12628\r + - Astral https://doi.org/10.1186/s12859-018-2129-y\r +\r +
\r +\r +
\r +\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "gene2phylo" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/793?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 10324 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + schema1:datePublished "2024-06-28T13:49:10.539699" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "rnaseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). 
Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. 
Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. \r +- At the **biological level**, it is important to link observed **conformational ensembles**, to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDKe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.487.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_flexdyn/python" ; + schema1:license "other-open" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein conformational ensembles generation" ; + schema1:sdDatePublished "2024-07-12 13:33:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/487/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10859 ; + schema1:creator , + ; + schema1:dateCreated "2023-05-31T13:27:53Z" ; + schema1:dateModified "2023-06-01T09:53:40Z" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. 
Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. 
\r +- At the **biological level**, it is important to link observed **conformational ensembles**, to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDKe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "" ; + schema1:name "Python Protein conformational ensembles generation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_flexdyn/python/workflow.py" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=12" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-07-12 13:18:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=12" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9397 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:39Z" ; + schema1:dateModified "2024-06-11T12:54:39Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=12" ; + schema1:version 12 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """[![ci](https://github.com/zavolanlab/zarp/workflows/CI/badge.svg?branch=dev)](https://github.com/zavolanlab/zarp/actions?query=workflow%3Aci)\r +[![GitHub license](https://img.shields.io/github/license/zavolanlab/zarp?color=orange)](https://github.com/zavolanlab/zarp/blob/dev/LICENSE)\r +[![DOI:10.1101/2021.11.18.469017](http://img.shields.io/badge/DOI-10.1101/2021.11.18.469017-B31B1B.svg)](https://doi.org/10.1101/2021.11.18.469017)\r +\r +\r +
\r + \r +
\r +\r +\r +# **ZARP** ([Zavolan-Lab](https://www.biozentrum.unibas.ch/research/researchgroups/overview/unit/zavolan/research-group-mihaela-zavolan/) Automated RNA-Seq Pipeline) \r +...is a generic RNA-Seq analysis workflow that allows \r +users to process and analyze Illumina short-read sequencing libraries with minimum effort. The workflow relies on \r +publicly available bioinformatics tools and currently handles single or paired-end stranded bulk RNA-seq data.\r +The workflow is developed in [Snakemake](https://snakemake.readthedocs.io/en/stable/), a widely used workflow management system in the bioinformatics\r +community.\r +\r +According to the current ZARP implementation, reads are analyzed (pre-processed, aligned, quantified) with state-of-the-art\r +tools to give meaningful initial insights into the quality and composition of an RNA-Seq library, reducing hands-on time for bioinformaticians and giving experimentalists the possibility to rapidly assess their data. Additional reports summarise the results of the individual steps and provide useful visualisations.\r +\r +\r +> **Note:** For a more detailed description of each step, please refer to the [workflow\r +> documentation](https://github.com/zavolanlab/zarp/blob/main/pipeline_documentation.md).\r +\r +\r +## Requirements\r +\r +The workflow has been tested on:\r +- CentOS 7.5\r +- Debian 10\r +- Ubuntu 16.04, 18.04\r +\r +> **NOTE:**\r +> Currently, we only support **Linux** execution. \r +\r +\r +# Installation\r +\r +## 1. Clone the repository\r +\r +Go to the desired directory/folder on your file system, then clone/get the \r +repository and move into the respective directory with:\r +\r +```bash\r +git clone https://github.com/zavolanlab/zarp.git\r +cd zarp\r +```\r +\r +## 2. Conda and Mamba installation\r +\r +Workflow dependencies can be conveniently installed with the [Conda](http://docs.conda.io/projects/conda/en/latest/index.html)\r +package manager. 
We recommend that you install [Miniconda](https://docs.conda.io/en/latest/miniconda.html) \r +for your system (Linux). Be sure to select Python 3 option. \r +The workflow was built and tested with `miniconda 4.7.12`.\r +Other versions are not guaranteed to work as expected.\r +\r +Given that Miniconda has been installed and is available in the current shell the first\r +dependency for ZARP is the [Mamba](https://github.com/mamba-org/mamba) package manager, which needs to be installed in\r +the `base` conda environment with:\r +\r +```bash\r +conda install mamba -n base -c conda-forge\r +```\r +\r +## 3. Dependencies installation\r +\r +For improved reproducibility and reusability of the workflow,\r +each individual step of the workflow runs either in its own [Singularity](https://sylabs.io/singularity/)\r +container or in its own [Conda](http://docs.conda.io/projects/conda/en/latest/index.html) virtual environemnt. \r +As a consequence, running this workflow has very few individual dependencies. \r +The **container execution** requires Singularity to be installed on the system where the workflow is executed. \r +As the functional installation of Singularity requires root privileges, and Conda currently only provides Singularity\r +for Linux architectures, the installation instructions are slightly different depending on your system/setup:\r +\r +### For most users\r +\r +If you do *not* have root privileges on the machine you want\r +to run the workflow on *or* if you do not have a Linux machine, please [install\r +Singularity](https://sylabs.io/guides/3.5/admin-guide/installation.html) separately and in privileged mode, depending\r +on your system. You may have to ask an authorized person (e.g., a systems\r +administrator) to do that. This will almost certainly be required if you want\r +to run the workflow on a high-performance computing (HPC) cluster. 
\r +\r +> **NOTE:**\r +> The workflow has been tested with the following Singularity versions: \r +> * `v2.6.2`\r +> * `v3.5.2`\r +\r +After installing Singularity, install the remaining dependencies with:\r +```bash\r +mamba env create -f install/environment.yml\r +```\r +\r +\r +### As root user on Linux\r +\r +If you have a Linux machine, as well as root privileges, (e.g., if you plan to\r +run the workflow on your own computer), you can execute the following command\r +to include Singularity in the Conda environment:\r +\r +```bash\r +mamba env update -f install/environment.root.yml\r +```\r +\r +## 4. Activate environment\r +\r +Activate the Conda environment with:\r +\r +```bash\r +conda activate zarp\r +```\r +\r +# Extra installation steps (optional)\r +\r +## 5. Non-essential dependencies installation\r +\r +Most tests have additional dependencies. If you are planning to run tests, you\r +will need to install these by executing the following command _in your active\r +Conda environment_:\r +\r +```bash\r +mamba env update -f install/environment.dev.yml\r +```\r +\r +## 6. Successful installation tests\r +\r +We have prepared several tests to check the integrity of the workflow and its\r +components. These can be found in subdirectories of the `tests/` directory. \r +The most critical of these tests enable you to execute the entire workflow on a \r +set of small example input files. Note that for this and other tests to complete\r +successfully, [additional dependencies](#installing-non-essential-dependencies) \r +need to be installed. 
\r +Execute one of the following commands to run the test workflow \r +on your local machine:\r +* Test workflow on local machine with **Singularity**:\r +```bash\r +bash tests/test_integration_workflow/test.local.sh\r +```\r +* Test workflow on local machine with **Conda**:\r +```bash\r +bash tests/test_integration_workflow_with_conda/test.local.sh\r +```\r +Execute one of the following commands to run the test workflow \r +on a [Slurm](https://slurm.schedmd.com/documentation.html)-managed high-performance computing (HPC) cluster:\r +\r +* Test workflow with **Singularity**:\r +\r +```bash\r +bash tests/test_integration_workflow/test.slurm.sh\r +```\r +* Test workflow with **Conda**:\r +\r +```bash\r +bash tests/test_integration_workflow_with_conda/test.slurm.sh\r +```\r +\r +> **NOTE:** Depending on the configuration of your Slurm installation you may\r +> need to adapt file `slurm-config.json` (located directly under `profiles`\r +> directory) and the arguments to options `--cores` and `--jobs`\r +> in the file `config.yaml` of a respective profile.\r +> Consult the manual of your workload manager as well as the section of the\r +> Snakemake manual dealing with [profiles].\r +\r +# Running the workflow on your own samples\r +\r +1. Assuming that your current directory is the repository's root directory,\r +create a directory for your workflow run and move into it with:\r +\r + ```bash\r + mkdir config/my_run\r + cd config/my_run\r + ```\r +\r +2. Create an empty sample table and a workflow configuration file:\r +\r + ```bash\r + touch samples.tsv\r + touch config.yaml\r + ```\r +\r +3. Use your editor of choice to populate these files with appropriate\r +values. 
Have a look at the examples in the `tests/` directory to see what the\r +files should look like, specifically:\r +\r + - [samples.tsv](https://github.com/zavolanlab/zarp/blob/main/tests/input_files/samples.tsv)\r + - [config.yaml](https://github.com/zavolanlab/zarp/blob/main/tests/input_files/config.yaml)\r +\r + - For more details and explanations, refer to the [pipeline-documentation](https://github.com/zavolanlab/zarp/blob/main/pipeline_documentation.md)\r +\r +\r +4. Create a runner script. Pick one of the following choices for either local\r +or cluster execution. Before execution of the respective command, you need to\r +remember to update the argument of the `--singularity-args` option of a\r +respective profile (file: `profiles/{profile}/config.yaml`) so that\r +it contains a comma-separated list of _all_ directories\r +containing input data files (samples and any annotation files etc) required for\r +your run.\r +\r + Runner script for _local execution_:\r +\r + ```bash\r + cat << "EOF" > run.sh\r + #!/bin/bash\r +\r + snakemake \\\r + --profile="../../profiles/local-singularity" \\\r + --configfile="config.yaml"\r +\r + EOF\r + ```\r +\r + **OR**\r +\r + Runner script for _Slurm cluster exection_ (note that you may need\r + to modify the arguments to `--jobs` and `--cores` in the file:\r + `profiles/slurm-singularity/config.yaml` depending on your HPC\r + and workload manager configuration):\r +\r + ```bash\r + cat << "EOF" > run.sh\r + #!/bin/bash\r + mkdir -p logs/cluster_log\r + snakemake \\\r + --profile="../profiles/slurm-singularity" \\\r + --configfile="config.yaml"\r + EOF\r + ```\r +\r + When running the pipeline with *conda* you should use `local-conda` and\r + `slurm-conda` profiles instead.\r +\r +5. 
Start your workflow run:\r +\r + ```bash\r + bash run.sh\r + ```\r +\r +# Sample downloads from SRA\r +\r +An independent Snakemake workflow `workflow/rules/sra_download.smk` is included\r +for the download of SRA samples with [sra-tools].\r +\r +> Note: as of Snakemake 7.3.1, only profile conda is supported. \r +> Singularity fails because the *sra-tools* Docker container only has `sh` \r +but `bash` is required.\r +\r +> Note: The workflow uses the implicit temporary directory \r +from snakemake, which is called with [resources.tmpdir].\r +\r +The workflow expects the following config:\r +* `samples`, a sample table (tsv) with column *sample* containing *SRR* identifiers,\r +see example [here](https://github.com/zavolanlab/zarp/blob/main/tests/input_files/sra_samples.tsv).\r +* `outdir`, an output directory\r +* `samples_out`, a pointer to a modified sample table with location of fastq files\r +* `cluster_log_dir`, the cluster log directory.\r +\r +For executing the example one can use the following\r +(with activated *zarp* environment):\r +\r +```bash\r +snakemake --snakefile="workflow/rules/sra_download.smk" \\\r + --profile="profiles/local-conda" \\\r + --config samples="tests/input_files/sra_samples.tsv" \\\r + outdir="results/sra_downloads" \\\r + samples_out="results/sra_downloads/sra_samples.out.tsv" \\\r + log_dir="logs" \\\r + cluster_log_dir="logs/cluster_log"\r +```\r +After successful execution, `results/sra_downloads/sra_samples.out.tsv` should contain:\r +```tsv\r +sample fq1 fq2\r +SRR18552868 results/sra_downloads/SRR18552868/SRR18552868.fastq.gz \r +SRR18549672 results/sra_downloads/SRR18549672/SRR18549672_1.fastq.gz results/sra_downloads/SRR18549672/SRR18549672_2.fastq.gz\r +```\r +\r +\r +# Metadata completion with HTSinfer\r +An independent Snakemake workflow `workflow/rules/htsinfer.smk` that populates the `samples.tsv` required by ZARP with the sample specific parameters `seqmode`, `f1_3p`, `f2_3p`, `organism`, `libtype` and `index_size`. 
Those parameters are inferred from the provided `fastq.gz` files by [HTSinfer](https://github.com/zavolanlab/htsinfer).\r +\r +> Note: The workflow uses the implicit temporary directory \r +from snakemake, which is called with [resources.tmpdir].\r +\r +\r +The workflow expects the following config:\r +* `samples`, a sample table (tsv) with column *sample* containing sample identifiers, as well as columns *fq1* and *fq2* containing the paths to the input fastq files\r +see example [here](https://github.com/zavolanlab/zarp/blob/main/tests/input_files/sra_samples.tsv). If the table contains further ZARP compatible columns (see [pipeline documentation](https://github.com/zavolanlab/zarp/blob/main/pipeline_documentation.md#read-sample-table)), the values specified there by the user are given priority over htsinfer's results. \r +* `outdir`, an output directory\r +* `samples_out`, path to a modified sample table with inferred parameters\r +* `records`, set to 100000 per default\r + \r +For executing the example one can use the following\r +(with activated *zarp* environment):\r +```bash\r +cd tests/test_htsinfer_workflow\r +snakemake \\\r + --snakefile="../../workflow/rules/htsinfer.smk" \\\r + --restart-times=0 \\\r + --profile="../../profiles/local-singularity" \\\r + --config outdir="results" \\\r + samples="../input_files/htsinfer_samples.tsv" \\\r + samples_out="samples_htsinfer.tsv" \\\r + --notemp \\\r + --keep-incomplete\r +```\r +\r +However, this call will exit with an error, as not all parameters can be inferred from the example files. The argument `--keep-incomplete` makes sure the `samples_htsinfer.tsv` file can nevertheless be inspected. 
\r +\r +After successful execution - if all parameters could be either inferred or were specified by the user - `[OUTDIR]/[SAMPLES_OUT]` should contain a populated table with parameters `seqmode`, `f1_3p`, `f2_3p`, `organism`, `libtype` and `index_size` for all input samples as described in the [pipeline documentation](https://github.com/zavolanlab/zarp/blob/main/pipeline_documentation.md#read-sample-table).\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.447.1" ; + schema1:isBasedOn "https://github.com/zavolanlab/zarp" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ZARP: An automated workflow for processing of RNA-seq data" ; + schema1:sdDatePublished "2024-07-12 13:33:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/447/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 312593 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 59182 ; + schema1:creator ; + schema1:dateCreated "2023-03-21T13:07:16Z" ; + schema1:dateModified "2023-05-12T15:33:47Z" ; + schema1:description """[![ci](https://github.com/zavolanlab/zarp/workflows/CI/badge.svg?branch=dev)](https://github.com/zavolanlab/zarp/actions?query=workflow%3Aci)\r +[![GitHub license](https://img.shields.io/github/license/zavolanlab/zarp?color=orange)](https://github.com/zavolanlab/zarp/blob/dev/LICENSE)\r +[![DOI:10.1101/2021.11.18.469017](http://img.shields.io/badge/DOI-10.1101/2021.11.18.469017-B31B1B.svg)](https://doi.org/10.1101/2021.11.18.469017)\r +\r +\r +
\r + \r +
\r +\r +\r +# **ZARP** ([Zavolan-Lab](https://www.biozentrum.unibas.ch/research/researchgroups/overview/unit/zavolan/research-group-mihaela-zavolan/) Automated RNA-Seq Pipeline) \r +...is a generic RNA-Seq analysis workflow that allows \r +users to process and analyze Illumina short-read sequencing libraries with minimum effort. The workflow relies on \r +publicly available bioinformatics tools and currently handles single or paired-end stranded bulk RNA-seq data.\r +The workflow is developed in [Snakemake](https://snakemake.readthedocs.io/en/stable/), a widely used workflow management system in the bioinformatics\r +community.\r +\r +According to the current ZARP implementation, reads are analyzed (pre-processed, aligned, quantified) with state-of-the-art\r +tools to give meaningful initial insights into the quality and composition of an RNA-Seq library, reducing hands-on time for bioinformaticians and giving experimentalists the possibility to rapidly assess their data. Additional reports summarise the results of the individual steps and provide useful visualisations.\r +\r +\r +> **Note:** For a more detailed description of each step, please refer to the [workflow\r +> documentation](https://github.com/zavolanlab/zarp/blob/main/pipeline_documentation.md).\r +\r +\r +## Requirements\r +\r +The workflow has been tested on:\r +- CentOS 7.5\r +- Debian 10\r +- Ubuntu 16.04, 18.04\r +\r +> **NOTE:**\r +> Currently, we only support **Linux** execution. \r +\r +\r +# Installation\r +\r +## 1. Clone the repository\r +\r +Go to the desired directory/folder on your file system, then clone/get the \r +repository and move into the respective directory with:\r +\r +```bash\r +git clone https://github.com/zavolanlab/zarp.git\r +cd zarp\r +```\r +\r +## 2. Conda and Mamba installation\r +\r +Workflow dependencies can be conveniently installed with the [Conda](http://docs.conda.io/projects/conda/en/latest/index.html)\r +package manager. 
We recommend that you install [Miniconda](https://docs.conda.io/en/latest/miniconda.html) \r +for your system (Linux). Be sure to select Python 3 option. \r +The workflow was built and tested with `miniconda 4.7.12`.\r +Other versions are not guaranteed to work as expected.\r +\r +Given that Miniconda has been installed and is available in the current shell the first\r +dependency for ZARP is the [Mamba](https://github.com/mamba-org/mamba) package manager, which needs to be installed in\r +the `base` conda environment with:\r +\r +```bash\r +conda install mamba -n base -c conda-forge\r +```\r +\r +## 3. Dependencies installation\r +\r +For improved reproducibility and reusability of the workflow,\r +each individual step of the workflow runs either in its own [Singularity](https://sylabs.io/singularity/)\r +container or in its own [Conda](http://docs.conda.io/projects/conda/en/latest/index.html) virtual environemnt. \r +As a consequence, running this workflow has very few individual dependencies. \r +The **container execution** requires Singularity to be installed on the system where the workflow is executed. \r +As the functional installation of Singularity requires root privileges, and Conda currently only provides Singularity\r +for Linux architectures, the installation instructions are slightly different depending on your system/setup:\r +\r +### For most users\r +\r +If you do *not* have root privileges on the machine you want\r +to run the workflow on *or* if you do not have a Linux machine, please [install\r +Singularity](https://sylabs.io/guides/3.5/admin-guide/installation.html) separately and in privileged mode, depending\r +on your system. You may have to ask an authorized person (e.g., a systems\r +administrator) to do that. This will almost certainly be required if you want\r +to run the workflow on a high-performance computing (HPC) cluster. 
\r +\r +> **NOTE:**\r +> The workflow has been tested with the following Singularity versions: \r +> * `v2.6.2`\r +> * `v3.5.2`\r +\r +After installing Singularity, install the remaining dependencies with:\r +```bash\r +mamba env create -f install/environment.yml\r +```\r +\r +\r +### As root user on Linux\r +\r +If you have a Linux machine, as well as root privileges, (e.g., if you plan to\r +run the workflow on your own computer), you can execute the following command\r +to include Singularity in the Conda environment:\r +\r +```bash\r +mamba env update -f install/environment.root.yml\r +```\r +\r +## 4. Activate environment\r +\r +Activate the Conda environment with:\r +\r +```bash\r +conda activate zarp\r +```\r +\r +# Extra installation steps (optional)\r +\r +## 5. Non-essential dependencies installation\r +\r +Most tests have additional dependencies. If you are planning to run tests, you\r +will need to install these by executing the following command _in your active\r +Conda environment_:\r +\r +```bash\r +mamba env update -f install/environment.dev.yml\r +```\r +\r +## 6. Successful installation tests\r +\r +We have prepared several tests to check the integrity of the workflow and its\r +components. These can be found in subdirectories of the `tests/` directory. \r +The most critical of these tests enable you to execute the entire workflow on a \r +set of small example input files. Note that for this and other tests to complete\r +successfully, [additional dependencies](#installing-non-essential-dependencies) \r +need to be installed. 
\r +Execute one of the following commands to run the test workflow \r +on your local machine:\r +* Test workflow on local machine with **Singularity**:\r +```bash\r +bash tests/test_integration_workflow/test.local.sh\r +```\r +* Test workflow on local machine with **Conda**:\r +```bash\r +bash tests/test_integration_workflow_with_conda/test.local.sh\r +```\r +Execute one of the following commands to run the test workflow \r +on a [Slurm](https://slurm.schedmd.com/documentation.html)-managed high-performance computing (HPC) cluster:\r +\r +* Test workflow with **Singularity**:\r +\r +```bash\r +bash tests/test_integration_workflow/test.slurm.sh\r +```\r +* Test workflow with **Conda**:\r +\r +```bash\r +bash tests/test_integration_workflow_with_conda/test.slurm.sh\r +```\r +\r +> **NOTE:** Depending on the configuration of your Slurm installation you may\r +> need to adapt file `slurm-config.json` (located directly under `profiles`\r +> directory) and the arguments to options `--cores` and `--jobs`\r +> in the file `config.yaml` of a respective profile.\r +> Consult the manual of your workload manager as well as the section of the\r +> Snakemake manual dealing with [profiles].\r +\r +# Running the workflow on your own samples\r +\r +1. Assuming that your current directory is the repository's root directory,\r +create a directory for your workflow run and move into it with:\r +\r + ```bash\r + mkdir config/my_run\r + cd config/my_run\r + ```\r +\r +2. Create an empty sample table and a workflow configuration file:\r +\r + ```bash\r + touch samples.tsv\r + touch config.yaml\r + ```\r +\r +3. Use your editor of choice to populate these files with appropriate\r +values. 
Have a look at the examples in the `tests/` directory to see what the\r +files should look like, specifically:\r +\r + - [samples.tsv](https://github.com/zavolanlab/zarp/blob/main/tests/input_files/samples.tsv)\r + - [config.yaml](https://github.com/zavolanlab/zarp/blob/main/tests/input_files/config.yaml)\r +\r + - For more details and explanations, refer to the [pipeline-documentation](https://github.com/zavolanlab/zarp/blob/main/pipeline_documentation.md)\r +\r +\r +4. Create a runner script. Pick one of the following choices for either local\r +or cluster execution. Before execution of the respective command, you need to\r +remember to update the argument of the `--singularity-args` option of a\r +respective profile (file: `profiles/{profile}/config.yaml`) so that\r +it contains a comma-separated list of _all_ directories\r +containing input data files (samples and any annotation files etc) required for\r +your run.\r +\r + Runner script for _local execution_:\r +\r + ```bash\r + cat << "EOF" > run.sh\r + #!/bin/bash\r +\r + snakemake \\\r + --profile="../../profiles/local-singularity" \\\r + --configfile="config.yaml"\r +\r + EOF\r + ```\r +\r + **OR**\r +\r + Runner script for _Slurm cluster exection_ (note that you may need\r + to modify the arguments to `--jobs` and `--cores` in the file:\r + `profiles/slurm-singularity/config.yaml` depending on your HPC\r + and workload manager configuration):\r +\r + ```bash\r + cat << "EOF" > run.sh\r + #!/bin/bash\r + mkdir -p logs/cluster_log\r + snakemake \\\r + --profile="../profiles/slurm-singularity" \\\r + --configfile="config.yaml"\r + EOF\r + ```\r +\r + When running the pipeline with *conda* you should use `local-conda` and\r + `slurm-conda` profiles instead.\r +\r +5. 
Start your workflow run:\r +\r + ```bash\r + bash run.sh\r + ```\r +\r +# Sample downloads from SRA\r +\r +An independent Snakemake workflow `workflow/rules/sra_download.smk` is included\r +for the download of SRA samples with [sra-tools].\r +\r +> Note: as of Snakemake 7.3.1, only profile conda is supported. \r +> Singularity fails because the *sra-tools* Docker container only has `sh` \r +but `bash` is required.\r +\r +> Note: The workflow uses the implicit temporary directory \r +from snakemake, which is called with [resources.tmpdir].\r +\r +The workflow expects the following config:\r +* `samples`, a sample table (tsv) with column *sample* containing *SRR* identifiers,\r +see example [here](https://github.com/zavolanlab/zarp/blob/main/tests/input_files/sra_samples.tsv).\r +* `outdir`, an output directory\r +* `samples_out`, a pointer to a modified sample table with location of fastq files\r +* `cluster_log_dir`, the cluster log directory.\r +\r +For executing the example one can use the following\r +(with activated *zarp* environment):\r +\r +```bash\r +snakemake --snakefile="workflow/rules/sra_download.smk" \\\r + --profile="profiles/local-conda" \\\r + --config samples="tests/input_files/sra_samples.tsv" \\\r + outdir="results/sra_downloads" \\\r + samples_out="results/sra_downloads/sra_samples.out.tsv" \\\r + log_dir="logs" \\\r + cluster_log_dir="logs/cluster_log"\r +```\r +After successful execution, `results/sra_downloads/sra_samples.out.tsv` should contain:\r +```tsv\r +sample fq1 fq2\r +SRR18552868 results/sra_downloads/SRR18552868/SRR18552868.fastq.gz \r +SRR18549672 results/sra_downloads/SRR18549672/SRR18549672_1.fastq.gz results/sra_downloads/SRR18549672/SRR18549672_2.fastq.gz\r +```\r +\r +\r +# Metadata completion with HTSinfer\r +An independent Snakemake workflow `workflow/rules/htsinfer.smk` that populates the `samples.tsv` required by ZARP with the sample specific parameters `seqmode`, `f1_3p`, `f2_3p`, `organism`, `libtype` and `index_size`. 
Those parameters are inferred from the provided `fastq.gz` files by [HTSinfer](https://github.com/zavolanlab/htsinfer).\r +\r +> Note: The workflow uses the implicit temporary directory \r +from snakemake, which is called with [resources.tmpdir].\r +\r +\r +The workflow expects the following config:\r +* `samples`, a sample table (tsv) with column *sample* containing sample identifiers, as well as columns *fq1* and *fq2* containing the paths to the input fastq files\r +see example [here](https://github.com/zavolanlab/zarp/blob/main/tests/input_files/sra_samples.tsv). If the table contains further ZARP compatible columns (see [pipeline documentation](https://github.com/zavolanlab/zarp/blob/main/pipeline_documentation.md#read-sample-table)), the values specified there by the user are given priority over htsinfer's results. \r +* `outdir`, an output directory\r +* `samples_out`, path to a modified sample table with inferred parameters\r +* `records`, set to 100000 per default\r + \r +For executing the example one can use the following\r +(with activated *zarp* environment):\r +```bash\r +cd tests/test_htsinfer_workflow\r +snakemake \\\r + --snakefile="../../workflow/rules/htsinfer.smk" \\\r + --restart-times=0 \\\r + --profile="../../profiles/local-singularity" \\\r + --config outdir="results" \\\r + samples="../input_files/htsinfer_samples.tsv" \\\r + samples_out="samples_htsinfer.tsv" \\\r + --notemp \\\r + --keep-incomplete\r +```\r +\r +However, this call will exit with an error, as not all parameters can be inferred from the example files. The argument `--keep-incomplete` makes sure the `samples_htsinfer.tsv` file can nevertheless be inspected. 
\r +\r +After successful execution - if all parameters could be either inferred or were specified by the user - `[OUTDIR]/[SAMPLES_OUT]` should contain a populated table with parameters `seqmode`, `f1_3p`, `f2_3p`, `organism`, `libtype` and `index_size` for all input samples as described in the [pipeline documentation](https://github.com/zavolanlab/zarp/blob/main/pipeline_documentation.md#read-sample-table).\r +\r +""" ; + schema1:image ; + schema1:keywords "Bioinformatics, rna, rna-seq, RNASEQ, NGS, high-throughput" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "ZARP: An automated workflow for processing of RNA-seq data" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/447?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-07-12 13:19:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7421 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:14Z" ; + schema1:dateModified "2024-06-11T12:55:14Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/965?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/atacseq" ; + schema1:sdDatePublished "2024-07-12 13:22:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/965/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5926 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:41Z" ; + schema1:dateModified "2024-06-11T12:54:41Z" ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/965?version=8" ; + schema1:keywords "ATAC-seq, chromatin-accessibiity" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/atacseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/965?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Genome-wide alternative splicing analysis v.2" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/482?version=2" ; + schema1:license "CC-BY-NC-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genome-wide alternative splicing analysis v.2" ; + schema1:sdDatePublished "2024-07-12 13:33:04 +0100" ; + schema1:url "https://workflowhub.eu/workflows/482/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 214419 . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 30115 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 119623 ; + schema1:creator ; + schema1:dateCreated "2023-06-07T16:10:45Z" ; + schema1:dateModified "2023-06-11T11:47:11Z" ; + schema1:description "Genome-wide alternative splicing analysis v.2" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/482?version=6" ; + schema1:keywords "Transcriptomics, Alternative splicing, isoform switching" ; + schema1:license "https://spdx.org/licenses/CC-BY-NC-4.0" ; + schema1:name "Genome-wide alternative splicing analysis v.2" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/482?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Description: Trinity @ NCI-Gadi contains a staged [Trinity](https://github.com/trinityrnaseq/trinityrnaseq/wiki) workflow that can be run on the National Computational Infrastructure’s (NCI) Gadi supercomputer. Trinity performs de novo transcriptome assembly of RNA-seq data by combining three independent software modules Inchworm, Chrysalis and Butterfly to process RNA-seq reads. The algorithm can detect isoforms, handle paired-end reads, multiple insert sizes and strandedness. 
\r +\r +Infrastructure\\_deployment\\_metadata: Gadi (NCI)""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.145.1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/Gadi-Trinity" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Trinity @ NCI-Gadi" ; + schema1:sdDatePublished "2024-07-12 13:36:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/145/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11522 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2021-08-17T04:44:57Z" ; + schema1:dateModified "2023-01-16T13:51:44Z" ; + schema1:description """Description: Trinity @ NCI-Gadi contains a staged [Trinity](https://github.com/trinityrnaseq/trinityrnaseq/wiki) workflow that can be run on the National Computational Infrastructure’s (NCI) Gadi supercomputer. Trinity performs de novo transcriptome assembly of RNA-seq data by combining three independent software modules Inchworm, Chrysalis and Butterfly to process RNA-seq reads. The algorithm can detect isoforms, handle paired-end reads, multiple insert sizes and strandedness. \r +\r +Infrastructure\\_deployment\\_metadata: Gadi (NCI)""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "Assembly, Transcriptomics, trinity, NCI, RNASEQ, rna, rna-seq, Gadi, scalable, PBS" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Trinity @ NCI-Gadi" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/145?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 51464 . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-11-07T21:34:48.804317" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.17" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Macromolecular Coarse-Grained Flexibility (FlexServ) tutorial using BioExcel Building Blocks (biobb)\r +\r +This tutorial aims to illustrate the process of generating protein conformational ensembles from 3D structures and analysing its molecular flexibility, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.552.1" ; + schema1:isBasedOn 
"https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_flexserv/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Macromolecular Coarse-Grained Flexibility tutorial" ; + schema1:sdDatePublished "2024-07-12 13:32:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/552/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 125571 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 27401 ; + schema1:creator , + ; + schema1:dateCreated "2023-08-02T10:50:45Z" ; + schema1:dateModified "2023-08-02T11:21:11Z" ; + schema1:description """# Macromolecular Coarse-Grained Flexibility (FlexServ) tutorial using BioExcel Building Blocks (biobb)\r +\r +This tutorial aims to illustrate the process of generating protein conformational ensembles from 3D structures and analysing its molecular flexibility, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license 
"https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Macromolecular Coarse-Grained Flexibility tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_flexserv/cwl/workflow.cwl" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "CAGE-seq pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/969?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/cageseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/cageseq" ; + schema1:sdDatePublished "2024-07-12 13:22:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/969/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5375 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:44Z" ; + schema1:dateModified "2024-06-11T12:54:44Z" ; + schema1:description "CAGE-seq pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/969?version=2" ; + schema1:keywords "cage, cage-seq, cageseq-data, gene-expression, rna" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/cageseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/969?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# workflow-qc-of-radseq-reads\r +\r +These workflows are part of a set designed to work for RAD-seq data on the Galaxy platform, using the tools from the Stacks program. \r +\r +Galaxy Australia: https://usegalaxy.org.au/\r +\r +Stacks: http://catchenlab.life.illinois.edu/stacks/\r +\r +## Inputs\r +* demultiplexed reads in fastq format, in a collection\r +* two adapter sequences in fasta format, for input into cutadapt\r +\r +## Steps and outputs\r +\r +The workflow can be modified to suit your own parameters. \r +\r +The workflow steps are:\r +* Run FastQC to get statistics on the raw reads, send to MultiQC to create a nice output. This is tagged as "Report 1" in the Galaxy history. \r +* Run Cutadapt on the reads to cut adapters - enter two files with adapter sequence at the workflow option for "Choose file containing 3' adapters". The default settings are on except that the "Maximum error rate" for the adapters is set to 0.2 instead of 0.1. Send output statistics to MulitQC, this is "Report 2" in the Galaxy history. Note that you may have different requirements here in terms of how many adapter sequences you want to enter. We recommend copying the workflow and modifying as needed. \r +* Send these reads to fastp for additional filtering or trimming. Default settings are on but can be modified as needed. Send output statistics to MultiQC, this is "Report 3" in the Galaxy history. \r +* The filtered and trimmed reads are then ready for the stacks workflows. 
\r +\r +![qc-wf](wf-image-qc.png)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/346?version=1" ; + schema1:isBasedOn "https://github.com/AnnaSyme/workflow-qc-of-radseq-reads.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for QC of RADseq reads" ; + schema1:sdDatePublished "2024-07-12 13:35:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/346/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 20279 ; + schema1:creator ; + schema1:dateCreated "2022-05-31T06:55:21Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """# workflow-qc-of-radseq-reads\r +\r +These workflows are part of a set designed to work for RAD-seq data on the Galaxy platform, using the tools from the Stacks program. \r +\r +Galaxy Australia: https://usegalaxy.org.au/\r +\r +Stacks: http://catchenlab.life.illinois.edu/stacks/\r +\r +## Inputs\r +* demultiplexed reads in fastq format, in a collection\r +* two adapter sequences in fasta format, for input into cutadapt\r +\r +## Steps and outputs\r +\r +The workflow can be modified to suit your own parameters. \r +\r +The workflow steps are:\r +* Run FastQC to get statistics on the raw reads, send to MultiQC to create a nice output. This is tagged as "Report 1" in the Galaxy history. \r +* Run Cutadapt on the reads to cut adapters - enter two files with adapter sequence at the workflow option for "Choose file containing 3' adapters". The default settings are on except that the "Maximum error rate" for the adapters is set to 0.2 instead of 0.1. Send output statistics to MulitQC, this is "Report 2" in the Galaxy history. Note that you may have different requirements here in terms of how many adapter sequences you want to enter. We recommend copying the workflow and modifying as needed. 
\r +* Send these reads to fastp for additional filtering or trimming. Default settings are on but can be modified as needed. Send output statistics to MultiQC, this is "Report 3" in the Galaxy history. \r +* The filtered and trimmed reads are then ready for the stacks workflows. \r +\r +![qc-wf](wf-image-qc.png)\r +""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "QC of RADseq reads" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/346?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 267910 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "MARS-seq v2 preprocessing pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/996?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/marsseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/marsseq" ; + schema1:sdDatePublished "2024-07-12 13:20:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/996/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9093 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:00Z" ; + schema1:dateModified "2024-06-11T12:55:00Z" ; + schema1:description "MARS-seq v2 preprocessing pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/996?version=4" ; + schema1:keywords "facs-sorting, MARS-seq, single-cell, single-cell-rna-seq, star-solo, transcriptional-dynamics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/marsseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/996?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# Framework for construction of phylogenetic networks on High Performance Computing (HPC) environment\r +\r +## Introduction\r +\r +Phylogeny refers to the evolutionary history and relationship between biological lineages related by common descent. Reticulate evolution refers to the origination of lineages through the complete or partial merging of ancestor lineages. Networks may be used to represent lineage independence events in non-treelike phylogenetic processes.\r +\r +The methodology for reconstructing networks is still in development. Here we explore two methods for reconstructing rooted explicit phylogenetic networks, PhyloNetworks and Phylonet, which employ computationally expensive and time consuming algorithms. The construction of phylogenetic networks follows a coordinated processing flow of data sets analyzed and processed by the coordinated execution of a set of different programs, packages, libraries or pipelines, called workflow activities. 
\r +\r +In view of the complexity in modeling network experiments, the present work introduces a workflow for phylogenetic network analyses coupled to be executed in High-Performance Computing (HPC) environments. The workflow aims to integrate well-established software, pipelines and scripts, implementing a challenging task since these tools do not consistently profit from the HPC environment, leading to an increase in the expected makespan and idle computing resources.\r +\r +## Requirements\r +\r +1. Python >= 3.8\r + 1. Biopython >= 1.75\r + 2. Pandas >= 1.3.2\r + 3. Parsl >= 1.0\r +3. Raxml >= 8.2.12\r +4. Astral >= 5.7.1\r +5. SnaQ (PhyloNetworks) >= 0.13.0\r +6. MrBayes >= 3.2.7a\r +7. BUCKy >= 1.4.4\r +8. Quartet MaxCut >= 2.10\r +9. PhyloNet >= 3.8.2\r +10. Julia >= 1.4.1\r +11. IQTREE >= 2.0\r +\r +\r +## How to use\r +\r +### Setting up the framework\r +\r +The framework uses a file to get all the needed parameters. For default it loads the file *default.ini* in the config folder, but you can explicitly load other files using the argument ``-s name_of_the_file``, *e.g.* ``-s config/test.ini``.\r +\r +* Edit *parl.env* with the environment variables you may need, such as modules loadeds in SLURM\r +* Edit *work.config* with the directories of your phylogeny studies (the framework receives as input a set of homologous gene alignments of species in the nexus format).\r +* Edit *default.ini* with the path for each of the needed softwares and the parameters of the execution provider.\r +\r +For default, the execution logs are created in the ``runinfo`` folder. To change it you can use the `-r folder_path` parameter.\r +\r +#### Contents of the configuration file\r +\r +* General settings\r +\r +```ini\r +[GENERAL]\r +ExecutionProvider = SLURM\r +ScriptDir = ./scripts\r +Environ = config/parsl.env\r +Workload = config/work.config\r +NetworkMethod = MP\r +TreeMethod = RAXML\r +BootStrap = 1000\r +```\r +\r +1. 
The framework can be executed in a HPC environment using the Slurm resource manager using the parameter ``ExecutionProvider`` equals to ``SLURM`` or locally with ``LOCAL``. \r +2. The path of the scripts folder is assigned in ``ScriptDir``. It's recommended to use the absolute path to avoid errors.\r +3. The ``Environ`` parameter contains the path of the file used to set environment variables. More details can be seen below.\r +4. In ``Workload`` is the path of the experiments that will be performed.\r +5. ``NetworkMethod`` and ``TreeMethod`` are the default network and tree methods that will be used to perform the workloads' studies.\r +6. ``Bootstrap`` is the parameter used in all the software that use bootstrap (RAxML, IQTREE and ASTRAL)\r +\r +* Workflow execution settings\r + \r + When using SLURM, these are the needed parameters:\r + ```ini\r + [WORKFLOW]\r + Monitor = False\r + PartCore = 24\r + PartNode = 1\r + Walltime = 00:20:00\r + ```\r +\r + 1. ``Monitor`` is a parameter to use parsl's monitor module in HPC environment. It can be *true* or *false*. If you want to use it, it's necessary to set it as *true* and manually change the address in ``infra_manager.py``\r + 2. If you are using it in a HPC environment (using SLURM), the framework is going to submit in a job. 
``PartCore`` is the number of cores of the node; ``PartNode`` is the number of nodes of the partition; and the ``Walltime`` parameter is the maximum amount of time the job will be able to run.\r +\r + However, if the the desired execution method is the LocalProvider, _i.e._ the execution is being performed in your own machine, only these parameters are necessary:\r +\r + ```ini\r + [WORKFLOW]\r + Monitor = False\r + MaxCore = 6\r + CoresPerWorker = 1\r +\r + ```\r +\r +* RAxML settings\r +\r + ```ini\r + [RAXML]\r + RaxmlExecutable = raxmlHPC-PTHREADS\r + RaxmlThreads = 6\r + RaxmlEvolutionaryModel = GTRGAMMA --HKY85\r + ```\r +\r +* IQTREE settings\r +\r + ```ini\r + [IQTREE]\r + IqTreeExecutable = iqtree2\r + IqTreeEvolutionaryModel = TIM2+I+G \r + IqTreeThreads = 6\r + ```\r +\r +* ASTRAL settings\r +\r + ```ini\r + [ASTRAL]\r + AstralExecDir = /opt/astral/5.7.1\r + AstralJar = astral.jar\r + ```\r +\r +* PhyloNet settings\r +\r + ```ini\r + [PHYLONET]\r + PhyloNetExecDir = /opt/phylonet/3.8.2/\r + PhyloNetJar = PhyloNet.jar\r + PhyloNetThreads = 6\r + PhyloNetHMax = 3\r + PhyloNetRuns = 5\r + ```\r +\r +* SNAQ settings\r +\r + ```ini\r + [SNAQ]\r + SnaqThreads = 6\r + SnaqHMax = 3\r + SnaqRuns = 3\r + ```\r +\r +* Mr. Bayes settings\r +\r + ```ini\r + [MRBAYES]\r + MBExecutable = mb\r + MBParameters = set usebeagle=no beagledevice=cpu beagleprecision=double; mcmcp ngen=100000 burninfrac=.25 samplefreq=50 printfreq=10000 diagnfreq=10000 nruns=2 nchains=2 temp=0.40 swapfreq=10\r + ```\r +\r +* Bucky settings\r +\r + ```ini\r + [BUCKY]\r + BuckyExecutable = bucky\r + MbSumExecutable = mbsum\r + ```\r +\r +* Quartet MaxCut\r +\r + ```ini\r + QUARTETMAXCUT]\r + QmcExecDir = /opt/quartet/\r + QmcExecutable = find-cut-Linux-64\r + ```\r +\r +#### Workload file\r +\r +For default the workload file is ``work.config`` in the *config* folder. 
The file contains the absolute paths of the experiment's folders.\r +\r +```\r +/home/rafael.terra/Biocomp/data/Denv_1\r +```\r +\r +You can comment folders using the # character in the beginning of the path. *e. g.* ``#/home/rafael.terra/Biocomp/data/Denv_1``. That way the framework won't read this path.\r +\r +You can also run a specific flow for a path using ``@TreeMethod|NetworkMethod`` in the end of a path. Where *TreeMethod* can be RAXML, IQTREE or MRBAYES and *NetworkMethod* can be MPL or MP (case sensitive). The supported flows are: ``RAXML|MPL``, ``RAXML|MP``, ``IQTREE|MPL``, ``IQTREE|MP`` and ``MRBAYES|MPL``. For example:\r +\r +```\r +/home/rafael.terra/Biocomp/data/Denv_1@RAXML|MPL\r +```\r +\r +#### Environment file\r +\r +The environment file contains all the environment variables (like module files used in SLURM) used during the framework execution. Example:\r +\r +```sh\r +module load python/3.8.2\r +module load raxml/8.2_openmpi-2.0_gnu\r +module load java/jdk-12\r +module load iqtree/2.1.1\r +module load bucky/1.4.4\r +module load mrbayes/3.2.7a-OpenMPI-4.0.4\r +source /scratch/app/modulos/julia-1.5.1.sh\r +```\r +\r +#### Experiment folder\r +\r +Each experiment folder needs to have a *input folder* containing a *.tar.gz* compressed file and a *.json* with the following content. **The framework considers that there is only one file of each extension in the input folder**.\r +\r +```json\r +{\r + "Mapping":"",\r + "Outgroup":""\r +}\r +```\r +\r +Where ``Mapping`` is a direct mapping of the taxon, when there are multiple alleles per species, in the format ``species1:taxon1,taxon2;species2:taxon3,taxon4`` *(white spaces are not supported)* and ``Outgroup`` is the taxon used to root the network. The Mapping parameter is optional (although it has to be in the json file without value), but the outgroup is obligatory. It's important to say that the flow *MRBAYES|MPL* doesn't support multiple alleles per species. 
Example:\r +\r +```json\r +{\r + "Mapping": "dengue_virus_type_2:FJ850082,FJ850088,JX669479,JX669482,JX669488,KP188569;dengue_virus_type_3:FJ850079,FJ850094,JN697379,JX669494;dengue_virus_type_1:FJ850073,FJ850084,FJ850093,JX669465,JX669466,JX669475,KP188545,KP188547;dengue_virus_type_4:JN559740,JQ513337,JQ513341,JQ513343,JQ513344,JQ513345,KP188563,KP188564;Zika_virus:MH882543", \r + "Outgroup": "MH882543"\r +}\r +```\r +\r +\r +## Running the framework\r +\r +* In a local machine:\r +\r + After setting up the framework, just run ``python3 parsl_workflow.py``.\r + \r +* In a SLURM environment:\r +\r + Create an submition script that inside contains: ``python3 parsl_workflow.py``.\r +\r + ```sh\r + #!/bin/bash\r + #SBATCH --time=15:00:00\r + #SBATCH -e slurm-%j.err\r + #SBATCH -o slurm-%j.out\r + module load python/3.9.6\r + cd /path/to/biocomp\r + python3 parsl_workflow.py\r + ```\r +\r +The framework is under heavy development. If you notice any bug, please create an issue here on GitHub.\r +\r +### Running in a DOCKER container\r +\r +The framework is also available to be used in Docker. It can be built from source or pushed from DockerHub.\r +\r +#### Building it from the source code\r +\r +Adapt the default settings file ``config/default.ini`` according to your machine, setting the number of threads and bootstrap. After that, run ``docker build -t hp2net .`` in the project's root folder.\r +\r +#### Downloading it from Dockerhub\r +\r +The docker image can also be downloaded from [Docker hub](https://hub.docker.com/repository/docker/rafaelstjf/hp2net/general). To do that, just run the command ``docker pull rafaelstjf/hp2net:main``\r +\r +#### Running\r +\r +The first step to run the framework is to setup your dataset. 
To test if the framework is running without problems in your machine, you can use the [example datasets](example_data).\r +\r +![Alt text](docs/example_data.png)\r +\r +Extracting the ``example_data.zip`` file, a new folder called ``with_outgroup`` is created. This folder contain four datasets of DENV sequences.\r +\r +The next step is the creation of the settings and workload files. For the settings file, download the [default.ini](config/default.ini) from this repository and change it to you liking (the path of all software are already configured to run on docker). The workload file is a text file containing the absolute path of the datasets, followed by the desired pipeline, as shown before in this document. Here for example purposes, the ``input.txt`` file was created.\r +\r +![Alt text](docs/example_files.png)\r +\r +With all the files prepared, the framework can be executed from the ``example_data`` folder as following:\r +\r +``docker run --rm -v $PWD:$PWD rafaelstjf/hp2net:main -s $PWD/default.ini -w $PWD/input.txt``\r +\r +**Important:** the docker doesn't save your logs, for that add the parameter: ``-r $PWD/name_of_your_log_folder``.\r +\r +---\r +If you are running it on **Santos Dumont Supercomputer**, both downloading and execution of the docker container need to be performed from a submission script and executed using ``sg docker -c "sbatch script.sh"``. 
The snippet below shows an example of submission script.\r +\r +```sh\r +#!/bin/bash\r +#SBATCH --nodes=1\r +#SBATCH --ntasks-per-node=24\r +#SBATCH -p cpu_small\r +#SBATCH -J Hp2NET\r +#SBATCH --exclusive\r +#SBATCH --time=02:00:00\r +#SBATCH -e slurm-%j.err\r +#SBATCH -o slurm-%j.out\r +\r +DIR='/scratch/pcmrnbio2/rafael.terra/WF_parsl/example_data'\r +docker pull rafaelstjf/hp2net:main\r +\r +docker run --rm -v $DIR:$DIR rafaelstjf/hp2net:main -s ${DIR}/sdumont.ini -w ${DIR}/entrada.txt -r ${DIR}/logs\r +```\r +\r +## If you use it, please cite\r +\r +Terra, R., Coelho, M., Cruz, L., Garcia-Zapata, M., Gadelha, L., Osthoff, C., ... & Ocana, K. (2021, July). Gerência e Análises de Workflows aplicados a Redes Filogenéticas de Genomas de Dengue no Brasil. In *Anais do XV Brazilian e-Science Workshop* (pp. 49-56). SBC.\r +\r +**Also cite all the coupled software!**\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.703.1" ; + schema1:isBasedOn "https://github.com/rafaelstjf/biocomp.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for HP2NET - Framework for Construction of Phylogenetic Networks on High Performance Computing (HPC) Environment" ; + schema1:sdDatePublished "2024-07-12 13:25:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/703/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14660 ; + schema1:creator , + ; + schema1:dateCreated "2024-01-09T13:04:17Z" ; + schema1:dateModified "2024-01-18T18:24:01Z" ; + schema1:description """# Framework for construction of phylogenetic networks on High Performance Computing (HPC) environment\r +\r +## Introduction\r +\r +Phylogeny refers to the evolutionary history and relationship between biological lineages related by common descent. 
Reticulate evolution refers to the origination of lineages through the complete or partial merging of ancestor lineages. Networks may be used to represent lineage independence events in non-treelike phylogenetic processes.\r +\r +The methodology for reconstructing networks is still in development. Here we explore two methods for reconstructing rooted explicit phylogenetic networks, PhyloNetworks and Phylonet, which employ computationally expensive and time consuming algorithms. The construction of phylogenetic networks follows a coordinated processing flow of data sets analyzed and processed by the coordinated execution of a set of different programs, packages, libraries or pipelines, called workflow activities. \r +\r +In view of the complexity in modeling network experiments, the present work introduces a workflow for phylogenetic network analyses coupled to be executed in High-Performance Computing (HPC) environments. The workflow aims to integrate well-established software, pipelines and scripts, implementing a challenging task since these tools do not consistently profit from the HPC environment, leading to an increase in the expected makespan and idle computing resources.\r +\r +## Requirements\r +\r +1. Python >= 3.8\r + 1. Biopython >= 1.75\r + 2. Pandas >= 1.3.2\r + 3. Parsl >= 1.0\r +3. Raxml >= 8.2.12\r +4. Astral >= 5.7.1\r +5. SnaQ (PhyloNetworks) >= 0.13.0\r +6. MrBayes >= 3.2.7a\r +7. BUCKy >= 1.4.4\r +8. Quartet MaxCut >= 2.10\r +9. PhyloNet >= 3.8.2\r +10. Julia >= 1.4.1\r +11. IQTREE >= 2.0\r +\r +\r +## How to use\r +\r +### Setting up the framework\r +\r +The framework uses a file to get all the needed parameters. 
For default it loads the file *default.ini* in the config folder, but you can explicitly load other files using the argument ``-s name_of_the_file``, *e.g.* ``-s config/test.ini``.\r +\r +* Edit *parl.env* with the environment variables you may need, such as modules loadeds in SLURM\r +* Edit *work.config* with the directories of your phylogeny studies (the framework receives as input a set of homologous gene alignments of species in the nexus format).\r +* Edit *default.ini* with the path for each of the needed softwares and the parameters of the execution provider.\r +\r +For default, the execution logs are created in the ``runinfo`` folder. To change it you can use the `-r folder_path` parameter.\r +\r +#### Contents of the configuration file\r +\r +* General settings\r +\r +```ini\r +[GENERAL]\r +ExecutionProvider = SLURM\r +ScriptDir = ./scripts\r +Environ = config/parsl.env\r +Workload = config/work.config\r +NetworkMethod = MP\r +TreeMethod = RAXML\r +BootStrap = 1000\r +```\r +\r +1. The framework can be executed in a HPC environment using the Slurm resource manager using the parameter ``ExecutionProvider`` equals to ``SLURM`` or locally with ``LOCAL``. \r +2. The path of the scripts folder is assigned in ``ScriptDir``. It's recommended to use the absolute path to avoid errors.\r +3. The ``Environ`` parameter contains the path of the file used to set environment variables. More details can be seen below.\r +4. In ``Workload`` is the path of the experiments that will be performed.\r +5. ``NetworkMethod`` and ``TreeMethod`` are the default network and tree methods that will be used to perform the workloads' studies.\r +6. ``Bootstrap`` is the parameter used in all the software that use bootstrap (RAxML, IQTREE and ASTRAL)\r +\r +* Workflow execution settings\r + \r + When using SLURM, these are the needed parameters:\r + ```ini\r + [WORKFLOW]\r + Monitor = False\r + PartCore = 24\r + PartNode = 1\r + Walltime = 00:20:00\r + ```\r +\r + 1. 
``Monitor`` is a parameter to use parsl's monitor module in HPC environment. It can be *true* or *false*. If you want to use it, it's necessary to set it as *true* and manually change the address in ``infra_manager.py``\r + 2. If you are using it in a HPC environment (using SLURM), the framework is going to submit in a job. ``PartCore`` is the number of cores of the node; ``PartNode`` is the number of nodes of the partition; and the ``Walltime`` parameter is the maximum amount of time the job will be able to run.\r +\r + However, if the the desired execution method is the LocalProvider, _i.e._ the execution is being performed in your own machine, only these parameters are necessary:\r +\r + ```ini\r + [WORKFLOW]\r + Monitor = False\r + MaxCore = 6\r + CoresPerWorker = 1\r +\r + ```\r +\r +* RAxML settings\r +\r + ```ini\r + [RAXML]\r + RaxmlExecutable = raxmlHPC-PTHREADS\r + RaxmlThreads = 6\r + RaxmlEvolutionaryModel = GTRGAMMA --HKY85\r + ```\r +\r +* IQTREE settings\r +\r + ```ini\r + [IQTREE]\r + IqTreeExecutable = iqtree2\r + IqTreeEvolutionaryModel = TIM2+I+G \r + IqTreeThreads = 6\r + ```\r +\r +* ASTRAL settings\r +\r + ```ini\r + [ASTRAL]\r + AstralExecDir = /opt/astral/5.7.1\r + AstralJar = astral.jar\r + ```\r +\r +* PhyloNet settings\r +\r + ```ini\r + [PHYLONET]\r + PhyloNetExecDir = /opt/phylonet/3.8.2/\r + PhyloNetJar = PhyloNet.jar\r + PhyloNetThreads = 6\r + PhyloNetHMax = 3\r + PhyloNetRuns = 5\r + ```\r +\r +* SNAQ settings\r +\r + ```ini\r + [SNAQ]\r + SnaqThreads = 6\r + SnaqHMax = 3\r + SnaqRuns = 3\r + ```\r +\r +* Mr. 
Bayes settings\r +\r + ```ini\r + [MRBAYES]\r + MBExecutable = mb\r + MBParameters = set usebeagle=no beagledevice=cpu beagleprecision=double; mcmcp ngen=100000 burninfrac=.25 samplefreq=50 printfreq=10000 diagnfreq=10000 nruns=2 nchains=2 temp=0.40 swapfreq=10\r + ```\r +\r +* Bucky settings\r +\r + ```ini\r + [BUCKY]\r + BuckyExecutable = bucky\r + MbSumExecutable = mbsum\r + ```\r +\r +* Quartet MaxCut\r +\r + ```ini\r + QUARTETMAXCUT]\r + QmcExecDir = /opt/quartet/\r + QmcExecutable = find-cut-Linux-64\r + ```\r +\r +#### Workload file\r +\r +For default the workload file is ``work.config`` in the *config* folder. The file contains the absolute paths of the experiment's folders.\r +\r +```\r +/home/rafael.terra/Biocomp/data/Denv_1\r +```\r +\r +You can comment folders using the # character in the beginning of the path. *e. g.* ``#/home/rafael.terra/Biocomp/data/Denv_1``. That way the framework won't read this path.\r +\r +You can also run a specific flow for a path using ``@TreeMethod|NetworkMethod`` in the end of a path. Where *TreeMethod* can be RAXML, IQTREE or MRBAYES and *NetworkMethod* can be MPL or MP (case sensitive). The supported flows are: ``RAXML|MPL``, ``RAXML|MP``, ``IQTREE|MPL``, ``IQTREE|MP`` and ``MRBAYES|MPL``. For example:\r +\r +```\r +/home/rafael.terra/Biocomp/data/Denv_1@RAXML|MPL\r +```\r +\r +#### Environment file\r +\r +The environment file contains all the environment variables (like module files used in SLURM) used during the framework execution. Example:\r +\r +```sh\r +module load python/3.8.2\r +module load raxml/8.2_openmpi-2.0_gnu\r +module load java/jdk-12\r +module load iqtree/2.1.1\r +module load bucky/1.4.4\r +module load mrbayes/3.2.7a-OpenMPI-4.0.4\r +source /scratch/app/modulos/julia-1.5.1.sh\r +```\r +\r +#### Experiment folder\r +\r +Each experiment folder needs to have a *input folder* containing a *.tar.gz* compressed file and a *.json* with the following content. 
**The framework considers that there is only one file of each extension in the input folder**.\r +\r +```json\r +{\r + "Mapping":"",\r + "Outgroup":""\r +}\r +```\r +\r +Where ``Mapping`` is a direct mapping of the taxon, when there are multiple alleles per species, in the format ``species1:taxon1,taxon2;species2:taxon3,taxon4`` *(white spaces are not supported)* and ``Outgroup`` is the taxon used to root the network. The Mapping parameter is optional (although it has to be in the json file without value), but the outgroup is obligatory. It's important to say that the flow *MRBAYES|MPL* doesn't support multiple alleles per species. Example:\r +\r +```json\r +{\r + "Mapping": "dengue_virus_type_2:FJ850082,FJ850088,JX669479,JX669482,JX669488,KP188569;dengue_virus_type_3:FJ850079,FJ850094,JN697379,JX669494;dengue_virus_type_1:FJ850073,FJ850084,FJ850093,JX669465,JX669466,JX669475,KP188545,KP188547;dengue_virus_type_4:JN559740,JQ513337,JQ513341,JQ513343,JQ513344,JQ513345,KP188563,KP188564;Zika_virus:MH882543", \r + "Outgroup": "MH882543"\r +}\r +```\r +\r +\r +## Running the framework\r +\r +* In a local machine:\r +\r + After setting up the framework, just run ``python3 parsl_workflow.py``.\r + \r +* In a SLURM environment:\r +\r + Create an submition script that inside contains: ``python3 parsl_workflow.py``.\r +\r + ```sh\r + #!/bin/bash\r + #SBATCH --time=15:00:00\r + #SBATCH -e slurm-%j.err\r + #SBATCH -o slurm-%j.out\r + module load python/3.9.6\r + cd /path/to/biocomp\r + python3 parsl_workflow.py\r + ```\r +\r +The framework is under heavy development. If you notice any bug, please create an issue here on GitHub.\r +\r +### Running in a DOCKER container\r +\r +The framework is also available to be used in Docker. It can be built from source or pushed from DockerHub.\r +\r +#### Building it from the source code\r +\r +Adapt the default settings file ``config/default.ini`` according to your machine, setting the number of threads and bootstrap. 
After that, run ``docker build -t hp2net .`` in the project's root folder.\r +\r +#### Downloading it from Dockerhub\r +\r +The docker image can also be downloaded from [Docker hub](https://hub.docker.com/repository/docker/rafaelstjf/hp2net/general). To do that, just run the command ``docker pull rafaelstjf/hp2net:main``\r +\r +#### Running\r +\r +The first step to run the framework is to setup your dataset. To test if the framework is running without problems in your machine, you can use the [example datasets](example_data).\r +\r +![Alt text](docs/example_data.png)\r +\r +Extracting the ``example_data.zip`` file, a new folder called ``with_outgroup`` is created. This folder contain four datasets of DENV sequences.\r +\r +The next step is the creation of the settings and workload files. For the settings file, download the [default.ini](config/default.ini) from this repository and change it to you liking (the path of all software are already configured to run on docker). The workload file is a text file containing the absolute path of the datasets, followed by the desired pipeline, as shown before in this document. Here for example purposes, the ``input.txt`` file was created.\r +\r +![Alt text](docs/example_files.png)\r +\r +With all the files prepared, the framework can be executed from the ``example_data`` folder as following:\r +\r +``docker run --rm -v $PWD:$PWD rafaelstjf/hp2net:main -s $PWD/default.ini -w $PWD/input.txt``\r +\r +**Important:** the docker doesn't save your logs, for that add the parameter: ``-r $PWD/name_of_your_log_folder``.\r +\r +---\r +If you are running it on **Santos Dumont Supercomputer**, both downloading and execution of the docker container need to be performed from a submission script and executed using ``sg docker -c "sbatch script.sh"``. 
The snippet below shows an example of submission script.\r +\r +```sh\r +#!/bin/bash\r +#SBATCH --nodes=1\r +#SBATCH --ntasks-per-node=24\r +#SBATCH -p cpu_small\r +#SBATCH -J Hp2NET\r +#SBATCH --exclusive\r +#SBATCH --time=02:00:00\r +#SBATCH -e slurm-%j.err\r +#SBATCH -o slurm-%j.out\r +\r +DIR='/scratch/pcmrnbio2/rafael.terra/WF_parsl/example_data'\r +docker pull rafaelstjf/hp2net:main\r +\r +docker run --rm -v $DIR:$DIR rafaelstjf/hp2net:main -s ${DIR}/sdumont.ini -w ${DIR}/entrada.txt -r ${DIR}/logs\r +```\r +\r +## If you use it, please cite\r +\r +Terra, R., Coelho, M., Cruz, L., Garcia-Zapata, M., Gadelha, L., Osthoff, C., ... & Ocana, K. (2021, July). Gerência e Análises de Workflows aplicados a Redes Filogenéticas de Genomas de Dengue no Brasil. In *Anais do XV Brazilian e-Science Workshop* (pp. 49-56). SBC.\r +\r +**Also cite all the coupled software!**\r +\r +""" ; + schema1:keywords "Bioinformatics, Parsl, phylogenetics, HPC" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "HP2NET - Framework for Construction of Phylogenetic Networks on High Performance Computing (HPC) Environment" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/703?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description "This workflow begins from a set of genome assemblies of different samples, strains, species. The genome is first annotated with Funnanotate. Predicted proteins are furtner annotated with Busco. Next, 'ProteinOrtho' finds orthologs across the samples and makes orthogroups. Orthogroups where all samples are represented are extracted. Orthologs in each orthogroup are aligned with ClustalW. 
Test dataset: https://zenodo.org/record/6610704#.Ypn3FzlBw5k" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/358?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for preparing genomic data for phylogeny recostruction (GTN)" ; + schema1:sdDatePublished "2024-07-12 13:35:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/358/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 34691 ; + schema1:creator ; + schema1:dateCreated "2022-06-06T14:05:14Z" ; + schema1:dateModified "2023-02-13T14:06:44Z" ; + schema1:description "This workflow begins from a set of genome assemblies of different samples, strains, species. The genome is first annotated with Funnanotate. Predicted proteins are furtner annotated with Busco. Next, 'ProteinOrtho' finds orthologs across the samples and makes orthogroups. Orthogroups where all samples are represented are extracted. Orthologs in each orthogroup are aligned with ClustalW. Test dataset: https://zenodo.org/record/6610704#.Ypn3FzlBw5k" ; + schema1:keywords "phylogenetics, phylogenomics, Annotation" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "preparing genomic data for phylogeny recostruction (GTN)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/358?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2023-08-31T11:09:22.704427" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "atacseq/main" . 
+ + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Takes fastqs and reference data, to produce a single cell counts matrix into and save in annData format - adding a column called sample with the sample name. " ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/646?version=1" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "other-closed" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq: Count and Load with Cell Ranger" ; + schema1:sdDatePublished "2024-07-12 13:22:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/646/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16476 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-11-09T01:26:29Z" ; + schema1:dateModified "2023-11-09T03:53:01Z" ; + schema1:description "Takes fastqs and reference data, to produce a single cell counts matrix into and save in annData format - adding a column called sample with the sample name. " ; + schema1:isBasedOn "https://workflowhub.eu/workflows/646?version=1" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "" ; + schema1:name "scRNAseq: Count and Load with Cell Ranger" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/646?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-07-12 13:19:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=9" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11482 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:15Z" ; + schema1:dateModified "2024-06-11T12:55:15Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Precision HLA typing from next-generation sequencing data." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/991?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/hlatyping" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hlatyping" ; + schema1:sdDatePublished "2024-07-12 13:18:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/991/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3408 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:55Z" ; + schema1:dateModified "2024-06-11T12:54:55Z" ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/991?version=9" ; + schema1:keywords "DNA, hla, hla-typing, immunology, optitype, personalized-medicine, rna" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hlatyping" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/991?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.54.4" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_ligand_parameterization" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter GMX Notebook Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-07-12 13:24:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/54/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15271 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-04-14T07:51:54Z" ; + schema1:dateModified "2023-04-14T07:54:27Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/54?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter GMX Notebook Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_ligand_parameterization/blob/master/biobb_wf_ligand_parameterization/notebooks/biobb_ligand_parameterization_tutorial.ipynb" ; + schema1:version 4 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/978?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/demultiplex" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/demultiplex" ; + schema1:sdDatePublished "2024-07-12 13:21:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/978/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9518 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:47Z" ; + schema1:dateModified "2024-06-11T12:54:47Z" ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/978?version=7" ; + schema1:keywords "bases2fastq, bcl2fastq, demultiplexing, elementbiosciences, illumina" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/demultiplex" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/978?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + schema1:datePublished "2023-10-23T15:16:21.551531" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "atacseq/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.14" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.130.5" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Amber Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:24:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/130/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 64611 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-04T15:28:57Z" ; + schema1:dateModified "2024-03-04T15:30:41Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/130?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Amber Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_wf_amber_md_setup/blob/main/biobb_wf_amber_md_setup/notebooks/mdsetup/biobb_amber_setup_notebook.ipynb" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + ; + schema1:description "Docking performed by rDock using as 3 different kind of inputs. More info can be found at https://covid19.galaxyproject.org/cheminformatics/" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/14?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Cheminformatics - Docking" ; + schema1:sdDatePublished "2024-07-12 13:37:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/14/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 2177 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9616 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T14:32:01Z" ; + schema1:dateModified "2023-01-16T13:40:50Z" ; + schema1:description "Docking performed by rDock using as 3 different kind of inputs. More info can be found at https://covid19.galaxyproject.org/cheminformatics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Cheminformatics - Docking" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/14?version=1" ; + schema1:version 1 ; + ns1:input , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 5524 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-07-12 13:20:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=9" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4258 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:03Z" ; + schema1:dateModified "2024-06-11T12:55:03Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=15" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-07-12 13:18:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=15" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11863 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:10Z" ; + schema1:dateModified "2024-06-11T12:55:10Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=15" ; + schema1:version 15 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. 
We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:identifier 
"https://doi.org/10.48546/workflowhub.workflow.328.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_pmx_tutorial/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)" ; + schema1:sdDatePublished "2024-07-12 13:35:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/328/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8358 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-22T10:02:49Z" ; + schema1:dateModified "2022-11-22T10:03:35Z" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. 
Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/328?version=3" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_pmx_tutorial/python/workflow.py" ; + schema1:version 2 . + + a schema1:Dataset ; + schema1:datePublished "2022-11-28T16:35:36.692567" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-sr/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.2" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +For this workflow:\r +\r +Inputs:\r +* assembled-genome.fasta\r +* hard-repeat-masked-genome.fasta\r +* If using the mRNAs option, the additional inputs required are .cdna, .pro and .dat files. \r +\r +What it does:\r +* This workflow splits the input genomes into single sequences (to decrease computation time), annotates using FgenesH++, and merges the output. \r +\r +Outputs:\r +* genome annotation in gff3 format\r +* fasta files of mRNAs, cDNAs and proteins\r +* Busco report\r +\r +\r +\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.881.3" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Fgenesh annotation -TSI" ; + schema1:sdDatePublished "2024-07-12 13:18:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/881/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17572 ; + schema1:creator ; + schema1:dateCreated "2024-06-18T09:08:35Z" ; + schema1:dateModified "2024-06-18T09:11:51Z" ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +For this workflow:\r +\r +Inputs:\r +* assembled-genome.fasta\r +* hard-repeat-masked-genome.fasta\r +* If using the mRNAs option, the additional inputs required are .cdna, .pro and .dat files. \r +\r +What it does:\r +* This workflow splits the input genomes into single sequences (to decrease computation time), annotates using FgenesH++, and merges the output. \r +\r +Outputs:\r +* genome annotation in gff3 format\r +* fasta files of mRNAs, cDNAs and proteins\r +* Busco report\r +\r +\r +\r +\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/881?version=3" ; + schema1:isPartOf ; + schema1:keywords "TSI-annotation" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Fgenesh annotation -TSI" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/881?version=3" ; + schema1:version 3 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 189968 . 
+ + a schema1:Dataset ; + schema1:datePublished "2022-02-17T09:30:15.975078" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-variation-reporting" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-variation-reporting/COVID-19-VARIATION-REPORTING" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-07-12 13:19:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7425 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:14Z" ; + schema1:dateModified "2024-06-11T12:55:14Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=5" ; + schema1:version 5 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "B cell repertoire analysis pipeline with immcantation framework." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/963?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/airrflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bcellmagic" ; + schema1:sdDatePublished "2024-07-12 13:22:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/963/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4189 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:36Z" ; + schema1:dateModified "2024-06-11T12:54:36Z" ; + schema1:description "B cell repertoire analysis pipeline with immcantation framework." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/963?version=13" ; + schema1:keywords "airr, b-cell, immcantation, immunorepertoire, repseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bcellmagic" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/963?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# CMIP tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of computing **classical molecular interaction potentials** from **protein structures**, step by step, using the **BioExcel Building Blocks library (biobb)**. Examples shown are **Molecular Interaction Potentials (MIPs) grids, protein-protein/ligand interaction potentials, and protein titration**. 
The particular structures used are the **Lysozyme** protein (PDB code [1AKI](https://www.rcsb.org/structure/1aki)), and a MD simulation of the complex formed by the **SARS-CoV-2 Receptor Binding Domain and the human Angiotensin Converting Enzyme 2** (PDB code [6VW1](https://www.rcsb.org/structure/6vw1)).\r +\r +The code wrapped is the ***Classical Molecular Interaction Potentials (CMIP)*** code:\r +\r +**Classical molecular interaction potentials: Improved setup procedure in molecular dynamics simulations of proteins.**\r +*Gelpí, J.L., Kalko, S.G., Barril, X., Cirera, J., de la Cruz, X., Luque, F.J. and Orozco, M. (2001)*\r +*Proteins, 45: 428-437. [https://doi.org/10.1002/prot.1159](https://doi.org/10.1002/prot.1159)*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.774.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_cmip/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Classical Molecular Interaction Potentials" ; + schema1:sdDatePublished "2024-07-12 13:24:44 +0100" ; + schema1:url 
"https://workflowhub.eu/workflows/774/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7716 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-04T15:04:07Z" ; + schema1:dateModified "2024-03-04T15:15:20Z" ; + schema1:description """# CMIP tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of computing **classical molecular interaction potentials** from **protein structures**, step by step, using the **BioExcel Building Blocks library (biobb)**. Examples shown are **Molecular Interaction Potentials (MIPs) grids, protein-protein/ligand interaction potentials, and protein titration**. The particular structures used are the **Lysozyme** protein (PDB code [1AKI](https://www.rcsb.org/structure/1aki)), and a MD simulation of the complex formed by the **SARS-CoV-2 Receptor Binding Domain and the human Angiotensin Converting Enzyme 2** (PDB code [6VW1](https://www.rcsb.org/structure/6vw1)).\r +\r +The code wrapped is the ***Classical Molecular Interaction Potentials (CMIP)*** code:\r +\r +**Classical molecular interaction potentials: Improved setup procedure in molecular dynamics simulations of proteins.**\r +*Gelpí, J.L., Kalko, S.G., Barril, X., Cirera, J., de la Cruz, X., Luque, F.J. and Orozco, M. (2001)*\r +*Proteins, 45: 428-437. 
[https://doi.org/10.1002/prot.1159](https://doi.org/10.1002/prot.1159)*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Classical Molecular Interaction Potentials" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_cmip/python/workflow.py" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:isBasedOn "https://github.com/nf-core/scrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/scrnaseq" ; + schema1:sdDatePublished "2024-07-12 13:19:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1021/ro_crate?version=13" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11170 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:18Z" ; + schema1:dateModified "2024-06-11T12:55:18Z" ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:keywords "10x-genomics, 10xgenomics, alevin, bustools, Cellranger, kallisto, rna-seq, single-cell, star-solo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/scrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:version 13 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """[![Development](https://img.shields.io/badge/development-active-blue.svg)](https://img.shields.io/badge/development-active-blue.svg)\r +[![Reads2Map](https://circleci.com/gh/Cristianetaniguti/Reads2Map.svg?style=svg)](https://app.circleci.com/pipelines/github/Cristianetaniguti/Reads2Map)\r +\r +## Reads2Map \r +\r +Reads2Map presents a collection of [WDL workflows](https://openwdl.org/) to build linkage maps from sequencing reads. Each workflow release is described in the [Read2Map releases page](https://github.com/Cristianetaniguti/Reads2Map/releases). \r +\r +The main workflows are the `EmpiricalReads2Map.wdl` and the `SimulatedReads2Map.wdl`. The `EmpiricalReads2Map.wdl` is composed by the `EmpiricalSNPCalling.wdl` that performs the SNP calling, and the `EmpiricalMaps.wdl` that performs the genotype calling and map building in empirical reads. 
The `SimulatedReads2Map.wdl` simulates Illumina reads for RADseq, exome, or WGS data and performs the SNP and genotype calling and genetic map building.\r +\r +By now, [GATK](https://github.com/broadinstitute/gatk), [Freebayes](https://github.com/ekg/freebayes) are included for SNP calling; [updog](https://github.com/dcgerard/updog), [polyRAD](https://github.com/lvclark/polyRAD), [SuperMASSA](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0030906) for dosage calling; and [OneMap](https://github.com/augusto-garcia/onemap), and [GUSMap](https://github.com/tpbilton/GUSMap) for linkage map build.\r +\r +![math_meth2](https://user-images.githubusercontent.com/7572527/203172239-e4d2d857-84e2-48c5-bb88-01052a287004.png)\r +\r +## How to use\r +\r +Multiple systems are available to run WDL workflows such as Cromwell, miniWDL, and dxWDL. See further information in the [openwdl documentation](https://github.com/openwdl/wdl#execution-engines).\r +\r +To run a pipeline, first navigate to [Reads2Map releases page](https://github.com/Cristianetaniguti/Reads2Map/releases), search for the pipeline tag you which to run, and download the pipeline’s assets (the WDL workflow, the JSON, and the ZIP with accompanying dependencies).\r +\r +## Documentation\r +\r +Check the description of the inputs for the pipelines:\r +\r +* [EmpiricalReads2Map (EmpiricalSNPCalling and EmpiricalMaps)](https://cristianetaniguti.github.io/Tutorials/Reads2Map/EmpiricalReads.html)\r +\r +* [SimulatedReads2Map](https://cristianetaniguti.github.io/Tutorials/Reads2Map/simulatedreads.html)\r +\r +Check how to evaluate the workflows results in Reads2MapApp Shiny:\r +\r +* [Reads2MapApp](https://github.com/Cristianetaniguti/Reads2MapApp)\r +\r +Once you selected the best pipeline using a subset of your data, you can build a complete high-density linkage map:\r +\r +* [A Guide to Build High-Density Linkage 
Maps](https://cristianetaniguti.github.io/Tutorials/onemap/Quick_HighDens/High_density_maps.html)\r +\r +Check more information and examples of usage in:\r +\r +* [Taniguti, C. H., Taniguti, L. M., Amadeu, R. R., Mollinari, M., Da, G., Pereira, S., Riera-Lizarazu, O., Lau, J., Byrne, D., de Siqueira Gesteira, G., De, T., Oliveira, P., Ferreira, G. C., & Franco Garcia, A. A. Developing best practices for genotyping-by-sequencing analysis using linkage maps as benchmarks. BioRxiv. https://doi.org/10.1101/2022.11.24.517847](https://www.biorxiv.org/content/10.1101/2022.11.24.517847v2)\r +\r +## Third-party software and images\r +\r +- [BWA](https://github.com/lh3/bwa) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Used to align simulated reads to reference;\r +- [cutadapt](https://github.com/marcelm/cutadapt) in [cristaniguti/ pirs-ddrad-cutadapt:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/pirs-ddrad-cutadapt): Trim simulated reads;\r +- [ddRADseqTools](https://github.com/GGFHF/ddRADseqTools) in [cristaniguti/ pirs-ddrad-cutadapt:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/pirs-ddrad-cutadapt): Set of applications useful to in silico design and testing of double digest RADseq (ddRADseq) experiments;\r +- [Freebayes](https://github.com/ekg/freebayes) in [Cristaniguti/freebayes:0.0.1](): Variant call step;\r +- [GATK](https://github.com/broadinstitute/gatk) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Variant call step using Haplotype Caller, GenomicsDBImport and GenotypeGVCFs;\r +- [PedigreeSim](https://github.com/PBR/pedigreeSim?files=1) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Simulates progeny genotypes from parents genotypes for different 
types of populations;\r +- [picard](https://github.com/broadinstitute/picard) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Process alignment files;\r +- [pirs](https://github.com/galaxy001/pirs) in [cristaniguti/ pirs-ddrad-cutadapt:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/pirs-ddrad-cutadapt): To generate simulates paired-end reads from a reference genome;\r +- [samtools](https://github.com/samtools/samtools) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Process alignment files;\r +- [SimuSCoP](https://github.com/qasimyu/simuscop) in [cristaniguti/simuscopr:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/simuscopr): Exome and WGS Illumina reads simulations;\r +- [RADinitio](http://catchenlab.life.illinois.edu/radinitio/) in [ cristaniguti/radinitio:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/radinitio): RADseq Illumina reads simulation;\r +- [SuperMASSA](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0030906) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Efficient Exact Maximum a Posteriori Computation for Bayesian SNP Genotyping in Polyploids;\r +- [bcftools](https://github.com/samtools/bcftools) in [lifebitai/bcftools:1.10.2](https://hub.docker.com/r/lifebitai/bcftools): utilities for variant calling and manipulating VCFs and BCFs;\r +- [vcftools](http://vcftools.sourceforge.net/) in [cristaniguti/split_markers:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/split_markers): program package designed for working with VCF files.\r +- [MCHap](https://github.com/PlantandFoodResearch/MCHap) in [cristaniguti/mchap:0.7.0](https://hub.docker.com/repository/docker/cristaniguti/mchap): Polyploid 
micro-haplotype assembly using Markov chain Monte Carlo simulation.\r +\r +### R packages\r +\r +- [OneMap](https://github.com/augusto-garcia/onemap) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Is a software for constructing genetic maps in experimental crosses: full-sib, RILs, F2 and backcrosses;\r +- [Reads2MapTools](https://github.com/Cristianetaniguti/Reads2MapTools) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Support package to perform mapping populations simulations and genotyping for OneMap genetic map building\r +- [GUSMap](https://github.com/tpbilton/GUSMap): Genotyping Uncertainty with Sequencing data and linkage MAPping\r +- [updog](https://github.com/dcgerard/updog) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Flexible Genotyping of Polyploids using Next Generation Sequencing Data\r +- [polyRAD](https://github.com/lvclark/polyRAD) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Genotype Calling with Uncertainty from Sequencing Data in Polyploids\r +- [Reads2MapApp](https://github.com/Cristianetaniguti/Reads2MapApp) in [cristaniguti/reads2mapApp:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Shiny app to evaluate Reads2Map workflows results\r +- [simuscopR](https://github.com/Cristianetaniguti/simuscopR) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Wrap-up R package for SimusCop simulations.""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.409.1" ; + schema1:isBasedOn "https://github.com/Cristianetaniguti/Reads2Map" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for EmpiricalReads2Map" ; + schema1:sdDatePublished "2024-07-12 13:34:44 +0100" ; + schema1:url 
"https://workflowhub.eu/workflows/409/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1773 ; + schema1:creator ; + schema1:dateCreated "2022-11-29T20:09:43Z" ; + schema1:dateModified "2023-01-16T14:04:54Z" ; + schema1:description """[![Development](https://img.shields.io/badge/development-active-blue.svg)](https://img.shields.io/badge/development-active-blue.svg)\r +[![Reads2Map](https://circleci.com/gh/Cristianetaniguti/Reads2Map.svg?style=svg)](https://app.circleci.com/pipelines/github/Cristianetaniguti/Reads2Map)\r +\r +## Reads2Map \r +\r +Reads2Map presents a collection of [WDL workflows](https://openwdl.org/) to build linkage maps from sequencing reads. Each workflow release is described in the [Read2Map releases page](https://github.com/Cristianetaniguti/Reads2Map/releases). \r +\r +The main workflows are the `EmpiricalReads2Map.wdl` and the `SimulatedReads2Map.wdl`. The `EmpiricalReads2Map.wdl` is composed by the `EmpiricalSNPCalling.wdl` that performs the SNP calling, and the `EmpiricalMaps.wdl` that performs the genotype calling and map building in empirical reads. 
The `SimulatedReads2Map.wdl` simulates Illumina reads for RADseq, exome, or WGS data and performs the SNP and genotype calling and genetic map building.\r +\r +By now, [GATK](https://github.com/broadinstitute/gatk), [Freebayes](https://github.com/ekg/freebayes) are included for SNP calling; [updog](https://github.com/dcgerard/updog), [polyRAD](https://github.com/lvclark/polyRAD), [SuperMASSA](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0030906) for dosage calling; and [OneMap](https://github.com/augusto-garcia/onemap), and [GUSMap](https://github.com/tpbilton/GUSMap) for linkage map build.\r +\r +![math_meth2](https://user-images.githubusercontent.com/7572527/203172239-e4d2d857-84e2-48c5-bb88-01052a287004.png)\r +\r +## How to use\r +\r +Multiple systems are available to run WDL workflows such as Cromwell, miniWDL, and dxWDL. See further information in the [openwdl documentation](https://github.com/openwdl/wdl#execution-engines).\r +\r +To run a pipeline, first navigate to [Reads2Map releases page](https://github.com/Cristianetaniguti/Reads2Map/releases), search for the pipeline tag you which to run, and download the pipeline’s assets (the WDL workflow, the JSON, and the ZIP with accompanying dependencies).\r +\r +## Documentation\r +\r +Check the description of the inputs for the pipelines:\r +\r +* [EmpiricalReads2Map (EmpiricalSNPCalling and EmpiricalMaps)](https://cristianetaniguti.github.io/Tutorials/Reads2Map/EmpiricalReads.html)\r +\r +* [SimulatedReads2Map](https://cristianetaniguti.github.io/Tutorials/Reads2Map/simulatedreads.html)\r +\r +Check how to evaluate the workflows results in Reads2MapApp Shiny:\r +\r +* [Reads2MapApp](https://github.com/Cristianetaniguti/Reads2MapApp)\r +\r +Once you selected the best pipeline using a subset of your data, you can build a complete high-density linkage map:\r +\r +* [A Guide to Build High-Density Linkage 
Maps](https://cristianetaniguti.github.io/Tutorials/onemap/Quick_HighDens/High_density_maps.html)\r +\r +Check more information and examples of usage in:\r +\r +* [Taniguti, C. H., Taniguti, L. M., Amadeu, R. R., Mollinari, M., Da, G., Pereira, S., Riera-Lizarazu, O., Lau, J., Byrne, D., de Siqueira Gesteira, G., De, T., Oliveira, P., Ferreira, G. C., & Franco Garcia, A. A. Developing best practices for genotyping-by-sequencing analysis using linkage maps as benchmarks. BioRxiv. https://doi.org/10.1101/2022.11.24.517847](https://www.biorxiv.org/content/10.1101/2022.11.24.517847v2)\r +\r +## Third-party software and images\r +\r +- [BWA](https://github.com/lh3/bwa) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Used to align simulated reads to reference;\r +- [cutadapt](https://github.com/marcelm/cutadapt) in [cristaniguti/ pirs-ddrad-cutadapt:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/pirs-ddrad-cutadapt): Trim simulated reads;\r +- [ddRADseqTools](https://github.com/GGFHF/ddRADseqTools) in [cristaniguti/ pirs-ddrad-cutadapt:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/pirs-ddrad-cutadapt): Set of applications useful to in silico design and testing of double digest RADseq (ddRADseq) experiments;\r +- [Freebayes](https://github.com/ekg/freebayes) in [Cristaniguti/freebayes:0.0.1](): Variant call step;\r +- [GATK](https://github.com/broadinstitute/gatk) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Variant call step using Haplotype Caller, GenomicsDBImport and GenotypeGVCFs;\r +- [PedigreeSim](https://github.com/PBR/pedigreeSim?files=1) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Simulates progeny genotypes from parents genotypes for different 
types of populations;\r +- [picard](https://github.com/broadinstitute/picard) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Process alignment files;\r +- [pirs](https://github.com/galaxy001/pirs) in [cristaniguti/ pirs-ddrad-cutadapt:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/pirs-ddrad-cutadapt): To generate simulates paired-end reads from a reference genome;\r +- [samtools](https://github.com/samtools/samtools) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Process alignment files;\r +- [SimuSCoP](https://github.com/qasimyu/simuscop) in [cristaniguti/simuscopr:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/simuscopr): Exome and WGS Illumina reads simulations;\r +- [RADinitio](http://catchenlab.life.illinois.edu/radinitio/) in [ cristaniguti/radinitio:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/radinitio): RADseq Illumina reads simulation;\r +- [SuperMASSA](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0030906) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Efficient Exact Maximum a Posteriori Computation for Bayesian SNP Genotyping in Polyploids;\r +- [bcftools](https://github.com/samtools/bcftools) in [lifebitai/bcftools:1.10.2](https://hub.docker.com/r/lifebitai/bcftools): utilities for variant calling and manipulating VCFs and BCFs;\r +- [vcftools](http://vcftools.sourceforge.net/) in [cristaniguti/split_markers:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/split_markers): program package designed for working with VCF files.\r +- [MCHap](https://github.com/PlantandFoodResearch/MCHap) in [cristaniguti/mchap:0.7.0](https://hub.docker.com/repository/docker/cristaniguti/mchap): Polyploid 
micro-haplotype assembly using Markov chain Monte Carlo simulation.\r +\r +### R packages\r +\r +- [OneMap](https://github.com/augusto-garcia/onemap) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Is a software for constructing genetic maps in experimental crosses: full-sib, RILs, F2 and backcrosses;\r +- [Reads2MapTools](https://github.com/Cristianetaniguti/Reads2MapTools) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Support package to perform mapping populations simulations and genotyping for OneMap genetic map building\r +- [GUSMap](https://github.com/tpbilton/GUSMap): Genotyping Uncertainty with Sequencing data and linkage MAPping\r +- [updog](https://github.com/dcgerard/updog) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Flexible Genotyping of Polyploids using Next Generation Sequencing Data\r +- [polyRAD](https://github.com/lvclark/polyRAD) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Genotype Calling with Uncertainty from Sequencing Data in Polyploids\r +- [Reads2MapApp](https://github.com/Cristianetaniguti/Reads2MapApp) in [cristaniguti/reads2mapApp:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Shiny app to evaluate Reads2Map workflows results\r +- [simuscopR](https://github.com/Cristianetaniguti/simuscopR) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Wrap-up R package for SimusCop simulations.""" ; + schema1:keywords "WDL, linkage_map, variant_calling" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "EmpiricalReads2Map" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/409?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. 
Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. 
\r +- At the **biological level**, it is important to link observed **conformational ensembles**, to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDKe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.488.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_flexdyn/cwl" ; + schema1:license "other-open" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Protein conformational ensembles generation" ; + schema1:sdDatePublished "2024-07-12 13:33:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/488/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 203809 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 39465 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-06T10:10:37Z" ; + schema1:dateModified "2024-04-22T10:41:26Z" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. 
Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. 
\r +- At the **biological level**, it is important to link observed **conformational ensembles**, to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDKe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/488?version=1" ; + schema1:keywords "" ; + schema1:license "" ; + schema1:name "CWL Protein conformational ensembles generation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_flexdyn/cwl/workflow.cwl" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1027?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/viralrecon" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/viralrecon" ; + schema1:sdDatePublished "2024-07-12 13:18:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1027/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6372 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:20Z" ; + schema1:dateModified "2024-06-11T12:55:20Z" ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1027?version=10" ; + schema1:keywords "Amplicon, ARTIC, Assembly, covid-19, covid19, illumina, long-read-sequencing, Metagenomics, nanopore, ONT, oxford-nanopore, SARS-CoV-2, variant-calling, viral, Virus" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/viralrecon" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1027?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A Nanostring nCounter analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1003?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/nanostring" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nanostring" ; + schema1:sdDatePublished "2024-07-12 13:20:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1003/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8834 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:06Z" ; + schema1:dateModified "2024-06-11T12:55:06Z" ; + schema1:description "A Nanostring nCounter analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1003?version=4" ; + schema1:keywords "nanostring, nanostringnorm" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nanostring" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1003?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """RNAseq workflow UMG: Here we introduce a scientific workflow implementing several open-source software executed by Galaxy parallel scripting language in an high-performance computing environment. We have applied the workflow to a single-cardiomyocyte RNA-seq data retrieved from Gene Expression Omnibus database. The workflow allows for the analysis (alignment, QC, sort and count reads, statistics generation) of raw RNA-seq data and seamless integration of differential expression results into a configurable script code.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.412.1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for RNAseq_UMG_SDumont_v1" ; + schema1:sdDatePublished "2024-07-12 13:34:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/412/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 19967 ; + schema1:creator ; + schema1:dateCreated "2022-12-06T19:22:16Z" ; + schema1:dateModified "2023-02-13T14:05:45Z" ; + schema1:description """RNAseq workflow UMG: Here we introduce a scientific workflow implementing several open-source software executed by Galaxy parallel scripting language in an high-performance computing environment. We have applied the workflow to a single-cardiomyocyte RNA-seq data retrieved from Gene Expression Omnibus database. The workflow allows for the analysis (alignment, QC, sort and count reads, statistics generation) of raw RNA-seq data and seamless integration of differential expression results into a configurable script code.\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "RNAseq_UMG_SDumont_v1" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/412?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.821.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_dna_helparms/docker" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Docker Structural DNA helical parameters tutorial" ; + schema1:sdDatePublished "2024-07-12 13:23:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/821/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 864 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-22T10:34:40Z" ; + schema1:dateModified "2024-05-22T13:50:29Z" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Docker Structural DNA helical parameters tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_dna_helparms/docker/Dockerfile" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Mutations Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.290.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_md_setup_mutations/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein MD Setup tutorial with mutations" ; + schema1:sdDatePublished "2024-07-12 13:33:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/290/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7412 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T08:55:52Z" ; + schema1:dateModified "2023-04-14T08:56:54Z" ; + schema1:description """# Mutations Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/290?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein MD Setup tutorial with mutations" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_md_setup_mutations/python/workflow.py" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """![](https://github.com/AusARG/pipesnake/blob/main/docs/images/pipesnake_Logo.png)\r + \r +\r +Welcome to the *pipesnake*. \r +Let's get started. \r +\r +---\r +\r +# Introduction\r +\r +**pipesnake** is a bioinformatics best-practice analysis pipeline for phylogenomic reconstruction starting from short-read 'second-generation' sequencing data.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies.\r +\r +---\r +\r +# Motivation + Project Background\r +\r +We developed *pipesnake* as part of the [***Aus***tralian ***A***mphibian and ***R***eptile ***G***enomics](https://ausargenomics.com/) (*AusARG*) initiative. **AusARG** is a national collaborative project aiming to facilitate the development of genomics resources for Australia's unique amphibian and reptile fauna. This pipeline was developed specifically as part of the *AusARG Phylogenomics Working Group* with the goal of collecting a consistent set of phylogenomic data for all of Australia's frogs and reptiles, under similar assembly, alignment, and tree estimation procedures. \r +\r +*pipesnake* is however, applicable to much broader phylogenomic questions, and is appropriate for processing exon-capture or transcriptomic data, so long as the **input is second-generation (short-read) data**. 
""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/737?version=1" ; + schema1:isBasedOn "https://github.com/AusARG/pipesnake" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for pipesnake" ; + schema1:sdDatePublished "2024-07-12 13:24:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/737/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 3866 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1887 ; + schema1:creator , + ; + schema1:dateCreated "2024-02-02T05:15:00Z" ; + schema1:dateModified "2024-02-08T00:03:01Z" ; + schema1:description """![](https://github.com/AusARG/pipesnake/blob/main/docs/images/pipesnake_Logo.png)\r + \r +\r +Welcome to the *pipesnake*. \r +Let's get started. \r +\r +---\r +\r +# Introduction\r +\r +**pipesnake** is a bioinformatics best-practice analysis pipeline for phylogenomic reconstruction starting from short-read 'second-generation' sequencing data.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies.\r +\r +---\r +\r +# Motivation + Project Background\r +\r +We developed *pipesnake* as part of the [***Aus***tralian ***A***mphibian and ***R***eptile ***G***enomics](https://ausargenomics.com/) (*AusARG*) initiative. 
**AusARG** is a national collaborative project aiming to facilitate the development of genomics resources for Australia's unique amphibian and reptile fauna. This pipeline was developed specifically as part of the *AusARG Phylogenomics Working Group* with the goal of collecting a consistent set of phylogenomic data for all of Australia's frogs and reptiles, under similar assembly, alignment, and tree estimation procedures. \r +\r +*pipesnake* is however, applicable to much broader phylogenomic questions, and is appropriate for processing exon-capture or transcriptomic data, so long as the **input is second-generation (short-read) data**. """ ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "pipesnake" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/737?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-07-12 13:20:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7200 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:58Z" ; + schema1:dateModified "2024-06-11T12:54:58Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "This workflow is based on the idea of comparing different gene sets through their semantic interpretation. In many cases, the user studies a specific phenotype (e.g. disease) by analyzing lists of genes resulting from different samples or patients. Their pathway analysis could result in different semantic networks, revealing mechanistic and phenotypic divergence between these gene sets. The workflow of BioTranslator Comparative Analysis compares quantitatively the outputs of pathway analysis, based on the topology of the underlying ontological graph, in order to derive a semantic similarity value for each pair of the initial gene sets. The workflow is available in a Galaxy application and can be used for 14 species. The algorithm accepts as input a batch of gene sets, such as BioTranslator, for the same organism. It performs pathway analysis according to the user-selected ontology and then it compares the derived semantic networks and extracts a matrix with their distances, as well as a respective heatmap." 
; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/193?version=1" ; + schema1:isBasedOn "http://www.biotranslator.gr:8080/u/thodk/w/workflow-of-biotranslator-comparative-analysis" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Workflow of BioTranslator Comparative Analysis" ; + schema1:sdDatePublished "2024-07-12 13:36:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/193/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3984 ; + schema1:dateCreated "2021-09-26T18:20:05Z" ; + schema1:dateModified "2023-01-16T13:53:00Z" ; + schema1:description "This workflow is based on the idea of comparing different gene sets through their semantic interpretation. In many cases, the user studies a specific phenotype (e.g. disease) by analyzing lists of genes resulting from different samples or patients. Their pathway analysis could result in different semantic networks, revealing mechanistic and phenotypic divergence between these gene sets. The workflow of BioTranslator Comparative Analysis compares quantitatively the outputs of pathway analysis, based on the topology of the underlying ontological graph, in order to derive a semantic similarity value for each pair of the initial gene sets. The workflow is available in a Galaxy application and can be used for 14 species. The algorithm accepts as input a batch of gene sets, such as BioTranslator, for the same organism. It performs pathway analysis according to the user-selected ontology and then it compares the derived semantic networks and extracts a matrix with their distances, as well as a respective heatmap." 
; + schema1:image ; + schema1:keywords "Semantic Network Analysis, Semantic Comparison, Pathway Analysis" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Workflow of BioTranslator Comparative Analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/193?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 29724 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=14" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-07-12 13:18:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=14" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10290 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:39Z" ; + schema1:dateModified "2024-06-11T12:54:39Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=14" ; + schema1:version 14 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Simulations and figures supporting the manuscript \"Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake\"" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.511.1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake" ; + schema1:sdDatePublished "2024-07-12 13:32:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/511/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 305 ; + schema1:creator , + , + , + , + , + ; + schema1:dateCreated "2023-06-21T09:51:56Z" ; + schema1:dateModified "2023-08-18T11:04:05Z" ; + schema1:description "Simulations and figures supporting the manuscript \"Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake\"" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/511?version=4" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/511?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 75460 . 
+ + a schema1:Dataset ; + schema1:datePublished "2021-12-20T17:04:47.625180" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-consensus-from-variation" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-consensus-from-variation/COVID-19-CONSENSUS-CONSTRUCTION" ; + schema1:sdDatePublished "2021-12-21 03:01:01 +0000" ; + schema1:softwareVersion "v0.2.2" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/986?version=13" ; + schema1:isBasedOn "https://github.com/nf-core/fetchngs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fetchngs" ; + schema1:sdDatePublished "2024-07-12 13:21:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/986/ro_crate?version=13" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9686 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:53Z" ; + schema1:dateModified "2024-06-11T12:54:53Z" ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/986?version=13" ; + schema1:keywords "ddbj, download, ena, FASTQ, geo, sra, synapse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fetchngs" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/986?version=13" ; + schema1:version 13 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "The pangenome graph construction pipeline renders a collection of sequences into a pangenome graph. Its goal is to build a graph that is locally directed and acyclic while preserving large-scale variation. Maintaining local linearity is important for interpretation, visualization, mapping, comparative genomics, and reuse of pangenome graphs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1007?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/pangenome" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/pangenome" ; + schema1:sdDatePublished "2024-07-12 13:20:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1007/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11481 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:07Z" ; + schema1:dateModified "2024-06-11T12:55:07Z" ; + schema1:description "The pangenome graph construction pipeline renders a collection of sequences into a pangenome graph. Its goal is to build a graph that is locally directed and acyclic while preserving large-scale variation. Maintaining local linearity is important for interpretation, visualization, mapping, comparative genomics, and reuse of pangenome graphs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1007?version=3" ; + schema1:keywords "pangenome" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/pangenome" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1007?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-05-30T07:35:00.361997" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "cutandrun/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/963?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/airrflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/airrflow" ; + schema1:sdDatePublished "2024-07-12 13:22:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/963/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8509 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:37Z" ; + schema1:dateModified "2024-06-11T12:54:37Z" ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/963?version=13" ; + schema1:keywords "airr, b-cell, immcantation, immunorepertoire, repseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/airrflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/963?version=6" ; + schema1:version 6 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. 
Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. 
\r +- At the **biological level**, it is important to link observed **conformational ensembles**, to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDKe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.486.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_flexdyn" ; + schema1:license "other-open" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein conformational ensembles generation" ; + schema1:sdDatePublished "2024-07-12 13:33:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/486/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 108347 ; + schema1:creator , + ; + schema1:dateCreated "2023-05-31T13:19:38Z" ; + schema1:dateModified "2023-05-31T13:24:08Z" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. 
Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. 
\r +- At the **biological level**, it is important to link observed **conformational ensembles**, to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDKe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/486?version=2" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "" ; + schema1:name "Jupyter Notebook Protein conformational ensembles generation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_flexdyn/blob/main/biobb_wf_flexdyn/notebooks/biobb_wf_flexdyn.ipynb" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2023-01-13T13:28:37.647185" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "atacseq/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.3" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "This workflow correspond to the Genome-wide alternative splicing analysis training. 
It allows to analyze isoform switching by making use of IsoformSwitchAnalyzeR." ; + schema1:hasPart , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.472.1" ; + schema1:license "CC-BY-NC-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genome-wide alternative splicing analysis" ; + schema1:sdDatePublished "2024-07-12 13:33:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/472/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 17028 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 65307 ; + schema1:creator ; + schema1:dateCreated "2023-05-17T11:21:57Z" ; + schema1:dateModified "2023-05-17T12:59:35Z" ; + schema1:description "This workflow correspond to the Genome-wide alternative splicing analysis training. It allows to analyze isoform switching by making use of IsoformSwitchAnalyzeR." ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/472?version=3" ; + schema1:keywords "Transcriptomics, alternative-splicing, GTN" ; + schema1:license "https://spdx.org/licenses/CC-BY-NC-4.0" ; + schema1:name "Genome-wide alternative splicing analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/472?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 46064 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-07-12 13:20:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4215 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:03Z" ; + schema1:dateModified "2024-06-11T12:55:03Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Differential abundance analysis" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/981?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/differentialabundance" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/differentialabundance" ; + schema1:sdDatePublished "2024-07-12 13:21:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/981/ro_crate?version=8" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15717 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:49Z" ; + schema1:dateModified "2024-06-11T12:54:49Z" ; + schema1:description "Differential abundance analysis" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/981?version=8" ; + schema1:keywords "ATAC-seq, ChIP-seq, deseq2, differential-abundance, differential-expression, gsea, limma, microarray, rna-seq, shiny" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/differentialabundance" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/981?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/965?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/atacseq" ; + schema1:sdDatePublished "2024-07-12 13:22:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/965/ro_crate?version=9" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11439 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:42Z" ; + schema1:dateModified "2024-06-11T12:54:42Z" ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/965?version=8" ; + schema1:keywords "ATAC-seq, chromatin-accessibiity" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/atacseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/965?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/986?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/fetchngs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fetchngs" ; + schema1:sdDatePublished "2024-07-12 13:21:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/986/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6338 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:52Z" ; + schema1:dateModified "2024-06-11T12:54:52Z" ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/986?version=13" ; + schema1:keywords "ddbj, download, ena, FASTQ, geo, sra, synapse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fetchngs" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/986?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.286.2" ; + schema1:isBasedOn 
"https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_dna_helparms/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Structural DNA helical parameters tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/286/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 22900 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-22T10:14:36Z" ; + schema1:dateModified "2023-01-16T13:58:32Z" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/286?version=3" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Structural DNA helical parameters tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_dna_helparms/python/workflow.py" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Conformational Transitions calculations tutorial using BioExcel Building Blocks (biobb) and GOdMD\r +\r +This tutorial aims to illustrate the process of computing a conformational transition between two known structural conformations of a protein, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.824.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_godmd/docker" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Docker Protein Conformational Transitions calculations tutorial" ; + schema1:sdDatePublished "2024-07-12 13:23:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/824/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 740 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-22T10:50:04Z" ; + schema1:dateModified "2024-05-22T13:47:51Z" ; + schema1:description """# Protein Conformational Transitions calculations tutorial using BioExcel Building Blocks (biobb) and GOdMD\r +\r +This tutorial aims to illustrate the process of computing a conformational transition between two known structural conformations of a protein, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Docker Protein Conformational Transitions calculations tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_godmd/docker/Dockerfile" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. 
With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.55.6" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_pmx_tutorial" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Mutation Free Energy Calculations" ; + schema1:sdDatePublished "2024-07-12 13:24:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/55/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 48304 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-04T14:26:13Z" ; + schema1:dateModified "2024-03-05T09:41:01Z" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. 
With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/55?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Mutation Free Energy Calculations" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_pmx_tutorial/blob/main/biobb_wf_pmx_tutorial/notebooks/biobb_wf_pmx_tutorial.ipynb" ; + schema1:version 6 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-07-12 13:20:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4215 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:03Z" ; + schema1:dateModified "2024-06-11T12:55:03Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=3" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# Cancer Invasion Workflow\r +\r +## Table of Contents\r +\r +- [Cancer Invasion Workflow](#cancer-invasion-workflow)\r + - [Table of Contents](#table-of-contents)\r + - [Description](#description)\r + - [Contents](#contents)\r + - [Building Blocks](#building-blocks)\r + - [Workflows](#workflows)\r + - [Resources](#resources)\r + - [Tests](#tests)\r + - [Instructions](#instructions)\r + - [Local machine](#local-machine)\r + - [Requirements](#requirements)\r + - [Usage steps](#usage-steps)\r + - [MareNostrum 4](#marenostrum-4)\r + - [Requirements in MN4](#requirements-in-mn4)\r + - [Usage steps in MN4](#usage-steps-in-mn4)\r + - [Mahti or Puhti](#mahti-or-puhti)\r + - [Requirements](#requirements)\r + - [Steps](#steps)\r + - [License](#license)\r + - [Contact](#contact)\r +\r +## Description\r +\r +Uses multiscale simulations to describe cancer progression into invasion.\r +\r +The workflow uses the following building blocks, described in order of execution:\r +\r +1. 
PhysiBoSS-Invasion\r +\r +For details on individual workflow steps, see the user documentation for each building block.\r +\r +[`GitHub repository`]()\r +\r +\r +## Contents\r +\r +### Building Blocks\r +\r +The ``BuildingBlocks`` folder contains the script to install the\r +Building Blocks used in the Cancer Invasion Workflow.\r +\r +### Workflows\r +\r +The ``Workflow`` folder contains the workflows implementations.\r +\r +Currently contains the implementation using PyCOMPSs and Snakemake (in progress).\r +\r +### Resources\r +\r +The ``Resources`` folder contains dataset files.\r +\r +### Tests\r +\r +The ``Tests`` folder contains the scripts that run each Building Block\r +used in the workflow for the given small dataset.\r +They can be executed individually for testing purposes.\r +\r +## Instructions\r +\r +### Local machine\r +\r +This section explains the requirements and usage for the Cancer Invasion Workflow in a laptop or desktop computer.\r +\r +#### Requirements\r +\r +- [`permedcoe`](https://github.com/PerMedCoE/permedcoe) package\r +- [PyCOMPSs](https://pycompss.readthedocs.io/en/stable/Sections/00_Quickstart.html) / [Snakemake](https://snakemake.readthedocs.io/en/stable/)\r +- [Singularity](https://sylabs.io/guides/3.0/user-guide/installation.html)\r +\r +#### Usage steps\r +\r +1. Clone this repository:\r +\r + ```bash\r + git clone https://github.com/PerMedCoE/cancer-invasion-workflow\r + ```\r +\r +2. Install the Building Blocks required for the Cancer Invasion Workflow:\r +\r + ```bash\r + cancer-invasion-workflow/BuildingBlocks/./install_BBs.sh\r + ```\r +\r +3. 
Get the required Building Block images from the project [B2DROP](https://b2drop.bsc.es/index.php/f/444350):\r +\r + - Required images:\r + - PhysiCell-Invasion.singularity\r +\r + The path where these files are stored **MUST be exported in the `PERMEDCOE_IMAGES`** environment variable.\r +\r + > :warning: **TIP**: These containers can be built manually as follows (be patient since some of them may take some time):\r + 1. Clone the `BuildingBlocks` repository\r + ```bash\r + git clone https://github.com/PerMedCoE/BuildingBlocks.git\r + ```\r + 2. Build the required Building Block images\r + ```bash\r + cd BuildingBlocks/Resources/images\r + sudo singularity build PhysiCell-Invasion.sif PhysiCell-Invasion.singularity\r + cd ../../..\r + ```\r +\r +**If using PyCOMPSs in local PC** (make sure that PyCOMPSs in installed):\r +\r +4. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflows/PyCOMPSs\r + ```\r +\r +5. Execute `./run.sh`\r +\r +**If using Snakemake in local PC** (make sure that SnakeMake is installed):\r +\r +4. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflows/SnakeMake\r + ```\r +\r +5. Execute `./run.sh`\r + > **TIP**: If you want to run the workflow with a different dataset, please update the `run.sh` script setting the `dataset` variable to the new dataset folder and their file names.\r +\r +\r +### MareNostrum 4\r +\r +This section explains the requirements and usage for the Cancer Invasion Workflow in the MareNostrum 4 supercomputer.\r +\r +#### Requirements in MN4\r +\r +- Access to MN4\r +\r +All Building Blocks are already installed in MN4, and the Cancer Invasion Workflow available.\r +\r +#### Usage steps in MN4\r +\r +1. 
Load the `COMPSs`, `Singularity` and `permedcoe` modules\r +\r + ```bash\r + export COMPSS_PYTHON_VERSION=3\r + module load COMPSs/3.1\r + module load singularity/3.5.2\r + module use /apps/modules/modulefiles/tools/COMPSs/libraries\r + module load permedcoe\r + ```\r +\r + > **TIP**: Include the loading into your `${HOME}/.bashrc` file to load it automatically on the session start.\r +\r + This commands will load COMPSs and the permedcoe package which provides all necessary dependencies, as well as the path to the singularity container images (`PERMEDCOE_IMAGES` environment variable) and testing dataset (`CANCERINVASIONWORKFLOW_DATASET` environment variable).\r +\r +2. Get a copy of the pilot workflow into your desired folder\r +\r + ```bash\r + mkdir desired_folder\r + cd desired_folder\r + get_cancerinvasionworkflow\r + ```\r +\r +3. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflow/PyCOMPSs\r + ```\r +\r +4. Execute `./launch.sh`\r +\r + This command will launch a job into the job queuing system (SLURM) requesting 2 nodes (one node acting half master and half worker, and other full worker node) for 20 minutes, and is prepared to use the singularity images that are already deployed in MN4 (located into the `PERMEDCOE_IMAGES` environment variable). It uses the dataset located into `../../Resources/data` folder.\r +\r + > :warning: **TIP**: If you want to run the workflow with a different dataset, please edit the `launch.sh` script and define the appropriate dataset path.\r +\r + After the execution, a `results` folder will be available with with Cancer Invasion Workflow results.\r +\r +### Mahti or Puhti\r +\r +This section explains how to run the Cancer Invasion workflow on CSC supercomputers using SnakeMake.\r +\r +#### Requirements\r +\r +- Install snakemake (or check if there is a version installed using `module spider snakemake`)\r +- Install workflow, using the same steps as for the local machine. 
With the exception that containers have to be built elsewhere.\r +\r +#### Steps\r +\r +\r +1. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflow/SnakeMake\r + ```\r +\r +2. Edit `launch.sh` with the correct partition, account, and resource specifications. \r +\r +3. Execute `./launch.sh`\r +\r + > :warning: Snakemake provides a `--cluster` flag, but this functionality should be avoided as it's really not suited for HPC systems.\r +\r +## License\r +\r +[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\r +\r +## Contact\r +\r +\r +\r +This software has been developed for the [PerMedCoE project](https://permedcoe.eu/), funded by the European Commission (EU H2020 [951773](https://cordis.europa.eu/project/id/951773)).\r +\r +![](https://permedcoe.eu/wp-content/uploads/2020/11/logo_1.png "PerMedCoE")\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/692?version=1" ; + schema1:isBasedOn "https://github.com/PerMedCoE/cancer-invasion-workflow" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PerMedCoE Cancer Diagnosis" ; + schema1:sdDatePublished "2024-07-12 13:25:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/692/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 963 ; + schema1:creator ; + schema1:dateCreated "2023-12-20T13:01:39Z" ; + schema1:dateModified "2024-01-24T09:48:16Z" ; + schema1:description """# Cancer Invasion Workflow\r +\r +## Table of Contents\r +\r +- [Cancer Invasion Workflow](#cancer-invasion-workflow)\r + - [Table of Contents](#table-of-contents)\r + - [Description](#description)\r + - [Contents](#contents)\r + - [Building Blocks](#building-blocks)\r + - [Workflows](#workflows)\r + - [Resources](#resources)\r + - [Tests](#tests)\r + - [Instructions](#instructions)\r + - [Local machine](#local-machine)\r + - [Requirements](#requirements)\r + - [Usage steps](#usage-steps)\r + - [MareNostrum 4](#marenostrum-4)\r + - [Requirements in MN4](#requirements-in-mn4)\r + - [Usage steps in MN4](#usage-steps-in-mn4)\r + - [Mahti or Puhti](#mahti-or-puhti)\r + - [Requirements](#requirements)\r + - [Steps](#steps)\r + - [License](#license)\r + - [Contact](#contact)\r +\r +## Description\r +\r +Uses multiscale simulations to describe cancer progression into invasion.\r +\r +The workflow uses the following building blocks, described in order of execution:\r +\r +1. 
PhysiBoSS-Invasion\r +\r +For details on individual workflow steps, see the user documentation for each building block.\r +\r +[`GitHub repository`]()\r +\r +\r +## Contents\r +\r +### Building Blocks\r +\r +The ``BuildingBlocks`` folder contains the script to install the\r +Building Blocks used in the Cancer Invasion Workflow.\r +\r +### Workflows\r +\r +The ``Workflow`` folder contains the workflows implementations.\r +\r +Currently contains the implementation using PyCOMPSs and Snakemake (in progress).\r +\r +### Resources\r +\r +The ``Resources`` folder contains dataset files.\r +\r +### Tests\r +\r +The ``Tests`` folder contains the scripts that run each Building Block\r +used in the workflow for the given small dataset.\r +They can be executed individually for testing purposes.\r +\r +## Instructions\r +\r +### Local machine\r +\r +This section explains the requirements and usage for the Cancer Invasion Workflow in a laptop or desktop computer.\r +\r +#### Requirements\r +\r +- [`permedcoe`](https://github.com/PerMedCoE/permedcoe) package\r +- [PyCOMPSs](https://pycompss.readthedocs.io/en/stable/Sections/00_Quickstart.html) / [Snakemake](https://snakemake.readthedocs.io/en/stable/)\r +- [Singularity](https://sylabs.io/guides/3.0/user-guide/installation.html)\r +\r +#### Usage steps\r +\r +1. Clone this repository:\r +\r + ```bash\r + git clone https://github.com/PerMedCoE/cancer-invasion-workflow\r + ```\r +\r +2. Install the Building Blocks required for the Cancer Invasion Workflow:\r +\r + ```bash\r + cancer-invasion-workflow/BuildingBlocks/./install_BBs.sh\r + ```\r +\r +3. 
Get the required Building Block images from the project [B2DROP](https://b2drop.bsc.es/index.php/f/444350):\r +\r + - Required images:\r + - PhysiCell-Invasion.singularity\r +\r + The path where these files are stored **MUST be exported in the `PERMEDCOE_IMAGES`** environment variable.\r +\r + > :warning: **TIP**: These containers can be built manually as follows (be patient since some of them may take some time):\r + 1. Clone the `BuildingBlocks` repository\r + ```bash\r + git clone https://github.com/PerMedCoE/BuildingBlocks.git\r + ```\r + 2. Build the required Building Block images\r + ```bash\r + cd BuildingBlocks/Resources/images\r + sudo singularity build PhysiCell-Invasion.sif PhysiCell-Invasion.singularity\r + cd ../../..\r + ```\r +\r +**If using PyCOMPSs in local PC** (make sure that PyCOMPSs in installed):\r +\r +4. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflows/PyCOMPSs\r + ```\r +\r +5. Execute `./run.sh`\r +\r +**If using Snakemake in local PC** (make sure that SnakeMake is installed):\r +\r +4. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflows/SnakeMake\r + ```\r +\r +5. Execute `./run.sh`\r + > **TIP**: If you want to run the workflow with a different dataset, please update the `run.sh` script setting the `dataset` variable to the new dataset folder and their file names.\r +\r +\r +### MareNostrum 4\r +\r +This section explains the requirements and usage for the Cancer Invasion Workflow in the MareNostrum 4 supercomputer.\r +\r +#### Requirements in MN4\r +\r +- Access to MN4\r +\r +All Building Blocks are already installed in MN4, and the Cancer Invasion Workflow available.\r +\r +#### Usage steps in MN4\r +\r +1. 
Load the `COMPSs`, `Singularity` and `permedcoe` modules\r +\r + ```bash\r + export COMPSS_PYTHON_VERSION=3\r + module load COMPSs/3.1\r + module load singularity/3.5.2\r + module use /apps/modules/modulefiles/tools/COMPSs/libraries\r + module load permedcoe\r + ```\r +\r + > **TIP**: Include the loading into your `${HOME}/.bashrc` file to load it automatically on the session start.\r +\r + This commands will load COMPSs and the permedcoe package which provides all necessary dependencies, as well as the path to the singularity container images (`PERMEDCOE_IMAGES` environment variable) and testing dataset (`CANCERINVASIONWORKFLOW_DATASET` environment variable).\r +\r +2. Get a copy of the pilot workflow into your desired folder\r +\r + ```bash\r + mkdir desired_folder\r + cd desired_folder\r + get_cancerinvasionworkflow\r + ```\r +\r +3. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflow/PyCOMPSs\r + ```\r +\r +4. Execute `./launch.sh`\r +\r + This command will launch a job into the job queuing system (SLURM) requesting 2 nodes (one node acting half master and half worker, and other full worker node) for 20 minutes, and is prepared to use the singularity images that are already deployed in MN4 (located into the `PERMEDCOE_IMAGES` environment variable). It uses the dataset located into `../../Resources/data` folder.\r +\r + > :warning: **TIP**: If you want to run the workflow with a different dataset, please edit the `launch.sh` script and define the appropriate dataset path.\r +\r + After the execution, a `results` folder will be available with with Cancer Invasion Workflow results.\r +\r +### Mahti or Puhti\r +\r +This section explains how to run the Cancer Invasion workflow on CSC supercomputers using SnakeMake.\r +\r +#### Requirements\r +\r +- Install snakemake (or check if there is a version installed using `module spider snakemake`)\r +- Install workflow, using the same steps as for the local machine. 
With the exception that containers have to be built elsewhere.\r +\r +#### Steps\r +\r +\r +1. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflow/SnakeMake\r + ```\r +\r +2. Edit `launch.sh` with the correct partition, account, and resource specifications. \r +\r +3. Execute `./launch.sh`\r +\r + > :warning: Snakemake provides a `--cluster` flag, but this functionality should be avoided as it's really not suited for HPC systems.\r +\r +## License\r +\r +[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\r +\r +## Contact\r +\r +\r +\r +This software has been developed for the [PerMedCoE project](https://permedcoe.eu/), funded by the European Commission (EU H2020 [951773](https://cordis.europa.eu/project/id/951773)).\r +\r +![](https://permedcoe.eu/wp-content/uploads/2020/11/logo_1.png "PerMedCoE")\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "PerMedCoE Cancer Diagnosis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/692?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:description """\r +\r +\r +GermlineStructuralV-nf is a pipeline for identifying structural variant events in human Illumina short read whole genome sequence data. GermlineStructuralV-nf identifies structural variant and copy number events from BAM files using [Manta](https://github.com/Illumina/manta/blob/master/docs/userGuide/README.md#de-novo-calling), [Smoove](https://github.com/brentp/smoove), and [TIDDIT](https://github.com/SciLifeLab/TIDDIT). Variants are then merged using [SURVIVOR](https://github.com/fritzsedlazeck/SURVIVOR), and annotated by [AnnotSV](https://pubmed.ncbi.nlm.nih.gov/29669011/). 
The pipeline is written in Nextflow and uses Singularity/Docker to run containerised tools.""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.431.1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/Germline-StructuralV-nf" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for GermlineStructuralV-nf" ; + schema1:sdDatePublished "2024-07-12 13:25:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/431/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4147 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2023-01-31T23:40:40Z" ; + schema1:dateModified "2023-12-18T05:36:07Z" ; + schema1:description """\r +\r +\r +GermlineStructuralV-nf is a pipeline for identifying structural variant events in human Illumina short read whole genome sequence data. GermlineStructuralV-nf identifies structural variant and copy number events from BAM files using [Manta](https://github.com/Illumina/manta/blob/master/docs/userGuide/README.md#de-novo-calling), [Smoove](https://github.com/brentp/smoove), and [TIDDIT](https://github.com/SciLifeLab/TIDDIT). Variants are then merged using [SURVIVOR](https://github.com/fritzsedlazeck/SURVIVOR), and annotated by [AnnotSV](https://pubmed.ncbi.nlm.nih.gov/29669011/). The pipeline is written in Nextflow and uses Singularity/Docker to run containerised tools.""" ; + schema1:keywords "Bioinformatics, Annotation, Genomics, Nextflow, rare diseases, variant_calling, structural variants, manta, smoove, tiddit, annotsv, survivor" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "GermlineStructuralV-nf" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/431?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# ![sanger-tol/insdcdownload](docs/images/sanger-tol-insdcdownload_logo.png)\r +\r +[![GitHub Actions CI Status](https://github.com/sanger-tol/insdcdownload/workflows/nf-core%20CI/badge.svg)](https://github.com/sanger-tol/insdcdownload/actions?query=workflow%3A%22nf-core+CI%22)\r +\r +\r +\r +[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.7155119-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.7155119)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.04.0-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +\r +[![Get help on Slack](http://img.shields.io/badge/slack-SangerTreeofLife%20%23pipelines-4A154B?labelColor=000000&logo=slack)](https://SangerTreeofLife.slack.com/channels/pipelines)\r +[![Follow on Twitter](http://img.shields.io/badge/twitter-%40sangertol-1DA1F2?labelColor=000000&logo=twitter)](https://twitter.com/sangertol)\r +[![Watch on YouTube](http://img.shields.io/badge/youtube-tree--of--life-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/channel/UCFeDpvjU58SA9V0ycRXejhA)\r +\r +## Introduction\r +\r +**sanger-tol/insdcdownload** is a pipeline that downloads assemblies from INSDC into a Tree of Life directory structure.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. 
It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +On release, automated continuous integration tests run the pipeline on a full-sized dataset on the GitHub CI infrastructure. This ensures that the pipeline runs in a third-party environment, and has sensible resource allocation defaults set to run on real-world datasets.\r +\r +## Pipeline summary\r +\r +## Overview\r +\r +The pipeline takes an assembly accession number, as well as the assembly name, and downloads it. It also builds a set of common indices (such as `samtools faidx`), and extracts the repeat-masking performed by the NCBI.\r +\r +Steps involved:\r +\r +- Download from the NCBI the genomic sequence (Fasta) and the assembly\r + stats and reports files.\r +- Turn the masked Fasta file into an unmasked one.\r +- Compress and index all Fasta files with `bgzip`, `samtools faidx`, and\r + `samtools dict`.\r +- Generate the `.sizes` file usually required for conversion of data\r + files to UCSC's "big" formats, e.g. bigBed.\r +- Extract the coordinates of the masked regions into a BED file.\r +- Compress and index the BED file with `bgzip` and `tabix`.\r +\r +## Quick Start\r +\r +1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=22.04.0`)\r +\r +2. 
Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) (you can follow [this tutorial](https://singularity-tutorial.github.io/01-installation/)), [`Podman`](https://podman.io/), [`Shifter`](https://nersc.gitlab.io/development/shifter/how-to-use/) or [`Charliecloud`](https://hpc.github.io/charliecloud/) for full pipeline reproducibility _(you can use [`Conda`](https://conda.io/miniconda.html) both to install Nextflow itself and also to manage software within pipelines. Please only use it within pipelines as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_.\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```bash\r + nextflow run sanger-tol/insdcdownload -profile test,YOURPROFILE --outdir \r + ```\r +\r + Note that some form of configuration will be needed so that Nextflow knows how to fetch the required software. This is usually done in the form of a config profile (`YOURPROFILE` in the example command above). You can chain multiple config profiles in a comma-separated string.\r +\r + > - The pipeline comes with config profiles called `docker`, `singularity`, `podman`, `shifter`, `charliecloud` and `conda` which instruct the pipeline to use the named tool for software management. For example, `-profile test,docker`.\r + > - Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment.\r + > - If you are using `singularity`, please use the [`nf-core download`](https://nf-co.re/tools/#downloading-pipelines-for-offline-use) command to download images first, before running the pipeline. 
Setting the [`NXF_SINGULARITY_CACHEDIR` or `singularity.cacheDir`](https://www.nextflow.io/docs/latest/singularity.html?#singularity-docker-hub) Nextflow options enables you to store and re-use the images from a central location for future pipeline runs.\r + > - If you are using `conda`, it is highly recommended to use the [`NXF_CONDA_CACHEDIR` or `conda.cacheDir`](https://www.nextflow.io/docs/latest/conda.html) settings to store the environments in a central location for future pipeline runs.\r +\r +4. Start running your own analysis!\r +\r + ```console\r + nextflow run sanger-tol/insdcdownload --assembly_accession GCA_927399515.1 --assembly_name gfLaeSulp1.1 --outdir results\r + ```\r +\r +## Documentation\r +\r +The sanger-tol/insdcdownload pipeline comes with documentation about the pipeline [usage](docs/usage.md) and [output](docs/output.md).\r +\r +## Credits\r +\r +sanger-tol/insdcdownload was mainly written by @muffato, with major borrowings from @priyanka-surana's [read-mapping](https://github.com/sanger-tol/readmapping) pipeline, e.g. the script to remove the repeat-masking, and the overall structure and layout of the sub-workflows.\r +\r +## Contributions and Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +For further information or help, don't hesitate to get in touch on the [Slack `#pipelines` channel](https://sangertreeoflife.slack.com/channels/pipelines). 
Please [create an issue](https://github.com/sanger-tol/insdcdownload/issues/new/choose) on GitHub if you are not on the Sanger slack channel.\r +\r +## Citations\r +\r +If you use sanger-tol/insdcdownload for your analysis, please cite it using the following doi: [10.5281/zenodo.7155119](https://doi.org/10.5281/zenodo.7155119)\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +This pipeline uses code and infrastructure developed and maintained by the [nf-core](https://nf-co.re) community, reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/638?version=1" ; + schema1:isBasedOn "https://github.com/sanger-tol/insdcdownload.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for sanger-tol/insdcdownload v1.1.0 - Deciduous ent" ; + schema1:sdDatePublished "2024-07-12 13:26:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/638/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1657 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-02T11:59:44Z" ; + schema1:dateModified "2023-11-14T11:58:44Z" ; + schema1:description """# ![sanger-tol/insdcdownload](docs/images/sanger-tol-insdcdownload_logo.png)\r +\r +[![GitHub Actions CI Status](https://github.com/sanger-tol/insdcdownload/workflows/nf-core%20CI/badge.svg)](https://github.com/sanger-tol/insdcdownload/actions?query=workflow%3A%22nf-core+CI%22)\r +\r +\r +\r +[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.7155119-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.7155119)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.04.0-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +\r +[![Get help on Slack](http://img.shields.io/badge/slack-SangerTreeofLife%20%23pipelines-4A154B?labelColor=000000&logo=slack)](https://SangerTreeofLife.slack.com/channels/pipelines)\r +[![Follow on Twitter](http://img.shields.io/badge/twitter-%40sangertol-1DA1F2?labelColor=000000&logo=twitter)](https://twitter.com/sangertol)\r +[![Watch on YouTube](http://img.shields.io/badge/youtube-tree--of--life-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/channel/UCFeDpvjU58SA9V0ycRXejhA)\r +\r +## Introduction\r +\r +**sanger-tol/insdcdownload** is a pipeline that downloads assemblies from INSDC into a Tree of Life directory structure.\r +\r +The pipeline is built using 
[Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +On release, automated continuous integration tests run the pipeline on a full-sized dataset on the GitHub CI infrastructure. This ensures that the pipeline runs in a third-party environment, and has sensible resource allocation defaults set to run on real-world datasets.\r +\r +## Pipeline summary\r +\r +## Overview\r +\r +The pipeline takes an assembly accession number, as well as the assembly name, and downloads it. It also builds a set of common indices (such as `samtools faidx`), and extracts the repeat-masking performed by the NCBI.\r +\r +Steps involved:\r +\r +- Download from the NCBI the genomic sequence (Fasta) and the assembly\r + stats and reports files.\r +- Turn the masked Fasta file into an unmasked one.\r +- Compress and index all Fasta files with `bgzip`, `samtools faidx`, and\r + `samtools dict`.\r +- Generate the `.sizes` file usually required for conversion of data\r + files to UCSC's "big" formats, e.g. bigBed.\r +- Extract the coordinates of the masked regions into a BED file.\r +- Compress and index the BED file with `bgzip` and `tabix`.\r +\r +## Quick Start\r +\r +1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=22.04.0`)\r +\r +2. 
Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) (you can follow [this tutorial](https://singularity-tutorial.github.io/01-installation/)), [`Podman`](https://podman.io/), [`Shifter`](https://nersc.gitlab.io/development/shifter/how-to-use/) or [`Charliecloud`](https://hpc.github.io/charliecloud/) for full pipeline reproducibility _(you can use [`Conda`](https://conda.io/miniconda.html) both to install Nextflow itself and also to manage software within pipelines. Please only use it within pipelines as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_.\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```bash\r + nextflow run sanger-tol/insdcdownload -profile test,YOURPROFILE --outdir \r + ```\r +\r + Note that some form of configuration will be needed so that Nextflow knows how to fetch the required software. This is usually done in the form of a config profile (`YOURPROFILE` in the example command above). You can chain multiple config profiles in a comma-separated string.\r +\r + > - The pipeline comes with config profiles called `docker`, `singularity`, `podman`, `shifter`, `charliecloud` and `conda` which instruct the pipeline to use the named tool for software management. For example, `-profile test,docker`.\r + > - Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment.\r + > - If you are using `singularity`, please use the [`nf-core download`](https://nf-co.re/tools/#downloading-pipelines-for-offline-use) command to download images first, before running the pipeline. 
Setting the [`NXF_SINGULARITY_CACHEDIR` or `singularity.cacheDir`](https://www.nextflow.io/docs/latest/singularity.html?#singularity-docker-hub) Nextflow options enables you to store and re-use the images from a central location for future pipeline runs.\r + > - If you are using `conda`, it is highly recommended to use the [`NXF_CONDA_CACHEDIR` or `conda.cacheDir`](https://www.nextflow.io/docs/latest/conda.html) settings to store the environments in a central location for future pipeline runs.\r +\r +4. Start running your own analysis!\r +\r + ```console\r + nextflow run sanger-tol/insdcdownload --assembly_accession GCA_927399515.1 --assembly_name gfLaeSulp1.1 --outdir results\r + ```\r +\r +## Documentation\r +\r +The sanger-tol/insdcdownload pipeline comes with documentation about the pipeline [usage](docs/usage.md) and [output](docs/output.md).\r +\r +## Credits\r +\r +sanger-tol/insdcdownload was mainly written by @muffato, with major borrowings from @priyanka-surana's [read-mapping](https://github.com/sanger-tol/readmapping) pipeline, e.g. the script to remove the repeat-masking, and the overall structure and layout of the sub-workflows.\r +\r +## Contributions and Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +For further information or help, don't hesitate to get in touch on the [Slack `#pipelines` channel](https://sangertreeoflife.slack.com/channels/pipelines). 
Please [create an issue](https://github.com/sanger-tol/insdcdownload/issues/new/choose) on GitHub if you are not on the Sanger slack channel.\r +\r +## Citations\r +\r +If you use sanger-tol/insdcdownload for your analysis, please cite it using the following doi: [10.5281/zenodo.7155119](https://doi.org/10.5281/zenodo.7155119)\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +This pipeline uses code and infrastructure developed and maintained by the [nf-core](https://nf-co.re) community, reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:keywords "Bioinformatics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "sanger-tol/insdcdownload v1.1.0 - Deciduous ent" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/638?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Genes and transcripts annotation with Isoseq using uLTRA and TAMA" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/993?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/isoseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/isoseq" ; + schema1:sdDatePublished "2024-07-12 13:21:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/993/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8180 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:57Z" ; + schema1:dateModified "2024-06-11T12:54:57Z" ; + schema1:description "Genes and transcripts annotation with Isoseq using uLTRA and TAMA" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/993?version=6" ; + schema1:keywords "isoseq, isoseq-3, rna, tama, ultra" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/isoseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/993?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:description """#### - Deprecated -\r +#### See our updated hybrid assembly workflow: https://workflowhub.eu/workflows/367\r +#### And other workflows: https://workflowhub.eu/projects/16#workflows\r +# \r +**Workflow for sequencing with ONT Nanopore data, from basecalled reads to (meta)assembly and binning**\r +- Workflow Nanopore Quality\r +- Kraken2 taxonomic classification of FASTQ reads\r +- Flye (de-novo assembly)\r +- Medaka (assembly polishing)\r +- metaQUAST (assembly quality reports)\r +\r +**When Illumina reads are provided:** \r + - Workflow Illumina Quality: https://workflowhub.eu/workflows/336?version=1 \r + - Assembly polishing with Pilon
\r +   - Workflow binning https://workflowhub.eu/workflows/64?version=11\r +     - Metabat2\r +     - CheckM\r +     - BUSCO\r +     - GTDB-Tk\r +\r +**All tool CWL files and other workflows can be found here:**
\r + Tools: https://git.wur.nl/unlock/cwl/-/tree/master/cwl
\r + Workflows: https://git.wur.nl/unlock/cwl/-/tree/master/cwl/workflows
""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/254?version=3" ; + schema1:isBasedOn "https://git.wur.nl/unlock/cwl/-/blob/master/cwl/workflows/workflow_nanopore_assembly.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Nanopore Assembly Workflow - Deprecated -" ; + schema1:sdDatePublished "2024-07-12 13:34:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/254/ro_crate?version=3" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 83285 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17085 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-04-20T09:12:09Z" ; + schema1:dateModified "2023-02-02T15:16:21Z" ; + schema1:description """#### - Deprecated -\r +#### See our updated hybrid assembly workflow: https://workflowhub.eu/workflows/367\r +#### And other workflows: https://workflowhub.eu/projects/16#workflows\r +# \r +**Workflow for sequencing with ONT Nanopore data, from basecalled reads to (meta)assembly and binning**\r +- Workflow Nanopore Quality\r +- Kraken2 taxonomic classification of FASTQ reads\r +- Flye (de-novo assembly)\r +- Medaka (assembly polishing)\r +- metaQUAST (assembly quality reports)\r +\r +**When Illumina reads are provided:** \r + - Workflow Illumina Quality: https://workflowhub.eu/workflows/336?version=1 \r + - Assembly polishing with Pilon
\r +   - Workflow binning https://workflowhub.eu/workflows/64?version=11\r +     - Metabat2\r +     - CheckM\r +     - BUSCO\r +     - GTDB-Tk\r +\r +**All tool CWL files and other workflows can be found here:**
\r + Tools: https://git.wur.nl/unlock/cwl/-/tree/master/cwl
\r + Workflows: https://git.wur.nl/unlock/cwl/-/tree/master/cwl/workflows
""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/254?version=2" ; + schema1:keywords "nanopore, Genomics, Metagenomics" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Nanopore Assembly Workflow - Deprecated -" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/254?version=3" ; + schema1:version 3 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=17" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-07-12 13:20:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=17" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17866 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:59Z" ; + schema1:dateModified "2024-06-11T12:54:59Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=17" ; + schema1:version 17 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# IGVreport-nf \r +\r +- [Description](#description)\r + - [Diagram](#diagram)\r + - [User guide](#user-guide)\r + - [Workflow summaries](#workflow-summaries)\r + - [Metadata](#metadata)\r + - [Component tools](#component-tools)\r + - [Required (minimum)\r + inputs/parameters](#required-minimum-inputsparameters)\r + - [Additional notes](#additional-notes)\r + - [Help/FAQ/Troubleshooting](#helpfaqtroubleshooting)\r + - [Acknowledgements/citations/credits](#acknowledgementscitationscredits)\r +\r +## Description \r +\r +Quickly generate [IGV `.html` reports](https://github.com/igvteam/igv-reports) for a genomic region of interest in the human genome (hg38). Bcftools is used to subset a VCF to a region of interest, the subset VCF is then passed to IGV-reports, which generates a report consisting of a table of genomic sites or regions and associated IGV views for each site. The reports can be opened by any web browser as a static page. \r +\r +### Diagram \r +\r +```mermaid\r +graph LR;\r + VCF-->|bcftools view|SubsetVCF;\r + SubsetVCF-->|IGVtools|HTMLreport;\r + AlignmentBAM-->|IGVtools|HTMLreport;\r +```\r +\r +### User guide\r +\r +This workflow uses containers for all steps and can run using Singularity or Docker. It requires Nextflow and either Singularity or Docker be installed. For instructions on installing Nextflow, see their [documentation](https://www.nextflow.io/docs/latest/getstarted.html).\r +\r +**This workflow currently only generates reports for the human reference genome assembly, Hg38.** \r +\r +The workflow runs three processes: \r +1. The provided VCF file is subset to a region of interest using Bcftools view \r +2. The Subset VCF file is then indexed using Bcftools index \r +3. The subset VCF and provided Bam file are used to generate the html report for the region of interest. 
\r +\r +To start clone this repository: \r +```\r +git clone https://github.com/Sydney-Informatics-Hub/IGVreport-nf.git\r +```\r +\r +From the IGVreport-nf directory, run the pipeline: \r +```\r +nextflow run main.nf --sample \\\r + --bam \\\r + --vcf \\\r + --chr --start --stop \r +```\r +\r +This will create a report in a directory titled `./Report`. You can rename this directory at runtime using the flag `--outDir`. All runtime summary reports will be available in the `./runInfo` directory. \r +\r +### Workflow summaries\r +\r +#### Metadata \r +\r +|metadata field | workflow_name / workflow_version |\r +|-------------------|:---------------------------------:|\r +|Version | 1.0 |\r +|Maturity | under development |\r +|Creators | Georgie Samaha |\r +|Source | NA |\r +|License | GPL-3.0 license |\r +|Workflow manager | NextFlow |\r +|Container | None |\r +|Install method | NA |\r +|GitHub | github.com/Sydney-Informatics-Hub/IGVreport-nf |\r +|bio.tools | NA |\r +|BioContainers | NA | \r +|bioconda | NA |\r +\r +#### Component tools\r +\r +* nextflow>=20.07.1\r +* singularity or docker\r +* bcftools/1.16\r +* igv-reports/1.6.1\r +\r +#### Required (minimum) inputs/parameters\r +\r +* An indexed alignment file in Bam format \r +* A gzipped and indexed vcf file\r +\r +## Additional notes\r +\r +## Help/FAQ/troubleshooting \r +\r +## Acknowledgements/citations/credits\r +\r +This workflow was developed by the Sydney Informatics Hub, a Core Research Facility of the University of Sydney and the Australian BioCommons which is enabled by NCRIS via Bioplatforms Australia. 
\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/440?version=1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/IGVreport-nf" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for IGVreport-nf" ; + schema1:sdDatePublished "2024-07-12 13:34:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/440/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3381 ; + schema1:creator , + ; + schema1:dateCreated "2023-03-21T05:17:30Z" ; + schema1:dateModified "2023-03-21T05:17:30Z" ; + schema1:description """# IGVreport-nf \r +\r +- [Description](#description)\r + - [Diagram](#diagram)\r + - [User guide](#user-guide)\r + - [Workflow summaries](#workflow-summaries)\r + - [Metadata](#metadata)\r + - [Component tools](#component-tools)\r + - [Required (minimum)\r + inputs/parameters](#required-minimum-inputsparameters)\r + - [Additional notes](#additional-notes)\r + - [Help/FAQ/Troubleshooting](#helpfaqtroubleshooting)\r + - [Acknowledgements/citations/credits](#acknowledgementscitationscredits)\r +\r +## Description \r +\r +Quickly generate [IGV `.html` reports](https://github.com/igvteam/igv-reports) for a genomic region of interest in the human genome (hg38). Bcftools is used to subset a VCF to a region of interest, the subset VCF is then passed to IGV-reports, which generates a report consisting of a table of genomic sites or regions and associated IGV views for each site. The reports can be opened by any web browser as a static page. \r +\r +### Diagram \r +\r +```mermaid\r +graph LR;\r + VCF-->|bcftools view|SubsetVCF;\r + SubsetVCF-->|IGVtools|HTMLreport;\r + AlignmentBAM-->|IGVtools|HTMLreport;\r +```\r +\r +### User guide\r +\r +This workflow uses containers for all steps and can run using Singularity or Docker. 
It requires Nextflow and either Singularity or Docker be installed. For instructions on installing Nextflow, see their [documentation](https://www.nextflow.io/docs/latest/getstarted.html).\r +\r +**This workflow currently only generates reports for the human reference genome assembly, Hg38.** \r +\r +The workflow runs three processes: \r +1. The provided VCF file is subset to a region of interest using Bcftools view \r +2. The Subset VCF file is then indexed using Bcftools index \r +3. The subset VCF and provided Bam file are used to generate the html report for the region of interest. \r +\r +To start clone this repository: \r +```\r +git clone https://github.com/Sydney-Informatics-Hub/IGVreport-nf.git\r +```\r +\r +From the IGVreport-nf directory, run the pipeline: \r +```\r +nextflow run main.nf --sample \\\r + --bam \\\r + --vcf \\\r + --chr --start --stop \r +```\r +\r +This will create a report in a directory titled `./Report`. You can rename this directory at runtime using the flag `--outDir`. All runtime summary reports will be available in the `./runInfo` directory. 
\r +\r +### Workflow summaries\r +\r +#### Metadata \r +\r +|metadata field | workflow_name / workflow_version |\r +|-------------------|:---------------------------------:|\r +|Version | 1.0 |\r +|Maturity | under development |\r +|Creators | Georgie Samaha |\r +|Source | NA |\r +|License | GPL-3.0 license |\r +|Workflow manager | NextFlow |\r +|Container | None |\r +|Install method | NA |\r +|GitHub | github.com/Sydney-Informatics-Hub/IGVreport-nf |\r +|bio.tools | NA |\r +|BioContainers | NA | \r +|bioconda | NA |\r +\r +#### Component tools\r +\r +* nextflow>=20.07.1\r +* singularity or docker\r +* bcftools/1.16\r +* igv-reports/1.6.1\r +\r +#### Required (minimum) inputs/parameters\r +\r +* An indexed alignment file in Bam format \r +* A gzipped and indexed vcf file\r +\r +## Additional notes\r +\r +## Help/FAQ/troubleshooting \r +\r +## Acknowledgements/citations/credits\r +\r +This workflow was developed by the Sydney Informatics Hub, a Core Research Facility of the University of Sydney and the Australian BioCommons which is enabled by NCRIS via Bioplatforms Australia. \r +""" ; + schema1:keywords "Alignment, Genomics, variant calling, mapping" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "IGVreport-nf" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/440?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-07-12 13:18:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=10" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10551 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:10Z" ; + schema1:dateModified "2024-06-11T12:55:10Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=10" ; + schema1:version 10 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ChIP-seq peak-calling and differential analysis pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/971?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/chipseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/chipseq" ; + schema1:sdDatePublished "2024-07-12 13:22:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/971/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9391 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:45Z" ; + schema1:dateModified "2024-06-11T12:54:45Z" ; + schema1:description "ChIP-seq peak-calling and differential analysis pipeline." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/971?version=5" ; + schema1:keywords "ChIP, ChIP-seq, chromatin-immunoprecipitation, macs2, peak-calling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/chipseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/971?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Germline-ShortV @ NCI-Gadi is an implementation of the BROAD Institute's best practice workflow for germline short variant discovery. This implementation is optimised for the National Compute Infrastucture's Gadi HPC, utilising scatter-gather parallelism to enable use of multiple nodes with high CPU or memory efficiency. This workflow requires sample BAM files, which can be generated using the [Fastq-to-bam @ NCI-Gadi](https://workflowhub.eu/workflows/146) pipeline. Germline-ShortV can be applied to model and non-model organisms (including non-diploid organisms). \r +\r +Infrastructure\\_deployment\\_metadata: Gadi (NCI)""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.143.1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/Germline-ShortV" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Germline-ShortV @ NCI-Gadi" ; + schema1:sdDatePublished "2024-07-12 13:36:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/143/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 728332 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 24981 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2021-08-17T04:35:21Z" ; + schema1:dateModified "2023-01-16T13:51:44Z" ; + schema1:description """Germline-ShortV @ NCI-Gadi is an implementation of the BROAD Institute's best practice workflow for germline short variant discovery. This implementation is optimised for the National Compute Infrastucture's Gadi HPC, utilising scatter-gather parallelism to enable use of multiple nodes with high CPU or memory efficiency. This workflow requires sample BAM files, which can be generated using the [Fastq-to-bam @ NCI-Gadi](https://workflowhub.eu/workflows/146) pipeline. Germline-ShortV can be applied to model and non-model organisms (including non-diploid organisms). \r +\r +Infrastructure\\_deployment\\_metadata: Gadi (NCI)""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "GATK4, variant_calling, WGS, SNPs, INDELs, HaplotyeCaller, Germline, BROAD, Genomics, genome, DNA, DNA-seq" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Germline-ShortV @ NCI-Gadi" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/143?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Takes fastqs and reference data, to produce a single cell counts matrix into and save in annData format - adding a column called sample with the sample name. 
" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/513?version=2" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq: Count and Load with starSOLO" ; + schema1:sdDatePublished "2024-07-12 13:22:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/513/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 28207 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-12-12T03:08:41Z" ; + schema1:dateModified "2023-12-12T03:08:41Z" ; + schema1:description "Takes fastqs and reference data, to produce a single cell counts matrix into and save in annData format - adding a column called sample with the sample name. " ; + schema1:isBasedOn "https://workflowhub.eu/workflows/513?version=2" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "scRNAseq: Count and Load with starSOLO" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/513?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2023-01-20T16:24:30.927470" ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + , + , + ; + schema1:name "hic-hicup-cooler/hic-fastq-to-cool-hicup-cooler" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "hic-hicup-cooler/chic-fastq-to-cool-hicup-cooler" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:version "0.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.3" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "hic-hicup-cooler/hic-fastq-to-pairs-hicup" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:version "0.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "hic-hicup-cooler/hic-juicermediumtabix-to-cool-cooler" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:version "0.1" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """This workflow can be used to fit dose-response curves from normalised cell-based assay data (%confluence) using the KNIME HCS extension. The workflow expects triplicates for each of eight test concentrations. This workflow needs R-Server to run in the back-end. Start R and run the following command: library(Rserve); Rserve(args = "--vanilla"). 
\r +Three types of outliers can be removed: 1 - Outliers from triplicate measurement (standard deviation cut-off can be selected), 2 - inactive and weekly active compounds (% confluence cut-offs can be selected), 3 - toxic concentrations (cut-off for reduction in confluence with stepwise increasing concentration can be selected)\r +Output are two dose-response curve fits per compound for pre and post outlier removal with graphical representation and numerical fit parameters. \r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/388?version=1" ; + schema1:isBasedOn "https://hub.knime.com/fraunhoferitmp/spaces/Public/latest/Dose_Response_Cell-based-Assay/DRC_template_WithOutlierDetection~SwmoxbJewJ1k8Dcd" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for DRC_cellbased_OutlierDetection" ; + schema1:sdDatePublished "2024-07-12 13:35:04 +0100" ; + schema1:url "https://workflowhub.eu/workflows/388/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3093204 ; + schema1:creator ; + schema1:dateCreated "2022-09-26T09:58:39Z" ; + schema1:dateModified "2023-01-16T14:02:32Z" ; + schema1:description """This workflow can be used to fit dose-response curves from normalised cell-based assay data (%confluence) using the KNIME HCS extension. The workflow expects triplicates for each of eight test concentrations. This workflow needs R-Server to run in the back-end. Start R and run the following command: library(Rserve); Rserve(args = "--vanilla"). 
\r +Three types of outliers can be removed: 1 - Outliers from triplicate measurement (standard deviation cut-off can be selected), 2 - inactive and weekly active compounds (% confluence cut-offs can be selected), 3 - toxic concentrations (cut-off for reduction in confluence with stepwise increasing concentration can be selected)\r +Output are two dose-response curve fits per compound for pre and post outlier removal with graphical representation and numerical fit parameters. \r +""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "DRC_cellbased_OutlierDetection" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/388?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 70668 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.293.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python CNS/XPLOR MD Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-07-12 13:35:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/293/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1728 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-17T14:49:18Z" ; + schema1:dateModified "2022-03-23T10:05:23Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/293?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python CNS/XPLOR MD Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/293?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-07-12 13:18:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9290 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:10Z" ; + schema1:dateModified "2024-06-11T12:55:10Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + schema1:datePublished "2024-07-09T15:30:12.396930" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . + + a schema1:Dataset ; + schema1:datePublished "2023-11-03T14:51:35.106883" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Scaffolding-Bionano-VGP7" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Scaffolding-Bionano-VGP7/main" . 
+ + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.17" . + + a schema1:Dataset ; + schema1:datePublished "2021-12-20T17:04:47.617205" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/gromacs-mmgbsa" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "gromacs-mmgbsa/main" ; + schema1:sdDatePublished "2021-12-21 03:01:01 +0000" ; + schema1:softwareVersion "v0.1.2" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# CNVand\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥8.0.0-brightgreen.svg?style=flat-square)](https://snakemake.bitbucket.io)\r +[![Conda](https://img.shields.io/badge/conda-≥23.11.0-brightgreen.svg?style=flat-square)](https://anaconda.org/conda-forge/mamba)\r +![Docker](https://img.shields.io/badge/docker-≥26.1.4-brightgreen.svg?style=flat-square)\r +![License](https://img.shields.io/badge/license-MIT-blue.svg?style=flat-square)\r +[![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-2.1-4baaaa.svg)](code_of_conduct.md) \r +\r +CNVand is a snakemake workflow for CNV analysis, tailored for preparing data used by the [CNVizard](https://github.com/IHGGM-Aachen/CNVizard) CNV visualization tool. Given a set of BAM and VCF files, it utilizes the tools `CNVkit` and `AnnotSV` to analyze and annotate copy number variations.\r +\r +## General Settings and Samplesheet\r +To configure this pipeline, modify the config under `config/config.yaml` as needed. Detailed explanations for each setting are provided within the file.\r +\r +Add samples to the pipeline by completing `config/samplesheet.tsv`. 
Each `sample` should be associated with a `path` to the corresponding BAM and VCF file.\r +\r +For detailed instructions on how to configure CNVand see `config/README.md`.\r +\r +## Reference Files\r +To use CNVand some external reference files are needed alongside your sample data.\r +\r +### Genome\r +\r +For `cnvkit_fix` to work, you need to specify a reference genome in the config file. Take care to use the same reference file for your entire workflow!\r +\r +### Annotations\r +\r +For AnnotSV to work, the annotation files must be downloaded separately and be referenced in the config file under the respective key. For human annotations, this can be done [here](https://www.lbgi.fr/~geoffroy/Annotations/Annotations_Human_3.4.2.tar.gz). In case this link is not working, check the original [AnnotSV](https://github.com/lgmgeo/AnnotSV/tree/master) repository for updates on how to obtain the annotations.\r +\r +## Pipeline Setup\r +CNVand can be executed using mamba environments or a pre-built docker container.\r +\r +### Mamba (Snakedeploy)\r +For a one-click installation, snakedeploy can be used. For further information, see the entry for CNVand in the [Snakemake Workflow Catalog](https://snakemake.github.io/snakemake-workflow-catalog/?repo=IHGGM-Aachen/CNVand)\r +\r +### Mamba (Manual)\r +This workflow can easily setup manually with the given environment file. Install Snakemake and dependencies using the command:\r +\r +```bash\r +mamba env create -f environment.yml\r +```\r +\r +Then activate the newly created environment with: \r +\r +```bash\r +mamba activate cnvand\r +```\r +\r +Now configure the pipeline and download the needed annotation and refenrece files. 
When everything is set up, Execute the pipeline with:\r +\r +```bash\r +snakemake --cores all --use-conda\r +```\r +\r +Generate a comprehensive execution report by running:\r +\r +```bash\r +snakemake --report report.zip\r +```\r +\r +\r +### Docker\r +\r +CNVand can also be used inside a Docker container. To do so, first pull the Docker image with:\r +\r +```bash\r +docker pull ghcr.io/ihggm-aachen/cnvand:latest\r +```\r +\r +Then run the container with the bind mounts needed in your setup:\r +\r +```bash\r +docker run -it -v /path/to/your/data:/data ghcr.io/ihggm-aachen/cnvand:latest /bin/bash\r +```\r +\r +This command opens an interactive shell inside the Docker container. Once inside the container, you are placed inside the `/cnvand` the directory. From there then run the pipeline once you set an appropriate configuration:\r +\r +```bash\r +snakemake --cores all --use-conda\r +```\r +\r +## Contributing\r +\r +We welcome contributions to improve CNVand. Please see our [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to get started.\r +\r +## Code of Conduct\r +\r +We are committed to fostering an open and welcoming environment. Please see our [CODE_OF_CONDUCT.md](CODE_OF_CONDUCT.md) for our community guidelines.\r +\r +## Documentation\r +\r +Detailed documentation for the workflow can be found in `workflow/documentation.md`.\r +\r +## Testing\r +\r +To ensure the pipeline runs correctly, we have set up both unit and integration tests. Unit tests are generated from successful workflow runs, and integration tests are configured to run the entire workflow with test data.\r +\r +### Integration Tests\r +\r +The integration test can be run using the data and config provided. 
Remember to download the correct reference/annotations (GRCh38 in case of the bundled NIST data) by yourself and adjust your local paths as necessary!\r +\r +### Unit Tests\r +\r +Run the unit tests with:\r +\r +```bash\r +pytest -v .tests/unit\r +```\r +\r +This will check for the correct CNVand output per rule.\r +\r +## License\r +\r +This project is licensed under the MIT License. See the [LICENSE](LICENSE.md) file for details.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.1039.1" ; + schema1:isBasedOn "https://github.com/IHGGM-Aachen/CNVand" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNVand" ; + schema1:sdDatePublished "2024-07-12 13:18:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1039/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 20573 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1367 ; + schema1:creator ; + schema1:dateCreated "2024-06-10T15:56:02Z" ; + schema1:dateModified "2024-06-10T15:57:31Z" ; + schema1:description """# CNVand\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥8.0.0-brightgreen.svg?style=flat-square)](https://snakemake.bitbucket.io)\r +[![Conda](https://img.shields.io/badge/conda-≥23.11.0-brightgreen.svg?style=flat-square)](https://anaconda.org/conda-forge/mamba)\r +![Docker](https://img.shields.io/badge/docker-≥26.1.4-brightgreen.svg?style=flat-square)\r +![License](https://img.shields.io/badge/license-MIT-blue.svg?style=flat-square)\r +[![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-2.1-4baaaa.svg)](code_of_conduct.md) \r +\r +CNVand is a snakemake workflow for CNV analysis, tailored for preparing data used by the [CNVizard](https://github.com/IHGGM-Aachen/CNVizard) CNV visualization tool. 
Given a set of BAM and VCF files, it utilizes the tools `CNVkit` and `AnnotSV` to analyze and annotate copy number variations.\r +\r +## General Settings and Samplesheet\r +To configure this pipeline, modify the config under `config/config.yaml` as needed. Detailed explanations for each setting are provided within the file.\r +\r +Add samples to the pipeline by completing `config/samplesheet.tsv`. Each `sample` should be associated with a `path` to the corresponding BAM and VCF file.\r +\r +For detailed instructions on how to configure CNVand see `config/README.md`.\r +\r +## Reference Files\r +To use CNVand some external reference files are needed alongside your sample data.\r +\r +### Genome\r +\r +For `cnvkit_fix` to work, you need to specify a reference genome in the config file. Take care to use the same reference file for your entire workflow!\r +\r +### Annotations\r +\r +For AnnotSV to work, the annotation files must be downloaded separately and be referenced in the config file under the respective key. For human annotations, this can be done [here](https://www.lbgi.fr/~geoffroy/Annotations/Annotations_Human_3.4.2.tar.gz). In case this link is not working, check the original [AnnotSV](https://github.com/lgmgeo/AnnotSV/tree/master) repository for updates on how to obtain the annotations.\r +\r +## Pipeline Setup\r +CNVand can be executed using mamba environments or a pre-built docker container.\r +\r +### Mamba (Snakedeploy)\r +For a one-click installation, snakedeploy can be used. For further information, see the entry for CNVand in the [Snakemake Workflow Catalog](https://snakemake.github.io/snakemake-workflow-catalog/?repo=IHGGM-Aachen/CNVand)\r +\r +### Mamba (Manual)\r +This workflow can easily setup manually with the given environment file. 
Install Snakemake and dependencies using the command:\r +\r +```bash\r +mamba env create -f environment.yml\r +```\r +\r +Then activate the newly created environment with: \r +\r +```bash\r +mamba activate cnvand\r +```\r +\r +Now configure the pipeline and download the needed annotation and refenrece files. When everything is set up, Execute the pipeline with:\r +\r +```bash\r +snakemake --cores all --use-conda\r +```\r +\r +Generate a comprehensive execution report by running:\r +\r +```bash\r +snakemake --report report.zip\r +```\r +\r +\r +### Docker\r +\r +CNVand can also be used inside a Docker container. To do so, first pull the Docker image with:\r +\r +```bash\r +docker pull ghcr.io/ihggm-aachen/cnvand:latest\r +```\r +\r +Then run the container with the bind mounts needed in your setup:\r +\r +```bash\r +docker run -it -v /path/to/your/data:/data ghcr.io/ihggm-aachen/cnvand:latest /bin/bash\r +```\r +\r +This command opens an interactive shell inside the Docker container. Once inside the container, you are placed inside the `/cnvand` the directory. From there then run the pipeline once you set an appropriate configuration:\r +\r +```bash\r +snakemake --cores all --use-conda\r +```\r +\r +## Contributing\r +\r +We welcome contributions to improve CNVand. Please see our [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to get started.\r +\r +## Code of Conduct\r +\r +We are committed to fostering an open and welcoming environment. Please see our [CODE_OF_CONDUCT.md](CODE_OF_CONDUCT.md) for our community guidelines.\r +\r +## Documentation\r +\r +Detailed documentation for the workflow can be found in `workflow/documentation.md`.\r +\r +## Testing\r +\r +To ensure the pipeline runs correctly, we have set up both unit and integration tests. 
Unit tests are generated from successful workflow runs, and integration tests are configured to run the entire workflow with test data.\r +\r +### Integration Tests\r +\r +The integration test can be run using the data and config provided. Remember to download the correct reference/annotations (GRCh38 in case of the bundled NIST data) by yourself and adjust your local paths as necessary!\r +\r +### Unit Tests\r +\r +Run the unit tests with:\r +\r +```bash\r +pytest -v .tests/unit\r +```\r +\r +This will check for the correct CNVand output per rule.\r +\r +## License\r +\r +This project is licensed under the MIT License. See the [LICENSE](LICENSE.md) file for details.\r +""" ; + schema1:image ; + schema1:keywords "Bioinformatics, annotsv, CNVkit, Copy Number Variation, Snakemake, Magic" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "CNVand" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1039?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "An example workflow to allow users to run the Specimen Data Refinery tools on data provided in an input CSV file." ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.373.1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for De novo digitisation" ; + schema1:sdDatePublished "2024-07-12 13:35:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/373/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 21688 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-07-08T13:00:03Z" ; + schema1:dateModified "2022-07-08T13:08:19Z" ; + schema1:description "An example workflow to allow users to run the Specimen Data Refinery tools on data provided in an input CSV file." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/373?version=1" ; + schema1:keywords "Default-SDR" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "De novo digitisation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/373?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """Data QC step, can run alone or as part of a combined workflow for large genome assembly. \r +\r +* What it does: Reports statistics from sequencing reads.\r +* Inputs: long reads (fastq.gz format), short reads (R1 and R2) (fastq.gz format).\r +* Outputs: For long reads: a nanoplot report (the HTML report summarizes all the information). For short reads: a MultiQC report.\r +* Tools used: Nanoplot, FastQC, MultiQC.\r +* Input parameters: None required.\r +* Workflow steps: Long reads are analysed by Nanoplot; Short reads (R1 and R2) are analysed by FastQC; the resulting reports are processed by MultiQC.\r +* Options: see the tool settings options at runtime and change as required. 
Alternative tool option: fastp\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.222.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Data QC" ; + schema1:sdDatePublished "2024-07-12 13:36:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/222/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 417031 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15475 ; + schema1:creator ; + schema1:dateCreated "2021-11-08T04:34:47Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """Data QC step, can run alone or as part of a combined workflow for large genome assembly. \r +\r +* What it does: Reports statistics from sequencing reads.\r +* Inputs: long reads (fastq.gz format), short reads (R1 and R2) (fastq.gz format).\r +* Outputs: For long reads: a nanoplot report (the HTML report summarizes all the information). For short reads: a MultiQC report.\r +* Tools used: Nanoplot, FastQC, MultiQC.\r +* Input parameters: None required.\r +* Workflow steps: Long reads are analysed by Nanoplot; Short reads (R1 and R2) are analysed by FastQC; the resulting reports are processed by MultiQC.\r +* Options: see the tool settings options at runtime and change as required. 
Alternative tool option: fastp\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)\r +""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "Large-genome-assembly" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Data QC" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/222?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Summary\r +This notebook demonstrates how to retrieve metadata associated to the paper [A SARS-CoV-2 cytopathicity dataset generated by high-content screening of a large drug repurposing collection](https://doi.org/10.1038/s41597-021-00848-4) and available in IDR at [idr0094-ellinger-sarscov2](https://idr.openmicroscopy.org/search/?query=Name:idr0094).\r +Over 300 compounds were used in this investigation. This notebook allows the user to calculate the half maximal inhibitory concentration (IC50) for each compound. IC50 is a measure of the potency of a substance in inhibiting a specific biological or biochemical function. IC50 is a quantitative measure that indicates how much of a particular inhibitory substance (e.g. 
drug) is needed to inhibit, in vitro, a given biological process or biological component by 50%.\r +User can download the IC50 for each compound used in that study\r +\r +The notebook can be launched in [My Binder](https://mybinder.org/v2/gh/IDR/idr0094-ellinger-sarscov2/master?urlpath=notebooks%2Fnotebooks%2Fidr0094-ic50.ipynb%3FscreenId%3D2603).\r +\r +A shiny app is also available for dynamic plotting of the IC50 curve for each compound.\r +This R shiny app can be launched in [My Binder](https://mybinder.org/v2/gh/IDR/idr0094-ellinger-sarscov2/master?urlpath=shiny/apps/)\r +\r +\r +# Inputs\r +Parameters needed to configure the workflow:\r +\r +**screenId**: Identifier of a screen in IDR.\r +\r +# Ouputs\r +Output file generated:\r +\r +**ic50.csv**: Comma separate value file containing the IC50 for each compound.\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.238.1" ; + schema1:isBasedOn "https://github.com/IDR/idr0094-ellinger-sarscov2" ; + schema1:license "BSD-2-Clause" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Calculate the half maximal inhibitory concentration (IC50) for each compound used in a SARS-CoV-2 study" ; + schema1:sdDatePublished "2024-07-12 13:36:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/238/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 233368 ; + schema1:creator , + ; + schema1:dateCreated "2021-11-16T10:19:16Z" ; + schema1:dateModified "2023-01-16T13:55:00Z" ; + schema1:description """# Summary\r +This notebook demonstrates how to retrieve metadata associated to the paper [A SARS-CoV-2 cytopathicity dataset generated by high-content screening of a large drug repurposing collection](https://doi.org/10.1038/s41597-021-00848-4) and available in IDR at [idr0094-ellinger-sarscov2](https://idr.openmicroscopy.org/search/?query=Name:idr0094).\r +Over 300 compounds were used in this investigation. This notebook allows the user to calculate the half maximal inhibitory concentration (IC50) for each compound. IC50 is a measure of the potency of a substance in inhibiting a specific biological or biochemical function. IC50 is a quantitative measure that indicates how much of a particular inhibitory substance (e.g. 
drug) is needed to inhibit, in vitro, a given biological process or biological component by 50%.\r +User can download the IC50 for each compound used in that study\r +\r +The notebook can be launched in [My Binder](https://mybinder.org/v2/gh/IDR/idr0094-ellinger-sarscov2/master?urlpath=notebooks%2Fnotebooks%2Fidr0094-ic50.ipynb%3FscreenId%3D2603).\r +\r +A shiny app is also available for dynamic plotting of the IC50 curve for each compound.\r +This R shiny app can be launched in [My Binder](https://mybinder.org/v2/gh/IDR/idr0094-ellinger-sarscov2/master?urlpath=shiny/apps/)\r +\r +\r +# Inputs\r +Parameters needed to configure the workflow:\r +\r +**screenId**: Identifier of a screen in IDR.\r +\r +# Ouputs\r +Output file generated:\r +\r +**ic50.csv**: Comma separate value file containing the IC50 for each compound.\r +\r +""" ; + schema1:isPartOf ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/BSD-2-Clause" ; + schema1:name "Calculate the half maximal inhibitory concentration (IC50) for each compound used in a SARS-CoV-2 study" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/238?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-se-illumina-wgs-variant-calling" ; + schema1:mainEntity ; + schema1:name "COVID-19-SE-WGS-ILLUMINA (v0.1)" ; + schema1:sdDatePublished "2021-03-12 13:41:28 +0000" ; + schema1:softwareVersion "v0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 30245 ; + schema1:name "COVID-19-SE-WGS-ILLUMINA" ; + schema1:programmingLanguage . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "This workflow converts the top-ranking predicted pathways from the \"RetroSynthesis\" and \"Pathway Analysis\" workflows to plasmids intended to be expressed in the specified organism" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/23?version=1" ; + schema1:isBasedOn "https://galaxy-synbiocad.org/u/mdulac/w/genetic-design-1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genetic Design" ; + schema1:sdDatePublished "2024-07-12 13:37:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/23/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9991 ; + schema1:creator ; + schema1:dateCreated "2020-05-29T10:05:20Z" ; + schema1:dateModified "2023-01-16T13:41:35Z" ; + schema1:description "This workflow converts the top-ranking predicted pathways from the \"RetroSynthesis\" and \"Pathway Analysis\" workflows to plasmids intended to be expressed in the specified organism" ; + schema1:keywords "Retrosynthesis, genetic design, pathway prediction" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Genetic Design" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/23?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + ; + ns1:output , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Introduction\r +\r +`katdetectr` is an *R* package for the detection, characterization and visualization of localized hypermutated regions, often referred to as *kataegis*.\r +\r +Please see the [Application Note](https://www.biorxiv.org/content/10.1101/2022.07.11.499364v1) (under submission) for additional background, details and performance evaluations of `katdetectr`.\r +\r +The general workflow of `katdetectr` can be summarized as follows:\r +\r +1. Import of genomic variants; VCF, MAF or VRanges objects.\r +2. Detection of kataegis foci.\r +3. Visualization of segmentation and kataegis foci.\r +\r +Please see the [vignette](https://bioconductor.org/packages/release/bioc/vignettes/katdetectr/inst/doc/General_overview.html) for an overview of the workflow in a step-by-step manner on publicly-available datasets which are included within this package.\r +\r +\r +## Installation\r +\r +Download katdetectr from BioConductor:\r +```R\r +if (!requireNamespace("BiocManager", quietly = TRUE))\r + install.packages("BiocManager")\r +\r +BiocManager::install("katdetectr")\r +\r +```\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.463.1" ; + schema1:isBasedOn "https://github.com/ErasmusMC-CCBC/katdetectr" ; + schema1:license "GPL-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Katdetectr" ; + schema1:sdDatePublished "2024-07-12 13:33:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/463/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1203 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10674 ; + schema1:creator , + ; + schema1:dateCreated "2023-05-02T11:51:45Z" ; + schema1:dateModified "2023-05-02T11:58:38Z" ; + schema1:description """# Introduction\r +\r +`katdetectr` is an *R* package for the detection, characterization and visualization of localized hypermutated regions, often referred to as *kataegis*.\r +\r +Please see the [Application Note](https://www.biorxiv.org/content/10.1101/2022.07.11.499364v1) (under submission) for additional background, details and performance evaluations of `katdetectr`.\r +\r +The general workflow of `katdetectr` can be summarized as follows:\r +\r +1. Import of genomic variants; VCF, MAF or VRanges objects.\r +2. Detection of kataegis foci.\r +3. Visualization of segmentation and kataegis foci.\r +\r +Please see the [vignette](https://bioconductor.org/packages/release/bioc/vignettes/katdetectr/inst/doc/General_overview.html) for an overview of the workflow in a step-by-step manner on publicly-available datasets which are included within this package.\r +\r +\r +## Installation\r +\r +Download katdetectr from BioConductor:\r +```R\r +if (!requireNamespace("BiocManager", quietly = TRUE))\r + install.packages("BiocManager")\r +\r +BiocManager::install("katdetectr")\r +\r +```\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/GPL-2.0" ; + schema1:name "Katdetectr" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/463?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-10-03T10:01:02.570398" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/average-bigwig-between-replicates" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "average-bigwig-between-replicates/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.12" . + + a schema1:Dataset ; + schema1:datePublished "2024-01-12T14:56:00.228274" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-only-VGP3/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.19" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-07-12 13:20:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4227 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:03Z" ; + schema1:dateModified "2024-06-11T12:55:03Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# CMIP MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of computing **classical molecular interaction potentials** from **protein structures**, step by step, using the **BioExcel Building Blocks library (biobb)**. Examples shown are **Molecular Interaction Potentials (MIPs) grids, protein-protein/ligand interaction potentials, and protein titration**. The particular structures used are the **Lysozyme** protein (PDB code [1AKI](https://www.rcsb.org/structure/1aki)), and a MD simulation of the complex formed by the **SARS-CoV-2 Receptor Binding Domain and the human Angiotensin Converting Enzyme 2** (PDB code [6VW1](https://www.rcsb.org/structure/6vw1)).\r +\r +The code wrapped is the ***Classical Molecular Interaction Potentials (CMIP)*** code:\r +\r +**Classical molecular interaction potentials: Improved setup procedure in molecular dynamics simulations of proteins.**\r +*Gelpí, J.L., Kalko, S.G., Barril, X., Cirera, J., de la Cruz, X., Luque, F.J. and Orozco, M. 
(2001)*\r +*Proteins, 45: 428-437. [https://doi.org/10.1002/prot.1159](https://doi.org/10.1002/prot.1159)*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.773.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_cmip" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Classical Molecular Interaction Potentials" ; + schema1:sdDatePublished "2024-07-12 13:24:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/773/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 78658 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-04T14:46:28Z" ; + schema1:dateModified "2024-03-04T14:48:19Z" ; + schema1:description """# CMIP MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of computing **classical molecular interaction potentials** from **protein structures**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
Examples shown are **Molecular Interaction Potentials (MIPs) grids, protein-protein/ligand interaction potentials, and protein titration**. The particular structures used are the **Lysozyme** protein (PDB code [1AKI](https://www.rcsb.org/structure/1aki)), and a MD simulation of the complex formed by the **SARS-CoV-2 Receptor Binding Domain and the human Angiotensin Converting Enzyme 2** (PDB code [6VW1](https://www.rcsb.org/structure/6vw1)).\r +\r +The code wrapped is the ***Classical Molecular Interaction Potentials (CMIP)*** code:\r +\r +**Classical molecular interaction potentials: Improved setup procedure in molecular dynamics simulations of proteins.**\r +*Gelpí, J.L., Kalko, S.G., Barril, X., Cirera, J., de la Cruz, X., Luque, F.J. and Orozco, M. (2001)*\r +*Proteins, 45: 428-437. [https://doi.org/10.1002/prot.1159](https://doi.org/10.1002/prot.1159)*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/773?version=1" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Classical Molecular Interaction Potentials" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + 
schema1:url "https://github.com/bioexcel/biobb_wf_cmip/blob/main/biobb_wf_cmip/notebooks/biobb_wf_cmip.ipynb" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-07-12 13:18:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4396 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:09Z" ; + schema1:dateModified "2024-06-11T12:55:09Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A pipeline to investigate horizontal gene transfer from NGS data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/988?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/hgtseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hgtseq" ; + schema1:sdDatePublished "2024-07-12 13:21:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/988/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9471 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:54Z" ; + schema1:dateModified "2024-06-11T12:54:54Z" ; + schema1:description "A pipeline to investigate horizontal gene transfer from NGS data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/988?version=1" ; + schema1:keywords "BWA-mem, bwa-mem2, FastQC, genomics-visualization, ggbio, horizontal-gene-transfer, kraken2, krona, MultiQC, NGS, SAMTools, taxonomies, tidyverse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hgtseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/988?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Analysis of RNA-seq data starting from BAM and focusing on mRNA, lncRNA and miRNA" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/199?version=1" ; + schema1:isBasedOn "http://biotranslator.gr:8080/workflows/run?id=e14715ce11d8be59" ; + schema1:license "notspecified" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for lncRNA" ; + schema1:sdDatePublished "2024-07-12 13:36:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/199/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 307614 ; + schema1:dateCreated "2021-09-29T08:25:31Z" ; + schema1:dateModified "2023-01-16T13:53:17Z" ; + schema1:description "Analysis of RNA-seq data starting from BAM and focusing on mRNA, lncRNA and miRNA" ; + schema1:keywords "" ; + schema1:license "https://choosealicense.com/no-permission/" ; + schema1:name "lncRNA" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/199?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """## Learning objectives\r +- Read data to analyse from an object store.\r +- Analyse data in parallel using Dask.\r +- Show how to use public resources to train neural network.\r +- Load labels associated to the original data\r +- Compare results with ground truth.\r +\r +The authors of the PLOS Biology paper, "Nessys: A new set of tools for the automated detection of nuclei within intact tissues and dense 3D cultures" published in August 2019: https://doi.org/10.1371/journal.pbio.3000388, considered several image segmenation packages, but they did not use the approach described in this notebook.\r +\r +We will analyse the data using [Cellpose](https://www.cellpose.org/) and compare the output with the original segmentation produced by the authors. Cellpose was not considered by the authors. 
Our workflow shows how public repository can be accessed and data inside it used to validate software tools or new algorithms.\r +\r +We will use a predefined model from Cellpose as a starting point.\r +\r +## Launch\r +This notebook uses the [environment.yml](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_5/environment.yml) file.\r +\r +See [Setup](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_5/setup.md).""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.495.1" ; + schema1:isBasedOn "https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_5/Cellpose_parallel.ipynb" ; + schema1:license "BSD-2-Clause" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Load ome.zarr Image with labels from public S3 repositories, analyze in parallel using Cellpose and compare results" ; + schema1:sdDatePublished "2024-07-12 13:33:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/495/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 517664 ; + schema1:url "https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_5/includes/CellposeParallel.png" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17325 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-01T10:30:51Z" ; + schema1:dateModified "2023-07-04T00:40:25Z" ; + schema1:description """## Learning objectives\r +- Read data to analyse from an object store.\r +- Analyse data in parallel using Dask.\r +- Show how to use public resources to train neural network.\r +- Load labels associated to the original data\r +- Compare results with ground truth.\r +\r +The authors of the PLOS Biology paper, "Nessys: A new set of tools for the automated detection of nuclei within intact tissues and dense 3D cultures" published in August 2019: https://doi.org/10.1371/journal.pbio.3000388, considered several image segmenation packages, but they did not use the approach described in this notebook.\r +\r +We will analyse the data using [Cellpose](https://www.cellpose.org/) and compare the output with the original segmentation produced by the authors. Cellpose was not considered by the authors. 
Our workflow shows how public repository can be accessed and data inside it used to validate software tools or new algorithms.\r +\r +We will use a predefined model from Cellpose as a starting point.\r +\r +## Launch\r +This notebook uses the [environment.yml](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_5/environment.yml) file.\r +\r +See [Setup](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_5/setup.md).""" ; + schema1:image ; + schema1:keywords "OME-Zarr, Python, imaging, image processing, Machine Learning, S3" ; + schema1:license "https://spdx.org/licenses/BSD-2-Clause" ; + schema1:name "Load ome.zarr Image with labels from public S3 repositories, analyze in parallel using Cellpose and compare results" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_5/Cellpose_parallel.ipynb" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/protein-ligand-docking).\r +***\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.296.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_virtual_screening/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Protein-ligand Docking tutorial (Fpocket)" ; + 
schema1:sdDatePublished "2024-07-12 13:33:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/296/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 23696 ; + schema1:creator , + ; + schema1:dateCreated "2023-05-03T13:48:26Z" ; + schema1:dateModified "2023-05-03T13:49:48Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/protein-ligand-docking).\r +***\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 
[675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/296?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Protein-ligand Docking tutorial (Fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_virtual_screening/galaxy/biobb_wf_virtual_screening.ga" ; + schema1:version 3 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2023-09-15T09:40:24.299054" ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + , + , + ; + schema1:name "hic-hicup-cooler/hic-fastq-to-cool-hicup-cooler" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.11" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "hic-hicup-cooler/chic-fastq-to-cool-hicup-cooler" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:version "0.3" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.11" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.11" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "hic-hicup-cooler/hic-fastq-to-pairs-hicup" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:version "0.3" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.11" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "hic-hicup-cooler/hic-juicermediumtabix-to-cool-cooler" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:version "0.3" . + + a schema1:Dataset ; + schema1:datePublished "2023-01-16T17:00:18.505110" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "atacseq/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.3" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=20" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-07-12 13:21:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=20" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 18600 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:59Z" ; + schema1:dateModified "2024-06-11T12:54:59Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=20" ; + schema1:version 20 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=21" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-07-12 13:18:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=21" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13915 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:40Z" ; + schema1:dateModified "2024-06-11T12:54:40Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=21" ; + schema1:version 21 . + + a schema1:Dataset ; + schema1:hasPart , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-ivar-analysis" ; + schema1:mainEntity ; + schema1:name "sars-cov-2-pe-illumina-artic-ivar-analysis/SARS-COV-2-ILLUMINA-AMPLICON-IVAR-PANGOLIN-NEXTCLADE (v0.1)" ; + schema1:sdDatePublished "2021-08-31 03:00:45 +0100" ; + schema1:softwareVersion "v0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 48495 ; + schema1:name "SARS-COV-2-ILLUMINA-AMPLICON-IVAR-PANGOLIN-NEXTCLADE" ; + schema1:programmingLanguage . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Protype demonstrator of a workflow reducing HESS and INTEGRAL/SPI-ACS data to common Light Curve format and combining the lightcurves into a multi-wavelength observation." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/766?version=1" ; + schema1:isBasedOn "https://galaxy.odahub.fr/u/volodymyr/w/workflow-constructed-from-history-unnamed-history-1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Example Multi-Wavelength Light-Curve Analysis" ; + schema1:sdDatePublished "2024-07-12 13:24:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/766/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10761 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-02-21T15:36:09Z" ; + schema1:dateModified "2024-03-01T11:48:35Z" ; + schema1:description "Protype demonstrator of a workflow reducing HESS and INTEGRAL/SPI-ACS data to common Light Curve format and combining the lightcurves into a multi-wavelength observation." ; + schema1:isPartOf ; + schema1:keywords "astronomy" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Example Multi-Wavelength Light-Curve Analysis" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/766?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2022-10-20T13:31:26.677961" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.11" . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-09-14T22:03:45.618652" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "rnaseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.11" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/986?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/fetchngs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fetchngs" ; + schema1:sdDatePublished "2024-07-12 13:21:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/986/ro_crate?version=8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7284 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:53Z" ; + schema1:dateModified "2024-06-11T12:54:53Z" ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/986?version=13" ; + schema1:keywords "ddbj, download, ena, FASTQ, geo, sra, synapse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fetchngs" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/986?version=8" ; + schema1:version 8 . + + a schema1:MediaObject ; + schema1:about ; + schema1:author , + ; + schema1:encodingFormat "text/markdown" . 
+ + a schema1:Dataset ; + schema1:creator , + , + , + , + , + , + ; + schema1:datePublished "2023-04-13" ; + schema1:description "Study protocol" ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.7551181" ; + schema1:name "BY-COVID - WP5 - Baseline Use Case: SARS-CoV-2 vaccine effectiveness assessment - Study protocol" ; + schema1:url "https://zenodo.org/record/7825979" ; + schema1:version "1.0.3" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# workflow-partial-ustacks-only\r +\r +\r +These workflows are part of a set designed to work for RAD-seq data on the Galaxy platform, using the tools from the Stacks program. \r +\r +Galaxy Australia: https://usegalaxy.org.au/\r +\r +Stacks: http://catchenlab.life.illinois.edu/stacks/\r +\r +\r +For the full de novo workflow see https://workflowhub.eu/workflows/348\r +\r +You may want to run ustacks with different batches of samples. \r +* To be able to combine these later, there are some necessary steps - we need to keep track of how many samples have already run in ustacks, so that new samples can be labelled with different identifying numbers. \r +* In ustacks, under "Processing options" there is an option called "Start identifier at". \r +* The default for this is 1, which can be used for the first batch of samples. These will then be labelled as sample 1, sample 2 and so on. \r +* For any new batches of samples to process in ustacks, we will want to start numbering these at the next available number. e.g. if there were 10 samples in batch 1, this should then be set to start at 11. \r +\r +To combine multiple outputs from ustacks, providing these have been given appropriate starting identifiers:\r +* Find the ustacks output in the Galaxy history. This will be a list of samples. \r +* Click on the cross button next to the filename to delete, but select "Collection only". 
This releases the items from the list, but they will now be hidden in the Galaxy history.\r +* In the history panel, click on "hidden" to reveal any hidden files. Unhide the samples. \r +* Do this for all the batches of ustacks outputs that are needed. \r +* Click on the tick button, tick all the samples needed, then "For all selected" choose "Build dataset list"\r +* This is now a combined set of samples for input into cstacks. \r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/349?version=1" ; + schema1:isBasedOn "https://github.com/AnnaSyme/workflow-partial-ustacks-only.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Partial de novo workflow: ustacks only" ; + schema1:sdDatePublished "2024-07-12 13:35:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/349/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3854 ; + schema1:creator ; + schema1:dateCreated "2022-05-31T07:50:13Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """# workflow-partial-ustacks-only\r +\r +\r +These workflows are part of a set designed to work for RAD-seq data on the Galaxy platform, using the tools from the Stacks program. \r +\r +Galaxy Australia: https://usegalaxy.org.au/\r +\r +Stacks: http://catchenlab.life.illinois.edu/stacks/\r +\r +\r +For the full de novo workflow see https://workflowhub.eu/workflows/348\r +\r +You may want to run ustacks with different batches of samples. \r +* To be able to combine these later, there are some necessary steps - we need to keep track of how many samples have already run in ustacks, so that new samples can be labelled with different identifying numbers. \r +* In ustacks, under "Processing options" there is an option called "Start identifier at". 
\r +* The default for this is 1, which can be used for the first batch of samples. These will then be labelled as sample 1, sample 2 and so on. \r +* For any new batches of samples to process in ustacks, we will want to start numbering these at the next available number. e.g. if there were 10 samples in batch 1, this should then be set to start at 11. \r +\r +To combine multiple outputs from ustacks, providing these have been given appropriate starting identifiers:\r +* Find the ustacks output in the Galaxy history. This will be a list of samples. \r +* Click on the cross button next to the filename to delete, but select "Collection only". This releases the items from the list, but they will now be hidden in the Galaxy history.\r +* In the history panel, click on "hidden" to reveal any hidden files. Unhide the samples. \r +* Do this for all the batches of ustacks outputs that are needed. \r +* Click on the tick button, tick all the samples needed, then "For all selected" choose "Build dataset list"\r +* This is now a combined set of samples for input into cstacks. \r +""" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Partial de novo workflow: ustacks only" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/349?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "A prototype implementation of the Air Quality Prediction pipeline in Galaxy, using CWL tools." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/380?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Air Quality Prediction Prototype" ; + schema1:sdDatePublished "2024-07-12 13:35:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/380/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 25311 ; + schema1:creator , + ; + schema1:dateCreated "2022-07-29T11:16:20Z" ; + schema1:dateModified "2023-01-16T14:02:21Z" ; + schema1:description "A prototype implementation of the Air Quality Prediction pipeline in Galaxy, using CWL tools." ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Air Quality Prediction Prototype" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/380?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Mapping against all plant virus then make contig out of the mapped reads then blast them." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/102?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for 2: Plant virus confirmation" ; + schema1:sdDatePublished "2024-07-12 13:34:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/102/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 30174 ; + schema1:dateCreated "2021-02-04T09:10:04Z" ; + schema1:dateModified "2023-02-13T14:06:45Z" ; + schema1:description "Mapping against all plant virus then make contig out of the mapped reads then blast them." ; + schema1:keywords "Virology, mapping, Assembly, reads_selection, blast" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "2: Plant virus confirmation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/102?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2024-04-29T08:49:48.242493" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "atacseq/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1027?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/viralrecon" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/viralrecon" ; + schema1:sdDatePublished "2024-07-12 13:18:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1027/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6114 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:20Z" ; + schema1:dateModified "2024-06-11T12:55:20Z" ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1027?version=10" ; + schema1:keywords "Amplicon, ARTIC, Assembly, covid-19, covid19, illumina, long-read-sequencing, Metagenomics, nanopore, ONT, oxford-nanopore, SARS-CoV-2, variant-calling, viral, Virus" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/viralrecon" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1027?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/987?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/funcscan" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/funcscan" ; + schema1:sdDatePublished "2024-07-12 13:21:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/987/ro_crate?version=8" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16630 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:54Z" ; + schema1:dateModified "2024-06-11T12:54:54Z" ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/987?version=8" ; + schema1:keywords "amp, AMR, antibiotic-resistance, antimicrobial-peptides, antimicrobial-resistance-genes, arg, Assembly, bgc, biosynthetic-gene-clusters, contigs, function, Metagenomics, natural-products, screening, secondary-metabolites" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/funcscan" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/987?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """A porting of the Trinity RNA assembly pipeline, https://trinityrnaseq.github.io, that uses Nextflow to handle the underlying sub-tasks.\r +This enables additional capabilities to better use HPC resources, such as packing of tasks to fill up nodes and use of node-local disks to improve I/O.\r +By design, the pipeline separates the workflow logic (main file) and the cluster-specific configuration (config files), improving portability.\r +\r +Based on a pipeline by Sydney Informatics Hub: https://github.com/Sydney-Informatics-Hub/SIH-Raijin-Trinity""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/114?version=1" ; + schema1:isBasedOn "https://github.com/marcodelapierre/trinity-nf" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Trinity RNA Assembly" ; + schema1:sdDatePublished "2024-07-12 13:37:05 +0100" ; + 
schema1:url "https://workflowhub.eu/workflows/114/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8368 ; + schema1:creator ; + schema1:dateCreated "2021-03-17T03:01:55Z" ; + schema1:dateModified "2021-03-17T03:31:12Z" ; + schema1:description """A porting of the Trinity RNA assembly pipeline, https://trinityrnaseq.github.io, that uses Nextflow to handle the underlying sub-tasks.\r +This enables additional capabilities to better use HPC resources, such as packing of tasks to fill up nodes and use of node-local disks to improve I/O.\r +By design, the pipeline separates the workflow logic (main file) and the cluster-specific configuration (config files), improving portability.\r +\r +Based on a pipeline by Sydney Informatics Hub: https://github.com/Sydney-Informatics-Hub/SIH-Raijin-Trinity""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/114?version=1" ; + schema1:isPartOf ; + schema1:keywords "Assembly, Transcriptomics, RNASEQ, Nextflow" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Trinity RNA Assembly" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/114?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "This is a Galaxy workflow that uses to convert the16S BIOM file to table and figures. It is part of the metaDEGalaxy workflow MetaDEGalaxy: Galaxy workflow for differential abundance analysis of 16s metagenomic data. 
" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/142?version=1" ; + schema1:isBasedOn "https://github.com/QFAB-Bioinformatics/metaDEGalaxy" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for 16S_biodiversity_BIOM" ; + schema1:sdDatePublished "2024-07-12 13:36:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/142/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 102835 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 19403 ; + schema1:creator ; + schema1:dateCreated "2021-08-11T04:12:04Z" ; + schema1:dateModified "2024-04-17T04:16:05Z" ; + schema1:description "This is a Galaxy workflow that uses to convert the16S BIOM file to table and figures. It is part of the metaDEGalaxy workflow MetaDEGalaxy: Galaxy workflow for differential abundance analysis of 16s metagenomic data. " ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "MetaDEGalaxy" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "16S_biodiversity_BIOM" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/142?version=1" ; + schema1:version 1 ; + ns1:output , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# eQTL-Catalogue/qtlmap\r +**Portable eQTL analysis and statistical fine mapping workflow used by the eQTL Catalogue**\r +\r +### Introduction\r +\r +**eQTL-Catalogue/qtlmap** is a bioinformatics analysis pipeline used for QTL Analysis.\r +\r +The workflow takes phenotype count matrix (normalized and quality controlled) and genotype data as input, and finds associations between them with the help of sample metadata and phenotype metadata files (See [Input formats and preparation](docs/inputs_expl.md) for required input file details). To map QTLs, pipeline uses [QTLTools's](https://qtltools.github.io/qtltools/) PCA and RUN methods. For manipulation of files [BcfTools](https://samtools.github.io/bcftools/bcftools.html), [Tabix](http://www.htslib.org/doc/tabix.html) and custom [Rscript](https://www.rdocumentation.org/packages/utils/versions/3.5.3/topics/Rscript) scripts are used.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a bioinformatics workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with docker / singularity containers making installation trivial and results highly reproducible.\r +\r +\r +### Documentation\r +The eQTL-Catalogue/qtlmap pipeline comes with documentation about the pipeline, found in the `docs/` directory:\r +\r +1. [Installation](docs/installation.md)\r +2. Pipeline configuration\r + * [Local installation](docs/configuration/local.md)\r + * [Adding your own system](docs/configuration/adding_your_own.md)\r +3. [Input formats and preparation](docs/inputs_expl.md)\r +4. [Running the pipeline](docs/usage.md)\r +5. 
[Troubleshooting](docs/troubleshooting.md)\r +\r +\r +\r +### Pipeline Description\r +Mapping QTLs is a process of finding statistically significant associations between phenotypes and genetic variants located nearby (within a specific window around phenotype, a.k.a cis window)\r +This pipeline is designed to perform QTL mapping. It is intended to add this pipeline to the nf-core framework in the future.\r +High level representation of the pipeline is shown below:\r +\r +### Results\r +The output directory of the workflow contains the following subdirectories:\r +\r +1. PCA - genotype and gene expression PCA values used as covariates for QTL analysis.\r +2. sumstats - QTL summary statistics from nominal and permutation passes.\r +3. susie - SuSiE fine mapping credible sets.\r +4. susie_full - full set of susie results for all tested variants (very large files).\r +5. susie_merged - susie credible sets merged with summary statistics from univariate QTL analysis.\r +\r +Column names of the output files are explained [here](https://github.com/eQTL-Catalogue/eQTL-Catalogue-resources/blob/master/tabix/Columns.md).\r +\r +\r +# Contributors\r +* Nurlan Kerimov\r +* Kaur Alasoo\r +* Masahiro Kanai\r +* Ralf Tambets\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/300?version=1" ; + schema1:isBasedOn "https://github.com/eQTL-Catalogue/qtlmap.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for eQTL-Catalogue/qtlmap" ; + schema1:sdDatePublished "2024-07-12 13:35:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/300/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 706230 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15721 ; + schema1:dateCreated "2022-03-29T16:10:54Z" ; + schema1:dateModified "2023-01-16T13:59:09Z" ; + schema1:description """# eQTL-Catalogue/qtlmap\r +**Portable eQTL analysis and statistical fine mapping workflow used by the eQTL Catalogue**\r +\r +### Introduction\r +\r +**eQTL-Catalogue/qtlmap** is a bioinformatics analysis pipeline used for QTL Analysis.\r +\r +The workflow takes phenotype count matrix (normalized and quality controlled) and genotype data as input, and finds associations between them with the help of sample metadata and phenotype metadata files (See [Input formats and preparation](docs/inputs_expl.md) for required input file details). To map QTLs, pipeline uses [QTLTools's](https://qtltools.github.io/qtltools/) PCA and RUN methods. For manipulation of files [BcfTools](https://samtools.github.io/bcftools/bcftools.html), [Tabix](http://www.htslib.org/doc/tabix.html) and custom [Rscript](https://www.rdocumentation.org/packages/utils/versions/3.5.3/topics/Rscript) scripts are used.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a bioinformatics workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with docker / singularity containers making installation trivial and results highly reproducible.\r +\r +\r +### Documentation\r +The eQTL-Catalogue/qtlmap pipeline comes with documentation about the pipeline, found in the `docs/` directory:\r +\r +1. [Installation](docs/installation.md)\r +2. Pipeline configuration\r + * [Local installation](docs/configuration/local.md)\r + * [Adding your own system](docs/configuration/adding_your_own.md)\r +3. [Input formats and preparation](docs/inputs_expl.md)\r +4. [Running the pipeline](docs/usage.md)\r +5. 
[Troubleshooting](docs/troubleshooting.md)\r +\r +\r +\r +### Pipeline Description\r +Mapping QTLs is a process of finding statistically significant associations between phenotypes and genetic variants located nearby (within a specific window around phenotype, a.k.a. cis window).\r +This pipeline is designed to perform QTL mapping. It is intended to add this pipeline to the nf-core framework in the future.\r +High level representation of the pipeline is shown below:\r +\r +### Results\r +The output directory of the workflow contains the following subdirectories:\r +\r +1. PCA - genotype and gene expression PCA values used as covariates for QTL analysis.\r +2. sumstats - QTL summary statistics from nominal and permutation passes.\r +3. susie - SuSiE fine mapping credible sets.\r +4. susie_full - full set of susie results for all tested variants (very large files).\r +5. susie_merged - susie credible sets merged with summary statistics from univariate QTL analysis.\r +\r +Column names of the output files are explained [here](https://github.com/eQTL-Catalogue/eQTL-Catalogue-resources/blob/master/tabix/Columns.md).\r +\r +\r +# Contributors\r +* Nurlan Kerimov\r +* Kaur Alasoo\r +* Masahiro Kanai\r +* Ralf Tambets\r +""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "eQTL-Catalogue/qtlmap" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/300?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# 1. About TF-Prioritizer\r +\r +This pipeline gives you a full analysis of nfcore chromatin accessibility peak data (ChIP-Seq, ATAC-Seq or DNAse-Seq)\r +and nfcore RNA-seq count data. It performs\r +DESeq2, TEPIC and DYNAMITE including all preprocessing and postprocessing steps necessary to transform the data. 
It also\r +gives you plots for deep analysis of the data. The general workflow is sketched in the images below:\r +\r +## Graphical abstract:\r +\r +![Graphical abstract](https://raw.githubusercontent.com/biomedbigdata/TF-Prioritizer/master/media/graphicalAbstract.png)\r +\r +## Technical workflow:\r +\r +![Technical workflow](https://github.com/biomedbigdata/TF-Prioritizer/raw/master/media/technicalWorkflow.png)\r +\r +# 2. License and Citing\r +\r +TF-Prioritizer is distributed under the [GNU General Public License](https://www.gnu.org/licenses/gpl-3.0.en.html). The\r +Graphical Abstract and the Technical Workflow\r +were created using [biorender.com](https://biorender.com/).\r +\r +# 3. Usage\r +\r +The software can be executed using docker. For the following command, only [python3](https://www.python.org/downloads/),\r +[curl](https://curl.se/download.html) and [docker](https://docs.docker.com/get-docker/) are required.\r +Explanations about the configs can be found in\r +the [config readme](https://github.com/biomedbigdata/TF-Prioritizer/blob/master/configTemplates/README.md).\r +\r +```bash\r +curl -s https://raw.githubusercontent.com/biomedbigdata/TF-Prioritizer/master/docker.py | python3 - -c [config_file] -o [output_dir] -t [threads]\r +```\r +\r +Note that for this approach an internet connection is required. The docker image will be downloaded\r +from [DockerHub](https://hub.docker.com/r/nicotru/tf-prioritizer) on the first execution as well as with every update we\r +release. 
Furthermore, the wrapper script\r +will be fetched from GitHub with every execution.\r +\r +If curl is not available (for example if you are using windows), or you want to be able to execute the software without\r +an internet connection, you can download the wrapper script\r +from [here](https://raw.githubusercontent.com/biomedbigdata/TF-Prioritizer/pipeJar/docker.py).\r +\r +You can then execute the script using\r +\r +```bash\r +python3 [script_path] -c [config_file] -o [output_dir] -t [threads]\r +```\r +\r +## If you want to use the pipeline without docker\r +\r +We do not recommend using the pipeline without docker, because the dependencies are very complex, and it is very hard to\r +install them correctly. However, if you want to use the pipeline without docker, you can do so by installing the\r +dependencies manually. The dependencies and their correct installation process can be derived from\r +the [Dockerfile](https://github.com/biomedbigdata/TF-Prioritizer/blob/master/Dockerfile) and the environment scripts\r +which can be found in\r +the [environment directory](https://github.com/biomedbigdata/TF-Prioritizer/tree/master/environment).""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/433?version=1" ; + schema1:isBasedOn "https://github.com/biomedbigdata/TF-Prioritizer.git" ; + schema1:license "LGPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for TF-Prioritizer" ; + schema1:sdDatePublished "2024-07-12 13:34:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/433/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2005 ; + schema1:dateCreated "2023-02-02T14:42:11Z" ; + schema1:dateModified "2023-02-02T14:42:11Z" ; + schema1:description """# 1. 
About TF-Prioritizer\r +\r +This pipeline gives you a full analysis of nfcore chromatine accessibility peak data (ChIP-Seq, ATAC-Seq or DNAse-Seq)\r +and nfcore RNA-seq count data. It performs\r +DESeq2, TEPIC and DYNAMITE including all preprocessing and postprocessing steps necessary to transform the data. It also\r +gives you plots for deep analysis of the data. The general workflow is sketched in the images below:\r +\r +## Graphical abstract:\r +\r +![Graphical abstrat](https://raw.githubusercontent.com/biomedbigdata/TF-Prioritizer/master/media/graphicalAbstract.png)\r +\r +## Technical workflow:\r +\r +![Technical workflow](https://github.com/biomedbigdata/TF-Prioritizer/raw/master/media/technicalWorkflow.png)\r +\r +# 2. License and Citing\r +\r +TF-Prioritizer is distributed under the [GNU General Public License](https://www.gnu.org/licenses/gpl-3.0.en.html). The\r +Graphical Abstract and the Technical Workflow\r +was created using [biorender.com](https://biorender.com/).\r +\r +# 3. Usage\r +\r +The software can be executed using docker. For the following command, only [python3](https://www.python.org/downloads/),\r +[curl](https://curl.se/download.html) and [docker](https://docs.docker.com/get-docker/) are required.\r +Explanations about the configs can be found in\r +the [config readme](https://github.com/biomedbigdata/TF-Prioritizer/blob/master/configTemplates/README.md).\r +\r +```bash\r +curl -s https://raw.githubusercontent.com/biomedbigdata/TF-Prioritizer/master/docker.py | python3 - -c [config_file] -o [output_dir] -t [threads]\r +```\r +\r +Note, that for this approach an internet connection is required. The docker image will be downloaded\r +from [DockerHub](https://hub.docker.com/r/nicotru/tf-prioritizer) on the first execution as well as with every update we\r +release. 
Furthermore, the wrapper script\r +will be fetched from GitHub with every execution.\r +\r +If curl is not available (for example if you are using windows), or you want to be able to execute the software without\r +an internet connection, you can download the wrapper script\r +from [here](https://raw.githubusercontent.com/biomedbigdata/TF-Prioritizer/pipeJar/docker.py).\r +\r +You can then execute the script using\r +\r +```bash\r +python3 [script_path] -c [config_file] -o [output_dir] -t [threads]\r +```\r +\r +## If you want to use the pipeline without docker\r +\r +We do not recommend using the pipeline without docker, because the dependencies are very complex, and it is very hard to\r +install them correctly. However, if you want to use the pipeline without docker, you can do so by installing the\r +dependencies manually. The dependencies and their correct installation process can be derived from\r +the [Dockerfile](https://github.com/biomedbigdata/TF-Prioritizer/blob/master/Dockerfile) and the environment scripts\r +which can be found in\r +the [environment directory](https://github.com/biomedbigdata/TF-Prioritizer/tree/master/environment).""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/LGPL-3.0" ; + schema1:name "TF-Prioritizer" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/433?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 1828513 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "The workflow takes raw ONT reads and trimmed Illumina WGS paired reads collections, the ONT raw stats table (calculated from WF1) and the estimated genome size (calculated from WF1) to run NextDenovo and subsequently polish the assembly with HyPo. 
It produces collapsed assemblies (unpolished and polished) and runs all the QC analyses (gfastats, BUSCO, and Merqury)." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/789?version=1" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/galaxy" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ERGA ONT+Illumina Assembly+QC NextDenovo+HyPo v2403 (WF2)" ; + schema1:sdDatePublished "2024-07-12 13:23:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/789/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 232811 ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/2.Contigging/pics/Cont_ONTnextdenovo_2403.png" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 65262 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-11T14:45:27Z" ; + schema1:dateModified "2024-03-11T14:45:27Z" ; + schema1:description "The workflow takes raw ONT reads and trimmed Illumina WGS paired reads collections, the ONT raw stats table (calculated from WF1) and the estimated genome size (calculated from WF1) to run NextDenovo and subsequently polish the assembly with HyPo. It produces collapsed assemblies (unpolished and polished) and runs all the QC analyses (gfastats, BUSCO, and Merqury)." 
; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "name:ERGA, name:ASSEMBLY+QC, name:ONT, name:ILLUMINA" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ERGA ONT+Illumina Assembly+QC NextDenovo+HyPo v2403 (WF2)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/2.Contigging/Galaxy-Workflow-ERGA_ONT_Illumina_Assembly_QC_NextDenovo_HyPo_v2403_(WF2).ga" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and modern ancient DNA pipeline in Nextflow and with cloud support." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-07-12 13:21:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3451 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:49Z" ; + schema1:dateModified "2024-06-11T12:54:49Z" ; + schema1:description "A fully reproducible and modern ancient DNA pipeline in Nextflow and with cloud support." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + schema1:datePublished "2024-03-26T20:00:55.315103" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Scaffolding-HiC-VGP8/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """![bacpage](https://raw.githubusercontent.com/CholGen/bacpage/split_into_command/.github/logo_dark.png){width=500}\r +\r +This repository contains an easy-to-use pipeline for the assembly and analysis of bacterial genomes using ONT long-read or Illumina short-read technology.\r +\r +# Introduction\r +Advances in sequencing technology during the COVID-19 pandemic has led to massive increases in the generation of sequencing data. Many bioinformatics tools have been developed to analyze this data, but very few tools can be utilized by individuals without prior bioinformatics training.\r +\r +This pipeline was designed to encapsulate pre-existing tools to automate analysis of whole genome sequencing of bacteria. Installation is fast and straightfoward. 
The pipeline is easy to set up and contains rational defaults, but is highly modular and configurable by more advanced users.\r +A successful run generates consensus sequences, typing information, phylogenetic tree, and quality control report.\r +\r +# Features\r +We anticipate the pipeline will be able to perform the following functions:\r +- [x] Reference-based assembly of Illumina paired-end reads\r +- [x] *De novo* assembly of Illumina paired-end reads\r +- [ ] *De novo* assembly of ONT long reads\r +- [x] Run quality control checks\r +- [x] Variant calling using [bcftools](https://github.com/samtools/bcftools)\r +- [x] Maximum-likelihood phylogenetic inference of processed samples and background dataset using [iqtree](https://github.com/iqtree/iqtree2) \r +- [x] MLST profiling and virulence factor detection\r +- [x] Antimicrobial resistance genes detection\r +- [ ] Plasmid detection\r +\r +# Installation\r +1. Install `miniconda` by running the following two commands:\r +```commandline\r +curl -L -O "https://github.com/conda-forge/miniforge/releases/latest/download/Mambaforge-$(uname)-$(uname -m).sh"\r +bash Mambaforge-$(uname)-$(uname -m).sh\r +```\r +\r +2. Clone the repository:\r +```commandline\r +git clone https://github.com/CholGen/bacpage.git\r +```\r +\r +3. Install and activate the pipeline's conda environment:\r +```commandline\r +cd bacpage/\r +mamba env create -f environment.yaml\r +mamba activate bacpage\r +```\r +\r +4. Install the `bacpage` command:\r +```commandline\r +pip install .\r +```\r +\r +5. Test the installation:\r +```commandline\r +bacpage -h\r +bacpage version\r +```\r +These commands should print the help and version of the program. Please create an issue if this is not the case.\r +\r +# Usage\r +0. Navigate to the pipeline's directory.\r +1. Copy the `example/` directory to create a directory specifically for each batch of samples.\r +```commandline\r +cp example/ \r +```\r +2. 
Place raw sequencing reads in the `input/` directory of your project directory.\r +3. Record the name and absolute path of raw sequencing reads in the `sample_data.csv` found within your project directory.\r +4. Replace the values `` and `` in `config.yaml` found within your project directory, with the absolute path of your project directory and pipeline directory, respectively.\r +5. Determine how many cores are available on your computer:\r +```commandline\r +cat /proc/cpuinfo | grep processor\r +```\r +6. From the pipeline's directory, run the entire pipeline on your samples using the following command:\r +```commandline\r +snakemake --configfile /config.yaml --cores \r +```\r +This will generate a consensus sequence in FASTA format for each of your samples and place them in `/results/consensus_sequences/.masked.fasta`. An HTML report containing alignment and quality metrics for your samples can be found at `/results/reports/qc_report.html`. A phylogeny comparing your sequences to the background dataset can be found at `/results/phylogeny/phylogeny.tree`\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/693?version=1" ; + schema1:isBasedOn "https://github.com/CholGen/bacpage.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Reference-based assembly with bacpage" ; + schema1:sdDatePublished "2024-07-12 13:25:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/693/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 11384 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4757 ; + schema1:dateCreated "2023-12-20T17:45:10Z" ; + schema1:dateModified "2023-12-20T17:49:26Z" ; + schema1:description """![bacpage](https://raw.githubusercontent.com/CholGen/bacpage/split_into_command/.github/logo_dark.png){width=500}\r +\r +This repository contains an easy-to-use pipeline for the assembly and analysis of bacterial genomes using ONT long-read or Illumina short-read technology.\r +\r +# Introduction\r +Advances in sequencing technology during the COVID-19 pandemic have led to massive increases in the generation of sequencing data. Many bioinformatics tools have been developed to analyze this data, but very few tools can be utilized by individuals without prior bioinformatics training.\r +\r +This pipeline was designed to encapsulate pre-existing tools to automate analysis of whole genome sequencing of bacteria. Installation is fast and straightforward. The pipeline is easy to set up and contains rational defaults, but is highly modular and configurable by more advanced users.\r +A successful run generates consensus sequences, typing information, phylogenetic tree, and quality control report.\r +\r +# Features\r +We anticipate the pipeline will be able to perform the following functions:\r +- [x] Reference-based assembly of Illumina paired-end reads\r +- [x] *De novo* assembly of Illumina paired-end reads\r +- [ ] *De novo* assembly of ONT long reads\r +- [x] Run quality control checks\r +- [x] Variant calling using [bcftools](https://github.com/samtools/bcftools)\r +- [x] Maximum-likelihood phylogenetic inference of processed samples and background dataset using [iqtree](https://github.com/iqtree/iqtree2) \r +- [x] MLST profiling and virulence factor detection\r +- [x] Antimicrobial resistance genes detection\r +- [ ] Plasmid detection\r +\r +# Installation\r +1. 
Install `miniconda` by running the following two command:\r +```commandline\r +curl -L -O "https://github.com/conda-forge/miniforge/releases/latest/download/Mambaforge-$(uname)-$(uname -m).sh"\r +bash Mambaforge-$(uname)-$(uname -m).sh\r +```\r +\r +2. Clone the repository:\r +```commandline\r +git clone https://github.com/CholGen/bacpage.git\r +```\r +\r +3. Install and activate the pipeline's conda environment:\r +```commandline\r +cd bacpage/\r +mamba env create -f environment.yaml\r +mamba activate bacpage\r +```\r +\r +4. Install the `bacpage` command:\r +```commandline\r +pip install .\r +```\r +\r +5. Test the installation:\r +```commandline\r +bacpage -h\r +bacpage version\r +```\r +These command should print the help and version of the program. Please create an issue if this is not the case.\r +\r +# Usage\r +0. Navigate to the pipeline's directory.\r +1. Copy the `example/` directory to create a directory specifically for each batch of samples.\r +```commandline\r +cp example/ \r +```\r +2. Place raw sequencing reads in the `input/` directory of your project directory.\r +3. Record the name and absolute path of raw sequencing reads in the `sample_data.csv` found within your project directory.\r +4. Replace the values `` and `` in `config.yaml` found within your project directory, with the absolute path of your project directory and pipeline directory, respectively.\r +5. Determine how many cores are available on your computer:\r +```commandline\r +cat /proc/cpuinfo | grep processor\r +```\r +6. From the pipeline's directory, run the entire pipeline on your samples using the following command:\r +```commandline\r +snakemake --configfile /config.yaml --cores \r +```\r +This will generate a consensus sequence in FASTA format for each of your samples and place them in `/results/consensus_sequences/.masked.fasta`. An HTML report containing alignment and quality metrics for your samples can be found at `/results/reports/qc_report.html`. 
A phylogeny comparing your sequences to the background dataset can be found at `/results/phylogeny/phylogeny.tree`\r +""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Reference-based assembly with bacpage" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/693?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# scRNA-Seq pipelines\r +\r +Here we forge the tools to analyze single cell RNA-Seq experiments. The analysis workflow is based on the Bioconductor packages [*scater*](https://bioconductor.org/packages/devel/bioc/vignettes/scater/inst/doc/overview.html) and [*scran*](https://bioconductor.org/packages/devel/bioc/vignettes/scran/inst/doc/scran.html) as well as the Bioconductor workflows by Lun ATL, McCarthy DJ, & Marioni JC [*A step-by-step workflow for low-level analysis of single-cell RNA-seq data.*](http://doi.org/10.12688/f1000research.9501.1) F1000Res. 2016 Aug 31 [revised 2016 Oct 31];5:2122 and Amezquita RA, Lun ATL et al. [*Orchestrating Single-Cell Analysis with Bioconductor*](https://osca.bioconductor.org/index.html) Nat Methods. 2020 Feb;17(2):137-145.\r +\r +## Implemented protocols\r + - MARS-Seq (massively parallel single-cell RNA-sequencing): The protocol is based on the publications of Jaitin DA, et al. (2014). *Massively parallel single-cell RNA-seq for marker-free decomposition of tissues into cell types.* Science (New York, N.Y.), 343(6172), 776–779. https://doi.org/10.1126/science.1247651 and Keren-Shaul H., et al. (2019). *MARS-seq2.0: an experimental and analytical pipeline for indexed sorting combined with single-cell RNA sequencing.* Nature Protocols. https://doi.org/10.1038/s41596-019-0164-4. 
The MARS-Seq library preparation protocol is given [here](https://github.com/imbforge/NGSpipe2go/blob/master/resources/MARS-Seq_protocol_Step-by-Step_MML.pdf). The sequencing reads are demultiplexed according to the respective pool barcodes before they are used as input for the analysis pipeline. \r +- Smart-seq2: Libraries are generated using the [Smart-seq2 kit](http://www.nature.com/nmeth/journal/v10/n11/full/nmeth.2639.html). \r +\r +## Pipeline Workflow\r +All analysis steps are illustrated in the pipeline [flowchart](https://www.draw.io/?lightbox=1&highlight=0000ff&edit=_blank&layers=1&nav=1&title=scRNA-Seq#R7R3ZcpvK8mtUlTxIxSIh6dF2oiyVODm2Uzk5LykEI4kYAWGxrXz9nZ6FdUBIQoDjm%2BTeIwYYZqZ7eu%2BegXq1fXrn697ms2sie6BI5tNAfTNQFFmdTPB%2FoGVHW6bKjDasfctkDyUNt9YfxBol1hpZJgoyD4aua4eWl200XMdBRphp033ffcw%2BtnLt7Fc9fY0KDbeGbhdbv1tmuGGtsjZPbrxH1nrDPj1TpvTGUjfu174bOex7jusgemer827YHIONbrqPqSb17UC98l03pL%2B2T1fIhmXlK0bfW5TcjYfsIyes88J76eNqcz0L7YfNzfL6%2Fe3i55%2F3Qw6AB92O2FoMFM3GHV6a1gOsrm2tHXJD%2Bx3BUC99sgzxJf61Zv8lry39fAseEumLt5LFCHd87Tfh1sa%2FZHzP1pfIvoyX9Mq1XZ88pC7IH%2FxIEPrufQwkvIiXK9cJGUbJGoxbDzbIZD2SfuKrlWXbqU4R%2BRN3yu8QGKqXhru1DHwp4d9rWw8C9juGo0T6D%2FXQcmGFhnMpnl4aKgxQD8gP0VOqiUHpHXK3KPR3%2BBF2dzbR6CtsMw1nDAsfU6g5ZW2bFFaOJZVtCbYd1nHfCV7gHww1xGjyYFz%2Fc%2Fs9%2Bhi9%2BfPf3Ye7xY%2Fg4b%2Bh%2FKLx5K0Gf8vwZO3rpoWhfQz6NIAtykzKYEtMU1PYMpYE2KLxB0%2FBlj%2Ffwzd30r%2F%2FfVzcvPW3snn3bvdhOJELwEMmprfs0vXDjbt2Hd1%2Bm7SmIAALkzzzyXU9BpZfKAx3DIR6FLq4KYUU6MkK%2F4XXRxN29SN1580T65lc7PiFg%2Bebegkuf6TvJa%2BRq%2BQ98wLYDr40ALYAaWhcWDYfThEBMwhXCvnAjXwDVexFtsdD3V%2BjsOo59iAsfSUi%2BcjGNOwhywpFSEFexfPWd6kHPNdywiDV81doSPBzPJtk8VMZZ9lU7vnJbFb1PP5BR5CgZzyVWhj75e7L%2Fa%2FN7Ofvf%2B9CZfHz%2BrusLYez54KwRaw6PyKNZ00j0km8SN3DiRIgctYDlH8YEDBc4AdkzXsq50vwcK1exsJerpTBxeVWtxwAiuUh28LcADdeptkc%2FcY%2Bflh7JJXzCTzdybfpwAp8tMr0uglDkLYvADrKYm2FmMuOfNsbORg7lIW1Xa5cjCn45%2FW7W5iasnbxxRD%2Fb2m7S%2BDe6AH0ggWfN6YKi8C4ub4I0G%2F4udX9EP8c8fsjzL7dh1167JLBWGXSiFF%2BGPqRY%2Bgh3m2FeVb0Gq%2FrIlkAKfUvhocuEEVE61YCurzg0gU2VvbSDbwxYILGwV
3e6UnQbh6sYvLQCJEZEmJshfoSv50iM1j%2FRivLsYhCokivlnDndaXA%2Fezmbbu6GRBeE3jIINwGa%2BEwUdC5XTOyAQ2f%2F%2FwQxnIntHRgh7pjxpMbwrStFRY4CdPXMU9F%2FlETbgAHn5ARhbDeKRwsGUm5MncGHUgTqEBc20mrQPK4ARVILDMJhJTcEmBF0IOfWFH0sMoIk6cCI5fuZrwhtjyNhctVKSXVXkO5sGSKYMV422E6Q0HIHypZpWCa7cBdrQJgQjkgHCboiw0Zky4lff77R0bq36eaDtKKaaKnlqimTWoIslJX12xJQ%2FgWIP%2FL8heYgDGpBCtQPQ1g6T4BRbOcNaVpS9c3kT%2FEzSAaEEmBIT6heBLG8kl8J23jvSCihIrG8Dd%2BwtNNM%2B5b6Z8kRuEXjMKnjEHucOkr29FJEpeAI3GBpgi%2FcuDkmRXaZmeIUQdL8gaBTqpDqfBUDMPUU0o5IMOla%2B4KjQURKzRjPUXfejYaVAljycPF13mLxRsMSu14t5KDRYFUX1at%2FqGxOOaKWSx133DN5qcR9yu98tHvyPIB%2BzBwcf%2FS54ub2%2BEtIHL57GoO5OD5eq5rNz5Z2qn04U3ldF93Ml9MnxuELuuOTdWJtkvkdzIvsNU2PivSqfTKxry%2Fo2kZTWInnxbuNNo6HQOMEO7m5iZhfWkhk%2F%2BDn9IrA%2FMNn2zDR%2FhSJ3MEocJrbo6sOwn8EWtni5ywi0mNRqPmpqQ7IDW64QZ0XMnUQ%2F1cU4JGEUsn0hMRS9KabLWOoaSEX6aUpPQNUMksQ7cvmMc0BCXikvtPbbSCz7j4qZVNHEwr4lNKqxOPGytEt1iqgR4ffd2Lx3WaFq1kdDJ5pglUQpFOKM8PFuTxZUqWP0CBUzp11Tw3BY6D9NkocJ5Q%2FN%2Fi4VsOlfAlL9HZaPuQ7CC4p6XuYaQPh2xXXRAZ36GWsrJ4BN7wNTGnigxsy1pUx8u3bQpPHTVVSh7g5kysZl6mNK9ulM3Ycjl60P2gEaN%2FWZcnKaAFEJ0HIHkhy3cfLKLopC28IkSrZG%2BXsUH4Gr%2BYWRZmoyj9Ln0N5iq0JEuvXC8cHS4PpZZzL4dUqjnkqbyQKfyfyOuNxdlkueNYntbjjvIRQVnHc0eRc7xVuzP%2FYG8Nz%2BMsGM9geBZGmMjqy5Nbmgp4qh3xpPVEiollhIUehP9cEZJcR4D4HWHSF%2B6IDs7VU3cFA9YfybCJbxAGi%2Fnu%2FYB50nL8fBMtR3hvV%2FByxse3eHBA8Rex2xQeA88dvvHbGNHmmNvupeoUTqVUXUR3cpQ%2BS7hVrYS%2Bk0hO1rGURfgGCP1EygWgjYuUXtYEJEo73JlYh9ALQyy5ylVCTpC9dB9boCRKNSk5H0kY1yQJE6lpknBkEGTWQT3WpBxWnBbUKMSReDVfNo40DPpiwKqcg62a82ZTJGVvJUA%2FFYfU6bRRHBKKLT1yl%2B%2FBozKxRe1MbKlLo8Z9CdTO0ygljV%2F78XE8OT8%2Bys8mtaDRSO3aqNSWBFyAlqb52q%2FVxtve6xvVfHKt%2B2t1qPWHehzKhFoAljrvxb6fqrl9n%2BUr%2B59vOEHj9uHi8Z186%2F78cjn%2F8k19%2BrrZ3g97pD3%2FvXxo2gt8PJUPTWYn8aHauv3t3cVNbc3%2BBqvuxGjseRBkdE4dPsm3wBA%2FXIkfpyBzTiW%2B4QjguZxFmo6VdvHaMmk6ZZ0lfhMa3CR9TfwpOVoHvqzUQjNXVtYQAguZdy1vLdMkpDAHk6Tlhq2EOijJbiUJzsQBTskU2714WJNL%2FA%2Bv3hUw1wke5hW%2BlpNr%2FA8e98Mr18Fd6xYBLsII%2B4iCcFBMyG0ADdQZfDxrvSnggdBMr44PxoN6MtALlVi1ukyncQPNacmFWmGL1ncPkkQJ5iSUjnBFf4lC
LwK2A%2FUrYINfFJgOvvnbqGI53fmeY7%2BiG0%2FDIhyJxTUm467jPzw9YCZNXnj%2B%2F0LfWjbg%2B3tkPyDoddBGZM2Uc6MKk7KQKh3BnWri%2BbRneC4VUN1HQWSHQT%2BR3cAEOo3i4DS3gELbOwJWDFVFirYWVLGh6WpE%2FjPR0Iw8G%2BMdPF2dmtdqfMKL35Iaz3XZuyUPD3aruSVnvd%2BStrvu6X7EIxvyvWi4to0l81RIPdIN2I%2BcQ%2FULwc%2BAzBO1Rf4ilnqVLqXerkx%2FXJjdK%2FXOO7PTVo67N6SnKPX6CFTJnlKfVsMumUixiFdkEWwsZ%2FczMHxHD9Dvn%2FQGVu5%2FhiTRaBEYI9o2utmap0Vi5rsqDbDMByv2eXHII%2FjGaINsD%2FnB6ObkRRJ2WViYfsWqfsXjxUQv3mywJX0wQqYrTBAmO0qNOI4O%2Feya1gpoFsEM8mAiChtREEJXmr4FRuosAyrHJZGnwLlpoYMQ40ewwcQgoLROkRyETJRGN1F2Yy%2BYeVvS6nxWj8HLSgMcXlzk7fREjF%2FR1uPPM3NjazGOcf029uGkeNtZnTFn8vUXnXS84h93zk%2FqBaU06B%2BxnBXy0ROmfhbJRavrKjGBGmwptQG7MaEJPFadRkeSmEjbWvo6Q%2BSz%2B1JykznYrTJuKzbyvLGQqtKtW%2BX%2B92P4aG23MlI%2Baab0dbwcXg6nz0XVODayOkN7yvSVHEURAL5xIqNpWa5UiHyj5LBAZIrUapyteKtMzxNCN82hMx9w6bjyocCnhdzVJp1BtASDoYJl1nDnoeAo2klMlKQ2E0nrwaQMZKudh9qhmPk5HE4yJ38FyRxLPQwf597O%2Fjslj63Q26hZp7Yzs%2FGQ45OMcMqzib%2FrQ650bSirPYmT4gSDG31Z2fjSOKnc89q4VWb2k7rMDgyYIi%2BRiCmw3mMNIGXVT9xrZ%2BRn6fq1dCKHMzLtWTKyfEjVRCnWg2iTkYnjwV9gOYimTBHTmvRuJveC3k3lrEljMt8jvOeeH8%2FboXdLfXsFxkRy5ExNamdgwkJqLy2t9Xdr3UaSJx6mwYZ5OEWbPkuKlhfNJ1q3ormQolUndv5fMhceTZCmYcJVbUswr00lLkzz2%2BcPl7Ts4R3esBLkiv%2FOBl6IiEa6Lr1uEh8J7mjAnSdJIUXmu7kh6wMfIGndBLN1k5gEiBn1Rq6MAcqWwW9H0MLTwrIdm0noknEfTqNm7dOo3LlBqwn8HRSPCML3luTvIB91je9o5A%2FsY%2Fh%2BCNtm0lQNk5xIpwhKfHVuzv3rJbr%2BmnOztVEm8tHmXDWLZi2Zc%2FmA%2B2bOXSMHXbrmDouFSm2R0OcKMBMlMbl%2BIFQbeoMPImcNrvU2bLnwSSiOiAejHEyIJ9KgdUJ8BmFxPOtWWBRb%2BP5631ej0mLtzNx%2BWvg0tZ8WPiysHWbcw9hHKtXS0DAeS8%2FKGxUE2cCzrTA8d9pkSv7k8zlc5uwgeTInJVaJmmXiabkIegaZU5O6lTkrTVy9CYAtxt6Hvm7c9zT%2BlVqtYMpskMxC%2F2AFUNEsoEfk9j7VpAFszwfqjQUmprNF4gstHz2qnNB%2FCWFWW0JoWkA4KX6fD%2Fso8nX2gso3CGLnHAN4%2BTvkuJlzSiqkhO7pGpVPSKEHDAG8JZ6I%2B3GdmUQ%2B0n2F9DDyc6fxkW%2Bsw1XbMd2FKO2kDjAL0LZdmowYkCkCTlcVWx4cWQ64weq%2BTcgkSoZIT%2BW60dRSA1RavIOLpSEKC3fewr0xDXm5hXvFKnaPHDIxzzzIISO3yEDngiAacYJgvzLg5uV1H3p%2FplwSaRMnZxFMqlYVXtr5a3RJssUpBuT8LbxT2flb1GOWy3HjKUz3mJXDWUj8GvN2Urs%2FrlNA%2FWk6QTZRJWdq19DxW7vACsg6
Iy84%2FbScc53k8%2F%2BBnX5Q0HM492fOSxrE4lCxZpI8E7D22fR0aix0AT7%2F7PH97rs6x6%2Fy02rrqaNd8c54mJ1Yzz44dbLH9cfc4WE9sp7xuIy4lswfy%2FOQ%2BbpqZK3oio6%2BpaG3mIc9gE5IjpWnpj1PZ4dWIibC1M3NbVPXdV1bHt3IIxpBsv7T51EqR4zyWZtKc0q4ILq4ZR2c1%2BPtsRMA6odmEuF7RMaW%2BjYhYi%2FC1J9NWlTnNUtNnq%2FoTqdZXU3Y%2BqdtGvu5Y7KDGpWNhANMp4eFA0xnsxyKnSnhB2qf3LACLHVDAhi0QNag1mkiZWDKc2%2B6j84gqUkCE2oh14fVb4E5HBwGME%2Bjx%2FOJeMp76qfjolm8TU%2F9e%2BnjanM9C%2B2Hzc3y%2Bv3t4uef98NyR30BuYAPCPlc3mY1FrGzT2gNom2CvLS7AjtblrIzVrU5BSRhheV0VAZrKhR6Li3sLMKKLEVvhNNlCYlAVBvLFab%2FUxidEAmKpxwuYX8PUodbxgU5oa8VFUxsolTV8KoctxubWOlZdgvynZXegWPBSvPakY2v9CmOZS0vEqcsvnyFk0ay1iJtKVMxKlWJKgFY56LvkfLuIba%2FMyCXzGsE7cMudrBP89jVptOzEr1frs9TuCzP0BnGbO8L6tap2I4vze9VuRgv1ovT24F16F5qgMXMtWzmz0SQM8EjJDMcRjkTh%2BmyzL1FPRMLN2fa62Wg2wsw2OWRUyz%2FiLBTOxN2HlrxHSTfIR8ZQVHKkyp8DHV6EXfyIcSANNIR54k3grwWBSQfjaF5sUhrAZfLhIfCgDTxiN7QxEli%2F0E0XtGEoxSAd4sCLkh8RTXrL4yxEt9P1%2BjZnhBvnP3K%2FMb1rT94fnr8%2FVjGUJvZJAUlYVzcJMpsXtwkZ7Nq%2F%2FX5wEWbdrow7rratpC2cv%2BY2x%2FvrA9vl7e3T%2B7O3X388eXbsK6Rm5sQuq7ykjtdcpo9VXbv87ymx7mN3OyQCWKQ%2BATHYtQu9eJ6OwKP4B4%2BQU7UoPUZYjmBNXJx4YwW760VgA6iO8iN4JrNiljTYru3IrR8lyPbszN851N9px2XbBSegNrxMXIHEUJ88ZVUSibW64aPRT2eFAqSe6pIZvuhyULAF21izxPwchbwyplcvXUhX%2BkSbh%2F0wjjIjqu0ngv0x0L%2B0KDKuqggXPu2QiqrBlnLMB759u4S8m5hOvu4ZYId9Cqkqbnqm%2BH8wD1Xn72Op2ruUNaxoO6Qohb5qzptwPMgrhspdbu1zn4qa1UVtP26QGd1jIXD7lMd4x6f%2Fn4ayM9zMMnBMU6T3LklMyWNPnufV7STDn%2BvZ5GY9Acdz3K2XV1EElNWpReIlLcLqPuqxeafnyinIFJtO8JVFOqm7oW17Qe8JV2Y0Udb9wGsoKQrYpVluWyspk7ccRflHA02xao4unKUe3bWhHwYndrHKrN8l%2FY41j3EysS2r8Hu8eCklxf2nq1woyoyl%2B67C3wXBazkVjm1CoQeX8ZBJ6nCVwvyR8g508ut5eWzrFqVq7P1VoO%2FgxJnUb5gF2vGVM0yGBtfswMoCUvf6CbBAimrvzUU6jmf5tigKAJxKnD%2FqHzJGwcuZ8yl0UgHxBKStyI732LHns1%2F8m5EcoCS7dL8bohDxzwtQo5BE%2BNYkU6bnj5asfvtQiZX0nIHtIT0d0Xoj%2BfRzmm%2BePbIeILT%2BFcQZl2zVf0X5pSfkkGhwGbBvvvPFSVrmIKT4Rw5uWvX3%2BLP%2F9Ez2YPx2X0SCaGq19UHE3IQV8lKsIR681cUhOSIQZqCXzhjFE97BRuULuuD7lsQSUNSLS3HsEfJEhg7g0SCeRs9QK9rjusNySyV0BNeVJ%2BPjRUJ%2BHp1QcEaDm%2Bv38Zj%2Fvb54msChVpfubKjgMOC%2Bo
ssuh67ZPhBtBx6rhfZvKLPIeu6o8zvPi4ym9Rx46tDh1Cz2wvHAfKE9g7vYJx6Y61IRSkoUkQX3kdBwHEiLq9AyjFAkMKQFGIi3JqNBAi%2Fd%2BgAcGORdOTDCnJc56%2FJAZhP8vpRkTEImb7aRBaA0ELbIzX8SKtQI2e%2FdFApfDzOWl20ec5OW79SeNYw3FalcD7gvlUK%2FxzZoYU5b%2B2suWiLSTZhoTHDRu0diFUzSW6vf6VV5b5A82rtmLJqtjOBfNz1CQpyp7ECXdb8PNRDKFw%2BRtt6U3SlapD9rPj5NZ2l5aysdeQXJNIeGXNo3U7%2BBZDlNz5aZT5QIKGYio982xs5kIFTSkaHCSk1qXa1oAsC7fDYiF4mVTZTySFM608aMdYPQz9yIOTVLE5D3CFfW73Oep9%2FdbMG6HZXHKsJ%2BhY0v0aWW9Bb1VqLJp4vHdv2gpDjMxtZjFxPPUC6OAK%2BpIHqhMC5Xf%2FedokdZ4u1xzXRf1PjFemB5d0e0YCVV8KSggO%2B%2B6q5AeKF0LHYCQyHjYCD3I78TFYbCJgklxor9oMk4F8K9n8EUBQr6sjAYoOftdW5BmjxKQDWmtfr2JYiuptUOY4TwuPtGr%2BIBcGAFT6mDelSyLSWY2JMoAiOf3x2A2poChHYG%2B4RAvxNFjFA5KyKpK5jyUxa3QZnTEw2fde746ISDK48Nf40yVvJCN4TVZSKK6oXNW0ge0csZosM07mVPCl9ec96lcuFpYv43NOX8SVmMWH6cYxgm8%2BuCSr12%2F8B). Specify desired analysis details for your data in the respective *essential.vars.groovy* file (see below) and run the selected pipeline *marsseq.pipeline.groovy* or *smartsseq.pipeline.groovy* as described [here](https://gitlab.rlp.net/imbforge/NGSpipe2go/-/blob/master/README.md). The analysis allows further parameter fine-tuning subsequent the initial analysis e.g. for plotting and QC thresholding. Therefore, a customisable *sc.report.Rmd* file will be generated in the output reports folder after running the pipeline. Go through the steps and modify the default settings where appropriate. Subsequently, the *sc.report.Rmd* file can be converted to a final html report using the *knitr* R-package.\r +\r +### The pipelines includes:\r +- FastQC, MultiQC and other tools for rawdata quality control\r +- Adapter trimming with Cutadapt\r +- Mapping to the genome using STAR\r +- generation of bigWig tracks for visualisation of alignment\r +- Quantification with featureCounts (Subread) and UMI-tools (if UMIs are used for deduplication)\r +- Downstream analysis in R using a pre-designed markdown report file (*sc.report.Rmd*). 
Modify this file to fit your custom parameter and thresholds and render it to your final html report. The Rmd file uses, among others, the following tools and methods:\r + - QC: the [scater](http://bioconductor.org/packages/release/bioc/html/scater.html) package.\r + - Normalization: the [scran](http://bioconductor.org/packages/release/bioc/html/scran.html) package.\r + - Differential expression analysis: the [scde](http://bioconductor.org/packages/release/bioc/html/scde.html) package.\r + - Trajectory analysis (pseudotime): the [monocle](https://bioconductor.org/packages/release/bioc/html/monocle.html) package.\r +\r +### Pipeline parameter settings\r +- essential.vars.groovy: essential parameter describing the experiment \r + - project folder name\r + - reference genome\r + - experiment design\r + - adapter sequence, etc.\r +- additional (more specialized) parameter can be given in the var.groovy-files of the individual pipeline modules \r +- targets.txt: comma-separated txt-file giving information about the analysed samples. The following columns are required \r + - sample: sample identifier. Must be a unique substring of the input sample file name (e.g. common prefixes and suffixes may be removed). These names are grebbed against the count file names to merge targets.txt to the count data.\r + - plate: plate ID (number) \r + - row: plate row (letter)\r + - col: late column (number)\r + - cells: 0c/1c/10c (control wells)\r + - group: default variable for cell grouping (e.g. by condition)\r + \r + for pool-based libraries like MARSseq required additionally:\r + - pool: the pool ID comprises all cells from 1 library pool (i.e. a set of unique cell barcodes; the cell barcodes are re-used in other pools). Must be a unique substring of the input sample file name. For pool-based design, the pool ID is grebbed against the respective count data filename instead of the sample name as stated above.\r + - barcode: cell barcodes used as cell identifier in the count files. 
After merging the count data with targets.txt, the barcodes are replaced with sample IDs given in the sample column (i.e. here, sample names need not be a substring of input sample file name).\r +\r +### Programs required\r +- FastQC\r +- STAR\r +- Samtools\r +- Bedtools\r +- Subread\r +- Picard\r +- UCSC utilities\r +- RSeQC\r +- UMI-tools\r +- R\r +\r +## Resources\r +- QC: the [scater](http://bioconductor.org/packages/release/bioc/html/scater.html) package.\r +- Normalization: the [scran](http://bioconductor.org/packages/release/bioc/html/scran.html) package.\r +- Trajectory analysis (pseudotime): the [monocle](https://bioconductor.org/packages/release/bioc/html/monocle.html) package.\r +- A [tutorial](https://scrnaseq-course.cog.sanger.ac.uk/website/index.html) from Hemberg lab\r +- Luecken and Theis 2019 [Current best practices in single‐cell RNA‐seq analysis: a tutorial](https://www.embopress.org/doi/10.15252/msb.20188746)\r +\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/61?version=1" ; + schema1:isBasedOn "https://gitlab.rlp.net/imbforge/NGSpipe2go/-/tree/master/pipelines/scRNAseq" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNA-seq MARS-seq" ; + schema1:sdDatePublished "2024-07-12 13:36:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/61/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2104 ; + schema1:creator ; + schema1:dateCreated "2020-10-07T07:46:11Z" ; + schema1:dateModified "2023-01-16T13:44:52Z" ; + schema1:description """# scRNA-Seq pipelines\r +\r +Here we forge the tools to analyze single cell RNA-Seq experiments. 
The analysis workflow is based on the Bioconductor packages [*scater*](https://bioconductor.org/packages/devel/bioc/vignettes/scater/inst/doc/overview.html) and [*scran*](https://bioconductor.org/packages/devel/bioc/vignettes/scran/inst/doc/scran.html) as well as the Bioconductor workflows by Lun ATL, McCarthy DJ, & Marioni JC [*A step-by-step workflow for low-level analysis of single-cell RNA-seq data.*](http://doi.org/10.12688/f1000research.9501.1) F1000Res. 2016 Aug 31 [revised 2016 Oct 31];5:2122 and Amezquita RA, Lun ATL et al. [*Orchestrating Single-Cell Analysis with Bioconductor*](https://osca.bioconductor.org/index.html) Nat Methods. 2020 Feb;17(2):137-145.\r +\r +## Implemented protocols\r + - MARS-Seq (massively parallel single-cell RNA-sequencing): The protocol is based on the publications of Jaitin DA, et al. (2014). *Massively parallel single-cell RNA-seq for marker-free decomposition of tissues into cell types.* Science (New York, N.Y.), 343(6172), 776–779. https://doi.org/10.1126/science.1247651 and Keren-Shaul H., et al. (2019). *MARS-seq2.0: an experimental and analytical pipeline for indexed sorting combined with single-cell RNA sequencing.* Nature Protocols. https://doi.org/10.1038/s41596-019-0164-4. The MARS-Seq library preparation protocol is given [here](https://github.com/imbforge/NGSpipe2go/blob/master/resources/MARS-Seq_protocol_Step-by-Step_MML.pdf). The sequencing reads are demultiplexed according to the respective pool barcodes before they are used as input for the analysis pipeline. \r +- Smart-seq2: Libraries are generated using the [Smart-seq2 kit](http://www.nature.com/nmeth/journal/v10/n11/full/nmeth.2639.html). 
\r +\r +## Pipeline Workflow\r +All analysis steps are illustrated in the pipeline [flowchart](https://www.draw.io/?lightbox=1&highlight=0000ff&edit=_blank&layers=1&nav=1&title=scRNA-Seq#R7R3ZcpvK8mtUlTxIxSIh6dF2oiyVODm2Uzk5LykEI4kYAWGxrXz9nZ6FdUBIQoDjm%2BTeIwYYZqZ7eu%2BegXq1fXrn697ms2sie6BI5tNAfTNQFFmdTPB%2FoGVHW6bKjDasfctkDyUNt9YfxBol1hpZJgoyD4aua4eWl200XMdBRphp033ffcw%2BtnLt7Fc9fY0KDbeGbhdbv1tmuGGtsjZPbrxH1nrDPj1TpvTGUjfu174bOex7jusgemer827YHIONbrqPqSb17UC98l03pL%2B2T1fIhmXlK0bfW5TcjYfsIyes88J76eNqcz0L7YfNzfL6%2Fe3i55%2F3Qw6AB92O2FoMFM3GHV6a1gOsrm2tHXJD%2Bx3BUC99sgzxJf61Zv8lry39fAseEumLt5LFCHd87Tfh1sa%2FZHzP1pfIvoyX9Mq1XZ88pC7IH%2FxIEPrufQwkvIiXK9cJGUbJGoxbDzbIZD2SfuKrlWXbqU4R%2BRN3yu8QGKqXhru1DHwp4d9rWw8C9juGo0T6D%2FXQcmGFhnMpnl4aKgxQD8gP0VOqiUHpHXK3KPR3%2BBF2dzbR6CtsMw1nDAsfU6g5ZW2bFFaOJZVtCbYd1nHfCV7gHww1xGjyYFz%2Fc%2Fs9%2Bhi9%2BfPf3Ye7xY%2Fg4b%2Bh%2FKLx5K0Gf8vwZO3rpoWhfQz6NIAtykzKYEtMU1PYMpYE2KLxB0%2FBlj%2Ffwzd30r%2F%2FfVzcvPW3snn3bvdhOJELwEMmprfs0vXDjbt2Hd1%2Bm7SmIAALkzzzyXU9BpZfKAx3DIR6FLq4KYUU6MkK%2F4XXRxN29SN1580T65lc7PiFg%2Bebegkuf6TvJa%2BRq%2BQ98wLYDr40ALYAaWhcWDYfThEBMwhXCvnAjXwDVexFtsdD3V%2BjsOo59iAsfSUi%2BcjGNOwhywpFSEFexfPWd6kHPNdywiDV81doSPBzPJtk8VMZZ9lU7vnJbFb1PP5BR5CgZzyVWhj75e7L%2Fa%2FN7Ofvf%2B9CZfHz%2BrusLYez54KwRaw6PyKNZ00j0km8SN3DiRIgctYDlH8YEDBc4AdkzXsq50vwcK1exsJerpTBxeVWtxwAiuUh28LcADdeptkc%2FcY%2Bflh7JJXzCTzdybfpwAp8tMr0uglDkLYvADrKYm2FmMuOfNsbORg7lIW1Xa5cjCn45%2FW7W5iasnbxxRD%2Fb2m7S%2BDe6AH0ggWfN6YKi8C4ub4I0G%2F4udX9EP8c8fsjzL7dh1167JLBWGXSiFF%2BGPqRY%2Bgh3m2FeVb0Gq%2FrIlkAKfUvhocuEEVE61YCurzg0gU2VvbSDbwxYILGwV3e6UnQbh6sYvLQCJEZEmJshfoSv50iM1j%2FRivLsYhCokivlnDndaXA%2Fezmbbu6GRBeE3jIINwGa%2BEwUdC5XTOyAQ2f%2F%2FwQxnIntHRgh7pjxpMbwrStFRY4CdPXMU9F%2FlETbgAHn5ARhbDeKRwsGUm5MncGHUgTqEBc20mrQPK4ARVILDMJhJTcEmBF0IOfWFH0sMoIk6cCI5fuZrwhtjyNhctVKSXVXkO5sGSKYMV422E6Q0HIHypZpWCa7cBdrQJgQjkgHCboiw0Zky4lff77R0bq36eaDtKKaaKnlqimTWoIslJX12xJQ%2FgWIP%2FL8heYgDGpBCtQPQ1g6T4BRbOcNaVpS9c3kT%2FEzSAaEEmBIT6heBLG8kl8J23jvSCihIrG8Dd%2BwtNNM%2B5b6Z8kRuEXjMKnjE
HucOkr29FJEpeAI3GBpgi%2FcuDkmRXaZmeIUQdL8gaBTqpDqfBUDMPUU0o5IMOla%2B4KjQURKzRjPUXfejYaVAljycPF13mLxRsMSu14t5KDRYFUX1at%2FqGxOOaKWSx133DN5qcR9yu98tHvyPIB%2BzBwcf%2FS54ub2%2BEtIHL57GoO5OD5eq5rNz5Z2qn04U3ldF93Ml9MnxuELuuOTdWJtkvkdzIvsNU2PivSqfTKxry%2Fo2kZTWInnxbuNNo6HQOMEO7m5iZhfWkhk%2F%2BDn9IrA%2FMNn2zDR%2FhSJ3MEocJrbo6sOwn8EWtni5ywi0mNRqPmpqQ7IDW64QZ0XMnUQ%2F1cU4JGEUsn0hMRS9KabLWOoaSEX6aUpPQNUMksQ7cvmMc0BCXikvtPbbSCz7j4qZVNHEwr4lNKqxOPGytEt1iqgR4ffd2Lx3WaFq1kdDJ5pglUQpFOKM8PFuTxZUqWP0CBUzp11Tw3BY6D9NkocJ5Q%2FN%2Fi4VsOlfAlL9HZaPuQ7CC4p6XuYaQPh2xXXRAZ36GWsrJ4BN7wNTGnigxsy1pUx8u3bQpPHTVVSh7g5kysZl6mNK9ulM3Ycjl60P2gEaN%2FWZcnKaAFEJ0HIHkhy3cfLKLopC28IkSrZG%2BXsUH4Gr%2BYWRZmoyj9Ln0N5iq0JEuvXC8cHS4PpZZzL4dUqjnkqbyQKfyfyOuNxdlkueNYntbjjvIRQVnHc0eRc7xVuzP%2FYG8Nz%2BMsGM9geBZGmMjqy5Nbmgp4qh3xpPVEiollhIUehP9cEZJcR4D4HWHSF%2B6IDs7VU3cFA9YfybCJbxAGi%2Fnu%2FYB50nL8fBMtR3hvV%2FByxse3eHBA8Rex2xQeA88dvvHbGNHmmNvupeoUTqVUXUR3cpQ%2BS7hVrYS%2Bk0hO1rGURfgGCP1EygWgjYuUXtYEJEo73JlYh9ALQyy5ylVCTpC9dB9boCRKNSk5H0kY1yQJE6lpknBkEGTWQT3WpBxWnBbUKMSReDVfNo40DPpiwKqcg62a82ZTJGVvJUA%2FFYfU6bRRHBKKLT1yl%2B%2FBozKxRe1MbKlLo8Z9CdTO0ygljV%2F78XE8OT8%2Bys8mtaDRSO3aqNSWBFyAlqb52q%2FVxtve6xvVfHKt%2B2t1qPWHehzKhFoAljrvxb6fqrl9n%2BUr%2B59vOEHj9uHi8Z186%2F78cjn%2F8k19%2BrrZ3g97pD3%2FvXxo2gt8PJUPTWYn8aHauv3t3cVNbc3%2BBqvuxGjseRBkdE4dPsm3wBA%2FXIkfpyBzTiW%2B4QjguZxFmo6VdvHaMmk6ZZ0lfhMa3CR9TfwpOVoHvqzUQjNXVtYQAguZdy1vLdMkpDAHk6Tlhq2EOijJbiUJzsQBTskU2714WJNL%2FA%2Bv3hUw1wke5hW%2BlpNr%2FA8e98Mr18Fd6xYBLsII%2B4iCcFBMyG0ADdQZfDxrvSnggdBMr44PxoN6MtALlVi1ukyncQPNacmFWmGL1ncPkkQJ5iSUjnBFf4lCLwK2A%2FUrYINfFJgOvvnbqGI53fmeY7%2BiG0%2FDIhyJxTUm467jPzw9YCZNXnj%2B%2F0LfWjbg%2B3tkPyDoddBGZM2Uc6MKk7KQKh3BnWri%2BbRneC4VUN1HQWSHQT%2BR3cAEOo3i4DS3gELbOwJWDFVFirYWVLGh6WpE%2FjPR0Iw8G%2BMdPF2dmtdqfMKL35Iaz3XZuyUPD3aruSVnvd%2BStrvu6X7EIxvyvWi4to0l81RIPdIN2I%2BcQ%2FULwc%2BAzBO1Rf4ilnqVLqXerkx%2FXJjdK%2FXOO7PTVo67N6SnKPX6CFTJnlKfVsMumUixiFdkEWwsZ%2FczMHxHD9Dvn%2FQGVu5%2FhiTRaBEYI9o2utmap0Vi5rsqDbDMByv2eXHII%2FjGaINsD%2FnB6ObkRRJ2WViY
fsWqfsXjxUQv3mywJX0wQqYrTBAmO0qNOI4O%2Feya1gpoFsEM8mAiChtREEJXmr4FRuosAyrHJZGnwLlpoYMQ40ewwcQgoLROkRyETJRGN1F2Yy%2BYeVvS6nxWj8HLSgMcXlzk7fREjF%2FR1uPPM3NjazGOcf029uGkeNtZnTFn8vUXnXS84h93zk%2FqBaU06B%2BxnBXy0ROmfhbJRavrKjGBGmwptQG7MaEJPFadRkeSmEjbWvo6Q%2BSz%2B1JykznYrTJuKzbyvLGQqtKtW%2BX%2B92P4aG23MlI%2Baab0dbwcXg6nz0XVODayOkN7yvSVHEURAL5xIqNpWa5UiHyj5LBAZIrUapyteKtMzxNCN82hMx9w6bjyocCnhdzVJp1BtASDoYJl1nDnoeAo2klMlKQ2E0nrwaQMZKudh9qhmPk5HE4yJ38FyRxLPQwf597O%2Fjslj63Q26hZp7Yzs%2FGQ45OMcMqzib%2FrQ650bSirPYmT4gSDG31Z2fjSOKnc89q4VWb2k7rMDgyYIi%2BRiCmw3mMNIGXVT9xrZ%2BRn6fq1dCKHMzLtWTKyfEjVRCnWg2iTkYnjwV9gOYimTBHTmvRuJveC3k3lrEljMt8jvOeeH8%2FboXdLfXsFxkRy5ExNamdgwkJqLy2t9Xdr3UaSJx6mwYZ5OEWbPkuKlhfNJ1q3ormQolUndv5fMhceTZCmYcJVbUswr00lLkzz2%2BcPl7Ts4R3esBLkiv%2FOBl6IiEa6Lr1uEh8J7mjAnSdJIUXmu7kh6wMfIGndBLN1k5gEiBn1Rq6MAcqWwW9H0MLTwrIdm0noknEfTqNm7dOo3LlBqwn8HRSPCML3luTvIB91je9o5A%2FsY%2Fh%2BCNtm0lQNk5xIpwhKfHVuzv3rJbr%2BmnOztVEm8tHmXDWLZi2Zc%2FmA%2B2bOXSMHXbrmDouFSm2R0OcKMBMlMbl%2BIFQbeoMPImcNrvU2bLnwSSiOiAejHEyIJ9KgdUJ8BmFxPOtWWBRb%2BP5631ej0mLtzNx%2BWvg0tZ8WPiysHWbcw9hHKtXS0DAeS8%2FKGxUE2cCzrTA8d9pkSv7k8zlc5uwgeTInJVaJmmXiabkIegaZU5O6lTkrTVy9CYAtxt6Hvm7c9zT%2BlVqtYMpskMxC%2F2AFUNEsoEfk9j7VpAFszwfqjQUmprNF4gstHz2qnNB%2FCWFWW0JoWkA4KX6fD%2Fso8nX2gso3CGLnHAN4%2BTvkuJlzSiqkhO7pGpVPSKEHDAG8JZ6I%2B3GdmUQ%2B0n2F9DDyc6fxkW%2Bsw1XbMd2FKO2kDjAL0LZdmowYkCkCTlcVWx4cWQ64weq%2BTcgkSoZIT%2BW60dRSA1RavIOLpSEKC3fewr0xDXm5hXvFKnaPHDIxzzzIISO3yEDngiAacYJgvzLg5uV1H3p%2FplwSaRMnZxFMqlYVXtr5a3RJssUpBuT8LbxT2flb1GOWy3HjKUz3mJXDWUj8GvN2Urs%2FrlNA%2FWk6QTZRJWdq19DxW7vACsg6Iy84%2FbScc53k8%2F%2BBnX5Q0HM492fOSxrE4lCxZpI8E7D22fR0aix0AT7%2F7PH97rs6x6%2Fy02rrqaNd8c54mJ1Yzz44dbLH9cfc4WE9sp7xuIy4lswfy%2FOQ%2BbpqZK3oio6%2BpaG3mIc9gE5IjpWnpj1PZ4dWIibC1M3NbVPXdV1bHt3IIxpBsv7T51EqR4zyWZtKc0q4ILq4ZR2c1%2BPtsRMA6odmEuF7RMaW%2BjYhYi%2FC1J9NWlTnNUtNnq%2FoTqdZXU3Y%2BqdtGvu5Y7KDGpWNhANMp4eFA0xnsxyKnSnhB2qf3LACLHVDAhi0QNag1mkiZWDKc2%2B6j84gqUkCE2oh14fVb4E5HBwGME%2Bjx%2FOJeMp76qfjolm8TU%2F9e%2BnjanM9C%2B2Hzc3y%2Bv3t4u
ef98NyR30BuYAPCPlc3mY1FrGzT2gNom2CvLS7AjtblrIzVrU5BSRhheV0VAZrKhR6Li3sLMKKLEVvhNNlCYlAVBvLFab%2FUxidEAmKpxwuYX8PUodbxgU5oa8VFUxsolTV8KoctxubWOlZdgvynZXegWPBSvPakY2v9CmOZS0vEqcsvnyFk0ay1iJtKVMxKlWJKgFY56LvkfLuIba%2FMyCXzGsE7cMudrBP89jVptOzEr1frs9TuCzP0BnGbO8L6tap2I4vze9VuRgv1ovT24F16F5qgMXMtWzmz0SQM8EjJDMcRjkTh%2BmyzL1FPRMLN2fa62Wg2wsw2OWRUyz%2FiLBTOxN2HlrxHSTfIR8ZQVHKkyp8DHV6EXfyIcSANNIR54k3grwWBSQfjaF5sUhrAZfLhIfCgDTxiN7QxEli%2F0E0XtGEoxSAd4sCLkh8RTXrL4yxEt9P1%2BjZnhBvnP3K%2FMb1rT94fnr8%2FVjGUJvZJAUlYVzcJMpsXtwkZ7Nq%2F%2FX5wEWbdrow7rratpC2cv%2BY2x%2FvrA9vl7e3T%2B7O3X388eXbsK6Rm5sQuq7ykjtdcpo9VXbv87ymx7mN3OyQCWKQ%2BATHYtQu9eJ6OwKP4B4%2BQU7UoPUZYjmBNXJx4YwW760VgA6iO8iN4JrNiljTYru3IrR8lyPbszN851N9px2XbBSegNrxMXIHEUJ88ZVUSibW64aPRT2eFAqSe6pIZvuhyULAF21izxPwchbwyplcvXUhX%2BkSbh%2F0wjjIjqu0ngv0x0L%2B0KDKuqggXPu2QiqrBlnLMB759u4S8m5hOvu4ZYId9Cqkqbnqm%2BH8wD1Xn72Op2ruUNaxoO6Qohb5qzptwPMgrhspdbu1zn4qa1UVtP26QGd1jIXD7lMd4x6f%2Fn4ayM9zMMnBMU6T3LklMyWNPnufV7STDn%2BvZ5GY9Acdz3K2XV1EElNWpReIlLcLqPuqxeafnyinIFJtO8JVFOqm7oW17Qe8JV2Y0Udb9wGsoKQrYpVluWyspk7ccRflHA02xao4unKUe3bWhHwYndrHKrN8l%2FY41j3EysS2r8Hu8eCklxf2nq1woyoyl%2B67C3wXBazkVjm1CoQeX8ZBJ6nCVwvyR8g508ut5eWzrFqVq7P1VoO%2FgxJnUb5gF2vGVM0yGBtfswMoCUvf6CbBAimrvzUU6jmf5tigKAJxKnD%2FqHzJGwcuZ8yl0UgHxBKStyI732LHns1%2F8m5EcoCS7dL8bohDxzwtQo5BE%2BNYkU6bnj5asfvtQiZX0nIHtIT0d0Xoj%2BfRzmm%2BePbIeILT%2BFcQZl2zVf0X5pSfkkGhwGbBvvvPFSVrmIKT4Rw5uWvX3%2BLP%2F9Ez2YPx2X0SCaGq19UHE3IQV8lKsIR681cUhOSIQZqCXzhjFE97BRuULuuD7lsQSUNSLS3HsEfJEhg7g0SCeRs9QK9rjusNySyV0BNeVJ%2BPjRUJ%2BHp1QcEaDm%2Bv38Zj%2Fvb54msChVpfubKjgMOC%2Bossuh67ZPhBtBx6rhfZvKLPIeu6o8zvPi4ym9Rx46tDh1Cz2wvHAfKE9g7vYJx6Y61IRSkoUkQX3kdBwHEiLq9AyjFAkMKQFGIi3JqNBAi%2Fd%2BgAcGORdOTDCnJc56%2FJAZhP8vpRkTEImb7aRBaA0ELbIzX8SKtQI2e%2FdFApfDzOWl20ec5OW79SeNYw3FalcD7gvlUK%2FxzZoYU5b%2B2suWiLSTZhoTHDRu0diFUzSW6vf6VV5b5A82rtmLJqtjOBfNz1CQpyp7ECXdb8PNRDKFw%2BRtt6U3SlapD9rPj5NZ2l5aysdeQXJNIeGXNo3U7%2BBZDlNz5aZT5QIKGYio982xs5kIFTSkaHCSk1qXa1oAsC7fDYiF4mVTZTySFM608aMdYPQz9yIOTVLE5D3CFfW73Oep9%2FdbMG6H
ZXHKsJ%2BhY0v0aWW9Bb1VqLJp4vHdv2gpDjMxtZjFxPPUC6OAK%2BpIHqhMC5Xf%2FedokdZ4u1xzXRf1PjFemB5d0e0YCVV8KSggO%2B%2B6q5AeKF0LHYCQyHjYCD3I78TFYbCJgklxor9oMk4F8K9n8EUBQr6sjAYoOftdW5BmjxKQDWmtfr2JYiuptUOY4TwuPtGr%2BIBcGAFT6mDelSyLSWY2JMoAiOf3x2A2poChHYG%2B4RAvxNFjFA5KyKpK5jyUxa3QZnTEw2fde746ISDK48Nf40yVvJCN4TVZSKK6oXNW0ge0csZosM07mVPCl9ec96lcuFpYv43NOX8SVmMWH6cYxgm8%2BuCSr12%2F8B). Specify desired analysis details for your data in the respective *essential.vars.groovy* file (see below) and run the selected pipeline *marsseq.pipeline.groovy* or *smartsseq.pipeline.groovy* as described [here](https://gitlab.rlp.net/imbforge/NGSpipe2go/-/blob/master/README.md). The analysis allows further parameter fine-tuning subsequent the initial analysis e.g. for plotting and QC thresholding. Therefore, a customisable *sc.report.Rmd* file will be generated in the output reports folder after running the pipeline. Go through the steps and modify the default settings where appropriate. Subsequently, the *sc.report.Rmd* file can be converted to a final html report using the *knitr* R-package.\r +\r +### The pipelines includes:\r +- FastQC, MultiQC and other tools for rawdata quality control\r +- Adapter trimming with Cutadapt\r +- Mapping to the genome using STAR\r +- generation of bigWig tracks for visualisation of alignment\r +- Quantification with featureCounts (Subread) and UMI-tools (if UMIs are used for deduplication)\r +- Downstream analysis in R using a pre-designed markdown report file (*sc.report.Rmd*). Modify this file to fit your custom parameter and thresholds and render it to your final html report. 
The Rmd file uses, among others, the following tools and methods:\r + - QC: the [scater](http://bioconductor.org/packages/release/bioc/html/scater.html) package.\r + - Normalization: the [scran](http://bioconductor.org/packages/release/bioc/html/scran.html) package.\r + - Differential expression analysis: the [scde](http://bioconductor.org/packages/release/bioc/html/scde.html) package.\r + - Trajectory analysis (pseudotime): the [monocle](https://bioconductor.org/packages/release/bioc/html/monocle.html) package.\r +\r +### Pipeline parameter settings\r +- essential.vars.groovy: essential parameter describing the experiment \r + - project folder name\r + - reference genome\r + - experiment design\r + - adapter sequence, etc.\r +- additional (more specialized) parameter can be given in the var.groovy-files of the individual pipeline modules \r +- targets.txt: comma-separated txt-file giving information about the analysed samples. The following columns are required \r + - sample: sample identifier. Must be a unique substring of the input sample file name (e.g. common prefixes and suffixes may be removed). These names are grebbed against the count file names to merge targets.txt to the count data.\r + - plate: plate ID (number) \r + - row: plate row (letter)\r + - col: late column (number)\r + - cells: 0c/1c/10c (control wells)\r + - group: default variable for cell grouping (e.g. by condition)\r + \r + for pool-based libraries like MARSseq required additionally:\r + - pool: the pool ID comprises all cells from 1 library pool (i.e. a set of unique cell barcodes; the cell barcodes are re-used in other pools). Must be a unique substring of the input sample file name. For pool-based design, the pool ID is grebbed against the respective count data filename instead of the sample name as stated above.\r + - barcode: cell barcodes used as cell identifier in the count files. 
After merging the count data with targets.txt, the barcodes are replaced with sample IDs given in the sample column (i.e. here, sample names need not be a substring of input sample file name).\r +\r +### Programs required\r +- FastQC\r +- STAR\r +- Samtools\r +- Bedtools\r +- Subread\r +- Picard\r +- UCSC utilities\r +- RSeQC\r +- UMI-tools\r +- R\r +\r +## Resources\r +- QC: the [scater](http://bioconductor.org/packages/release/bioc/html/scater.html) package.\r +- Normalization: the [scran](http://bioconductor.org/packages/release/bioc/html/scran.html) package.\r +- Trajectory analysis (pseudotime): the [monocle](https://bioconductor.org/packages/release/bioc/html/monocle.html) package.\r +- A [tutorial](https://scrnaseq-course.cog.sanger.ac.uk/website/index.html) from Hemberg lab\r +- Luecken and Theis 2019 [Current best practices in single‐cell RNA‐seq analysis: a tutorial](https://www.embopress.org/doi/10.15252/msb.20188746)\r +\r +\r +""" ; + schema1:keywords "scRNA-seq, MARS-seq, bpipe, groovy" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "scRNA-seq MARS-seq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/61?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/972?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/circdna" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/circdna" ; + schema1:sdDatePublished "2024-07-12 13:22:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/972/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8586 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:45Z" ; + schema1:dateModified "2024-06-11T12:54:45Z" ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/972?version=7" ; + schema1:keywords "ampliconarchitect, ampliconsuite, circle-seq, circular, DNA, eccdna, ecdna, extrachromosomal-circular-dna, Genomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/circdna" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/972?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2021-12-20T17:04:47.628302" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-ont-artic-variant-calling" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-ont-artic-variant-calling/COVID-19-ARTIC-ONT" ; + schema1:sdDatePublished "2021-12-21 03:01:00 +0000" ; + schema1:softwareVersion "v0.3.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """\r +# BridgeDb tutorial: Gene HGNC name to Ensembl identifier\r +\r +This tutorial explains how to use the BridgeDb identifier mapping service to translate HGNC names to Ensembl identifiers. 
This step is part of the OpenRiskNet use case to link Adverse Outcome Pathways to [WikiPathways](https://wikipathways.org/).\r +\r +First we need to load the Python library to allow calls to the [BridgeDb REST webservice](http://bridgedb.prod.openrisknet.org/swagger/):\r +\r +\r +```python\r +import requests\r +```\r +\r +Let's assume we're interested in the gene with HGNC MECP2 (FIXME: look up a gene in AOPWiki), the API call to make mappings is given below as `callUrl`. Here, the `H` indicates that the query (`MECP2`) is an HGNC symbol:\r +\r +\r +```python\r +callUrl = 'http://bridgedb.prod.openrisknet.org/Human/xrefs/H/MECP2'\r +```\r +\r +The default call returns all identifiers, not just for Ensembl:\r +\r +\r +```python\r +response = requests.get(callUrl)\r +response.text\r +```\r +\r +\r +\r +\r + 'GO:0001964\\tGeneOntology\\nuc065cav.1\\tUCSC Genome Browser\\n312750\\tOMIM\\nGO:0042551\\tGeneOntology\\nuc065car.1\\tUCSC Genome Browser\\nA0A087X1U4\\tUniprot-TrEMBL\\n4204\\tWikiGenes\\nGO:0043524\\tGeneOntology\\nILMN_1702715\\tIllumina\\n34355_at\\tAffy\\nGO:0007268\\tGeneOntology\\nMECP2\\tHGNC\\nuc065caz.1\\tUCSC Genome Browser\\nA_33_P3339036\\tAgilent\\nGO:0006576\\tGeneOntology\\nuc065cbg.1\\tUCSC Genome Browser\\nGO:0006342\\tGeneOntology\\n300496\\tOMIM\\nGO:0035176\\tGeneOntology\\nuc065cbc.1\\tUCSC Genome Browser\\nGO:0033555\\tGeneOntology\\nGO:0045892\\tGeneOntology\\nA_23_P114361\\tAgilent\\nGO:0045893\\tGeneOntology\\nENSG00000169057\\tEnsembl\\nGO:0090063\\tGeneOntology\\nGO:0005515\\tGeneOntology\\nGO:0002087\\tGeneOntology\\nGO:0005634\\tGeneOntology\\nGO:0007416\\tGeneOntology\\nGO:0008104\\tGeneOntology\\nGO:0042826\\tGeneOntology\\nGO:0007420\\tGeneOntology\\nGO:0035067\\tGeneOntology\\n300005\\tOMIM\\nNP_001104262\\tRefSeq\\nA0A087WVW7\\tUniprot-TrEMBL\\nNP_004983\\tRefSeq\\nGO:0046470\\tGeneOntology\\nGO:0010385\\tGeneOntology\\n11722682_at\\tAffy\\nGO:0051965\\tGeneOntology\\nNM_001316337\\tRefSeq\\nuc065caw.1\\tUCSC Genome 
Browser\\nA0A0D9SFX7\\tUniprot-TrEMBL\\nA0A140VKC4\\tUniprot-TrEMBL\\nGO:0003723\\tGeneOntology\\nGO:0019233\\tGeneOntology\\nGO:0001666\\tGeneOntology\\nGO:0003729\\tGeneOntology\\nGO:0021591\\tGeneOntology\\nuc065cas.1\\tUCSC Genome Browser\\nGO:0019230\\tGeneOntology\\nGO:0003682\\tGeneOntology\\nGO:0001662\\tGeneOntology\\nuc065cbh.1\\tUCSC Genome Browser\\nX99687_at\\tAffy\\nGO:0008344\\tGeneOntology\\nGO:0009791\\tGeneOntology\\nuc065cbd.1\\tUCSC Genome Browser\\nGO:0019904\\tGeneOntology\\nGO:0030182\\tGeneOntology\\nGO:0035197\\tGeneOntology\\n8175998\\tAffy\\nGO:0016358\\tGeneOntology\\nNM_004992\\tRefSeq\\nGO:0003714\\tGeneOntology\\nGO:0005739\\tGeneOntology\\nGO:0005615\\tGeneOntology\\nGO:0005737\\tGeneOntology\\nuc004fjv.3\\tUCSC Genome Browser\\n202617_s_at\\tAffy\\nGO:0050905\\tGeneOntology\\nGO:0008327\\tGeneOntology\\nD3YJ43\\tUniprot-TrEMBL\\nGO:0003677\\tGeneOntology\\nGO:0006541\\tGeneOntology\\nGO:0040029\\tGeneOntology\\nA_33_P3317211\\tAgilent\\nNP_001303266\\tRefSeq\\n11722683_a_at\\tAffy\\nGO:0008211\\tGeneOntology\\nGO:0051151\\tGeneOntology\\nNM_001110792\\tRefSeq\\nX89430_at\\tAffy\\nGO:2000820\\tGeneOntology\\nuc065cat.1\\tUCSC Genome Browser\\nGO:0003700\\tGeneOntology\\nGO:0047485\\tGeneOntology\\n4204\\tEntrez Gene\\nGO:0009405\\tGeneOntology\\nA0A0D9SEX1\\tUniprot-TrEMBL\\nGO:0098794\\tGeneOntology\\n3C2I\\tPDB\\nHs.200716\\tUniGene\\nGO:0000792\\tGeneOntology\\nuc065cax.1\\tUCSC Genome Browser\\n300055\\tOMIM\\n5BT2\\tPDB\\nGO:0006020\\tGeneOntology\\nGO:0031175\\tGeneOntology\\nuc065cbe.1\\tUCSC Genome Browser\\nGO:0008284\\tGeneOntology\\nuc065cba.1\\tUCSC Genome Browser\\nGO:0060291\\tGeneOntology\\n202618_s_at\\tAffy\\nGO:0016573\\tGeneOntology\\n17115453\\tAffy\\nA0A1B0GTV0\\tUniprot-TrEMBL\\nuc065cbi.1\\tUCSC Genome Browser\\nGO:0048167\\tGeneOntology\\nGO:0007616\\tGeneOntology\\nGO:0016571\\tGeneOntology\\nuc004fjw.3\\tUCSC Genome 
Browser\\nGO:0007613\\tGeneOntology\\nGO:0007612\\tGeneOntology\\nGO:0021549\\tGeneOntology\\n11722684_a_at\\tAffy\\nGO:0001078\\tGeneOntology\\nX94628_rna1_s_at\\tAffy\\nGO:0007585\\tGeneOntology\\nGO:0010468\\tGeneOntology\\nGO:0031061\\tGeneOntology\\nA_24_P237486\\tAgilent\\nGO:0050884\\tGeneOntology\\nGO:0000930\\tGeneOntology\\nGO:0005829\\tGeneOntology\\nuc065cau.1\\tUCSC Genome Browser\\nH7BY72\\tUniprot-TrEMBL\\n202616_s_at\\tAffy\\nGO:0006355\\tGeneOntology\\nuc065cay.1\\tUCSC Genome Browser\\nGO:0010971\\tGeneOntology\\n300673\\tOMIM\\nGO:0008542\\tGeneOntology\\nGO:0060079\\tGeneOntology\\nuc065cbf.1\\tUCSC Genome Browser\\nGO:0006122\\tGeneOntology\\nuc065cbb.1\\tUCSC Genome Browser\\nGO:0007052\\tGeneOntology\\nC9JH89\\tUniprot-TrEMBL\\nB5MCB4\\tUniprot-TrEMBL\\nGO:0032048\\tGeneOntology\\nGO:0050432\\tGeneOntology\\nGO:0001976\\tGeneOntology\\nI6LM39\\tUniprot-TrEMBL\\nGO:0005813\\tGeneOntology\\nILMN_1682091\\tIllumina\\nP51608\\tUniprot-TrEMBL\\n1QK9\\tPDB\\nGO:0006349\\tGeneOntology\\nGO:1900114\\tGeneOntology\\nGO:0000122\\tGeneOntology\\nGO:0006351\\tGeneOntology\\nGO:0008134\\tGeneOntology\\nILMN_1824898\\tIllumina\\n300260\\tOMIM\\n0006510725\\tIllumina\\n'\r +\r +\r +\r +You can also see the results are returned as a TSV file, consisting of two columns, the identifier and the matching database.\r +\r +We will want to convert this reply into a Python dictionary (with the identifier as key, as one database may have multiple identifiers):\r +\r +\r +```python\r +lines = response.text.split("\\n")\r +mappings = {}\r +for line in lines:\r + if ('\\t' in line):\r + tuple = line.split('\\t')\r + identifier = tuple[0]\r + database = tuple[1]\r + if (database == "Ensembl"):\r + mappings[identifier] = database\r +\r +print(mappings)\r +```\r +\r + {'ENSG00000169057': 'Ensembl'}\r +\r +\r +Alternatively, we can restrivct the return values from the BridgeDb webservice to just return Ensembl identifiers (system code `En`). 
For this, we add the `?dataSource=En` call parameter:\r +\r +\r +```python\r +callUrl = 'http://bridgedb-swagger.prod.openrisknet.org/Human/xrefs/H/MECP2?dataSource=En'\r +response = requests.get(callUrl)\r +response.text\r +```\r +\r +\r +\r +\r + 'ENSG00000169057\\tEnsembl\\n'\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/326?version=2" ; + schema1:isBasedOn "https://github.com/OpenRiskNet/notebooks.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for BridgeDb tutorial: Gene HGNC name to Ensembl identifier" ; + schema1:sdDatePublished "2024-07-12 13:35:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/326/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8122 ; + schema1:creator , + ; + schema1:dateCreated "2022-04-06T13:09:58Z" ; + schema1:dateModified "2022-04-06T13:09:58Z" ; + schema1:description """\r +# BridgeDb tutorial: Gene HGNC name to Ensembl identifier\r +\r +This tutorial explains how to use the BridgeDb identifier mapping service to translate HGNC names to Ensembl identifiers. This step is part of the OpenRiskNet use case to link Adverse Outcome Pathways to [WikiPathways](https://wikipathways.org/).\r +\r +First we need to load the Python library to allow calls to the [BridgeDb REST webservice](http://bridgedb.prod.openrisknet.org/swagger/):\r +\r +\r +```python\r +import requests\r +```\r +\r +Let's assume we're interested in the gene with HGNC MECP2 (FIXME: look up a gene in AOPWiki), the API call to make mappings is given below as `callUrl`. 
Here, the `H` indicates that the query (`MECP2`) is an HGNC symbol:\r +\r +\r +```python\r +callUrl = 'http://bridgedb.prod.openrisknet.org/Human/xrefs/H/MECP2'\r +```\r +\r +The default call returns all identifiers, not just for Ensembl:\r +\r +\r +```python\r +response = requests.get(callUrl)\r +response.text\r +```\r +\r +\r +\r +\r + 'GO:0001964\\tGeneOntology\\nuc065cav.1\\tUCSC Genome Browser\\n312750\\tOMIM\\nGO:0042551\\tGeneOntology\\nuc065car.1\\tUCSC Genome Browser\\nA0A087X1U4\\tUniprot-TrEMBL\\n4204\\tWikiGenes\\nGO:0043524\\tGeneOntology\\nILMN_1702715\\tIllumina\\n34355_at\\tAffy\\nGO:0007268\\tGeneOntology\\nMECP2\\tHGNC\\nuc065caz.1\\tUCSC Genome Browser\\nA_33_P3339036\\tAgilent\\nGO:0006576\\tGeneOntology\\nuc065cbg.1\\tUCSC Genome Browser\\nGO:0006342\\tGeneOntology\\n300496\\tOMIM\\nGO:0035176\\tGeneOntology\\nuc065cbc.1\\tUCSC Genome Browser\\nGO:0033555\\tGeneOntology\\nGO:0045892\\tGeneOntology\\nA_23_P114361\\tAgilent\\nGO:0045893\\tGeneOntology\\nENSG00000169057\\tEnsembl\\nGO:0090063\\tGeneOntology\\nGO:0005515\\tGeneOntology\\nGO:0002087\\tGeneOntology\\nGO:0005634\\tGeneOntology\\nGO:0007416\\tGeneOntology\\nGO:0008104\\tGeneOntology\\nGO:0042826\\tGeneOntology\\nGO:0007420\\tGeneOntology\\nGO:0035067\\tGeneOntology\\n300005\\tOMIM\\nNP_001104262\\tRefSeq\\nA0A087WVW7\\tUniprot-TrEMBL\\nNP_004983\\tRefSeq\\nGO:0046470\\tGeneOntology\\nGO:0010385\\tGeneOntology\\n11722682_at\\tAffy\\nGO:0051965\\tGeneOntology\\nNM_001316337\\tRefSeq\\nuc065caw.1\\tUCSC Genome Browser\\nA0A0D9SFX7\\tUniprot-TrEMBL\\nA0A140VKC4\\tUniprot-TrEMBL\\nGO:0003723\\tGeneOntology\\nGO:0019233\\tGeneOntology\\nGO:0001666\\tGeneOntology\\nGO:0003729\\tGeneOntology\\nGO:0021591\\tGeneOntology\\nuc065cas.1\\tUCSC Genome Browser\\nGO:0019230\\tGeneOntology\\nGO:0003682\\tGeneOntology\\nGO:0001662\\tGeneOntology\\nuc065cbh.1\\tUCSC Genome Browser\\nX99687_at\\tAffy\\nGO:0008344\\tGeneOntology\\nGO:0009791\\tGeneOntology\\nuc065cbd.1\\tUCSC Genome 
Browser\\nGO:0019904\\tGeneOntology\\nGO:0030182\\tGeneOntology\\nGO:0035197\\tGeneOntology\\n8175998\\tAffy\\nGO:0016358\\tGeneOntology\\nNM_004992\\tRefSeq\\nGO:0003714\\tGeneOntology\\nGO:0005739\\tGeneOntology\\nGO:0005615\\tGeneOntology\\nGO:0005737\\tGeneOntology\\nuc004fjv.3\\tUCSC Genome Browser\\n202617_s_at\\tAffy\\nGO:0050905\\tGeneOntology\\nGO:0008327\\tGeneOntology\\nD3YJ43\\tUniprot-TrEMBL\\nGO:0003677\\tGeneOntology\\nGO:0006541\\tGeneOntology\\nGO:0040029\\tGeneOntology\\nA_33_P3317211\\tAgilent\\nNP_001303266\\tRefSeq\\n11722683_a_at\\tAffy\\nGO:0008211\\tGeneOntology\\nGO:0051151\\tGeneOntology\\nNM_001110792\\tRefSeq\\nX89430_at\\tAffy\\nGO:2000820\\tGeneOntology\\nuc065cat.1\\tUCSC Genome Browser\\nGO:0003700\\tGeneOntology\\nGO:0047485\\tGeneOntology\\n4204\\tEntrez Gene\\nGO:0009405\\tGeneOntology\\nA0A0D9SEX1\\tUniprot-TrEMBL\\nGO:0098794\\tGeneOntology\\n3C2I\\tPDB\\nHs.200716\\tUniGene\\nGO:0000792\\tGeneOntology\\nuc065cax.1\\tUCSC Genome Browser\\n300055\\tOMIM\\n5BT2\\tPDB\\nGO:0006020\\tGeneOntology\\nGO:0031175\\tGeneOntology\\nuc065cbe.1\\tUCSC Genome Browser\\nGO:0008284\\tGeneOntology\\nuc065cba.1\\tUCSC Genome Browser\\nGO:0060291\\tGeneOntology\\n202618_s_at\\tAffy\\nGO:0016573\\tGeneOntology\\n17115453\\tAffy\\nA0A1B0GTV0\\tUniprot-TrEMBL\\nuc065cbi.1\\tUCSC Genome Browser\\nGO:0048167\\tGeneOntology\\nGO:0007616\\tGeneOntology\\nGO:0016571\\tGeneOntology\\nuc004fjw.3\\tUCSC Genome Browser\\nGO:0007613\\tGeneOntology\\nGO:0007612\\tGeneOntology\\nGO:0021549\\tGeneOntology\\n11722684_a_at\\tAffy\\nGO:0001078\\tGeneOntology\\nX94628_rna1_s_at\\tAffy\\nGO:0007585\\tGeneOntology\\nGO:0010468\\tGeneOntology\\nGO:0031061\\tGeneOntology\\nA_24_P237486\\tAgilent\\nGO:0050884\\tGeneOntology\\nGO:0000930\\tGeneOntology\\nGO:0005829\\tGeneOntology\\nuc065cau.1\\tUCSC Genome Browser\\nH7BY72\\tUniprot-TrEMBL\\n202616_s_at\\tAffy\\nGO:0006355\\tGeneOntology\\nuc065cay.1\\tUCSC Genome 
Browser\\nGO:0010971\\tGeneOntology\\n300673\\tOMIM\\nGO:0008542\\tGeneOntology\\nGO:0060079\\tGeneOntology\\nuc065cbf.1\\tUCSC Genome Browser\\nGO:0006122\\tGeneOntology\\nuc065cbb.1\\tUCSC Genome Browser\\nGO:0007052\\tGeneOntology\\nC9JH89\\tUniprot-TrEMBL\\nB5MCB4\\tUniprot-TrEMBL\\nGO:0032048\\tGeneOntology\\nGO:0050432\\tGeneOntology\\nGO:0001976\\tGeneOntology\\nI6LM39\\tUniprot-TrEMBL\\nGO:0005813\\tGeneOntology\\nILMN_1682091\\tIllumina\\nP51608\\tUniprot-TrEMBL\\n1QK9\\tPDB\\nGO:0006349\\tGeneOntology\\nGO:1900114\\tGeneOntology\\nGO:0000122\\tGeneOntology\\nGO:0006351\\tGeneOntology\\nGO:0008134\\tGeneOntology\\nILMN_1824898\\tIllumina\\n300260\\tOMIM\\n0006510725\\tIllumina\\n'\r +\r +\r +\r +You can also see the results are returned as a TSV file, consisting of two columns, the identifier and the matching database.\r +\r +We will want to convert this reply into a Python dictionary (with the identifier as key, as one database may have multiple identifiers):\r +\r +\r +```python\r +lines = response.text.split("\\n")\r +mappings = {}\r +for line in lines:\r + if ('\\t' in line):\r + tuple = line.split('\\t')\r + identifier = tuple[0]\r + database = tuple[1]\r + if (database == "Ensembl"):\r + mappings[identifier] = database\r +\r +print(mappings)\r +```\r +\r + {'ENSG00000169057': 'Ensembl'}\r +\r +\r +Alternatively, we can restrivct the return values from the BridgeDb webservice to just return Ensembl identifiers (system code `En`). 
For this, we add the `?dataSource=En` call parameter:\r +\r +\r +```python\r +callUrl = 'http://bridgedb-swagger.prod.openrisknet.org/Human/xrefs/H/MECP2?dataSource=En'\r +response = requests.get(callUrl)\r +response.text\r +```\r +\r +\r +\r +\r + 'ENSG00000169057\\tEnsembl\\n'\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/326?version=2" ; + schema1:keywords "Toxicology, jupyter" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "BridgeDb tutorial: Gene HGNC name to Ensembl identifier" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/326?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + schema1:datePublished "2024-03-06T19:03:19.804236" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Purge-duplicate-contigs-VGP6/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicate-contigs-VGP6/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:version "0.3.5" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + ; + schema1:description "This workflow generates a file describing the active site of the protein for each of the fragment screening crystal structures using rDock s rbcavity. It also creates a single hybrid molecule that contains all the ligands - the \"frankenstein\" ligand. 
More info can be found at https://covid19.galaxyproject.org/cheminformatics/" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/13?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Cheminformatics - Active site generation" ; + schema1:sdDatePublished "2024-07-12 13:37:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/13/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1441 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5287 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T14:25:46Z" ; + schema1:dateModified "2023-01-16T13:40:43Z" ; + schema1:description "This workflow generates a file describing the active site of the protein for each of the fragment screening crystal structures using rDock s rbcavity. It also creates a single hybrid molecule that contains all the ligands - the \"frankenstein\" ligand. More info can be found at https://covid19.galaxyproject.org/cheminformatics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Cheminformatics - Active site generation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/13?version=1" ; + schema1:version 1 ; + ns1:input , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 3534 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """
\r +\r +drawing\r +\r +
\r +\r +MultiAffinity enables the study of how gene dysregulation propagates on a multilayer network on a disease of interest, uncovering key genes. Find the detailed documentation for the tool [here](https://marbatlle.github.io/multiAffinity/).\r +\r +![alt](https://github.com/marbatlle/multiAffinity/raw/main/docs/img/multiAffinity_workflow.png)""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/250?version=1" ; + schema1:isBasedOn "https://github.com/inab/ipc_workflows/tree/multiaffinity-20220318/multiAffinity" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for multiAffinity" ; + schema1:sdDatePublished "2024-07-12 13:33:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/250/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2562 ; + schema1:creator , + ; + schema1:dateCreated "2021-12-14T10:01:12Z" ; + schema1:dateModified "2023-04-21T11:42:40Z" ; + schema1:description """
\r +\r +drawing\r +\r +
\r +\r +MultiAffinity enables the study of how gene dysregulation propagates on a multilayer network on a disease of interest, uncovering key genes. Find the detailed documentation for the tool [here](https://marbatlle.github.io/multiAffinity/).\r +\r +![alt](https://github.com/marbatlle/multiAffinity/raw/main/docs/img/multiAffinity_workflow.png)""" ; + schema1:image ; + schema1:keywords "cancer, pediatric, rna-seq, networks, community-detection" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "multiAffinity" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/250?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 13027 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r 
+Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.260.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_md_setup/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Amber Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:36:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/260/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 153854 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 25801 ; + schema1:creator , + ; + schema1:dateCreated "2022-01-10T13:24:19Z" ; + schema1:dateModified "2023-06-08T07:26:22Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona 
Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/260?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Amber Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/260?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 6240 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """\r +\r +# MoMofy\r +Module for integrative Mobilome prediction\r +\r +\r +\r +Bacteria can acquire genetic material through horizontal gene transfer, allowing them to rapidly adapt to changing environmental conditions. These mobile genetic elements can be classified into three main categories: plasmids, phages, and integrons. 
Autonomous elements are those capable of excising themselves from the chromosome, reintegrating elsewhere, and potentially modifying the host's physiology. Small integrative elements like insertion sequences usually contain one or two genes and are frequently present in multiple copies in the genome, whereas large elements like integrative conjugative elements, often carry multiple cargo genes. The acquisition of large mobile genetic elements may provide genes for defence against other mobile genetic elements or impart new metabolic capabilities to the host.\r +\r +MoMofy is a wrapper that integrates the output of different tools designed for the prediction of autonomous integrative mobile genetic elements in prokaryotic genomes and metagenomes. \r +\r +## Contents\r +- [ Workflow ](#wf)\r +- [ Setup ](#sp)\r +- [ MoMofy install and dependencies ](#install)\r +- [ Usage ](#usage)\r +- [ Inputs ](#in)\r +- [ Outputs ](#out)\r +- [ Tests ](#test)\r +- [ Performance ](#profile)\r +- [ Citation ](#cite)\r +\r +\r +\r +## Workflow\r +\r +\r +\r +\r +\r +## Setup\r +\r +This workflow is built using [Nextflow](https://www.nextflow.io/). It uses Singularity containers making installation trivial and results highly reproducible.\r +Explained in this section, there is one manual step required to build the singularity image for [ICEfinder](https://bioinfo-mml.sjtu.edu.cn/ICEfinder/index.php), as we can't distribute that software due to license issues.\r +\r +- Install [Nextflow version >=21.10](https://www.nextflow.io/docs/latest/getstarted.html#installation)\r +- Install [Singularity](https://github.com/apptainer/singularity/blob/master/INSTALL.md)\r +\r +\r +## MoMofy install and dependencies\r +\r +To install MoMofy, clone this repo by:\r +\r +```bash\r +$ git clone https://github.com/EBI-Metagenomics/momofy.git\r +```\r +\r +The mobileOG-database is required to run an extra step of annotation on the mobilome coding sequences. 
The first time you run MoMofy, you will need to download the [Beatrix 1.6 v1](https://mobileogdb.flsi.cloud.vt.edu/entries/database_download) database, move the tarball to `/PATH/momofy/databases`, decompress it, and run the script to format the db for diamond:\r +\r +```bash\r +$ mv beatrix-1-6_v1_all.zip /PATH/momofy/databases\r +$ cd /PATH/momofy/databases\r +$ unzip beatrix-1-6_v1_all.zip\r +$ nextflow run /PATH/momofy/format_mobileOG.nf\r +```\r +\r +Most of the tools are available on [quay.io](https://quay.io) and no install is needed. \r +\r +In the case of ICEfinder, you will need to contact the author to get a copy of the software, visit the [ICEfinder website](https://bioinfo-mml.sjtu.edu.cn/ICEfinder/download.html) for more information. Once you have the `ICEfinder_linux.tar.gz` tarball, move it to `momofy/templates` and build the singularity image using the following command:\r +\r +```bash\r +$ mv ICEfinder_linux.tar.gz /PATH/momofy/templates/\r +$ cd /PATH/momofy/templates/\r +$ sudo singularity build ../../singularity/icefinder-v1.0-local.sif icefinder-v1.0-local.def\r +```\r +\r +PaliDIS is an optional step on the workflow and the install is optional as well. 
Visit [PaliDIS repo](https://github.com/blue-moon22/PaliDIS) for installing instructions.\r +\r +If you aim to run the pipeline on a system with a job scheduler such as LSF or SGE, set up a config file and provide it as part of the arguments as follows:\r +\r +```bash\r +$ nextflow run /PATH/momofy/momofy.nf --assembly contigs.fasta -c /PATH/configs/some_cluster.config\r +```\r +\r +You can find an example in the `configs` directory of this repo.\r +\r +\r +\r +## Usage\r +\r +Running the tool with `--help` option will display the following message:\r +\r +```bash\r +$ nextflow run /PATH/momofy/momofy.nf --help\r +N E X T F L O W ~ version 21.10.0\r +Launching `momofy.nf` [gigantic_pare] - revision: XXXXX\r +\r + MoMofy is a wraper that integrates the ouptput of different tools designed for the prediction of autonomous integrative mobile genetic elements in prokaryotic genomes and metagenomes.\r +\r + Usage:\r + The basic command for running the pipeline is as follows:\r +\r + nextflow run momofy.nf --assembly contigs.fasta\r +\r + Mandatory arguments:\r + --assembly (Meta)genomic assembly in fasta format (uncompress)\r +\r + Optional arguments:\r + --user_genes User annotation files. See --prot_fasta and --prot_gff [ default = false ]\r + --prot_gff Annotation file in GFF3 format. Mandatory with --user_genes true\r + --prot_fasta Fasta file of aminoacids. Mandatory with --user_genes true\r + --palidis Incorporate PaliDIS predictions to final output [ default = false ]\r + --palidis_fasta Fasta file of PaliDIS insertion sequences. Mandatory with --palidis true\r + --palidis_info Information file of PaliDIS insertion sequences. 
Mandatory with --palidis true\r + --gff_validation Run a step of format validation on the GFF3 file output [ default = true ]\r + --outdir Output directory to place final MoMofy results [ default = MoMofy_results ]\r + --help This usage statement [ default = false ]\r +```\r +\r +\r +## Inputs\r +\r +To run MoMofy in multiple samples, create a directory per sample and launch the tool from the sample directory. The only mandatory input is the (meta)genomic assembly file in fasta format (uncompress).\r +\r +Basic run:\r +\r +```bash\r +$ nextflow run /PATH/momofy/momofy.nf --assembly contigs.fasta\r +```\r +\r +Note that the final output in gff format is created by adding information to PROKKA output. If you have your own protein prediction files, provide the gff and the fasta file of amino acid sequences (both uncompressed files are mandatory with this option). These files will be used for Diamond annotation and CDS coordinates mapping to the MGEs boundaries. If any original annotation is present in the gff file, it will remained untouched.\r +\r +Running MoMofy with user's genes prediction:\r +\r +```bash\r +$ nextflow run /PATH/momofy/momofy.nf --assembly contigs.fasta \\\r + --user_genes true \\\r + --prot_fasta proteins.faa \\\r + --prot_gff annotation.gff \\\r +```\r +\r +If you want to incorporate PaliDIS predictions to the final output, provide the path of the two outputs of PaliDIS (fasta file of insertion sequences and the information for each insertion sequence file).\r +\r +To run MoMofy incorporating PaliDIS results:\r +\r +```bash\r +$ nextflow run /PATH/momofy/momofy.nf --assembly contigs.fasta \\\r + --palidis true \\\r + --palidis_fasta insertion_sequences.fasta \\\r + --palidis_info insertion_sequences_info.txt \\\r +```\r +\r +Then, if you have protein files and PaliDIS outputs, you can run:\r +\r +```bash\r +$ nextflow run /PATH/momofy/momofy.nf --assembly contigs.fasta \\\r + --user_genes true \\\r + --prot_fasta proteins.faa \\\r + --prot_gff 
annotation.gff \\\r + --palidis true \\\r + --palidis_fasta insertion_sequences.fasta \\\r + --palidis_info insertion_sequences_info.txt \\\r +```\r +\r +A GFF validation process is used to detect formatting errors in the final GFF3 output. This process can be skipped adding `--gff_validation false` to the command.\r +\r +\r +\r +## Outputs\r +\r +Results will be written by default in the `MoMofy_results` directory inside the sample dir unless the user define `--outdir` option. There you will find the following output files:\r +\r +```bash\r +MoMofy_results/\r +├── discarded_mge.txt\r +├── momofy_predictions.fna\r +├── momofy_predictions.gff\r +└── nested_integrons.txt\r +```\r +\r +The main MoMofy output files are the `momofy_predictions.fna` containing the nucleotide sequences of every prediction, and the `momofy_predictions.gff` containing the mobilome annotation plus any other feature annotated by PROKKA or in the gff file provided by the user with the option `--user_genes`. The labels used in the Type column of the gff file corresponds to the following nomenclature according to the [Sequence Ontology resource](http://www.sequenceontology.org/browser/current_svn/term/SO:0000001):\r +\r +| Type in gff file | Sequence ontology ID | Element description | Reporting tool |\r +| ------------- | ------------- | ------------- | ------------- |\r +| insertion_sequence | [SO:0000973](http://www.sequenceontology.org/browser/current_svn/term/SO:0000973) | Insertion sequence | ISEScan, PaliDIS |\r +| terminal_inverted_repeat_element | [SO:0000481](http://www.sequenceontology.org/browser/current_svn/term/SO:0000481) | Terminal Inverted Repeat (TIR) flanking insertion sequences | ISEScan, PaliDIS |\r +| integron | [SO:0000365](http://www.sequenceontology.org/browser/current_svn/term/SO:0000365) | Integrative mobilizable element | IntegronFinder, ICEfinder |\r +| attC_site | [SO:0000950](http://www.sequenceontology.org/browser/current_svn/term/SO:0000950) | Integration site of 
DNA integron | IntegronFinder |\r +| conjugative_transposon | [SO:0000371](http://www.sequenceontology.org/browser/current_svn/term/SO:0000371) | Integrative Conjugative Element | ICEfinder |\r +| direct_repeat | [SO:0000314](http://www.sequenceontology.org/browser/current_svn/term/SO:0000371) | Flanking regions on mobilizable elements | ICEfinder |\r +| CDS | [SO:0000316](http://www.sequenceontology.org/browser/current_svn/term/SO:0000316) | Coding sequence | Prodigal |\r +\r +\r +The file `discarded_mge.txt` contains a list of predictions that were discarded, along with the reason for their exclusion. Possible reasons include:\r +\r +1. overlapping For insertion sequences only, ISEScan prediction is discarded if an overlap with PaliDIS is found. \r +2. mge<500bp Discarded by length.\r +3. no_cds If there are no genes encoded in the prediction.\r +\r +The file `nested_integrons.txt` is a report of overlapping predictions reported by IntegronFinder and ICEfinder. No predictions are discarded in this case.\r +\r +Additionally, you will see the directories containing the main outputs of each tool.\r +\r +\r +## Tests\r +\r +Nextflow tests are executed with [nf-test](https://github.com/askimed/nf-test). It takes around 3 min in executing.\r +\r +Run:\r +\r +```bash\r +$ cd /PATH/momofy\r +$ nf-test test *.nf.test\r +```\r +\r +\r +## Performance\r +\r +MoMofy performance was profiled using 460 public metagenomic assemblies and co-assemblies of chicken gut (ERP122587, ERP125074, and ERP131894) with sizes ranging from ~62 K to ~893 M assembled bases. We used the metagenomic assemblies, CDS prediction and annotation files generated by MGnify v5 pipeline, and PaliDIS outputs generated after downsampling the number of reads to 10 M. MoMofy was run adding the following options: `-with-report -with-trace -with-timeline timeline.out`.\r +\r +\r +

\r + \r +

\r +

\r + \r + \r +

\r +\r +\r +\r +## Citation\r +\r +If you use MoMofy on your data analysis, please cite:\r +\r +XXXXX\r +\r +\r +MoMofy is a wrapper that integrates the output of the following tools and DBs:\r +\r +1) ISEScan v1.7.2.3 [Xie et al., Bioinformatics, 2017](https://doi.org/10.1093/bioinformatics/btx433)\r +2) IntegronFinder2 v2.0.2 [Néron et al., Microorganisms, 2022](https://doi.org/10.3390/microorganisms10040700)\r +3) ICEfinder v1.0 [Liu et al., Nucleic Acids Research, 2019](https://doi.org/10.1093/nar/gky1123)\r +4) PaliDIS [Carr et al., biorxiv, 2022](https://doi.org/10.1101/2022.06.27.497710)\r +\r +Databases:\r +- MobileOG-DB Beatrix 1.6 v1 [Brown et al., Appl Environ Microbiol, 2022](https://doi.org/10.1128/aem.00991-22)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/452?version=1" ; + schema1:isBasedOn "https://github.com/EBI-Metagenomics/momofy.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for MoMofy: Module for integrative Mobilome prediction" ; + schema1:sdDatePublished "2024-07-12 13:34:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/452/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 121232 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3473 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-06T09:40:57Z" ; + schema1:dateModified "2023-04-12T08:14:09Z" ; + schema1:description """\r +\r +# MoMofy\r +Module for integrative Mobilome prediction\r +\r +\r +\r +Bacteria can acquire genetic material through horizontal gene transfer, allowing them to rapidly adapt to changing environmental conditions. These mobile genetic elements can be classified into three main categories: plasmids, phages, and integrons. 
Autonomous elements are those capable of excising themselves from the chromosome, reintegrating elsewhere, and potentially modifying the host's physiology. Small integrative elements like insertion sequences usually contain one or two genes and are frequently present in multiple copies in the genome, whereas large elements like integrative conjugative elements, often carry multiple cargo genes. The acquisition of large mobile genetic elements may provide genes for defence against other mobile genetic elements or impart new metabolic capabilities to the host.\r +\r +MoMofy is a wrapper that integrates the output of different tools designed for the prediction of autonomous integrative mobile genetic elements in prokaryotic genomes and metagenomes. \r +\r +## Contents\r +- [ Workflow ](#wf)\r +- [ Setup ](#sp)\r +- [ MoMofy install and dependencies ](#install)\r +- [ Usage ](#usage)\r +- [ Inputs ](#in)\r +- [ Outputs ](#out)\r +- [ Tests ](#test)\r +- [ Performance ](#profile)\r +- [ Citation ](#cite)\r +\r +\r +\r +## Workflow\r +\r +\r +\r +\r +\r +## Setup\r +\r +This workflow is built using [Nextflow](https://www.nextflow.io/). It uses Singularity containers making installation trivial and results highly reproducible.\r +Explained in this section, there is one manual step required to build the singularity image for [ICEfinder](https://bioinfo-mml.sjtu.edu.cn/ICEfinder/index.php), as we can't distribute that software due to license issues.\r +\r +- Install [Nextflow version >=21.10](https://www.nextflow.io/docs/latest/getstarted.html#installation)\r +- Install [Singularity](https://github.com/apptainer/singularity/blob/master/INSTALL.md)\r +\r +\r +## MoMofy install and dependencies\r +\r +To install MoMofy, clone this repo by:\r +\r +```bash\r +$ git clone https://github.com/EBI-Metagenomics/momofy.git\r +```\r +\r +The mobileOG-database is required to run an extra step of annotation on the mobilome coding sequences. 
The first time you run MoMofy, you will need to download the [Beatrix 1.6 v1](https://mobileogdb.flsi.cloud.vt.edu/entries/database_download) database, move the tarball to `/PATH/momofy/databases`, decompress it, and run the script to format the db for diamond:\r +\r +```bash\r +$ mv beatrix-1-6_v1_all.zip /PATH/momofy/databases\r +$ cd /PATH/momofy/databases\r +$ unzip beatrix-1-6_v1_all.zip\r +$ nextflow run /PATH/momofy/format_mobileOG.nf\r +```\r +\r +Most of the tools are available on [quay.io](https://quay.io) and no install is needed. \r +\r +In the case of ICEfinder, you will need to contact the author to get a copy of the software, visit the [ICEfinder website](https://bioinfo-mml.sjtu.edu.cn/ICEfinder/download.html) for more information. Once you have the `ICEfinder_linux.tar.gz` tarball, move it to `momofy/templates` and build the singularity image using the following command:\r +\r +```bash\r +$ mv ICEfinder_linux.tar.gz /PATH/momofy/templates/\r +$ cd /PATH/momofy/templates/\r +$ sudo singularity build ../../singularity/icefinder-v1.0-local.sif icefinder-v1.0-local.def\r +```\r +\r +PaliDIS is an optional step on the workflow and the install is optional as well. 
Visit [PaliDIS repo](https://github.com/blue-moon22/PaliDIS) for installing instructions.\r +\r +If you aim to run the pipeline on a system with a job scheduler such as LSF or SGE, set up a config file and provide it as part of the arguments as follows:\r +\r +```bash\r +$ nextflow run /PATH/momofy/momofy.nf --assembly contigs.fasta -c /PATH/configs/some_cluster.config\r +```\r +\r +You can find an example in the `configs` directory of this repo.\r +\r +\r +\r +## Usage\r +\r +Running the tool with `--help` option will display the following message:\r +\r +```bash\r +$ nextflow run /PATH/momofy/momofy.nf --help\r +N E X T F L O W ~ version 21.10.0\r +Launching `momofy.nf` [gigantic_pare] - revision: XXXXX\r +\r + MoMofy is a wraper that integrates the ouptput of different tools designed for the prediction of autonomous integrative mobile genetic elements in prokaryotic genomes and metagenomes.\r +\r + Usage:\r + The basic command for running the pipeline is as follows:\r +\r + nextflow run momofy.nf --assembly contigs.fasta\r +\r + Mandatory arguments:\r + --assembly (Meta)genomic assembly in fasta format (uncompress)\r +\r + Optional arguments:\r + --user_genes User annotation files. See --prot_fasta and --prot_gff [ default = false ]\r + --prot_gff Annotation file in GFF3 format. Mandatory with --user_genes true\r + --prot_fasta Fasta file of aminoacids. Mandatory with --user_genes true\r + --palidis Incorporate PaliDIS predictions to final output [ default = false ]\r + --palidis_fasta Fasta file of PaliDIS insertion sequences. Mandatory with --palidis true\r + --palidis_info Information file of PaliDIS insertion sequences. 
Mandatory with --palidis true\r + --gff_validation Run a step of format validation on the GFF3 file output [ default = true ]\r + --outdir Output directory to place final MoMofy results [ default = MoMofy_results ]\r + --help This usage statement [ default = false ]\r +```\r +\r +\r +## Inputs\r +\r +To run MoMofy in multiple samples, create a directory per sample and launch the tool from the sample directory. The only mandatory input is the (meta)genomic assembly file in fasta format (uncompress).\r +\r +Basic run:\r +\r +```bash\r +$ nextflow run /PATH/momofy/momofy.nf --assembly contigs.fasta\r +```\r +\r +Note that the final output in gff format is created by adding information to PROKKA output. If you have your own protein prediction files, provide the gff and the fasta file of amino acid sequences (both uncompressed files are mandatory with this option). These files will be used for Diamond annotation and CDS coordinates mapping to the MGEs boundaries. If any original annotation is present in the gff file, it will remained untouched.\r +\r +Running MoMofy with user's genes prediction:\r +\r +```bash\r +$ nextflow run /PATH/momofy/momofy.nf --assembly contigs.fasta \\\r + --user_genes true \\\r + --prot_fasta proteins.faa \\\r + --prot_gff annotation.gff \\\r +```\r +\r +If you want to incorporate PaliDIS predictions to the final output, provide the path of the two outputs of PaliDIS (fasta file of insertion sequences and the information for each insertion sequence file).\r +\r +To run MoMofy incorporating PaliDIS results:\r +\r +```bash\r +$ nextflow run /PATH/momofy/momofy.nf --assembly contigs.fasta \\\r + --palidis true \\\r + --palidis_fasta insertion_sequences.fasta \\\r + --palidis_info insertion_sequences_info.txt \\\r +```\r +\r +Then, if you have protein files and PaliDIS outputs, you can run:\r +\r +```bash\r +$ nextflow run /PATH/momofy/momofy.nf --assembly contigs.fasta \\\r + --user_genes true \\\r + --prot_fasta proteins.faa \\\r + --prot_gff 
annotation.gff \\\r + --palidis true \\\r + --palidis_fasta insertion_sequences.fasta \\\r + --palidis_info insertion_sequences_info.txt \\\r +```\r +\r +A GFF validation process is used to detect formatting errors in the final GFF3 output. This process can be skipped adding `--gff_validation false` to the command.\r +\r +\r +\r +## Outputs\r +\r +Results will be written by default in the `MoMofy_results` directory inside the sample dir unless the user define `--outdir` option. There you will find the following output files:\r +\r +```bash\r +MoMofy_results/\r +├── discarded_mge.txt\r +├── momofy_predictions.fna\r +├── momofy_predictions.gff\r +└── nested_integrons.txt\r +```\r +\r +The main MoMofy output files are the `momofy_predictions.fna` containing the nucleotide sequences of every prediction, and the `momofy_predictions.gff` containing the mobilome annotation plus any other feature annotated by PROKKA or in the gff file provided by the user with the option `--user_genes`. The labels used in the Type column of the gff file corresponds to the following nomenclature according to the [Sequence Ontology resource](http://www.sequenceontology.org/browser/current_svn/term/SO:0000001):\r +\r +| Type in gff file | Sequence ontology ID | Element description | Reporting tool |\r +| ------------- | ------------- | ------------- | ------------- |\r +| insertion_sequence | [SO:0000973](http://www.sequenceontology.org/browser/current_svn/term/SO:0000973) | Insertion sequence | ISEScan, PaliDIS |\r +| terminal_inverted_repeat_element | [SO:0000481](http://www.sequenceontology.org/browser/current_svn/term/SO:0000481) | Terminal Inverted Repeat (TIR) flanking insertion sequences | ISEScan, PaliDIS |\r +| integron | [SO:0000365](http://www.sequenceontology.org/browser/current_svn/term/SO:0000365) | Integrative mobilizable element | IntegronFinder, ICEfinder |\r +| attC_site | [SO:0000950](http://www.sequenceontology.org/browser/current_svn/term/SO:0000950) | Integration site of 
DNA integron | IntegronFinder |\r +| conjugative_transposon | [SO:0000371](http://www.sequenceontology.org/browser/current_svn/term/SO:0000371) | Integrative Conjugative Element | ICEfinder |\r +| direct_repeat | [SO:0000314](http://www.sequenceontology.org/browser/current_svn/term/SO:0000371) | Flanking regions on mobilizable elements | ICEfinder |\r +| CDS | [SO:0000316](http://www.sequenceontology.org/browser/current_svn/term/SO:0000316) | Coding sequence | Prodigal |\r +\r +\r +The file `discarded_mge.txt` contains a list of predictions that were discarded, along with the reason for their exclusion. Possible reasons include:\r +\r +1. overlapping For insertion sequences only, ISEScan prediction is discarded if an overlap with PaliDIS is found. \r +2. mge<500bp Discarded by length.\r +3. no_cds If there are no genes encoded in the prediction.\r +\r +The file `nested_integrons.txt` is a report of overlapping predictions reported by IntegronFinder and ICEfinder. No predictions are discarded in this case.\r +\r +Additionally, you will see the directories containing the main outputs of each tool.\r +\r +\r +## Tests\r +\r +Nextflow tests are executed with [nf-test](https://github.com/askimed/nf-test). It takes around 3 min in executing.\r +\r +Run:\r +\r +```bash\r +$ cd /PATH/momofy\r +$ nf-test test *.nf.test\r +```\r +\r +\r +## Performance\r +\r +MoMofy performance was profiled using 460 public metagenomic assemblies and co-assemblies of chicken gut (ERP122587, ERP125074, and ERP131894) with sizes ranging from ~62 K to ~893 M assembled bases. We used the metagenomic assemblies, CDS prediction and annotation files generated by MGnify v5 pipeline, and PaliDIS outputs generated after downsampling the number of reads to 10 M. MoMofy was run adding the following options: `-with-report -with-trace -with-timeline timeline.out`.\r +\r +\r +

\r + \r +

\r +

\r + \r + \r +

\r +\r +\r +\r +## Citation\r +\r +If you use MoMofy on your data analysis, please cite:\r +\r +XXXXX\r +\r +\r +MoMofy is a wrapper that integrates the output of the following tools and DBs:\r +\r +1) ISEScan v1.7.2.3 [Xie et al., Bioinformatics, 2017](https://doi.org/10.1093/bioinformatics/btx433)\r +2) IntegronFinder2 v2.0.2 [Néron et al., Microorganisms, 2022](https://doi.org/10.3390/microorganisms10040700)\r +3) ICEfinder v1.0 [Liu et al., Nucleic Acids Research, 2019](https://doi.org/10.1093/nar/gky1123)\r +4) PaliDIS [Carr et al., biorxiv, 2022](https://doi.org/10.1101/2022.06.27.497710)\r +\r +Databases:\r +- MobileOG-DB Beatrix 1.6 v1 [Brown et al., Appl Environ Microbiol, 2022](https://doi.org/10.1128/aem.00991-22)\r +""" ; + schema1:image ; + schema1:keywords "Mobilome, Genomics, Metagenomics, Nextflow, MGE" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "MoMofy: Module for integrative Mobilome prediction" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/452?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Pre-processing of mass spectrometry-based metabolomics data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/57?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/metaboigniter" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/metaboigniter" ; + schema1:sdDatePublished "2024-07-12 13:22:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/57/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 23140 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:00Z" ; + schema1:dateModified "2024-06-11T12:55:00Z" ; + schema1:description "Pre-processing of mass spectrometry-based metabolomics data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/57?version=3" ; + schema1:keywords "Metabolomics, identification, quantification, mass-spectrometry, ms1, ms2" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/metaboigniter" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/57?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "B/T cell repertoire analysis pipeline with immcantation framework. WIP, currently requires a bunch of changes first." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/963?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/airrflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bcellmagic" ; + schema1:sdDatePublished "2024-07-12 13:22:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/963/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4303 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:36Z" ; + schema1:dateModified "2024-06-11T12:54:36Z" ; + schema1:description "B/T cell repertoire analysis pipeline with immcantation framework. WIP, currently requires a bunch of changes first." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/963?version=13" ; + schema1:keywords "airr, b-cell, immcantation, immunorepertoire, repseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bcellmagic" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/963?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-04-29T07:22:37.314835" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "cutandrun/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 
2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.285.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_abc_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python ABC MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/285/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8001 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-23T12:41:27Z" ; + schema1:dateModified "2023-04-14T08:43:57Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in 
Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/285?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python ABC MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_abc_md_setup/python/workflow.py" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-ligand-parameterization).\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.294.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_ligand_parameterization/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy GMX Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/294/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8541 ; + schema1:creator , + ; + schema1:dateCreated "2023-05-03T13:44:01Z" ; + schema1:dateModified "2023-05-03T13:45:13Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. 
Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-ligand-parameterization).\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/294?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy GMX Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_ligand_parameterization/galaxy/biobb_wf_ligand_parameterization.ga" ; + schema1:version 3 ; + ns1:output , + , + , 
+ , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/976?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/cutandrun" ; + schema1:sdDatePublished "2024-07-12 13:21:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/976/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13316 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:46Z" ; + schema1:dateModified "2024-06-11T12:54:46Z" ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/976?version=7" ; + schema1:keywords "cutandrun, cutandrun-seq, cutandtag, cutandtag-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/cutandrun" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/976?version=5" ; + schema1:version 5 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1025?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/taxprofiler" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/taxprofiler" ; + schema1:sdDatePublished "2024-07-12 13:18:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1025/ro_crate?version=7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15607 ; + schema1:creator , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:19Z" ; + schema1:dateModified "2024-06-11T12:55:19Z" ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1025?version=10" ; + schema1:keywords "Classification, illumina, long-reads, Metagenomics, microbiome, nanopore, pathogen, Profiling, shotgun, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/taxprofiler" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1025?version=7" ; + schema1:version 7 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/987?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/funcscan" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/funcscan" ; + schema1:sdDatePublished "2024-07-12 13:21:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/987/ro_crate?version=9" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16687 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-07-09T03:02:51Z" ; + schema1:dateModified "2024-07-09T03:02:51Z" ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/987?version=8" ; + schema1:keywords "amp, AMR, antibiotic-resistance, antimicrobial-peptides, antimicrobial-resistance-genes, arg, Assembly, bgc, biosynthetic-gene-clusters, contigs, function, Metagenomics, natural-products, screening, secondary-metabolites" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/funcscan" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/987?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. 
Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-ligand-parameterization).\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.294.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_ligand_parameterization/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy GMX Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:38 +0100" ; + schema1:url 
"https://workflowhub.eu/workflows/294/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9016 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-24T14:09:28Z" ; + schema1:dateModified "2022-11-22T09:50:02Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-ligand-parameterization).\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/294?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy GMX Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_ligand_parameterization/galaxy/biobb_wf_ligand_parameterization.ga" ; + schema1:version 1 ; + ns1:output , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """\r +\r +![Logo](https://cbg-ethz.github.io/V-pipe/img/logo.svg)\r +\r +[![bio.tools](https://img.shields.io/badge/bio-tools-blue.svg)](https://bio.tools/V-Pipe)\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥7.11.0-blue.svg)](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe)\r +[![Deploy Docker image](https://github.com/cbg-ethz/V-pipe/actions/workflows/deploy-docker.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe)\r +[![Tests](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml)\r +[![Mega-Linter](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml)\r +[![License: Apache-2.0](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\r +\r +V-pipe is a workflow designed for the analysis of next generation sequencing (NGS) data from viral pathogens. It produces a number of results in a curated format (e.g., consensus sequences, SNV calls, local/global haplotypes).\r +V-pipe is written using the Snakemake workflow management system.\r +\r +## Usage\r +\r +Different ways of initializing V-pipe are presented below. We strongly encourage you to deploy it [using the quick install script](#using-quick-install-script), as this is our preferred method.\r +\r +To configure V-pipe refer to the documentation present in [config/README.md](config/README.md).\r +\r +V-pipe expects the input samples to be organized in a [two-level](config/README.md#samples) directory hierarchy,\r +and the sequencing reads must be provided in a sub-folder named `raw_data`. 
Further details can be found on the [website](https://cbg-ethz.github.io/V-pipe/usage/).\r +Check the utils subdirectory for [mass-importers tools](utils/README.md#samples-mass-importers) that can assist you in generating this hierarchy.\r +\r +We provide [virus-specific base configuration files](config/README.md#virus-base-config) which contain handy defaults for, e.g., HIV and SARS-CoV-2. Set the virus in the general section of the configuration file:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +```\r +\r +Also see [snakemake's documentation](https://snakemake.readthedocs.io/en/stable/executing/cli.html) to learn more about the command-line options available when executing the workflow.\r +\r +Tutorials introducing usage of V-pipe are available in the [docs/](docs/README.md) subdirectory.\r +\r +### Using quick install script\r +\r +To deploy V-pipe, use the [installation script](utils/README.md#quick-installer) with the following parameters:\r +\r +```bash\r +curl -O 'https://raw.githubusercontent.com/cbg-ethz/V-pipe/master/utils/quick_install.sh'\r +./quick_install.sh -w work\r +```\r +\r +This script will download and install miniconda, checkout the V-pipe git repository (use `-b` to specify which branch/tag) and setup a work directory (specified with `-w`) with an executable script that will execute the workflow:\r +\r +```bash\r +cd work\r +# edit config.yaml and provide samples/ directory\r +./vpipe --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Docker\r +\r +Note: the [docker image](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe) is only setup with components to run the workflow for HIV and SARS-CoV-2 virus base configurations.\r +Using V-pipe with other viruses or configurations might require internet connectivity for additional software components.\r +\r +Create `config.yaml` or `vpipe.config` and then populate the `samples/` directory.\r +For example, the following config file could be used:\r +\r +```yaml\r 
+general:\r + virus_base_config: hiv\r +\r +output:\r + snv: true\r + local: true\r + global: false\r + visualization: true\r + QA: true\r +```\r +\r +Then execute:\r +\r +```bash\r +docker run --rm -it -v $PWD:/work ghcr.io/cbg-ethz/v-pipe:master --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Snakedeploy\r +\r +First install [mamba](https://github.com/conda-forge/miniforge#mambaforge), then create and activate an environment with Snakemake and Snakedeploy:\r +\r +```bash\r +mamba create -c bioconda -c conda-forge --name snakemake snakemake snakedeploy\r +conda activate snakemake\r +```\r +\r +Snakemake's [official workflow installer Snakedeploy](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe) can now be used:\r +\r +```bash\r +snakedeploy deploy-workflow https://github.com/cbg-ethz/V-pipe --tag master .\r +# edit config/config.yaml and provide samples/ directory\r +snakemake --use-conda --jobs 4 --printshellcmds --dry-run\r +```\r +\r +## Dependencies\r +\r +- **[Conda](https://conda.io/docs/index.html)**\r +\r + Conda is a cross-platform package management system and an environment manager application. Snakemake uses mamba as a package manager.\r +\r +- **[Snakemake](https://snakemake.readthedocs.io/)**\r +\r + Snakemake is the central workflow and dependency manager of V-pipe. It determines the order in which individual tools are invoked and checks that programs do not exit unexpectedly.\r +\r +- **[VICUNA](https://www.broadinstitute.org/viral-genomics/vicuna)**\r +\r + VICUNA is a *de novo* assembly software designed for populations with high mutation rates. It is used to build an initial reference for mapping reads with ngshmmalign aligner when a `references/cohort_consensus.fasta` file is not provided. 
Further details can be found in the [wiki](https://github.com/cbg-ethz/V-pipe/wiki/getting-started#input-files) pages.\r +\r +### Computational tools\r +\r +Other dependencies are managed by using isolated conda environments per rule, and below we list some of the computational tools integrated in V-pipe:\r +\r +- **[FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)**\r +\r + FastQC gives an overview of the raw sequencing data. Flowcells that have been overloaded or otherwise fail during sequencing can easily be determined with FastQC.\r +\r +- **[PRINSEQ](http://prinseq.sourceforge.net/)**\r +\r + Trimming and clipping of reads is performed by PRINSEQ. It is currently the most versatile raw read processor with many customization options.\r +\r +- **[ngshmmalign](https://github.com/cbg-ethz/ngshmmalign)**\r +\r + We perform the alignment of the curated NGS data using our custom ngshmmalign that takes structural variants into account. It produces multiple consensus sequences that include either majority bases or ambiguous bases.\r +\r +- **[bwa](https://github.com/lh3/bwa)**\r +\r + In order to detect specific cross-contaminations with other probes, the Burrows-Wheeler aligner is used. It quickly yields estimates for foreign genomic material in an experiment.\r + Additionally, It can be used as an alternative aligner to ngshmmalign.\r +\r +- **[MAFFT](http://mafft.cbrc.jp/alignment/software/)**\r +\r + To standardise multiple samples to the same reference genome (say HXB2 for HIV-1), the multiple sequence aligner MAFFT is employed. The multiple sequence alignment helps in determining regions of low conservation and thus makes standardisation of alignments more robust.\r +\r +- **[Samtools and bcftools](https://www.htslib.org/)**\r +\r + The Swiss Army knife of alignment postprocessing and diagnostics. 
bcftools is also used to generate consensus sequence with indels.\r +\r +- **[SmallGenomeUtilities](https://github.com/cbg-ethz/smallgenomeutilities)**\r +\r + We perform genomic liftovers to standardised reference genomes using our in-house developed python library of utilities for rewriting alignments.\r +\r +- **[ShoRAH](https://github.com/cbg-ethz/shorah)**\r +\r + ShoRAh performs SNV calling and local haplotype reconstruction by using bayesian clustering.\r +\r +- **[LoFreq](https://csb5.github.io/lofreq/)**\r +\r + LoFreq (version 2) is SNVs and indels caller from next-generation sequencing data, and can be used as an alternative engine for SNV calling.\r +\r +- **[SAVAGE](https://bitbucket.org/jbaaijens/savage) and [Haploclique](https://github.com/cbg-ethz/haploclique)**\r +\r + We use HaploClique or SAVAGE to perform global haplotype reconstruction for heterogeneous viral populations by using an overlap graph.\r +\r +## Citation\r +\r +If you use this software in your research, please cite:\r +\r +Posada-Céspedes S., Seifert D., Topolsky I., Jablonski K.P., Metzner K.J., and Beerenwinkel N. 2021.\r +"V-pipe: a computational pipeline for assessing viral genetic diversity from high-throughput sequencing data."\r +_Bioinformatics_, January. 
doi:[10.1093/bioinformatics/btab015](https://doi.org/10.1093/bioinformatics/btab015).\r +\r +## Contributions\r +\r +- [Ivan Topolsky\\* ![orcid]](https://orcid.org/0000-0002-7561-0810), [![github]](https://github.com/dryak)\r +- [Kim Philipp Jablonski ![orcid]](https://orcid.org/0000-0002-4166-4343), [![github]](https://github.com/kpj)\r +- [Lara Fuhrmann ![orcid]](https://orcid.org/0000-0001-6405-0654), [![github]](https://github.com/LaraFuhrmann)\r +- [Uwe Schmitt ![orcid]](https://orcid.org/0000-0002-4658-0616), [![github]](https://github.com/uweschmitt)\r +- [Michal Okoniewski ![orcid]](https://orcid.org/0000-0003-4722-4506), [![github]](https://github.com/michalogit)\r +- [Monica Dragan ![orcid]](https://orcid.org/0000-0002-7719-5892), [![github]](https://github.com/monicadragan)\r +- [Susana Posada Céspedes ![orcid]](https://orcid.org/0000-0002-7459-8186), [![github]](https://github.com/sposadac)\r +- [David Seifert ![orcid]](https://orcid.org/0000-0003-4739-5110), [![github]](https://github.com/SoapZA)\r +- Tobias Marschall\r +- [Niko Beerenwinkel\\*\\* ![orcid]](https://orcid.org/0000-0002-0573-6119)\r +\r +\\* software maintainer ;\r +\\** group leader\r +\r +[github]: https://cbg-ethz.github.io/V-pipe/img/mark-github.svg\r +[orcid]: https://cbg-ethz.github.io/V-pipe/img/ORCIDiD_iconvector.svg\r +\r +## Contact\r +\r +We encourage users to use the [issue tracker](https://github.com/cbg-ethz/V-pipe/issues). For further enquiries, you can also contact the V-pipe Dev Team .\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/301?version=3" ; + schema1:isBasedOn "https://github.com/cbg-ethz/V-pipe.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for V-pipe (main multi-virus version)" ; + schema1:sdDatePublished "2024-07-12 13:18:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/301/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1467 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-06T11:12:18Z" ; + schema1:dateModified "2023-01-16T13:59:09Z" ; + schema1:description """\r +\r +![Logo](https://cbg-ethz.github.io/V-pipe/img/logo.svg)\r +\r +[![bio.tools](https://img.shields.io/badge/bio-tools-blue.svg)](https://bio.tools/V-Pipe)\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥7.11.0-blue.svg)](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe)\r +[![Deploy Docker image](https://github.com/cbg-ethz/V-pipe/actions/workflows/deploy-docker.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe)\r +[![Tests](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml)\r +[![Mega-Linter](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml)\r +[![License: Apache-2.0](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\r +\r +V-pipe is a workflow designed for the analysis of next generation sequencing (NGS) data from viral pathogens. It produces a number of results in a curated format (e.g., consensus sequences, SNV calls, local/global haplotypes).\r +V-pipe is written using the Snakemake workflow management system.\r +\r +## Usage\r +\r +Different ways of initializing V-pipe are presented below. 
We strongly encourage you to deploy it [using the quick install script](#using-quick-install-script), as this is our preferred method.\r +\r +To configure V-pipe refer to the documentation present in [config/README.md](config/README.md).\r +\r +V-pipe expects the input samples to be organized in a [two-level](config/README.md#samples) directory hierarchy,\r +and the sequencing reads must be provided in a sub-folder named `raw_data`. Further details can be found on the [website](https://cbg-ethz.github.io/V-pipe/usage/).\r +Check the utils subdirectory for [mass-importers tools](utils/README.md#samples-mass-importers) that can assist you in generating this hierarchy.\r +\r +We provide [virus-specific base configuration files](config/README.md#virus-base-config) which contain handy defaults for, e.g., HIV and SARS-CoV-2. Set the virus in the general section of the configuration file:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +```\r +\r +Also see [snakemake's documentation](https://snakemake.readthedocs.io/en/stable/executing/cli.html) to learn more about the command-line options available when executing the workflow.\r +\r +Tutorials introducing usage of V-pipe are available in the [docs/](docs/README.md) subdirectory.\r +\r +### Using quick install script\r +\r +To deploy V-pipe, use the [installation script](utils/README.md#quick-installer) with the following parameters:\r +\r +```bash\r +curl -O 'https://raw.githubusercontent.com/cbg-ethz/V-pipe/master/utils/quick_install.sh'\r +./quick_install.sh -w work\r +```\r +\r +This script will download and install miniconda, checkout the V-pipe git repository (use `-b` to specify which branch/tag) and setup a work directory (specified with `-w`) with an executable script that will execute the workflow:\r +\r +```bash\r +cd work\r +# edit config.yaml and provide samples/ directory\r +./vpipe --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Docker\r +\r +Note: the [docker 
image](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe) is only setup with components to run the workflow for HIV and SARS-CoV-2 virus base configurations.\r +Using V-pipe with other viruses or configurations might require internet connectivity for additional software components.\r +\r +Create `config.yaml` or `vpipe.config` and then populate the `samples/` directory.\r +For example, the following config file could be used:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +\r +output:\r + snv: true\r + local: true\r + global: false\r + visualization: true\r + QA: true\r +```\r +\r +Then execute:\r +\r +```bash\r +docker run --rm -it -v $PWD:/work ghcr.io/cbg-ethz/v-pipe:master --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Snakedeploy\r +\r +First install [mamba](https://github.com/conda-forge/miniforge#mambaforge), then create and activate an environment with Snakemake and Snakedeploy:\r +\r +```bash\r +mamba create -c bioconda -c conda-forge --name snakemake snakemake snakedeploy\r +conda activate snakemake\r +```\r +\r +Snakemake's [official workflow installer Snakedeploy](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe) can now be used:\r +\r +```bash\r +snakedeploy deploy-workflow https://github.com/cbg-ethz/V-pipe --tag master .\r +# edit config/config.yaml and provide samples/ directory\r +snakemake --use-conda --jobs 4 --printshellcmds --dry-run\r +```\r +\r +## Dependencies\r +\r +- **[Conda](https://conda.io/docs/index.html)**\r +\r + Conda is a cross-platform package management system and an environment manager application. Snakemake uses mamba as a package manager.\r +\r +- **[Snakemake](https://snakemake.readthedocs.io/)**\r +\r + Snakemake is the central workflow and dependency manager of V-pipe. 
It determines the order in which individual tools are invoked and checks that programs do not exit unexpectedly.\r +\r +- **[VICUNA](https://www.broadinstitute.org/viral-genomics/vicuna)**\r +\r + VICUNA is a *de novo* assembly software designed for populations with high mutation rates. It is used to build an initial reference for mapping reads with ngshmmalign aligner when a `references/cohort_consensus.fasta` file is not provided. Further details can be found in the [wiki](https://github.com/cbg-ethz/V-pipe/wiki/getting-started#input-files) pages.\r +\r +### Computational tools\r +\r +Other dependencies are managed by using isolated conda environments per rule, and below we list some of the computational tools integrated in V-pipe:\r +\r +- **[FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)**\r +\r + FastQC gives an overview of the raw sequencing data. Flowcells that have been overloaded or otherwise fail during sequencing can easily be determined with FastQC.\r +\r +- **[PRINSEQ](http://prinseq.sourceforge.net/)**\r +\r + Trimming and clipping of reads is performed by PRINSEQ. It is currently the most versatile raw read processor with many customization options.\r +\r +- **[ngshmmalign](https://github.com/cbg-ethz/ngshmmalign)**\r +\r + We perform the alignment of the curated NGS data using our custom ngshmmalign that takes structural variants into account. It produces multiple consensus sequences that include either majority bases or ambiguous bases.\r +\r +- **[bwa](https://github.com/lh3/bwa)**\r +\r + In order to detect specific cross-contaminations with other probes, the Burrows-Wheeler aligner is used. 
It quickly yields estimates for foreign genomic material in an experiment.\r + Additionally, It can be used as an alternative aligner to ngshmmalign.\r +\r +- **[MAFFT](http://mafft.cbrc.jp/alignment/software/)**\r +\r + To standardise multiple samples to the same reference genome (say HXB2 for HIV-1), the multiple sequence aligner MAFFT is employed. The multiple sequence alignment helps in determining regions of low conservation and thus makes standardisation of alignments more robust.\r +\r +- **[Samtools and bcftools](https://www.htslib.org/)**\r +\r + The Swiss Army knife of alignment postprocessing and diagnostics. bcftools is also used to generate consensus sequence with indels.\r +\r +- **[SmallGenomeUtilities](https://github.com/cbg-ethz/smallgenomeutilities)**\r +\r + We perform genomic liftovers to standardised reference genomes using our in-house developed python library of utilities for rewriting alignments.\r +\r +- **[ShoRAH](https://github.com/cbg-ethz/shorah)**\r +\r + ShoRAh performs SNV calling and local haplotype reconstruction by using bayesian clustering.\r +\r +- **[LoFreq](https://csb5.github.io/lofreq/)**\r +\r + LoFreq (version 2) is SNVs and indels caller from next-generation sequencing data, and can be used as an alternative engine for SNV calling.\r +\r +- **[SAVAGE](https://bitbucket.org/jbaaijens/savage) and [Haploclique](https://github.com/cbg-ethz/haploclique)**\r +\r + We use HaploClique or SAVAGE to perform global haplotype reconstruction for heterogeneous viral populations by using an overlap graph.\r +\r +## Citation\r +\r +If you use this software in your research, please cite:\r +\r +Posada-Céspedes S., Seifert D., Topolsky I., Jablonski K.P., Metzner K.J., and Beerenwinkel N. 2021.\r +"V-pipe: a computational pipeline for assessing viral genetic diversity from high-throughput sequencing data."\r +_Bioinformatics_, January. 
doi:[10.1093/bioinformatics/btab015](https://doi.org/10.1093/bioinformatics/btab015).\r +\r +## Contributions\r +\r +- [Ivan Topolsky\\* ![orcid]](https://orcid.org/0000-0002-7561-0810), [![github]](https://github.com/dryak)\r +- [Kim Philipp Jablonski ![orcid]](https://orcid.org/0000-0002-4166-4343), [![github]](https://github.com/kpj)\r +- [Lara Fuhrmann ![orcid]](https://orcid.org/0000-0001-6405-0654), [![github]](https://github.com/LaraFuhrmann)\r +- [Uwe Schmitt ![orcid]](https://orcid.org/0000-0002-4658-0616), [![github]](https://github.com/uweschmitt)\r +- [Michal Okoniewski ![orcid]](https://orcid.org/0000-0003-4722-4506), [![github]](https://github.com/michalogit)\r +- [Monica Dragan ![orcid]](https://orcid.org/0000-0002-7719-5892), [![github]](https://github.com/monicadragan)\r +- [Susana Posada Céspedes ![orcid]](https://orcid.org/0000-0002-7459-8186), [![github]](https://github.com/sposadac)\r +- [David Seifert ![orcid]](https://orcid.org/0000-0003-4739-5110), [![github]](https://github.com/SoapZA)\r +- Tobias Marschall\r +- [Niko Beerenwinkel\\*\\* ![orcid]](https://orcid.org/0000-0002-0573-6119)\r +\r +\\* software maintainer ;\r +\\** group leader\r +\r +[github]: https://cbg-ethz.github.io/V-pipe/img/mark-github.svg\r +[orcid]: https://cbg-ethz.github.io/V-pipe/img/ORCIDiD_iconvector.svg\r +\r +## Contact\r +\r +We encourage users to use the [issue tracker](https://github.com/cbg-ethz/V-pipe/issues). For further enquiries, you can also contact the V-pipe Dev Team .\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/301?version=4" ; + schema1:keywords "Alignment, Assembly, covid-19, Genomics, INDELs, rna, SNPs, variant_calling, workflow" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "V-pipe (main multi-virus version)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/301?version=3" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/amber-protein-ligand-complex-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.298.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_complex_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:35 +0100" ; + schema1:url 
"https://workflowhub.eu/workflows/298/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 100823 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-25T11:42:23Z" ; + schema1:dateModified "2022-11-23T08:43:28Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/amber-protein-ligand-complex-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/298?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Amber Protein Ligand Complex 
MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_complex_md_setup/galaxy/biobb_wf_amber_complex_md_setup.ga" ; + schema1:version 1 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Workflow for Metagenomics from raw reads to annotated bins.\r +Steps:\r + - workflow_quality.cwl:\r + - FastQC (control)\r + - fastp (trimming)\r + - Kraken2 (Taxonomic Read Classification\r + - SPAdes (Assembly)\r + - QUAST (Assembly quality report)\r + - BBmap (Read mapping to assembly)\r + - sam_to_bam (sam to indexed bam)\r + - metabatContigDepths (jgi_summarize_bam_contig_depths)\r + - MetaBat2 (binning)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/64?version=7" ; + schema1:isBasedOn "https://gitlab.com/m-unlock/cwl/-/blob/master/cwl/workflows/workflow_metagenomics_binning.cwl" ; + schema1:license "CC0-1.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Metagenomics workflow" ; + schema1:sdDatePublished "2024-07-12 13:34:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/64/ro_crate?version=7" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 43887 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11066 ; + schema1:creator , + ; + schema1:dateCreated "2021-01-08T10:15:11Z" ; + schema1:dateModified "2021-02-12T12:29:29Z" ; + schema1:description """Workflow for Metagenomics from raw reads to annotated bins.\r +Steps:\r + - workflow_quality.cwl:\r + - FastQC (control)\r + - fastp (trimming)\r + - Kraken2 (Taxonomic Read Classification\r + - SPAdes (Assembly)\r + - QUAST (Assembly quality report)\r + - BBmap (Read mapping to assembly)\r + - sam_to_bam (sam to indexed bam)\r + - metabatContigDepths (jgi_summarize_bam_contig_depths)\r + - MetaBat2 (binning)\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/64?version=10" ; + schema1:keywords "Metagenomics, microbial, metagenome, binning" ; + schema1:license "https://spdx.org/licenses/CC0-1.0" ; + schema1:name "Metagenomics workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/64?version=7" ; + schema1:version 7 ; + ns1:input , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.132.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Amber Constant pH MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/132/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 54708 ; + schema1:creator , + ; + schema1:dateCreated "2022-09-15T12:33:15Z" ; + schema1:dateModified "2023-01-16T13:50:20Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/132?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Amber Constant pH MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://raw.githubusercontent.com/bioexcel/biobb_wf_amber_md_setup/master/biobb_wf_amber_md_setup/notebooks/mdsetup_ph/biobb_amber_CpHMD_notebook.ipynb" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """Objective. Biomarkers have become important for the prognosis and diagnosis of various diseases. High-throughput methods such as RNA-sequencing facilitate the detection of differentially expressed genes (DEGs), hence potential biomarker candidates. Individual studies suggest long lists of DEGs, hampering the identification of clinically relevant ones. Concerning preeclampsia, a major obstetric burden with high risk for adverse maternal and/or neonatal outcomes, limitations in diagnosis and prediction are still important issues. Therefore, we developed a workflow to facilitate the screening for biomarkers.\r +Methods. Based on the tool DeSeq2, we established a comprehensive workflow for the identification of DEGs, analyzing data from multiple publicly available RNA-sSequencing studies. We applied it to four RNA-sSequencing datasets (one blood, three placenta) analyzing patients with preeclampsia and normotensive controls. We compared our results with other published approaches and evaluated their performance. \r +Results. We identified 110 genes dysregulated in preeclampsia, observed in ≥3 of the analyzed studies, six even in all four studies. Among them were FLT-1, TREM-1, and FN1 which either represent established biomarkers on protein level, or promising candidates based on recent studies. In comparison, using a published meta-analysis approach we obtained 5,240 DEGs.\r +Conclusions. We present a data analysis workflow for preeclampsia biomarker screening, capable of identifying significant biomarker candidates, while drastically decreasing the numbers of candidates. Moreover, we were also able to confirm its performance for heart failure. 
Our approach can be applied to additional diseases for biomarker identification and the set of identified DEGs in preeclampsia represents a resource for further studies.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.338.1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Biomarker screening in preeclampsia" ; + schema1:sdDatePublished "2024-07-12 13:35:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/338/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 22749 ; + schema1:creator ; + schema1:dateCreated "2022-05-03T13:05:01Z" ; + schema1:dateModified "2023-02-13T14:06:45Z" ; + schema1:description """Objective. Biomarkers have become important for the prognosis and diagnosis of various diseases. High-throughput methods such as RNA-sequencing facilitate the detection of differentially expressed genes (DEGs), hence potential biomarker candidates. Individual studies suggest long lists of DEGs, hampering the identification of clinically relevant ones. Concerning preeclampsia, a major obstetric burden with high risk for adverse maternal and/or neonatal outcomes, limitations in diagnosis and prediction are still important issues. Therefore, we developed a workflow to facilitate the screening for biomarkers.\r +Methods. Based on the tool DeSeq2, we established a comprehensive workflow for the identification of DEGs, analyzing data from multiple publicly available RNA-sSequencing studies. We applied it to four RNA-sSequencing datasets (one blood, three placenta) analyzing patients with preeclampsia and normotensive controls. We compared our results with other published approaches and evaluated their performance. \r +Results. 
We identified 110 genes dysregulated in preeclampsia, observed in ≥3 of the analyzed studies, six even in all four studies. Among them were FLT-1, TREM-1, and FN1 which either represent established biomarkers on protein level, or promising candidates based on recent studies. In comparison, using a published meta-analysis approach we obtained 5,240 DEGs.\r +Conclusions. We present a data analysis workflow for preeclampsia biomarker screening, capable of identifying significant biomarker candidates, while drastically decreasing the numbers of candidates. Moreover, we were also able to confirm its performance for heart failure. Our approach can be applied to additional diseases for biomarker identification and the set of identified DEGs in preeclampsia represents a resource for further studies.\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Biomarker screening in preeclampsia" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/338?version=1" ; + schema1:version 1 ; + ns1:output , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# WRF/EMEP Linear Workflow\r +\r +Example Common Workflow Language (CWL) workflow and tool descriptors for running the \r +Weather Research and Forecase (WRF) and EMEP models.\r +\r +This workflow is designed for a single model domain. 
Example datasets for testing this \r +workflow can be downloaded from Zenodo.\r +\r +\r +## Requirements:\r +\r +* docker or singularity\r +* conda\r +* cwltool\r +* Toil - optional, useful for running on HPC or distributed computing systems\r +\r +### CWL / Toil Installation:\r +\r +The workflow runner (either cwltool, or Toil) can be installed using either conda or pip.\r +Environment files for conda are included, and can be used as shown below:\r +* cwltool only:\r + * `conda env create --file install/env_cwlrunner.yml --name cwl`\r +* Toil & cwltool:\r + * `conda env create --file install/env_toil.yml --name toil`\r +\r +### Setup for Example Workflow\r +\r +* Download the example dataset from Zenodo: https://doi.org/10.5281/zenodo.7817216\r +* Extract into the `input_files` directory:\r + * `tar -zxvf wrf_emep_UK_example_inputs.tar.gz -C input_files --strip-components=1`\r +\r +## Running the Workflow\r +\r +The full workflow is broken into several logical steps:\r +1. ERA5 download\r +2. WPS 1st step: Geogrid geography file creation\r +3. WPS process: ungribbing of ERA5 data, and running of metgrid to produce meteorology files.\r +4. WRF process: generation of WRF input files by REAL, and running of WRF model\r +5. EMEP model: running of EMEP chemistry and transport model\r +\r +Steps 1 and 3 require you to register with the CDS service, in order to download ERA5 data\r +before using in the WPS process.\r +Steps 2 and 5 require you to download extra input data - the instructions on how to do this\r +are included in the README.txt files in the relevant input data directories.\r +\r +A full workflow for all steps is provided here. But each separate step can by run on it's \r +own too, following the instructions given below. We recommend running step 4 first, to \r +explore how the REAL & WRF workflow works, before trying the other steps.\r +\r +### 1. 
ERA5 download.\r +\r +Before running the ERA5 download tool, ensure that you have reqistered for the CDS service, \r +signed the ERA5 licensing agreement, and saved the CDS API key (`.cdsapirc`) in your \r +working directory.\r +\r +To run the ERA5 download tool use the following command:\r +```\r +cwltool [--cachdir CACHE] [--singularity] workflows/era5_workflow.cwl example_workflow_configurations/era5_download_settings.yaml\r +```\r +Note that the `--cachedir CACHE` option sets the working directory cache, which enables the\r +reuse of any steps previously run (and the restarting of the workflow from this point).\r +The `--singularity` option is needed if you are using singularity instead of docker.\r +\r +### 2. WPS: Geogrid geography file creation\r +\r +Before running the geogrid tool you will need to download the geography data from the\r +[UCAR website](https://www2.mmm.ucar.edu/wrf/users/download/get_sources_wps_geog.html).\r +These should be extracted into the `input_files/geogrid_geog_input` directory.\r +\r +To run the geogrid program use the following command:\r +```\r +cwltool [--cachdir CACHE] [--singularity] workflows/geogrid_workflow.cwl example_workflow_configurations/wps_geogrid_cwl_settings.yaml\r +```\r +\r +### 3. WPS: Creation of meteorology input files\r +\r +Before running the WPS process you will have to download the ERA5 datafiles (which will be\r +called `preslev_[YYYYMMDD].grib` and `surface_[YYYYMMDD].grib`) and copy these to the directory\r +`input_files/wps_era5_input`. If you have also run geogrid in step 2 you can replace the \r +`geo_em.d01.nc` file in the `input_files/wps_geogrid_input` directory with the file that \r +geogrid created.\r +\r +To run the wps metgrid process use the following command:\r +```\r +cwltool [--cachdir CACHE] [--singularity] workflows/wps_workflow.cwl example_workflow_configurations/wps_metgrid_cwl_settings.yaml\r +```\r +\r +### 4. 
WRF: Creation of WRF input files, and running WRF model\r +\r +The WRF model can be run without any prepreparation, except for the downloading of the \r +input data from Zenodo. However, if you have created new meteorology files (`met_em*`) using\r +WPS you can replace the files in the `input_files/wrf_met_input` directory with these.\r +\r +To run the WRF process (including REAL) use the following command:\r +```\r +cwltool [--cachdir CACHE] [--singularity] workflows/wrf_workflow.cwl example_workflow_configurations/wrf_real_cwl_settings.yaml\r +``` \r +\r +### 5. EMEP: Running EMEP chemistry and transport model\r +\r +Before running the EMEP model you will need to download the EMEP input dataset. This can be\r +done using the `catalog.py` tool, following the instructions in the `input_files/emep_input/README.txt`\r +file. If you have run WRF you can also replace the `wrfout*` data files in the \r +`input_Files/emep_wrf_input` directory with those you have created.\r +\r +To run the EMEP model use the following command:\r +```\r +cwltool [--cachdir CACHE] [--singularity] workflows/emep_workflow.cwl example_workflow_configurations/emep_cwl_settings.yaml\r +```\r +\r +### Full Workflow\r +\r +Before running the full workflow make sure you have carried out the setup tasks described\r +above.\r +\r +To run the full workflow use the following command:\r +```\r +cwltool [--cachdir CACHE] [--singularity] wrf_emep_full_workflow.cwl example_workflow_configurations/wrf_emep_full_workflow_cwl_settings.yaml\r +```\r +\r +## Notes\r +\r +### WRF filenames\r +\r +In order to work with singularity, all filenames need to exclude special characters.\r +To ensure that all WRF filenames comply with this requirement, you will need to add the \r +`nocolons = .true.` option to your WPS, REAL and WRF namelists to ensure this.\r +\r +### MPI parallel processing\r +\r +The WPS processes all run in single thread mode. REAL, WRF and EMEP have been compiled with\r +MPI support. 
The default cores for each of these is 2, 9 and 9, respectively. The \r +settings file can be edited to modify these requirements.\r +\r +### Caching intermediate workflow steps\r +\r +To cache the data from individual steps you can use the `--cachedir ` optional flag.\r +\r +\r +## License and Copyright \r +\r +These workflow scripts have been developed by the [Research IT](https://research-it.manchester.ac.uk/) \r +at the [University of Manchester](https://www.manchester.ac.uk/).\r +\r +Copyright 2023 [University of Manchester, UK](https://www.manchester.ac.uk/).\r +\r +Licensed under the MIT license, see the LICENSE file for details.""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/455?version=1" ; + schema1:isBasedOn "https://github.com/UoMResearchIT/wrf_emep_cwl_linear_workflow.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for WRF / EMEP Linear Workflow" ; + schema1:sdDatePublished "2024-07-12 13:34:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/455/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 37708 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4306 ; + schema1:creator ; + schema1:dateCreated "2023-04-12T12:04:44Z" ; + schema1:dateModified "2023-04-12T12:45:16Z" ; + schema1:description """# WRF/EMEP Linear Workflow\r +\r +Example Common Workflow Language (CWL) workflow and tool descriptors for running the \r +Weather Research and Forecase (WRF) and EMEP models.\r +\r +This workflow is designed for a single model domain. 
Example datasets for testing this \r +workflow can be downloaded from Zenodo.\r +\r +\r +## Requirements:\r +\r +* docker or singularity\r +* conda\r +* cwltool\r +* Toil - optional, useful for running on HPC or distributed computing systems\r +\r +### CWL / Toil Installation:\r +\r +The workflow runner (either cwltool, or Toil) can be installed using either conda or pip.\r +Environment files for conda are included, and can be used as shown below:\r +* cwltool only:\r + * `conda env create --file install/env_cwlrunner.yml --name cwl`\r +* Toil & cwltool:\r + * `conda env create --file install/env_toil.yml --name toil`\r +\r +### Setup for Example Workflow\r +\r +* Download the example dataset from Zenodo: https://doi.org/10.5281/zenodo.7817216\r +* Extract into the `input_files` directory:\r + * `tar -zxvf wrf_emep_UK_example_inputs.tar.gz -C input_files --strip-components=1`\r +\r +## Running the Workflow\r +\r +The full workflow is broken into several logical steps:\r +1. ERA5 download\r +2. WPS 1st step: Geogrid geography file creation\r +3. WPS process: ungribbing of ERA5 data, and running of metgrid to produce meteorology files.\r +4. WRF process: generation of WRF input files by REAL, and running of WRF model\r +5. EMEP model: running of EMEP chemistry and transport model\r +\r +Steps 1 and 3 require you to register with the CDS service, in order to download ERA5 data\r +before using in the WPS process.\r +Steps 2 and 5 require you to download extra input data - the instructions on how to do this\r +are included in the README.txt files in the relevant input data directories.\r +\r +A full workflow for all steps is provided here. But each separate step can by run on it's \r +own too, following the instructions given below. We recommend running step 4 first, to \r +explore how the REAL & WRF workflow works, before trying the other steps.\r +\r +### 1. 
ERA5 download.\r +\r +Before running the ERA5 download tool, ensure that you have reqistered for the CDS service, \r +signed the ERA5 licensing agreement, and saved the CDS API key (`.cdsapirc`) in your \r +working directory.\r +\r +To run the ERA5 download tool use the following command:\r +```\r +cwltool [--cachdir CACHE] [--singularity] workflows/era5_workflow.cwl example_workflow_configurations/era5_download_settings.yaml\r +```\r +Note that the `--cachedir CACHE` option sets the working directory cache, which enables the\r +reuse of any steps previously run (and the restarting of the workflow from this point).\r +The `--singularity` option is needed if you are using singularity instead of docker.\r +\r +### 2. WPS: Geogrid geography file creation\r +\r +Before running the geogrid tool you will need to download the geography data from the\r +[UCAR website](https://www2.mmm.ucar.edu/wrf/users/download/get_sources_wps_geog.html).\r +These should be extracted into the `input_files/geogrid_geog_input` directory.\r +\r +To run the geogrid program use the following command:\r +```\r +cwltool [--cachdir CACHE] [--singularity] workflows/geogrid_workflow.cwl example_workflow_configurations/wps_geogrid_cwl_settings.yaml\r +```\r +\r +### 3. WPS: Creation of meteorology input files\r +\r +Before running the WPS process you will have to download the ERA5 datafiles (which will be\r +called `preslev_[YYYYMMDD].grib` and `surface_[YYYYMMDD].grib`) and copy these to the directory\r +`input_files/wps_era5_input`. If you have also run geogrid in step 2 you can replace the \r +`geo_em.d01.nc` file in the `input_files/wps_geogrid_input` directory with the file that \r +geogrid created.\r +\r +To run the wps metgrid process use the following command:\r +```\r +cwltool [--cachdir CACHE] [--singularity] workflows/wps_workflow.cwl example_workflow_configurations/wps_metgrid_cwl_settings.yaml\r +```\r +\r +### 4. 
WRF: Creation of WRF input files, and running WRF model\r +\r +The WRF model can be run without any prepreparation, except for the downloading of the \r +input data from Zenodo. However, if you have created new meteorology files (`met_em*`) using\r +WPS you can replace the files in the `input_files/wrf_met_input` directory with these.\r +\r +To run the WRF process (including REAL) use the following command:\r +```\r +cwltool [--cachdir CACHE] [--singularity] workflows/wrf_workflow.cwl example_workflow_configurations/wrf_real_cwl_settings.yaml\r +``` \r +\r +### 5. EMEP: Running EMEP chemistry and transport model\r +\r +Before running the EMEP model you will need to download the EMEP input dataset. This can be\r +done using the `catalog.py` tool, following the instructions in the `input_files/emep_input/README.txt`\r +file. If you have run WRF you can also replace the `wrfout*` data files in the \r +`input_Files/emep_wrf_input` directory with those you have created.\r +\r +To run the EMEP model use the following command:\r +```\r +cwltool [--cachdir CACHE] [--singularity] workflows/emep_workflow.cwl example_workflow_configurations/emep_cwl_settings.yaml\r +```\r +\r +### Full Workflow\r +\r +Before running the full workflow make sure you have carried out the setup tasks described\r +above.\r +\r +To run the full workflow use the following command:\r +```\r +cwltool [--cachdir CACHE] [--singularity] wrf_emep_full_workflow.cwl example_workflow_configurations/wrf_emep_full_workflow_cwl_settings.yaml\r +```\r +\r +## Notes\r +\r +### WRF filenames\r +\r +In order to work with singularity, all filenames need to exclude special characters.\r +To ensure that all WRF filenames comply with this requirement, you will need to add the \r +`nocolons = .true.` option to your WPS, REAL and WRF namelists to ensure this.\r +\r +### MPI parallel processing\r +\r +The WPS processes all run in single thread mode. REAL, WRF and EMEP have been compiled with\r +MPI support. 
The default cores for each of these is 2, 9 and 9, respectively. The \r +settings file can be edited to modify these requirements.\r +\r +### Caching intermediate workflow steps\r +\r +To cache the data from individual steps you can use the `--cachedir ` optional flag.\r +\r +\r +## License and Copyright \r +\r +These workflow scripts have been developed by the [Research IT](https://research-it.manchester.ac.uk/) \r +at the [University of Manchester](https://www.manchester.ac.uk/).\r +\r +Copyright 2023 [University of Manchester, UK](https://www.manchester.ac.uk/).\r +\r +Licensed under the MIT license, see the LICENSE file for details.""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "WRF / EMEP Linear Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/455?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + . + + a schema1:Dataset ; + schema1:datePublished "2024-06-24T13:46:01.976755" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/amr_gene_detection" ; + schema1:license "GPL-3.0-or-later" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "amr_gene_detection/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:MediaObject ; + schema1:about ; + schema1:author , + ; + schema1:encodingFormat "text/markdown" . 
+ + a schema1:Dataset ; + schema1:creator , + , + , + , + , + , + ; + schema1:datePublished "2023-04-13" ; + schema1:description "Study protocol" ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.7551181" ; + schema1:name "BY-COVID - WP5 - Baseline Use Case: SARS-CoV-2 vaccine effectiveness assessment - Study protocol" ; + schema1:url "https://zenodo.org/record/7825979" ; + schema1:version "1.0.3" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# prepareChIPs\r +\r +This is a simple `snakemake` workflow template for preparing **single-end** ChIP-Seq data.\r +The steps implemented are:\r +\r +1. Download raw fastq files from SRA\r +2. Trim and Filter raw fastq files using `AdapterRemoval`\r +3. Align to the supplied genome using `bowtie2`\r +4. Deduplicate Alignments using `Picard MarkDuplicates`\r +5. Call Macs2 Peaks using `macs2`\r +\r +A pdf of the rulegraph is available [here](workflow/rules/rulegraph.pdf)\r +\r +Full details for each step are given below.\r +Any additional parameters for tools can be specified using `config/config.yml`, along with many of the requisite paths\r +\r +To run the workflow with default settings, simply run as follows (after editing `config/samples.tsv`)\r +\r +```bash\r +snakemake --use-conda --cores 16\r +```\r +\r +If running on an HPC cluster, a snakemake profile will required for submission to the queueing system and appropriate resource allocation.\r +Please discuss this will your HPC support team.\r +Nodes may also have restricted internet access and rules which download files may not work on many HPCs.\r +Please see below or discuss this with your support team\r +\r +Whilst no snakemake wrappers are explicitly used in this workflow, the underlying scripts are utilised where possible to minimise any issues with HPC clusters with restrictions on internet access.\r +These scripts are based on 
`v1.31.1` of the snakemake wrappers\r +\r +### Important Note Regarding OSX Systems\r +\r +It should be noted that this workflow is **currently incompatible with OSX-based systems**. \r +There are two unsolved issues\r +\r +1. `fasterq-dump` has a bug which is specific to conda environments. This has been updated in v3.0.3 but this patch has not yet been made available to conda environments for OSX. Please check [here](https://anaconda.org/bioconda/sra-tools) to see if this has been updated.\r +2. The following error appears in some OSX-based R sessions, in a system-dependent manner:\r +```\r +Error in grid.Call(C_textBounds, as.graphicsAnnot(x$label), x$x, x$y, : \r + polygon edge not found\r +```\r +\r +The fix for this bug is currently unknown\r +\r +## Download Raw Data\r +\r +### Outline\r +\r +The file `samples.tsv` is used to specify all steps for this workflow.\r +This file must contain the columns: `accession`, `target`, `treatment` and `input`\r +\r +1. `accession` must be an SRA accession. Only single-end data is currently supported by this workflow\r +2. `target` defines the ChIP target. All files common to a target and treatment will be used to generate summarised coverage in bigWig Files\r +3. `treatment` defines the treatment group each file belongs to. If only one treatment exists, simply use the value 'control' or similar for every file\r +4. `input` should contain the accession for the relevant input sample. These will only be downloaded once. 
Valid input samples are *required* for this workflow\r +\r +As some HPCs restrict internet access for submitted jobs, *it may be prudent to run the initial rules in an interactive session* if at all possible.\r +This can be performed using the following (with 2 cores provided as an example)\r +\r +```bash\r +snakemake --use-conda --until get_fastq --cores 2\r +```\r +\r +### Outputs\r +\r +- Downloaded files will be gzipped and written to `data/fastq/raw`.\r +- `FastQC` and `MultiQC` will also be run, with output in `docs/qc/raw`\r +\r +Both of these directories are able to be specified as relative paths in `config.yml`\r +\r +## Read Filtering\r +\r +### Outline\r +\r +Read trimming is performed using [AdapterRemoval](https://adapterremoval.readthedocs.io/en/stable/).\r +Default settings are customisable using config.yml, with the defaults set to discard reads shorter than 50nt, and to trim using quality scores with a threshold of Q30.\r +\r +### Outputs\r +\r +- Trimmed fastq.gz files will be written to `data/fastq/trimmed`\r +- `FastQC` and `MultiQC` will also be run, with output in `docs/qc/trimmed`\r +- AdapterRemoval 'settings' files will be written to `output/adapterremoval`\r +\r +## Alignments\r +\r +### Outline\r +\r +Alignment is performed using [`bowtie2`](https://bowtie-bio.sourceforge.net/bowtie2/manual.shtml) and it is assumed that this index is available before running this workflow.\r +The path and prefix must be provided using config.yml\r +\r +This index will also be used to produce the file `chrom.sizes` which is essential for conversion of bedGraph files to the more efficient bigWig files.\r +\r +### Outputs\r +\r +- Alignments will be written to `data/aligned`\r +- `bowtie2` log files will be written to `output/bowtie2` (not the conenvtional log directory)\r +- The file `chrom.sizes` will be written to `output/annotations`\r +\r +Both sorted and the original unsorted alignments will be returned.\r +However, the unsorted alignments are marked 
with `temp()` and can be deleted using \r +\r +```bash\r +snakemake --delete-temp-output --cores 1\r +```\r +\r +## Deduplication\r +\r +### Outline\r +\r +Deduplication is performed using [MarkDuplicates](https://gatk.broadinstitute.org/hc/en-us/articles/360037052812-MarkDuplicates-Picard-) from the Picard set of tools.\r +By default, deduplication will remove the duplicates from the set of alignments.\r +All resultant bam files will be sorted and indexed.\r +\r +### Outputs\r +\r +- Deduplicated alignments are written to `data/deduplicated` and are indexed\r +- DuplicationMetrics files are written to `output/markDuplicates`\r +\r +## Peak Calling\r +\r +### Outline\r +\r +This is performed using [`macs2 callpeak`](https://pypi.org/project/MACS2/).\r +\r +- Peak calling will be performed on:\r + a. each sample individually, and \r + b. merged samples for those sharing a common ChIP target and treatment group.\r +- Coverage bigWig files for each individual sample are produced using CPM values (i.e. 
Signal Per Million Reads, SPMR)\r +- For all combinations of target and treatment coverage bigWig files are also produced, along with fold-enrichment bigWig files\r +\r +### Outputs\r +\r +- Individual outputs are written to `output/macs2/{accession}`\r + + Peaks are written in `narrowPeak` format along with `summits.bed`\r + + bedGraph files are automatically converted to bigWig files, and the originals are marked with `temp()` for subsequent deletion\r + + callpeak log files are also added to this directory\r +- Merged outputs are written to `output/macs2/{target}/`\r + + bedGraph Files are also converted to bigWig and marked with `temp()`\r + + Fold-Enrichment bigWig files are also created with the original bedGraph files marked with `temp()`\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.528.1" ; + schema1:isBasedOn "https://github.com/smped/prepareChIPs.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for prepareChIPs:" ; + schema1:sdDatePublished "2024-07-12 13:32:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/528/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3868 ; + schema1:creator ; + schema1:dateCreated "2023-07-09T08:54:36Z" ; + schema1:dateModified "2023-07-09T09:01:09Z" ; + schema1:description """# prepareChIPs\r +\r +This is a simple `snakemake` workflow template for preparing **single-end** ChIP-Seq data.\r +The steps implemented are:\r +\r +1. Download raw fastq files from SRA\r +2. Trim and Filter raw fastq files using `AdapterRemoval`\r +3. Align to the supplied genome using `bowtie2`\r +4. Deduplicate Alignments using `Picard MarkDuplicates`\r +5. 
Call Macs2 Peaks using `macs2`\r +\r +A pdf of the rulegraph is available [here](workflow/rules/rulegraph.pdf)\r +\r +Full details for each step are given below.\r +Any additional parameters for tools can be specified using `config/config.yml`, along with many of the requisite paths\r +\r +To run the workflow with default settings, simply run as follows (after editing `config/samples.tsv`)\r +\r +```bash\r +snakemake --use-conda --cores 16\r +```\r +\r +If running on an HPC cluster, a snakemake profile will required for submission to the queueing system and appropriate resource allocation.\r +Please discuss this will your HPC support team.\r +Nodes may also have restricted internet access and rules which download files may not work on many HPCs.\r +Please see below or discuss this with your support team\r +\r +Whilst no snakemake wrappers are explicitly used in this workflow, the underlying scripts are utilised where possible to minimise any issues with HPC clusters with restrictions on internet access.\r +These scripts are based on `v1.31.1` of the snakemake wrappers\r +\r +### Important Note Regarding OSX Systems\r +\r +It should be noted that this workflow is **currently incompatible with OSX-based systems**. \r +There are two unsolved issues\r +\r +1. `fasterq-dump` has a bug which is specific to conda environments. This has been updated in v3.0.3 but this patch has not yet been made available to conda environments for OSX. Please check [here](https://anaconda.org/bioconda/sra-tools) to see if this has been updated.\r +2. 
The following error appears in some OSX-based R sessions, in a system-dependent manner:\r +```\r +Error in grid.Call(C_textBounds, as.graphicsAnnot(x$label), x$x, x$y, : \r + polygon edge not found\r +```\r +\r +The fix for this bug is currently unknown\r +\r +## Download Raw Data\r +\r +### Outline\r +\r +The file `samples.tsv` is used to specify all steps for this workflow.\r +This file must contain the columns: `accession`, `target`, `treatment` and `input`\r +\r +1. `accession` must be an SRA accession. Only single-end data is currently supported by this workflow\r +2. `target` defines the ChIP target. All files common to a target and treatment will be used to generate summarised coverage in bigWig Files\r +3. `treatment` defines the treatment group each file belongs to. If only one treatment exists, simply use the value 'control' or similar for every file\r +4. `input` should contain the accession for the relevant input sample. These will only be downloaded once. Valid input samples are *required* for this workflow\r +\r +As some HPCs restrict internet access for submitted jobs, *it may be prudent to run the initial rules in an interactive session* if at all possible.\r +This can be performed using the following (with 2 cores provided as an example)\r +\r +```bash\r +snakemake --use-conda --until get_fastq --cores 2\r +```\r +\r +### Outputs\r +\r +- Downloaded files will be gzipped and written to `data/fastq/raw`.\r +- `FastQC` and `MultiQC` will also be run, with output in `docs/qc/raw`\r +\r +Both of these directories are able to be specified as relative paths in `config.yml`\r +\r +## Read Filtering\r +\r +### Outline\r +\r +Read trimming is performed using [AdapterRemoval](https://adapterremoval.readthedocs.io/en/stable/).\r +Default settings are customisable using config.yml, with the defaults set to discard reads shorter than 50nt, and to trim using quality scores with a threshold of Q30.\r +\r +### Outputs\r +\r +- Trimmed fastq.gz files will be 
written to `data/fastq/trimmed`\r +- `FastQC` and `MultiQC` will also be run, with output in `docs/qc/trimmed`\r +- AdapterRemoval 'settings' files will be written to `output/adapterremoval`\r +\r +## Alignments\r +\r +### Outline\r +\r +Alignment is performed using [`bowtie2`](https://bowtie-bio.sourceforge.net/bowtie2/manual.shtml) and it is assumed that this index is available before running this workflow.\r +The path and prefix must be provided using config.yml\r +\r +This index will also be used to produce the file `chrom.sizes` which is essential for conversion of bedGraph files to the more efficient bigWig files.\r +\r +### Outputs\r +\r +- Alignments will be written to `data/aligned`\r +- `bowtie2` log files will be written to `output/bowtie2` (not the conenvtional log directory)\r +- The file `chrom.sizes` will be written to `output/annotations`\r +\r +Both sorted and the original unsorted alignments will be returned.\r +However, the unsorted alignments are marked with `temp()` and can be deleted using \r +\r +```bash\r +snakemake --delete-temp-output --cores 1\r +```\r +\r +## Deduplication\r +\r +### Outline\r +\r +Deduplication is performed using [MarkDuplicates](https://gatk.broadinstitute.org/hc/en-us/articles/360037052812-MarkDuplicates-Picard-) from the Picard set of tools.\r +By default, deduplication will remove the duplicates from the set of alignments.\r +All resultant bam files will be sorted and indexed.\r +\r +### Outputs\r +\r +- Deduplicated alignments are written to `data/deduplicated` and are indexed\r +- DuplicationMetrics files are written to `output/markDuplicates`\r +\r +## Peak Calling\r +\r +### Outline\r +\r +This is performed using [`macs2 callpeak`](https://pypi.org/project/MACS2/).\r +\r +- Peak calling will be performed on:\r + a. each sample individually, and \r + b. 
merged samples for those sharing a common ChIP target and treatment group.\r +- Coverage bigWig files for each individual sample are produced using CPM values (i.e. Signal Per Million Reads, SPMR)\r +- For all combinations of target and treatment coverage bigWig files are also produced, along with fold-enrichment bigWig files\r +\r +### Outputs\r +\r +- Individual outputs are written to `output/macs2/{accession}`\r + + Peaks are written in `narrowPeak` format along with `summits.bed`\r + + bedGraph files are automatically converted to bigWig files, and the originals are marked with `temp()` for subsequent deletion\r + + callpeak log files are also added to this directory\r +- Merged outputs are written to `output/macs2/{target}/`\r + + bedGraph Files are also converted to bigWig and marked with `temp()`\r + + Fold-Enrichment bigWig files are also created with the original bedGraph files marked with `temp()`\r +""" ; + schema1:keywords "Bioinformatics, Genomics, Transcriptomics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "prepareChIPs:" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/528?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Take an anndata file, and perform basic QC with scanpy. Produces a filtered AnnData object." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/467?version=3" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq_CellQC" ; + schema1:sdDatePublished "2024-07-12 13:22:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/467/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 21389 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-06-22T06:28:47Z" ; + schema1:dateModified "2023-11-09T03:49:00Z" ; + schema1:description "Take an anndata file, and perform basic QC with scanpy. Produces a filtered AnnData object." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/467?version=3" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "scRNAseq_CellQC" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/467?version=3" ; + schema1:version 3 ; + ns1:input , + , + , + , + ; + ns1:output , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Simple bacterial assembly and annotation" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/966?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/bacass" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bacass" ; + schema1:sdDatePublished "2024-07-12 13:22:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/966/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11764 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:43Z" ; + schema1:dateModified "2024-06-11T12:54:43Z" ; + schema1:description "Simple bacterial assembly and annotation" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/966?version=7" ; + schema1:keywords "Assembly, bacterial-genomes, denovo, denovo-assembly, genome-assembly, hybrid-assembly, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bacass" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/966?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + schema1:datePublished "2023-10-30T19:22:11.103969" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/kmer-profiling-hifi-VGP1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "kmer-profiling-hifi-VGP1/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "kmer-profiling-hifi-VGP1/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/kmer-profiling-hifi-VGP1" ; + schema1:version "0.1.1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 5647 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-03-06T11:16:42.537263" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:description """This is the nested workflow of the "parent" nanopore workflow without the guppy basecalling step\r +\r +Workflow for sequencing with ONT nanopore, from basecalling to assembly quality.\r +Steps:\r + - Kraken2 (taxonomic classification of FASTQ reads)\r + - Krona (classification visualization)\r + - Flye (de novo assembly)\r + - Medaka (assembly polishing)\r + - QUAST (assembly quality reports)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/254?version=1" ; + schema1:isBasedOn "https://git.wur.nl/unlock/cwl/-/blob/master/cwl/workflows/workflow_nanopore_assembly.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nanopore workflow with direct FASTQ reads inputs (without Guppy)" ; + schema1:sdDatePublished "2024-07-12 13:34:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/254/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 27556 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8124 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-01-06T07:38:04Z" ; + schema1:dateModified "2022-01-07T09:30:01Z" ; + schema1:description """This is the nested workflow of the "parent" nanopore workflow without the guppy basecalling step\r +\r +Workflow for sequencing with ONT nanopore, from basecalling to assembly quality.\r +Steps:\r + - Kraken2 (taxonomic classification of FASTQ reads)\r + - Krona (classification visualization)\r + - Flye (de novo assembly)\r + - Medaka (assembly polishing)\r + - QUAST (assembly quality reports)\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/254?version=2" ; + schema1:keywords "nanopore, Genomics, Metagenomics" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "nanopore workflow with direct FASTQ reads inputs (without Guppy)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/254?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + ; + ns1:output , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "The simplest workflow among a collection of workflows intended to solve tasks up to CTF estimation." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/598?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for SCIPION: acquire -> motionCorr -> ctf -> report" ; + schema1:sdDatePublished "2024-07-12 13:17:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/598/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 34990 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4994 ; + schema1:dateCreated "2023-10-04T10:58:43Z" ; + schema1:dateModified "2024-07-10T12:30:33Z" ; + schema1:description "The simplest workflow among a collection of workflows intended to solve tasks up to CTF estimation." ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "scipion, cryoem, spa, image processing, TalosArctica, TitanKrios, Glacios" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "SCIPION: acquire -> motionCorr -> ctf -> report" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/598?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Metabolome Annotation Workflow (MAW) takes liquid chromatography tandem mass spectrometry data (LC-MS2) as .mzML format data files. It performs spectral database dereplication using R Package Spectra and compound database dereplication using MetFrag/SIRIUS. Final candidate selection is executed in Python using RDKit and PubChemPy. The classification of the tentative candidates from the input data are classified using ChemONT chemical ontology.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.510.1" ; + schema1:isBasedOn "https://github.com/zmahnoor14/MAW" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Metabolome Annotation Workflow (MAW)" ; + schema1:sdDatePublished "2024-07-12 13:32:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/510/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1569 ; + schema1:creator , + , + , + , + , + ; + schema1:dateCreated "2023-06-19T20:09:24Z" ; + schema1:dateModified "2023-06-19T20:10:21Z" ; + schema1:description """Metabolome Annotation Workflow (MAW) takes liquid chromatography tandem mass spectrometry data (LC-MS2) as .mzML format data files. It performs spectral database dereplication using R Package Spectra and compound database dereplication using MetFrag/SIRIUS. Final candidate selection is executed in Python using RDKit and PubChemPy. The classification of the tentative candidates from the input data are classified using ChemONT chemical ontology.\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/510?version=1" ; + schema1:keywords "Metabolomics, Annotation, mass-spectrometry, identification, Bioinformatics, FAIR workflows, workflow, gnps, massbank, hmdb, spectra, rdkit, Cheminformatics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Metabolome Annotation Workflow (MAW)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/510?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 12178 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# workflow-ref-guided-stacks\r +\r +These workflows are part of a set designed to work for RAD-seq data on the Galaxy platform, using the tools from the Stacks program. \r +\r +Galaxy Australia: https://usegalaxy.org.au/\r +\r +Stacks: http://catchenlab.life.illinois.edu/stacks/\r +\r +## Inputs\r +* demultiplexed reads in fastq format, may be output from the QC workflow. Files are in a collection. 
\r +* population map in text format\r +* reference genome in fasta format\r +\r +## Steps and outputs\r +\r +BWA MEM 2:\r +* The reads are mapped to the reference genome; output in BAM format\r +* The collection of bam files is named something like Map with BWA-MEM on collection 5 (mapped reads in BAM format)\r +* Each of the bam files in the collection is named something like sample_CAAC\r +\r +Samtools stats before filtering:\r +* These bam files are sent to Samtools stats to get statistics; these are then sent to MultiQC to provide a nice output. This is tagged as "bam stats before filtering" in the Galaxy history. \r +* The "General Statistics" show how many reads were mapped - if there is a low mapping rate, it may be worth re-checking or repeating QC on the raw reads, or considering a different reference genome, or using a de novo approach. To see if many reads have been soft-clipped by Bwa mem (which may affect how well gstacks can work), look at the "Alignment Metrics" section, and the row with "Mapped bases (Cigar)". Hover over the dots to see sample names especially towards the left of the row - these have the least mapped reads.\r +\r +Samtools view:\r +* This step filters out certain reads from the bam files. The default settings are to exclude reads if they are unmapped, if the alignment is not primary or is supplementary, if the read fails platform/vendor quality checks, and if the read is a PCR or optical duplicate. \r +* The output bams are tagged with "filtered bams" in the Galaxy history.\r +\r +Samtools stats after filtering:\r +* Filtered bams are sent again to samtools stats, and statistics to MultiQC, with the report tagged as "bam stats after filtering" in the Galaxy history. \r +\r +gstacks:\r +* Filtered bams and a population map are sent to gstacks. The outputs are:\r +* Catalog of loci in fasta format\r +* Variant calls in VCF format\r +* Note: some bam files cause errors here with gstacks. 
For example, the log file may say "Error, all records discard with file SampleXYZ.FASTQ.bam, Aborted". If this occurs, check the bam stats (as described above). Some of the options are to re-do QC on the raw reads, change settings for mapping reads in BWA MEM, and/or delete this sample/s from the population map and proceed to gstacks. \r +The sample can still remain in the list of bam files but gstacks will only consider what is listed in the pop map. \r +\r +populations:\r +* gstacks outputs and a population map are snet to the "populations" module. The outputs are:\r +* Locus consensus sequences in fasta format\r +* Snp calls, in VCF format\r +* Haplotypes, in VCF format\r +* Summary statistics\r +\r +![qc-wf](wf-ref-guided.png)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/347?version=1" ; + schema1:isBasedOn "https://github.com/AnnaSyme/workflow-ref-guided-stacks.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Stacks RAD-seq reference-guided workflow" ; + schema1:sdDatePublished "2024-07-12 13:35:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/347/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 33550 ; + schema1:creator ; + schema1:dateCreated "2022-05-31T07:20:38Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """# workflow-ref-guided-stacks\r +\r +These workflows are part of a set designed to work for RAD-seq data on the Galaxy platform, using the tools from the Stacks program. \r +\r +Galaxy Australia: https://usegalaxy.org.au/\r +\r +Stacks: http://catchenlab.life.illinois.edu/stacks/\r +\r +## Inputs\r +* demultiplexed reads in fastq format, may be output from the QC workflow. Files are in a collection. 
\r +* population map in text format\r +* reference genome in fasta format\r +\r +## Steps and outputs\r +\r +BWA MEM 2:\r +* The reads are mapped to the reference genome; output in BAM format\r +* The collection of bam files is named something like Map with BWA-MEM on collection 5 (mapped reads in BAM format)\r +* Each of the bam files in the collection is named something like sample_CAAC\r +\r +Samtools stats before filtering:\r +* These bam files are sent to Samtools stats to get statistics; these are then sent to MultiQC to provide a nice output. This is tagged as "bam stats before filtering" in the Galaxy history. \r +* The "General Statistics" show how many reads were mapped - if there is a low mapping rate, it may be worth re-checking or repeating QC on the raw reads, or considering a different reference genome, or using a de novo approach. To see if many reads have been soft-clipped by Bwa mem (which may affect how well gstacks can work), look at the "Alignment Metrics" section, and the row with "Mapped bases (Cigar)". Hover over the dots to see sample names especially towards the left of the row - these have the least mapped reads.\r +\r +Samtools view:\r +* This step filters out certain reads from the bam files. The default settings are to exclude reads if they are unmapped, if the alignment is not primary or is supplementary, if the read fails platform/vendor quality checks, and if the read is a PCR or optical duplicate. \r +* The output bams are tagged with "filtered bams" in the Galaxy history.\r +\r +Samtools stats after filtering:\r +* Filtered bams are sent again to samtools stats, and statistics to MultiQC, with the report tagged as "bam stats after filtering" in the Galaxy history. \r +\r +gstacks:\r +* Filtered bams and a population map are sent to gstacks. The outputs are:\r +* Catalog of loci in fasta format\r +* Variant calls in VCF format\r +* Note: some bam files cause errors here with gstacks. 
For example, the log file may say "Error, all records discard with file SampleXYZ.FASTQ.bam, Aborted". If this occurs, check the bam stats (as described above). Some of the options are to re-do QC on the raw reads, change settings for mapping reads in BWA MEM, and/or delete this sample/s from the population map and proceed to gstacks. \r +The sample can still remain in the list of bam files but gstacks will only consider what is listed in the pop map. \r +\r +populations:\r +* gstacks outputs and a population map are snet to the "populations" module. The outputs are:\r +* Locus consensus sequences in fasta format\r +* Snp calls, in VCF format\r +* Haplotypes, in VCF format\r +* Summary statistics\r +\r +![qc-wf](wf-ref-guided.png)\r +""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Stacks RAD-seq reference-guided workflow" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/347?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 317842 . + + a schema1:Dataset ; + schema1:datePublished "2024-06-21T20:31:16.897292" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/openms-metaprosip" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "openms-metaprosip/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. 
\r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +For this workflow:\r +\r +Inputs:\r +* assembled-genome.fasta\r +* hard-repeat-masked-genome.fasta\r +* If using the mRNAs option, the additional inputs required are .cdna, .pro and .dat files. \r +\r +What it does:\r +* This workflow splits the input genomes into single sequences (to decrease computation time), annotates using FgenesH++, and merges the output. \r +\r +Outputs:\r +* genome annotation in gff3 format\r +* fasta files of mRNAs, cDNAs and proteins\r +* Busco report\r +\r +\r +\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.881.2" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Fgenesh annotation -TSI" ; + schema1:sdDatePublished "2024-07-12 13:18:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/881/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17572 ; + schema1:creator ; + schema1:dateCreated "2024-06-18T08:56:16Z" ; + schema1:dateModified "2024-06-18T09:02:37Z" ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. 
\r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +For this workflow:\r +\r +Inputs:\r +* assembled-genome.fasta\r +* hard-repeat-masked-genome.fasta\r +* If using the mRNAs option, the additional inputs required are .cdna, .pro and .dat files. \r +\r +What it does:\r +* This workflow splits the input genomes into single sequences (to decrease computation time), annotates using FgenesH++, and merges the output. \r +\r +Outputs:\r +* genome annotation in gff3 format\r +* fasta files of mRNAs, cDNAs and proteins\r +* Busco report\r +\r +\r +\r +\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/881?version=3" ; + schema1:isPartOf ; + schema1:keywords "TSI-annotation" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Fgenesh annotation -TSI" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/881?version=2" ; + schema1:version 2 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 1040040 . + + a schema1:Dataset ; + schema1:datePublished "2023-10-18T08:01:03.789178" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/openms-metaprosip" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "openms-metaprosip/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.12" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """\r +

+nf-core/vipr +

\r +

Build Status Nextflow Gitter

\r +

install with bioconda Docker Container available https://www.singularity-hub.org/static/img/hosted-singularity--hub-%23e32929.svg

\r +

nf-core/vipr is a bioinformatics best-practice analysis pipeline for assembly and intrahost / low-frequency variant calling for viral samples.

\r +

The pipeline is built using Nextflow, a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with docker / singularity containers making installation trivial and results highly reproducible.

\r +

+Pipeline Steps

\r +\r +\r +\r +Step\r +Main program/s\r +\r +\r +\r +\r +Trimming, combining of read-pairs per sample and QC\r +Skewer, FastQC\r +\r +\r +Decontamination\r +decont\r +\r +\r +Metagenomics classification / Sample purity\r +Kraken\r +\r +\r +Assembly to contigs\r +BBtools’ Tadpole\r +\r +\r +Assembly polishing\r +ViPR Tools\r +\r +\r +Mapping to assembly\r +BWA, LoFreq\r +\r +\r +Low frequency variant calling\r +LoFreq\r +\r +\r +Coverage and variant AF plots (two processes)\r +Bedtools, ViPR Tools\r +\r +\r +\r +

+Documentation

\r +

Documentation about the pipeline can be found in the docs/ directory:

\r +
    \r +
  1. Installation and configuration
  2. \r +
  3. Running the pipeline
  4. \r +
  5. Output and how to interpret the results
  6. \r +
\r +

+Credits

\r +

This pipeline was originally developed by Andreas Wilm (andreas-wilm) at Genome Institute of Singapore.
\r +It started out as an ecosystem around LoFreq and went through a couple of iterations.
\r +The current version had three predecessors ViPR 1, ViPR 2 and ViPR 3.

\r +

An incomplete list of publications using (previous versions of) ViPR:

\r +\r +

Plenty of people provided essential feedback, including:

\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/20?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/vipr/blob/master/main.nf" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/vipr" ; + schema1:sdDatePublished "2024-07-12 13:37:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/20/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14256 ; + schema1:creator ; + schema1:dateCreated "2020-05-14T14:42:23Z" ; + schema1:dateModified "2023-01-16T13:41:25Z" ; + schema1:description """\r +

+nf-core/vipr +

\r +

Build Status Nextflow Gitter

\r +

install with bioconda Docker Container available https://www.singularity-hub.org/static/img/hosted-singularity--hub-%23e32929.svg

\r +

nf-core/vipr is a bioinformatics best-practice analysis pipeline for assembly and intrahost / low-frequency variant calling for viral samples.

\r +

The pipeline is built using Nextflow, a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with docker / singularity containers making installation trivial and results highly reproducible.

\r +

+Pipeline Steps

\r +\r +\r +\r +Step\r +Main program/s\r +\r +\r +\r +\r +Trimming, combining of read-pairs per sample and QC\r +Skewer, FastQC\r +\r +\r +Decontamination\r +decont\r +\r +\r +Metagenomics classification / Sample purity\r +Kraken\r +\r +\r +Assembly to contigs\r +BBtools’ Tadpole\r +\r +\r +Assembly polishing\r +ViPR Tools\r +\r +\r +Mapping to assembly\r +BWA, LoFreq\r +\r +\r +Low frequency variant calling\r +LoFreq\r +\r +\r +Coverage and variant AF plots (two processes)\r +Bedtools, ViPR Tools\r +\r +\r +\r +

+Documentation

\r +

Documentation about the pipeline can be found in the docs/ directory:

\r +
    \r +
  1. Installation and configuration
  2. \r +
  3. Running the pipeline
  4. \r +
  5. Output and how to interpret the results
  6. \r +
\r +

+Credits

\r +

This pipeline was originally developed by Andreas Wilm (andreas-wilm) at Genome Institute of Singapore.
\r +It started out as an ecosystem around LoFreq and went through a couple of iterations.
\r +The current version had three predecessors ViPR 1, ViPR 2 and ViPR 3.

\r +

An incomplete list of publications using (previous versions of) ViPR:

\r +\r +

Plenty of people provided essential feedback, including:

\r +\r +""" ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/vipr" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/20?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/987?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/funcscan" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/funcscan" ; + schema1:sdDatePublished "2024-07-12 13:21:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/987/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14851 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:53Z" ; + schema1:dateModified "2024-06-11T12:54:53Z" ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/987?version=8" ; + schema1:keywords "amp, AMR, antibiotic-resistance, antimicrobial-peptides, antimicrobial-resistance-genes, arg, Assembly, bgc, biosynthetic-gene-clusters, contigs, function, Metagenomics, natural-products, screening, secondary-metabolites" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/funcscan" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/987?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/984?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/epitopeprediction" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/epitopeprediction" ; + schema1:sdDatePublished "2024-07-12 13:21:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/984/ro_crate?version=8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11589 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:52Z" ; + schema1:dateModified "2024-06-11T12:54:52Z" ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/984?version=7" ; + schema1:keywords "epitope, epitope-prediction, mhc-binding-prediction" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/epitopeprediction" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/984?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Simulations and figures supporting the manuscript \"Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake\"" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.511.5" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake" ; + schema1:sdDatePublished "2024-07-12 13:32:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/511/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 332 ; + schema1:creator , + , + , + , + , + ; + schema1:dateCreated "2024-01-16T13:18:14Z" ; + schema1:dateModified "2024-01-16T13:19:15Z" ; + schema1:description "Simulations and figures supporting the manuscript \"Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake\"" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/511?version=4" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/511?version=5" ; + schema1:version 5 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 75460 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Macromolecular Coarse-Grained Flexibility (FlexServ) tutorial using BioExcel Building Blocks (biobb)\r +\r +This tutorial aims to illustrate the process of generating protein conformational ensembles from 3D structures and analysing its molecular flexibility, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.823.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_flexserv/docker" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Docker Macromolecular Coarse-Grained Flexibility tutorial" ; + schema1:sdDatePublished "2024-07-12 13:23:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/823/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 749 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-22T10:45:48Z" ; + schema1:dateModified "2024-05-22T13:47:12Z" ; + schema1:description """# Macromolecular Coarse-Grained Flexibility (FlexServ) tutorial using BioExcel Building Blocks (biobb)\r +\r +This tutorial aims to illustrate the process of generating protein conformational ensembles from 3D structures and analysing its molecular flexibility, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Docker Macromolecular Coarse-Grained Flexibility tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_flexserv/docker/Dockerfile" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + schema1:datePublished "2022-02-04T12:20:39.138022" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "parallel-accession-download/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 9406 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """## Summary\r +HPPIDiscovery is a scientific workflow to augment, predict and perform an insilico curation of host-pathogen Protein-Protein Interactions (PPIs) using graph theory to build new candidate ppis and machine learning to predict and evaluate them by combining multiple PPI detection methods of proteins according to three categories: structural, based on primary aminoacid sequence and functional annotations.
\r +\r +HPPIDiscovery contains three main steps: (i) acquirement of pathogen and host proteins information from seed ppis provided by HPIDB search methods, (ii) Model training and generation of new candidate ppis from HPIDB seed proteins' partners, and (iii) Evaluation of new candidate ppis and results exportation.\r +\r +(i) The first step acquires the identification of the taxonomy ids of the host and pathogen organisms in the result files. Then it proceeds parsing and cleaning the HPIDB results and downloading the protein interactions of the found organisms from the STRING database. The string protein identifiers are also mapped using the id mapping tool of uniprot API and we retrieve the uniprot entry ids along with the functional annotations, sequence, domain and kegg enzymes.\r +\r +(ii) The second step builds the training dataset using the non redundant hpidb validated interactions of each genome as positive set and random string low confidence ppis from each genome as negative set. Then, PredPrin tool is executed in the training mode to obtain the model that will evaluate the new candidate PPIs. The new ppis are then generated by performing a pairwise combination of string partners of host and pathogen hpidb proteins. \r +\r +Finally, (iii) in the third step, the predprin tool is used in the test mode to evaluate the new ppis and generate the reports and list of positively predicted ppis.\r +\r +The figure below illustrates the steps of this workflow.\r +\r +## Requirements:\r +* Edit the configuration file (config.yaml) according to your own data, filling out the following fields:\r + - base_data: location of the organism folders directory, example: /home/user/data/genomes \r + - parameters_file: Since this workflow may perform parallel processing of multiple organisms at the same time, you must prepate a tabulated file containng the genome folder names located in base data, where the hpidb files are located. Example: /home/user/data/params.tsv. 
It must have the following columns: genome (folder name), hpidb_seed_network (the result exported by one of the search methods available in hpidb database), hpidb_search_method (the type of search used to generate the results) and target_taxon (the target taxon id). The column hpidb_source may have two values: keyword or homology. In the keyword mode, you provide a taxonomy, protein name, publication id or detection method and you save all results (mitab.zip) in the genome folder. Finally, in the homology mode allows the user to search for host pathogen ppis giving as input fasta sequences of a set of proteins of the target pathgen for enrichment (so you have to select the search for a pathogen set) and you save the zip folder results (interaction data) in the genome folder. This option is extremely useful when you are not sure that your organism has validated protein interactions, then it finds validated interactions from the closest proteins in the database. In case of using the homology mode, the identifiers of the pathogens' query fasta sequences must be a Uniprot ID. All the query protein IDs must belong to the same target organism (taxon id).\r + - model_file: path of a previously trained model in joblib format (if you want to train from the known validated PPIs given as seeds, just put a 'None' value)\r +\r +## Usage Instructions\r +The steps below consider the creation of a sqlite database file with all he tasks events which can be used after to retrieve the execution time taken by the tasks. It is possible run locally too (see luigi's documentation to change the running command).

\r +* Preparation:\r + 1. ````git clone https://github.com/YasCoMa/hppidiscovery.git````\r + 2. ````cd hppidiscovery````\r + 3. ````mkdir luigi_log```` \r + 4. ````luigid --background --logdir luigi_log```` (start luigi server)\r + 5. conda env create -f hp_ppi_augmentation.yml\r + 6. conda activate hp_ppi_augmentation\r + 6.1. (execute ````pip3 install wget```` (it is not installed in the environment))\r + 7. run ````pwd```` command and get the full path\r + 8. Substitute in config_example.yaml with the full path obtained in the previous step\r + 9. Download SPRINT pre-computed similarities in https://www.csd.uwo.ca/~ilie/SPRINT/precomputed_similarities.zip and unzip it inside workflow_hpAugmentation/predprin/core/sprint/HSP/\r + 10. ````cd workflow_hpAugmentation/predprin/````\r + 11. Uncompress annotation_data.zip\r + 12. Uncompress sequence_data.zip\r + 13. ````cd ../../````\r + 14. ````cd workflow_hpAugmentation````\r + 15. snakemake -n (check the plan of jobs, it should return no errors and exceptions)\r + 16. snakemake -j 4 (change this number according to the number of genomes to analyse and the amount of cores available in your machine)""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/611?version=1" ; + schema1:isBasedOn "https://github.com/YasCoMa/hppidiscovery" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for HPPIDiscovery - Scientific workflow to augment, predict and evaluate host-pathogen protein-protein interactions" ; + schema1:sdDatePublished "2024-07-12 13:27:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/611/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 54971 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10251 ; + schema1:creator ; + schema1:dateCreated "2023-10-19T23:56:34Z" ; + schema1:dateModified "2023-10-19T23:56:34Z" ; + schema1:description """## Summary\r +HPPIDiscovery is a scientific workflow to augment, predict and perform an insilico curation of host-pathogen Protein-Protein Interactions (PPIs) using graph theory to build new candidate ppis and machine learning to predict and evaluate them by combining multiple PPI detection methods of proteins according to three categories: structural, based on primary aminoacid sequence and functional annotations.
\r +\r +HPPIDiscovery contains three main steps: (i) acquirement of pathogen and host proteins information from seed ppis provided by HPIDB search methods, (ii) Model training and generation of new candidate ppis from HPIDB seed proteins' partners, and (iii) Evaluation of new candidate ppis and results exportation.\r +\r +(i) The first step acquires the identification of the taxonomy ids of the host and pathogen organisms in the result files. Then it proceeds parsing and cleaning the HPIDB results and downloading the protein interactions of the found organisms from the STRING database. The string protein identifiers are also mapped using the id mapping tool of uniprot API and we retrieve the uniprot entry ids along with the functional annotations, sequence, domain and kegg enzymes.\r +\r +(ii) The second step builds the training dataset using the non redundant hpidb validated interactions of each genome as positive set and random string low confidence ppis from each genome as negative set. Then, PredPrin tool is executed in the training mode to obtain the model that will evaluate the new candidate PPIs. The new ppis are then generated by performing a pairwise combination of string partners of host and pathogen hpidb proteins. \r +\r +Finally, (iii) in the third step, the predprin tool is used in the test mode to evaluate the new ppis and generate the reports and list of positively predicted ppis.\r +\r +The figure below illustrates the steps of this workflow.\r +\r +## Requirements:\r +* Edit the configuration file (config.yaml) according to your own data, filling out the following fields:\r + - base_data: location of the organism folders directory, example: /home/user/data/genomes \r + - parameters_file: Since this workflow may perform parallel processing of multiple organisms at the same time, you must prepare a tabulated file containing the genome folder names located in base data, where the hpidb files are located. Example: /home/user/data/params.tsv. 
It must have the following columns: genome (folder name), hpidb_seed_network (the result exported by one of the search methods available in hpidb database), hpidb_search_method (the type of search used to generate the results) and target_taxon (the target taxon id). The column hpidb_source may have two values: keyword or homology. In the keyword mode, you provide a taxonomy, protein name, publication id or detection method and you save all results (mitab.zip) in the genome folder. Finally, the homology mode allows the user to search for host pathogen ppis giving as input fasta sequences of a set of proteins of the target pathogen for enrichment (so you have to select the search for a pathogen set) and you save the zip folder results (interaction data) in the genome folder. This option is extremely useful when you are not sure that your organism has validated protein interactions, then it finds validated interactions from the closest proteins in the database. In case of using the homology mode, the identifiers of the pathogens' query fasta sequences must be a Uniprot ID. All the query protein IDs must belong to the same target organism (taxon id).\r + - model_file: path of a previously trained model in joblib format (if you want to train from the known validated PPIs given as seeds, just put a 'None' value)\r +\r +## Usage Instructions\r +The steps below consider the creation of a sqlite database file with all the tasks' events which can be used afterwards to retrieve the execution time taken by the tasks. It is possible to run locally too (see luigi's documentation to change the running command).

\r +* Preparation:\r + 1. ````git clone https://github.com/YasCoMa/hppidiscovery.git````\r + 2. ````cd hppidiscovery````\r + 3. ````mkdir luigi_log```` \r + 4. ````luigid --background --logdir luigi_log```` (start luigi server)\r + 5. conda env create -f hp_ppi_augmentation.yml\r + 6. conda activate hp_ppi_augmentation\r + 6.1. (execute ````pip3 install wget```` (it is not installed in the environment))\r + 7. run ````pwd```` command and get the full path\r + 8. Substitute in config_example.yaml with the full path obtained in the previous step\r + 9. Download SPRINT pre-computed similarities in https://www.csd.uwo.ca/~ilie/SPRINT/precomputed_similarities.zip and unzip it inside workflow_hpAugmentation/predprin/core/sprint/HSP/\r + 10. ````cd workflow_hpAugmentation/predprin/````\r + 11. Uncompress annotation_data.zip\r + 12. Uncompress sequence_data.zip\r + 13. ````cd ../../````\r + 14. ````cd workflow_hpAugmentation````\r + 15. snakemake -n (check the plan of jobs, it should return no errors and exceptions)\r + 16. snakemake -j 4 (change this number according to the number of genomes to analyse and the amount of cores available in your machine)""" ; + schema1:image ; + schema1:keywords "Bioinformatics, Protein-Protein interaction prediction, host-pathogen PPIs, proteins network augmentation" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "HPPIDiscovery - Scientific workflow to augment, predict and evaluate host-pathogen protein-protein interactions" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/611?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + ; + schema1:description """# ![IMPaCT program](https://github.com/EGA-archive/sarek-IMPaCT-data-QC/blob/master/impact_qc/docs/png/impact_data_logo_pink_horitzontal.png)\r +\r +[![IMPaCT](https://img.shields.io/badge/Web%20-IMPaCT-blue)](https://impact.isciii.es/)\r +[![IMPaCT-isciii](https://img.shields.io/badge/Web%20-IMPaCT--isciii-red)](https://www.isciii.es/QueHacemos/Financiacion/IMPaCT/Paginas/default.aspx)\r +[![IMPaCT-Data](https://img.shields.io/badge/Web%20-IMPaCT--Data-1d355c.svg?labelColor=000000)](https://impact-data.bsc.es/)\r +\r +## Introduction of the project\r +\r +IMPaCT-Data is the IMPaCT program that aims to support the development of a common, interoperable and integrated system for the collection and analysis of clinical and molecular data by providing the knowledge and resources available in the Spanish Science and Technology System. This development will make it possible to answer research questions based on the different clinical and molecular information systems available. Fundamentally, it aims to provide researchers with a population perspective based on individual data.\r +\r +The IMPaCT-Data project is divided into different work packages (WP). In the context of IMPaCT-Data WP3 (Genomics), a working group of experts worked on the generation of a specific quality control (QC) workflow for germline exome samples.\r +\r +To achieve this, a set of metrics related to human genomic data was decided upon, and the toolset or software to extract these metrics was implemented in an existing variant calling workflow called Sarek, part of the nf-core community. 
The final outcome is a Nextflow subworkflow, called IMPaCT-QC implemented in the Sarek pipeline.\r +\r +Below you can find the explanation of this workflow (raw pipeline), the link to the documentation of the IMPaCT QC subworkflow and a linked documentation associated to the QC metrics added in the mentioned workflow.\r +\r +- [IMPaCT-data subworkflow documentation](https://github.com/EGA-archive/sarek-IMPaCT-data-QC/tree/master/impact_qc)\r +\r +- [Metrics documentation](https://github.com/EGA-archive/sarek-IMPaCT-data-QC/blob/master/impact_qc/docs/QC_Sarek_supporing_documentation.pdf)\r +\r +

\r + \r + \r + nf-core/sarek\r + \r +

\r +\r +[![GitHub Actions CI Status](https://github.com/nf-core/sarek/actions/workflows/ci.yml/badge.svg)](https://github.com/nf-core/sarek/actions/workflows/ci.yml)\r +[![GitHub Actions Linting Status](https://github.com/nf-core/sarek/actions/workflows/linting.yml/badge.svg)](https://github.com/nf-core/sarek/actions/workflows/linting.yml)\r +[![AWS CI](https://img.shields.io/badge/CI%20tests-full%20size-FF9900?labelColor=000000&logo=Amazon%20AWS)](https://nf-co.re/sarek/results)\r +[![nf-test](https://img.shields.io/badge/unit_tests-nf--test-337ab7.svg)](https://www.nf-test.com)\r +[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.3476425-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.3476425)\r +[![nf-test](https://img.shields.io/badge/unit_tests-nf--test-337ab7.svg)](https://www.nf-test.com)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A523.04.0-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![Launch on Seqera Platform](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Seqera%20Platform-%234256e7)](https://tower.nf/launch?pipeline=https://github.com/nf-core/sarek)\r +\r +[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23sarek-4A154B?labelColor=000000&logo=slack)](https://nfcore.slack.com/channels/sarek)\r +[![Follow on Twitter](http://img.shields.io/badge/twitter-%40nf__core-1DA1F2?labelColor=000000&logo=twitter)](https://twitter.com/nf_core)\r +[![Follow on 
Mastodon](https://img.shields.io/badge/mastodon-nf__core-6364ff?labelColor=FFFFFF&logo=mastodon)](https://mstdn.science/@nf_core)\r +[![Watch on YouTube](http://img.shields.io/badge/youtube-nf--core-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/c/nf-core)\r +\r +## Introduction\r +\r +**nf-core/sarek** is a workflow designed to detect variants on whole genome or targeted sequencing data. Initially designed for Human, and Mouse, it can work on any species with a reference genome. Sarek can also handle tumour / normal pairs and could include additional relapses.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +On release, automated continuous integration tests run the pipeline on a full-sized dataset on the AWS cloud infrastructure. This ensures that the pipeline runs on AWS, has sensible resource allocation defaults set to run on real-world datasets, and permits the persistent storage of results to benchmark between pipeline releases and other analysis sources. The results obtained from the full-sized test can be viewed on the [nf-core website](https://nf-co.re/sarek/results).\r +\r +It's listed on [Elixir - Tools and Data Services Registry](https://bio.tools/nf-core-sarek) and [Dockstore](https://dockstore.org/workflows/github.com/nf-core/sarek).\r +\r +

\r + \r +

\r +\r +## Pipeline summary\r +\r +Depending on the options and samples provided, the pipeline can currently perform the following:\r +\r +- Form consensus reads from UMI sequences (`fgbio`)\r +- Sequencing quality control and trimming (enabled by `--trim_fastq`) (`FastQC`, `fastp`)\r +- Map Reads to Reference (`BWA-mem`, `BWA-mem2`, `dragmap` or `Sentieon BWA-mem`)\r +- Process BAM file (`GATK MarkDuplicates`, `GATK BaseRecalibrator` and `GATK ApplyBQSR` or `Sentieon LocusCollector` and `Sentieon Dedup`)\r +- Summarise alignment statistics (`samtools stats`, `mosdepth`)\r +- Variant calling (enabled by `--tools`, see [compatibility](https://nf-co.re/sarek/latest/docs/usage#which-variant-calling-tool-is-implemented-for-which-data-type)):\r + - `ASCAT`\r + - `CNVkit`\r + - `Control-FREEC`\r + - `DeepVariant`\r + - `freebayes`\r + - `GATK HaplotypeCaller`\r + - `Manta`\r + - `mpileup`\r + - `MSIsensor-pro`\r + - `Mutect2`\r + - `Sentieon Haplotyper`\r + - `Strelka2`\r + - `TIDDIT`\r +- Variant filtering and annotation (`SnpEff`, `Ensembl VEP`, `BCFtools annotate`)\r +- Summarise and represent QC (`MultiQC`)\r +\r +

\r + \r +

\r +\r +## Usage\r +\r +> [!NOTE]\r +> If you are new to Nextflow and nf-core, please refer to [this page](https://nf-co.re/docs/usage/installation) on how to set-up Nextflow. Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline) with `-profile test` before running the workflow on actual data.\r +\r +First, prepare a samplesheet with your input data that looks as follows:\r +\r +`samplesheet.csv`:\r +\r +```csv\r +patient,sample,lane,fastq_1,fastq_2\r +ID1,S1,L002,ID1_S1_L002_R1_001.fastq.gz,ID1_S1_L002_R2_001.fastq.gz\r +```\r +\r +Each row represents a pair of fastq files (paired end).\r +\r +Now, you can run the pipeline using:\r +\r +```bash\r +nextflow run nf-core/sarek \\\r + -profile \\\r + --input samplesheet.csv \\\r + --outdir \r +```\r +\r +> [!WARNING]\r +> Please provide pipeline parameters via the CLI or Nextflow `-params-file` option. Custom config files including those provided by the `-c` Nextflow option can be used to provide any configuration _**except for parameters**_;\r +> see [docs](https://nf-co.re/usage/configuration#custom-configuration-files).\r +\r +For more details and further functionality, please refer to the [usage documentation](https://nf-co.re/sarek/usage) and the [parameter documentation](https://nf-co.re/sarek/parameters).\r +\r +## Pipeline output\r +\r +To see the results of an example test run with a full size dataset refer to the [results](https://nf-co.re/sarek/results) tab on the nf-core website pipeline page.\r +For more details about the output files and reports, please refer to the\r +[output documentation](https://nf-co.re/sarek/output).\r +\r +## Benchmarking\r +\r +On each release, the pipeline is run on 3 full size tests:\r +\r +- `test_full` runs tumor-normal data for one patient from the SEQ2C consortium\r +- `test_full_germline` runs a WGS 30X Genome-in-a-Bottle(NA12878) dataset\r +- `test_full_germline_ncbench_agilent` runs two WES samples with 75M and 200M reads (data 
available [here](https://github.com/ncbench/ncbench-workflow#contributing-callsets)). The results are uploaded to Zenodo, evaluated against a truth dataset, and results are made available via the [NCBench dashboard](https://ncbench.github.io/report/report.html#).\r +\r +## Credits\r +\r +Sarek was originally written by Maxime U Garcia and Szilveszter Juhos at the [National Genomics Infastructure](https://ngisweden.scilifelab.se) and [National Bioinformatics Infastructure Sweden](https://nbis.se) which are both platforms at [SciLifeLab](https://scilifelab.se), with the support of [The Swedish Childhood Tumor Biobank (Barntumörbanken)](https://ki.se/forskning/barntumorbanken).\r +Friederike Hanssen and Gisela Gabernet at [QBiC](https://www.qbic.uni-tuebingen.de/) later joined and helped with further development.\r +\r +The Nextflow DSL2 conversion of the pipeline was lead by Friederike Hanssen and Maxime U Garcia.\r +\r +Maintenance is now lead by Friederike Hanssen and Maxime U Garcia (now at [Seqera Labs](https://seqera/io))\r +\r +Main developers:\r +\r +- [Maxime U Garcia](https://github.com/maxulysse)\r +- [Friederike Hanssen](https://github.com/FriederikeHanssen)\r +\r +We thank the following people for their extensive assistance in the development of this pipeline:\r +\r +- [Abhinav Sharma](https://github.com/abhi18av)\r +- [Adam Talbot](https://github.com/adamrtalbot)\r +- [Adrian Lärkeryd](https://github.com/adrlar)\r +- [Alexander Peltzer](https://github.com/apeltzer)\r +- [Alison Meynert](https://github.com/ameynert)\r +- [Anders Sune Pedersen](https://github.com/asp8200)\r +- [arontommi](https://github.com/arontommi)\r +- [BarryDigby](https://github.com/BarryDigby)\r +- [Bekir Ergüner](https://github.com/berguner)\r +- [bjornnystedt](https://github.com/bjornnystedt)\r +- [cgpu](https://github.com/cgpu)\r +- [Chela James](https://github.com/chelauk)\r +- [David Mas-Ponte](https://github.com/davidmasp)\r +- [Edmund Miller](https://github.com/edmundmiller)\r 
+- [Francesco Lescai](https://github.com/lescai)\r +- [Gavin Mackenzie](https://github.com/GCJMackenzie)\r +- [Gisela Gabernet](https://github.com/ggabernet)\r +- [Grant Neilson](https://github.com/grantn5)\r +- [gulfshores](https://github.com/gulfshores)\r +- [Harshil Patel](https://github.com/drpatelh)\r +- [James A. Fellows Yates](https://github.com/jfy133)\r +- [Jesper Eisfeldt](https://github.com/J35P312)\r +- [Johannes Alneberg](https://github.com/alneberg)\r +- [José Fernández Navarro](https://github.com/jfnavarro)\r +- [Júlia Mir Pedrol](https://github.com/mirpedrol)\r +- [Ken Brewer](https://github.com/kenibrewer)\r +- [Lasse Westergaard Folkersen](https://github.com/lassefolkersen)\r +- [Lucia Conde](https://github.com/lconde-ucl)\r +- [Malin Larsson](https://github.com/malinlarsson)\r +- [Marcel Martin](https://github.com/marcelm)\r +- [Nick Smith](https://github.com/nickhsmith)\r +- [Nicolas Schcolnicov](https://github.com/nschcolnicov)\r +- [Nilesh Tawari](https://github.com/nilesh-tawari)\r +- [Nils Homer](https://github.com/nh13)\r +- [Olga Botvinnik](https://github.com/olgabot)\r +- [Oskar Wacker](https://github.com/WackerO)\r +- [pallolason](https://github.com/pallolason)\r +- [Paul Cantalupo](https://github.com/pcantalupo)\r +- [Phil Ewels](https://github.com/ewels)\r +- [Sabrina Krakau](https://github.com/skrakau)\r +- [Sam Minot](https://github.com/sminot)\r +- [Sebastian-D](https://github.com/Sebastian-D)\r +- [Silvia Morini](https://github.com/silviamorins)\r +- [Simon Pearce](https://github.com/SPPearce)\r +- [Solenne Correard](https://github.com/scorreard)\r +- [Susanne Jodoin](https://github.com/SusiJo)\r +- [Szilveszter Juhos](https://github.com/szilvajuhos)\r +- [Tobias Koch](https://github.com/KochTobi)\r +- [Winni Kretzschmar](https://github.com/winni2k)\r +\r +## Acknowledgements\r +\r +| [![Barntumörbanken](docs/images/BTB_logo.png)](https://ki.se/forskning/barntumorbanken) | 
[![SciLifeLab](docs/images/SciLifeLab_logo.png)](https://scilifelab.se) |\r +| :-----------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------: |\r +| [![National Genomics Infrastructure](docs/images/NGI_logo.png)](https://ngisweden.scilifelab.se/) | [![National Bioinformatics Infrastructure Sweden](docs/images/NBIS_logo.png)](https://nbis.se) |\r +| [![QBiC](docs/images/QBiC_logo.png)](https://www.qbic.uni-tuebingen.de) | [![GHGA](docs/images/GHGA_logo.png)](https://www.ghga.de/) |\r +| [![DNGC](docs/images/DNGC_logo.png)](https://eng.ngc.dk/) | |\r +\r +## Contributions & Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +For further information or help, don't hesitate to get in touch on the [Slack `#sarek` channel](https://nfcore.slack.com/channels/sarek) (you can join with [this invite](https://nf-co.re/join/slack)), or contact us: [Maxime U Garcia](mailto:maxime.garcia@seqera.io?subject=[GitHub]%20nf-core/sarek), [Friederike Hanssen](mailto:friederike.hanssen@qbic.uni-tuebingen.de?subject=[GitHub]%20nf-core/sarek)\r +\r +## Citations\r +\r +If you use `nf-core/sarek` for your analysis, please cite the `Sarek` article as follows:\r +\r +> Friederike Hanssen, Maxime U Garcia, Lasse Folkersen, Anders Sune Pedersen, Francesco Lescai, Susanne Jodoin, Edmund Miller, Oskar Wacker, Nicholas Smith, nf-core community, Gisela Gabernet, Sven Nahnsen **Scalable and efficient DNA sequencing analysis on different compute infrastructures aiding variant discovery** _NAR Genomics and Bioinformatics_ Volume 6, Issue 2, June 2024, lqae031, [doi: 10.1093/nargab/lqae031](https://doi.org/10.1093/nargab/lqae031).\r +\r +> Garcia M, Juhos S, Larsson M et al. 
**Sarek: A portable workflow for whole-genome sequencing analysis of germline and somatic variants [version 2; peer review: 2 approved]** _F1000Research_ 2020, 9:63 [doi: 10.12688/f1000research.16665.2](http://dx.doi.org/10.12688/f1000research.16665.2).\r +\r +You can cite the sarek zenodo record for a specific version using the following [doi: 10.5281/zenodo.3476425](https://doi.org/10.5281/zenodo.3476425)\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +You can cite the `nf-core` publication as follows:\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +\r +## CHANGELOG\r +\r +- [CHANGELOG](CHANGELOG.md)\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1030?version=3" ; + schema1:isBasedOn "https://github.com/EGA-archive/sarek-IMPaCT-data-QC.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for IMPaCT-Data quality control workflow implementation in nf-core/Sarek" ; + schema1:sdDatePublished "2024-07-12 13:17:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1030/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 19394 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-25T06:53:21Z" ; + schema1:dateModified "2024-06-25T07:30:55Z" ; + schema1:description """# ![IMPaCT program](https://github.com/EGA-archive/sarek-IMPaCT-data-QC/blob/master/impact_qc/docs/png/impact_data_logo_pink_horitzontal.png)\r +\r +[![IMPaCT](https://img.shields.io/badge/Web%20-IMPaCT-blue)](https://impact.isciii.es/)\r +[![IMPaCT-isciii](https://img.shields.io/badge/Web%20-IMPaCT--isciii-red)](https://www.isciii.es/QueHacemos/Financiacion/IMPaCT/Paginas/default.aspx)\r +[![IMPaCT-Data](https://img.shields.io/badge/Web%20-IMPaCT--Data-1d355c.svg?labelColor=000000)](https://impact-data.bsc.es/)\r +\r +## Introduction of the project\r +\r +IMPaCT-Data is the IMPaCT program that aims to support the development of a common, interoperable and integrated system for the collection and analysis of clinical and molecular data by providing the knowledge and resources available in the Spanish Science and Technology System. This development will make it possible to answer research questions based on the different clinical and molecular information systems available. Fundamentally, it aims to provide researchers with a population perspective based on individual data.\r +\r +The IMPaCT-Data project is divided into different work packages (WP). In the context of IMPaCT-Data WP3 (Genomics), a working group of experts worked on the generation of a specific quality control (QC) workflow for germline exome samples.\r +\r +To achieve this, a set of metrics related to human genomic data was decided upon, and the toolset or software to extract these metrics was implemented in an existing variant calling workflow called Sarek, part of the nf-core community. 
The final outcome is a Nextflow subworkflow, called IMPaCT-QC implemented in the Sarek pipeline.\r +\r +Below you can find the explanation of this workflow (raw pipeline), the link to the documentation of the IMPaCT QC subworkflow and a linked documentation associated to the QC metrics added in the mentioned workflow.\r +\r +- [IMPaCT-data subworkflow documentation](https://github.com/EGA-archive/sarek-IMPaCT-data-QC/tree/master/impact_qc)\r +\r +- [Metrics documentation](https://github.com/EGA-archive/sarek-IMPaCT-data-QC/blob/master/impact_qc/docs/QC_Sarek_supporing_documentation.pdf)\r +\r +

\r + \r + \r + nf-core/sarek\r + \r +

\r +\r +[![GitHub Actions CI Status](https://github.com/nf-core/sarek/actions/workflows/ci.yml/badge.svg)](https://github.com/nf-core/sarek/actions/workflows/ci.yml)\r +[![GitHub Actions Linting Status](https://github.com/nf-core/sarek/actions/workflows/linting.yml/badge.svg)](https://github.com/nf-core/sarek/actions/workflows/linting.yml)\r +[![AWS CI](https://img.shields.io/badge/CI%20tests-full%20size-FF9900?labelColor=000000&logo=Amazon%20AWS)](https://nf-co.re/sarek/results)\r +[![nf-test](https://img.shields.io/badge/unit_tests-nf--test-337ab7.svg)](https://www.nf-test.com)\r +[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.3476425-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.3476425)\r +[![nf-test](https://img.shields.io/badge/unit_tests-nf--test-337ab7.svg)](https://www.nf-test.com)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A523.04.0-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![Launch on Seqera Platform](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Seqera%20Platform-%234256e7)](https://tower.nf/launch?pipeline=https://github.com/nf-core/sarek)\r +\r +[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23sarek-4A154B?labelColor=000000&logo=slack)](https://nfcore.slack.com/channels/sarek)\r +[![Follow on Twitter](http://img.shields.io/badge/twitter-%40nf__core-1DA1F2?labelColor=000000&logo=twitter)](https://twitter.com/nf_core)\r +[![Follow on 
Mastodon](https://img.shields.io/badge/mastodon-nf__core-6364ff?labelColor=FFFFFF&logo=mastodon)](https://mstdn.science/@nf_core)\r +[![Watch on YouTube](http://img.shields.io/badge/youtube-nf--core-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/c/nf-core)\r +\r +## Introduction\r +\r +**nf-core/sarek** is a workflow designed to detect variants on whole genome or targeted sequencing data. Initially designed for Human, and Mouse, it can work on any species with a reference genome. Sarek can also handle tumour / normal pairs and could include additional relapses.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +On release, automated continuous integration tests run the pipeline on a full-sized dataset on the AWS cloud infrastructure. This ensures that the pipeline runs on AWS, has sensible resource allocation defaults set to run on real-world datasets, and permits the persistent storage of results to benchmark between pipeline releases and other analysis sources. The results obtained from the full-sized test can be viewed on the [nf-core website](https://nf-co.re/sarek/results).\r +\r +It's listed on [Elixir - Tools and Data Services Registry](https://bio.tools/nf-core-sarek) and [Dockstore](https://dockstore.org/workflows/github.com/nf-core/sarek).\r +\r +

\r + \r +

\r +\r +## Pipeline summary\r +\r +Depending on the options and samples provided, the pipeline can currently perform the following:\r +\r +- Form consensus reads from UMI sequences (`fgbio`)\r +- Sequencing quality control and trimming (enabled by `--trim_fastq`) (`FastQC`, `fastp`)\r +- Map Reads to Reference (`BWA-mem`, `BWA-mem2`, `dragmap` or `Sentieon BWA-mem`)\r +- Process BAM file (`GATK MarkDuplicates`, `GATK BaseRecalibrator` and `GATK ApplyBQSR` or `Sentieon LocusCollector` and `Sentieon Dedup`)\r +- Summarise alignment statistics (`samtools stats`, `mosdepth`)\r +- Variant calling (enabled by `--tools`, see [compatibility](https://nf-co.re/sarek/latest/docs/usage#which-variant-calling-tool-is-implemented-for-which-data-type)):\r + - `ASCAT`\r + - `CNVkit`\r + - `Control-FREEC`\r + - `DeepVariant`\r + - `freebayes`\r + - `GATK HaplotypeCaller`\r + - `Manta`\r + - `mpileup`\r + - `MSIsensor-pro`\r + - `Mutect2`\r + - `Sentieon Haplotyper`\r + - `Strelka2`\r + - `TIDDIT`\r +- Variant filtering and annotation (`SnpEff`, `Ensembl VEP`, `BCFtools annotate`)\r +- Summarise and represent QC (`MultiQC`)\r +\r +

\r + \r +

\r +\r +## Usage\r +\r +> [!NOTE]\r +> If you are new to Nextflow and nf-core, please refer to [this page](https://nf-co.re/docs/usage/installation) on how to set-up Nextflow. Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline) with `-profile test` before running the workflow on actual data.\r +\r +First, prepare a samplesheet with your input data that looks as follows:\r +\r +`samplesheet.csv`:\r +\r +```csv\r +patient,sample,lane,fastq_1,fastq_2\r +ID1,S1,L002,ID1_S1_L002_R1_001.fastq.gz,ID1_S1_L002_R2_001.fastq.gz\r +```\r +\r +Each row represents a pair of fastq files (paired end).\r +\r +Now, you can run the pipeline using:\r +\r +```bash\r +nextflow run nf-core/sarek \\\r + -profile \\\r + --input samplesheet.csv \\\r + --outdir \r +```\r +\r +> [!WARNING]\r +> Please provide pipeline parameters via the CLI or Nextflow `-params-file` option. Custom config files including those provided by the `-c` Nextflow option can be used to provide any configuration _**except for parameters**_;\r +> see [docs](https://nf-co.re/usage/configuration#custom-configuration-files).\r +\r +For more details and further functionality, please refer to the [usage documentation](https://nf-co.re/sarek/usage) and the [parameter documentation](https://nf-co.re/sarek/parameters).\r +\r +## Pipeline output\r +\r +To see the results of an example test run with a full size dataset refer to the [results](https://nf-co.re/sarek/results) tab on the nf-core website pipeline page.\r +For more details about the output files and reports, please refer to the\r +[output documentation](https://nf-co.re/sarek/output).\r +\r +## Benchmarking\r +\r +On each release, the pipeline is run on 3 full size tests:\r +\r +- `test_full` runs tumor-normal data for one patient from the SEQ2C consortium\r +- `test_full_germline` runs a WGS 30X Genome-in-a-Bottle(NA12878) dataset\r +- `test_full_germline_ncbench_agilent` runs two WES samples with 75M and 200M reads (data 
available [here](https://github.com/ncbench/ncbench-workflow#contributing-callsets)). The results are uploaded to Zenodo, evaluated against a truth dataset, and results are made available via the [NCBench dashboard](https://ncbench.github.io/report/report.html#).\r +\r +## Credits\r +\r +Sarek was originally written by Maxime U Garcia and Szilveszter Juhos at the [National Genomics Infastructure](https://ngisweden.scilifelab.se) and [National Bioinformatics Infastructure Sweden](https://nbis.se) which are both platforms at [SciLifeLab](https://scilifelab.se), with the support of [The Swedish Childhood Tumor Biobank (Barntumörbanken)](https://ki.se/forskning/barntumorbanken).\r +Friederike Hanssen and Gisela Gabernet at [QBiC](https://www.qbic.uni-tuebingen.de/) later joined and helped with further development.\r +\r +The Nextflow DSL2 conversion of the pipeline was lead by Friederike Hanssen and Maxime U Garcia.\r +\r +Maintenance is now lead by Friederike Hanssen and Maxime U Garcia (now at [Seqera Labs](https://seqera/io))\r +\r +Main developers:\r +\r +- [Maxime U Garcia](https://github.com/maxulysse)\r +- [Friederike Hanssen](https://github.com/FriederikeHanssen)\r +\r +We thank the following people for their extensive assistance in the development of this pipeline:\r +\r +- [Abhinav Sharma](https://github.com/abhi18av)\r +- [Adam Talbot](https://github.com/adamrtalbot)\r +- [Adrian Lärkeryd](https://github.com/adrlar)\r +- [Alexander Peltzer](https://github.com/apeltzer)\r +- [Alison Meynert](https://github.com/ameynert)\r +- [Anders Sune Pedersen](https://github.com/asp8200)\r +- [arontommi](https://github.com/arontommi)\r +- [BarryDigby](https://github.com/BarryDigby)\r +- [Bekir Ergüner](https://github.com/berguner)\r +- [bjornnystedt](https://github.com/bjornnystedt)\r +- [cgpu](https://github.com/cgpu)\r +- [Chela James](https://github.com/chelauk)\r +- [David Mas-Ponte](https://github.com/davidmasp)\r +- [Edmund Miller](https://github.com/edmundmiller)\r 
+- [Francesco Lescai](https://github.com/lescai)\r +- [Gavin Mackenzie](https://github.com/GCJMackenzie)\r +- [Gisela Gabernet](https://github.com/ggabernet)\r +- [Grant Neilson](https://github.com/grantn5)\r +- [gulfshores](https://github.com/gulfshores)\r +- [Harshil Patel](https://github.com/drpatelh)\r +- [James A. Fellows Yates](https://github.com/jfy133)\r +- [Jesper Eisfeldt](https://github.com/J35P312)\r +- [Johannes Alneberg](https://github.com/alneberg)\r +- [José Fernández Navarro](https://github.com/jfnavarro)\r +- [Júlia Mir Pedrol](https://github.com/mirpedrol)\r +- [Ken Brewer](https://github.com/kenibrewer)\r +- [Lasse Westergaard Folkersen](https://github.com/lassefolkersen)\r +- [Lucia Conde](https://github.com/lconde-ucl)\r +- [Malin Larsson](https://github.com/malinlarsson)\r +- [Marcel Martin](https://github.com/marcelm)\r +- [Nick Smith](https://github.com/nickhsmith)\r +- [Nicolas Schcolnicov](https://github.com/nschcolnicov)\r +- [Nilesh Tawari](https://github.com/nilesh-tawari)\r +- [Nils Homer](https://github.com/nh13)\r +- [Olga Botvinnik](https://github.com/olgabot)\r +- [Oskar Wacker](https://github.com/WackerO)\r +- [pallolason](https://github.com/pallolason)\r +- [Paul Cantalupo](https://github.com/pcantalupo)\r +- [Phil Ewels](https://github.com/ewels)\r +- [Sabrina Krakau](https://github.com/skrakau)\r +- [Sam Minot](https://github.com/sminot)\r +- [Sebastian-D](https://github.com/Sebastian-D)\r +- [Silvia Morini](https://github.com/silviamorins)\r +- [Simon Pearce](https://github.com/SPPearce)\r +- [Solenne Correard](https://github.com/scorreard)\r +- [Susanne Jodoin](https://github.com/SusiJo)\r +- [Szilveszter Juhos](https://github.com/szilvajuhos)\r +- [Tobias Koch](https://github.com/KochTobi)\r +- [Winni Kretzschmar](https://github.com/winni2k)\r +\r +## Acknowledgements\r +\r +| [![Barntumörbanken](docs/images/BTB_logo.png)](https://ki.se/forskning/barntumorbanken) | 
[![SciLifeLab](docs/images/SciLifeLab_logo.png)](https://scilifelab.se) |\r +| :-----------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------: |\r +| [![National Genomics Infrastructure](docs/images/NGI_logo.png)](https://ngisweden.scilifelab.se/) | [![National Bioinformatics Infrastructure Sweden](docs/images/NBIS_logo.png)](https://nbis.se) |\r +| [![QBiC](docs/images/QBiC_logo.png)](https://www.qbic.uni-tuebingen.de) | [![GHGA](docs/images/GHGA_logo.png)](https://www.ghga.de/) |\r +| [![DNGC](docs/images/DNGC_logo.png)](https://eng.ngc.dk/) | |\r +\r +## Contributions & Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +For further information or help, don't hesitate to get in touch on the [Slack `#sarek` channel](https://nfcore.slack.com/channels/sarek) (you can join with [this invite](https://nf-co.re/join/slack)), or contact us: [Maxime U Garcia](mailto:maxime.garcia@seqera.io?subject=[GitHub]%20nf-core/sarek), [Friederike Hanssen](mailto:friederike.hanssen@qbic.uni-tuebingen.de?subject=[GitHub]%20nf-core/sarek)\r +\r +## Citations\r +\r +If you use `nf-core/sarek` for your analysis, please cite the `Sarek` article as follows:\r +\r +> Friederike Hanssen, Maxime U Garcia, Lasse Folkersen, Anders Sune Pedersen, Francesco Lescai, Susanne Jodoin, Edmund Miller, Oskar Wacker, Nicholas Smith, nf-core community, Gisela Gabernet, Sven Nahnsen **Scalable and efficient DNA sequencing analysis on different compute infrastructures aiding variant discovery** _NAR Genomics and Bioinformatics_ Volume 6, Issue 2, June 2024, lqae031, [doi: 10.1093/nargab/lqae031](https://doi.org/10.1093/nargab/lqae031).\r +\r +> Garcia M, Juhos S, Larsson M et al. 
**Sarek: A portable workflow for whole-genome sequencing analysis of germline and somatic variants [version 2; peer review: 2 approved]** _F1000Research_ 2020, 9:63 [doi: 10.12688/f1000research.16665.2](http://dx.doi.org/10.12688/f1000research.16665.2).\r +\r +You can cite the sarek zenodo record for a specific version using the following [doi: 10.5281/zenodo.3476425](https://doi.org/10.5281/zenodo.3476425)\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +You can cite the `nf-core` publication as follows:\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +\r +## CHANGELOG\r +\r +- [CHANGELOG](CHANGELOG.md)\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1030?version=2" ; + schema1:keywords "Bioinformatics, Nextflow, variant calling, wes, WGS, NGS, EGA-archive, quality control" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "IMPaCT-Data quality control workflow implementation in nf-core/Sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1030?version=3" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1027?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/viralrecon" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/viralrecon" ; + schema1:sdDatePublished "2024-07-12 13:18:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1027/ro_crate?version=7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10146 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:20Z" ; + schema1:dateModified "2024-06-11T12:55:20Z" ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1027?version=10" ; + schema1:keywords "Amplicon, ARTIC, Assembly, covid-19, covid19, illumina, long-read-sequencing, Metagenomics, nanopore, ONT, oxford-nanopore, SARS-CoV-2, variant-calling, viral, Virus" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/viralrecon" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1027?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/989?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/hic" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hic" ; + schema1:sdDatePublished "2024-07-12 13:21:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/989/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4842 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:54Z" ; + schema1:dateModified "2024-06-11T12:54:54Z" ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/989?version=7" ; + schema1:keywords "chromosome-conformation-capture, Hi-C" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hic" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/989?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-07-12 13:19:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5714 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:14Z" ; + schema1:dateModified "2024-06-11T12:55:14Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly and annotation of metatranscriptomic data, both prokaryotic and eukaryotic" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/997?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/metatdenovo" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/metatdenovo" ; + schema1:sdDatePublished "2024-07-12 13:20:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/997/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11793 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:00Z" ; + schema1:dateModified "2024-06-11T12:55:00Z" ; + schema1:description "Assembly and annotation of metatranscriptomic data, both prokaryotic and eukaryotic" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/997?version=1" ; + schema1:keywords "eukaryotes, Metagenomics, metatranscriptomics, prokaryotes, viruses" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/metatdenovo" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/997?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A pipeline to demultiplex, QC and map Nanopore data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1002?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/nanoseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nanoseq" ; + schema1:sdDatePublished "2024-07-12 13:20:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1002/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9734 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:05Z" ; + schema1:dateModified "2024-06-11T12:55:05Z" ; + schema1:description "A pipeline to demultiplex, QC and map Nanopore data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1002?version=5" ; + schema1:keywords "Alignment, demultiplexing, nanopore, QC" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nanoseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1002?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. 
We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:identifier 
"https://doi.org/10.48546/workflowhub.workflow.328.4" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_pmx_tutorial/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)" ; + schema1:sdDatePublished "2024-07-12 13:35:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/328/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8390 ; + schema1:creator , + ; + schema1:dateCreated "2023-07-31T07:33:55Z" ; + schema1:dateModified "2023-07-31T07:37:03Z" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. 
Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/328?version=3" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_pmx_tutorial/python/workflow.py" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# BACPAGE\r +\r +This repository contains an easy-to-use pipeline for the assembly and analysis of bacterial genomes using ONT long-read or Illumina short-read technology. \r +Read the complete documentation and instructions for bacpage and each of its functions [here](https://cholgen.github.io/sequencing-resources/bacpage-command.html)\r +\r +# Introduction\r +Advances in sequencing technology during the COVID-19 pandemic has led to massive increases in the generation of sequencing data. Many bioinformatics tools have been developed to analyze this data, but very few tools can be utilized by individuals without prior bioinformatics training.\r +\r +This pipeline was designed to encapsulate pre-existing tools to automate analysis of whole genome sequencing of bacteria. \r +Installation is fast and straightfoward. \r +The pipeline is easy to setup and contains rationale defaults, but is highly modular and configurable by more advance users.\r +Bacpage has individual commands to generate consensus sequences, perform *de novo* assembly, construct phylogenetic tree, and generate quality control reports.\r +\r +# Features\r +We anticipate the pipeline will be able to perform the following functions:\r +- [x] Reference-based assembly of Illumina paired-end reads\r +- [x] *De novo* assembly of Illumina paired-end reads\r +- [ ] *De novo* assembly of ONT long reads\r +- [x] Run quality control checks\r +- [x] Variant calling using [bcftools](https://github.com/samtools/bcftools)\r +- [x] Maximum-likelihood phylogenetic inference of processed samples and background dataset using [iqtree](https://github.com/iqtree/iqtree2) \r +- [x] MLST profiling and virulence factor detection\r +- [x] Antimicrobial resistance genes detection\r +- [ ] Plasmid detection\r +\r +# Installation\r +1. 
Install `mamba` by running the following two command:\r +```commandline\r +curl -L -O "https://github.com/conda-forge/miniforge/releases/latest/download/Mambaforge-$(uname)-$(uname -m).sh"\r +bash Mambaforge-$(uname)-$(uname -m).sh\r +```\r +\r +2. Clone the bacpage repository:\r +```commandline\r +git clone https://github.com/CholGen/bacpage.git\r +```\r +\r +3. Switch to the development branch of the pipeline:\r +```commandline\r +cd bacpage/\r +git checkout -b split_into_command\r +```\r +\r +3. Install and activate the pipeline's conda environment:\r +```commandline\r +mamba env create -f environment.yaml\r +mamba activate bacpage\r +```\r +\r +4. Install the `bacpage` command:\r +```commandline\r +pip install .\r +```\r +\r +5. Test the installation:\r +```commandline\r +bacpage -h\r +bacpage version\r +```\r +These command should print the help and version of the program. Please create an issue if this is not the case.\r +\r +# Updating\r +\r +1. Navigate to the directory where you cloned the bacpage repository on the command line:\r +```commandline\r +cd bacpage/\r +```\r +2. Activate the bacpage conda environment:\r +```commandline\r +mamba activate bacpage\r +```\r +3. Pull the lastest changes from GitHub:\r +```commandline\r +git pull\r +```\r +4. Update the bacpage conda environemnt:\r +```commandline\r +mamba env update -f environment.yaml\r +```\r +5. Reinstall the `bacpage` command:\r +```commandline\r +pip install .\r +```\r +\r +# Usage\r +0. Activate the bacpage conda environment:\r +```commandline\r +mamba activate bacpage\r +```\r +1. Create a directory specifically for the batch of samples you would like to analyze (called a project directory).\r +```commandline\r +bacpage setup [your-project-directory-name]\r +```\r +2. Place paired sequencing reads in the `input/` directory of your project directory.\r +3. 
From the pipeline's directory, run the reference-based assembly pipeline on your samples using the following command:\r +```commandline\r +bacpage assemble [your-project-directory-name]\r +```\r +This will generate a consensus sequence in FASTA format for each of your samples and place them in \r +`/results/consensus_sequences/.masked.fasta`. An HTML report containing alignment \r +and quality metrics for your samples can be found at `/results/reports/qc_report.html`.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/695?version=2" ; + schema1:isBasedOn "https://github.com/CholGen/bacpage.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Phylogeny reconstruction using bacpage" ; + schema1:sdDatePublished "2024-07-12 13:25:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/695/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 13473 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3380 ; + schema1:dateCreated "2024-01-09T17:12:32Z" ; + schema1:dateModified "2024-01-09T17:12:32Z" ; + schema1:description """# BACPAGE\r +\r +This repository contains an easy-to-use pipeline for the assembly and analysis of bacterial genomes using ONT long-read or Illumina short-read technology. \r +Read the complete documentation and instructions for bacpage and each of its functions [here](https://cholgen.github.io/sequencing-resources/bacpage-command.html)\r +\r +# Introduction\r +Advances in sequencing technology during the COVID-19 pandemic has led to massive increases in the generation of sequencing data. 
Many bioinformatics tools have been developed to analyze this data, but very few tools can be utilized by individuals without prior bioinformatics training.\r +\r +This pipeline was designed to encapsulate pre-existing tools to automate analysis of whole genome sequencing of bacteria. \r +Installation is fast and straightfoward. \r +The pipeline is easy to setup and contains rationale defaults, but is highly modular and configurable by more advance users.\r +Bacpage has individual commands to generate consensus sequences, perform *de novo* assembly, construct phylogenetic tree, and generate quality control reports.\r +\r +# Features\r +We anticipate the pipeline will be able to perform the following functions:\r +- [x] Reference-based assembly of Illumina paired-end reads\r +- [x] *De novo* assembly of Illumina paired-end reads\r +- [ ] *De novo* assembly of ONT long reads\r +- [x] Run quality control checks\r +- [x] Variant calling using [bcftools](https://github.com/samtools/bcftools)\r +- [x] Maximum-likelihood phylogenetic inference of processed samples and background dataset using [iqtree](https://github.com/iqtree/iqtree2) \r +- [x] MLST profiling and virulence factor detection\r +- [x] Antimicrobial resistance genes detection\r +- [ ] Plasmid detection\r +\r +# Installation\r +1. Install `mamba` by running the following two command:\r +```commandline\r +curl -L -O "https://github.com/conda-forge/miniforge/releases/latest/download/Mambaforge-$(uname)-$(uname -m).sh"\r +bash Mambaforge-$(uname)-$(uname -m).sh\r +```\r +\r +2. Clone the bacpage repository:\r +```commandline\r +git clone https://github.com/CholGen/bacpage.git\r +```\r +\r +3. Switch to the development branch of the pipeline:\r +```commandline\r +cd bacpage/\r +git checkout -b split_into_command\r +```\r +\r +3. Install and activate the pipeline's conda environment:\r +```commandline\r +mamba env create -f environment.yaml\r +mamba activate bacpage\r +```\r +\r +4. 
Install the `bacpage` command:\r +```commandline\r +pip install .\r +```\r +\r +5. Test the installation:\r +```commandline\r +bacpage -h\r +bacpage version\r +```\r +These command should print the help and version of the program. Please create an issue if this is not the case.\r +\r +# Updating\r +\r +1. Navigate to the directory where you cloned the bacpage repository on the command line:\r +```commandline\r +cd bacpage/\r +```\r +2. Activate the bacpage conda environment:\r +```commandline\r +mamba activate bacpage\r +```\r +3. Pull the lastest changes from GitHub:\r +```commandline\r +git pull\r +```\r +4. Update the bacpage conda environemnt:\r +```commandline\r +mamba env update -f environment.yaml\r +```\r +5. Reinstall the `bacpage` command:\r +```commandline\r +pip install .\r +```\r +\r +# Usage\r +0. Activate the bacpage conda environment:\r +```commandline\r +mamba activate bacpage\r +```\r +1. Create a directory specifically for the batch of samples you would like to analyze (called a project directory).\r +```commandline\r +bacpage setup [your-project-directory-name]\r +```\r +2. Place paired sequencing reads in the `input/` directory of your project directory.\r +3. From the pipeline's directory, run the reference-based assembly pipeline on your samples using the following command:\r +```commandline\r +bacpage assemble [your-project-directory-name]\r +```\r +This will generate a consensus sequence in FASTA format for each of your samples and place them in \r +`/results/consensus_sequences/.masked.fasta`. 
An HTML report containing alignment \r +and quality metrics for your samples can be found at `/results/reports/qc_report.html`.\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/695?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Phylogeny reconstruction using bacpage" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/695?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/986?version=11" ; + schema1:isBasedOn "https://github.com/nf-core/fetchngs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fetchngs" ; + schema1:sdDatePublished "2024-07-12 13:21:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/986/ro_crate?version=11" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9188 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:53Z" ; + schema1:dateModified "2024-06-11T12:54:53Z" ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/986?version=13" ; + schema1:keywords "ddbj, download, ena, FASTQ, geo, sra, synapse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fetchngs" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/986?version=11" ; + schema1:version 11 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.826.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_md_setup/docker" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Docker Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:23:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/826/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 746 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-22T11:00:31Z" ; + schema1:dateModified "2024-05-22T13:38:14Z" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Docker Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_md_setup/docker/Dockerfile" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-07-12 13:18:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=9" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10551 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:10Z" ; + schema1:dateModified "2024-06-11T12:55:10Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + schema1:datePublished "2024-03-26T19:19:56.092517" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Purge-duplicates-one-haplotype-VGP6b/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . 
+ + a schema1:Dataset ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling" ; + schema1:mainEntity ; + schema1:name "COVID-19-PE-ARTIC-ILLUMINA (v0.3)" ; + schema1:sdDatePublished "2021-06-06 03:00:39 +0100" ; + schema1:softwareVersion "v0.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 66812 ; + schema1:name "COVID-19-PE-ARTIC-ILLUMINA" ; + schema1:programmingLanguage . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.120.6" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:34:18 +0100" ; + schema1:url "https://workflowhub.eu/workflows/120/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 52982 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-04T14:12:30Z" ; + schema1:dateModified "2024-05-14T10:08:33Z" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/120?version=5" ; + schema1:isPartOf , + , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_wf_md_setup/blob/main/biobb_wf_md_setup/notebooks/biobb_MDsetup_tutorial.ipynb" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# score-assemblies\r +\r +A Snakemake-wrapper for evaluating *de novo* bacterial genome assemblies, e.g. from Oxford Nanopore (ONT) or Illumina sequencing.\r +\r +The workflow includes the following programs:\r +* [pomoxis](https://github.com/nanoporetech/pomoxis) assess_assembly and assess_homopolymers\r +* dnadiff from the [mummer](https://mummer4.github.io/index.html) package\r +* [NucDiff](https://github.com/uio-cels/NucDiff/)\r +* [QUAST](http://quast.sourceforge.net/quast)\r +* [BUSCO](https://busco.ezlab.org/)\r +* [ideel](https://github.com/mw55309/ideel/), which uses [prodigal](https://github.com/hyattpd/Prodigal) and [diamond](https://github.com/bbuchfink/diamond)\r +* [bakta](https://github.com/oschwengers/bakta)\r +\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.786.1" ; + schema1:isBasedOn "https://github.com/pmenzel/score-assemblies" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for score-assemblies" ; + schema1:sdDatePublished "2024-07-12 13:23:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/786/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 23878 ; + schema1:creator ; + schema1:dateCreated "2024-03-06T10:19:58Z" ; + schema1:dateModified "2024-03-06T10:54:09Z" ; + schema1:description """# score-assemblies\r +\r +A Snakemake-wrapper for evaluating *de novo* bacterial genome assemblies, e.g. 
from Oxford Nanopore (ONT) or Illumina sequencing.\r +\r +The workflow includes the following programs:\r +* [pomoxis](https://github.com/nanoporetech/pomoxis) assess_assembly and assess_homopolymers\r +* dnadiff from the [mummer](https://mummer4.github.io/index.html) package\r +* [NucDiff](https://github.com/uio-cels/NucDiff/)\r +* [QUAST](http://quast.sourceforge.net/quast)\r +* [BUSCO](https://busco.ezlab.org/)\r +* [ideel](https://github.com/mw55309/ideel/), which uses [prodigal](https://github.com/hyattpd/Prodigal) and [diamond](https://github.com/bbuchfink/diamond)\r +* [bakta](https://github.com/oschwengers/bakta)\r +\r +\r +""" ; + schema1:keywords "genome_assembly, genome-annotation" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "score-assemblies" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/786?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.127.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (Cluster90)" ; + schema1:sdDatePublished "2024-07-12 13:34:00 
+0100" ; + schema1:url "https://workflowhub.eu/workflows/127/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 39064 ; + schema1:creator , + , + ; + schema1:dateCreated "2021-06-29T09:06:40Z" ; + schema1:dateModified "2022-09-15T11:15:21Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in 
Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/127?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (Cluster90)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_virtual-screening/0ab8d1d3410c67db6a5a25d3dde6f3e0303af08f/biobb_wf_virtual-screening/notebooks/clusterBindingSite/wf_vs_clusterBindingSite.ipynb" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """ Joint multi-omics dimensionality reduction approaches for CAKUT data using peptidome and proteome data\r + \r + **Brief description**\r + In (Cantini et al. 2020), Cantini et al. evaluated 9 representative joint dimensionality reduction (jDR) methods for multi-omics integration and analysis and . The methods are Regularized Generalized Canonical Correlation Analysis (RGCCA), Multiple co-inertia analysis (MCIA), Multi-Omics Factor Analysis (MOFA), Multi-Study Factor Analysis (MSFA), iCluster, Integrative NMF (intNMF), Joint and Individual Variation Explained (JIVE), tensorial Independent Component Analysis (tICA), and matrix-tri-factorization (scikit-fusion) (Tenenhaus, Tenenhaus, and Groenen 2017; Bady et al. 2004; Argelaguet et al. 2018; De Vito et al. 2019; Shen, Olshen, and Ladanyi 2009; Chalise and Fridley 2017; Lock et al. 2013; Teschendorff et al. 
2018; Žitnik and Zupan 2015).\r +\r +The authors provided their benchmarking procedure, multi-omics mix (momix), as Jupyter Notebook on GitHub (https://github.com/ComputationalSystemsBiology/momix-notebook) and project environment through Conda. In momix, the factorization methods are called from an R script, and parameters of the methods are also set in that script. We did not modify the parameters of the methods in the provided script. We set factor number to 2.\r +""" ; + schema1:identifier "https://workflowhub.eu/workflows/126?version=1" ; + schema1:isBasedOn "https://gitlab.cmbi.umcn.nl/bayjan/cakut_dre" ; + schema1:license "GPL-3.0" ; + schema1:name "Research Object Crate for EJP-RD WP13 case-study CAKUT momix analysis" ; + schema1:sdDatePublished "2024-07-12 13:34:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/126/ro_crate?version=1" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A Nanostring nCounter analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1003?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/nanostring" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nanostring" ; + schema1:sdDatePublished "2024-07-12 13:20:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1003/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9869 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:06Z" ; + schema1:dateModified "2024-06-11T12:55:06Z" ; + schema1:description "A Nanostring nCounter analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1003?version=4" ; + schema1:keywords "nanostring, nanostringnorm" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nanostring" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1003?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Non-functional workflow to get a global view of possibilities for plant virus classification." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/100?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for 0: View complete virus identification" ; + schema1:sdDatePublished "2024-07-12 13:37:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/100/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 46782 ; + schema1:dateCreated "2021-02-04T09:05:08Z" ; + schema1:dateModified "2023-02-13T14:06:45Z" ; + schema1:description "Non-functional workflow to get a global view of possibilities for plant virus classification." 
; + schema1:keywords "Virus, identification, exploration" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "0: View complete virus identification" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/100?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "MARS-seq v2 preprocessing pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/996?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/marsseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/marsseq" ; + schema1:sdDatePublished "2024-07-12 13:20:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/996/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9524 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:00Z" ; + schema1:dateModified "2024-06-11T12:55:00Z" ; + schema1:description "MARS-seq v2 preprocessing pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/996?version=4" ; + schema1:keywords "facs-sorting, MARS-seq, single-cell, single-cell-rna-seq, star-solo, transcriptional-dynamics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/marsseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/996?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/984?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/epitopeprediction" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/epitopeprediction" ; + schema1:sdDatePublished "2024-07-12 13:21:30 +0100" ; + schema1:url "https://workflowhub.eu/workflows/984/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9461 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:51Z" ; + schema1:dateModified "2024-06-11T12:54:51Z" ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/984?version=7" ; + schema1:keywords "epitope, epitope-prediction, mhc-binding-prediction" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/epitopeprediction" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/984?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=14" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-07-12 13:20:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=14" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17479 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:59Z" ; + schema1:dateModified "2024-06-11T12:54:59Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=14" ; + schema1:version 14 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1023?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/smrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/smrnaseq" ; + schema1:sdDatePublished "2024-07-12 13:18:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1023/ro_crate?version=9" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11444 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:19Z" ; + schema1:dateModified "2024-06-11T12:55:19Z" ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/1023?version=10" ; + schema1:keywords "small-rna, smrna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/smrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1023?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/989?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/hic" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hic" ; + schema1:sdDatePublished "2024-07-12 13:21:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/989/ro_crate?version=8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10893 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:55Z" ; + schema1:dateModified "2024-06-11T12:54:55Z" ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/989?version=7" ; + schema1:keywords "chromosome-conformation-capture, Hi-C" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hic" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/989?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. 
\r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +****\r +\r +Workflow information:\r +* Input = genome.fasta.\r +* Outputs = masked_genome.fasta and table of repeats found. \r +* Runs RepeatModeler with default settings, uses the output of this (repeat library) as input into RepeatMasker. \r +* Runs RepeatMasker with default settings except for: Skip masking of simple tandem repeats and low complexity regions. (-nolow) : default set to yes. Perform softmasking instead of hardmasking - set to yes. \r +* Workflow report displays an edited table of repeats found. Note: a known bug is that sometimes the workflow report text resets to default text. To restore, look for an earlier workflow version with correct workflow report text, and copy and paste report text into current version.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.875.2" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Repeat masking - TSI" ; + schema1:sdDatePublished "2024-07-12 13:17:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/875/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8659 ; + schema1:creator , + ; + schema1:dateCreated "2024-05-08T06:24:55Z" ; + schema1:dateModified "2024-05-09T04:01:22Z" ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. 
\r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +****\r +\r +Workflow information:\r +* Input = genome.fasta.\r +* Outputs = masked_genome.fasta and table of repeats found. \r +* Runs RepeatModeler with default settings, uses the output of this (repeat library) as input into RepeatMasker. \r +* Runs RepeatMasker with default settings except for: Skip masking of simple tandem repeats and low complexity regions. (-nolow) : default set to yes. Perform softmasking instead of hardmasking - set to yes. \r +* Workflow report displays an edited table of repeats found. Note: a known bug is that sometimes the workflow report text resets to default text. To restore, look for an earlier workflow version with correct workflow report text, and copy and paste report text into current version.\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/875?version=2" ; + schema1:isPartOf ; + schema1:keywords "TSI-annotation" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Repeat masking - TSI" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/875?version=2" ; + schema1:version 2 ; + ns1:input ; + ns1:output , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 403507 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/978?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/demultiplex" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/demultiplex" ; + schema1:sdDatePublished "2024-07-12 13:21:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/978/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8121 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:47Z" ; + schema1:dateModified "2024-06-11T12:54:47Z" ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/978?version=7" ; + schema1:keywords "bases2fastq, bcl2fastq, demultiplexing, elementbiosciences, illumina" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/demultiplex" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/978?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Rare disease researchers workflow is that they submit their raw data (fastq), run the mapping and variant calling RD-Connect pipeline and obtain unannotated gvcf files to further submit to the RD-Connect GPAP or analyse on their own.\r +\r +This demonstrator focuses on the variant calling pipeline. 
The raw genomic data is processed using the RD-Connect pipeline ([Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/27604516)) running on the standards (GA4GH) compliant, interoperable container orchestration platform.\r +\r +This demonstrator will be aligned with the current implementation study on [Development of Architecture for Software Containers at ELIXIR and its use by EXCELERATE use-case communities](docs/Appendix%201%20-%20Project%20Plan%202018-biocontainers%2020171117.pdf) \r +\r +For this implementation, different steps are required:\r +\r +1. Adapt the pipeline to CWL and dockerise elements \r +2. Align with IS efforts on software containers to package the different components (Nextflow) \r +3. Submit trio of Illumina NA12878 Platinum Genome or Exome to the GA4GH platform cloud (by Aspera or ftp server)\r +4. Run the RD-Connect pipeline on the container platform\r +5. Return corresponding gvcf files\r +6. OPTIONAL: annotate and update to RD-Connect playground instance\r +\r +N.B: The demonstrator might have some manual steps, which will not be in production. \r +\r +## RD-Connect pipeline\r +\r +Detailed information about the RD-Connect pipeline can be found in [Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/?term=27604516)\r +\r +![alt text](https://raw.githubusercontent.com/inab/Wetlab2Variations/eosc-life/docs/RD-Connect_pipeline.jpg)\r +\r +## The applications\r +\r +**1\\. Name of the application: Adaptor removal**\r +Function: remove sequencing adaptors \r +Container (readiness status, location, version): [cutadapt (v.1.18)](https://hub.docker.com/r/cnag/cutadapt) \r +Required resources in cores and RAM: current container size 169MB \r +Input data (amount, format, directory..): raw fastq \r +Output data: paired fastq without adaptors \r +\r +**2\\. Name of the application: Mapping and bam sorting**\r +Function: align data to reference genome \r +Container : [bwa-mem (v.0.7.17)](https://hub.docker.com/r/cnag/bwa) / [Sambamba (v. 
0.6.8 )](https://hub.docker.com/r/cnag/sambamba)(or samtools) \r +Resources :current container size 111MB / 32MB \r +Input data: paired fastq without adaptors \r +Output data: sorted bam \r +\r +**3\\. Name of the application: MarkDuplicates** \r +Function: Mark (and remove) duplicates \r +Container: [Picard (v.2.18.25)](https://hub.docker.com/r/cnag/picard)\r +Resources: current container size 261MB \r +Input data:sorted bam \r +Output data: Sorted bam with marked (or removed) duplicates \r +\r +**4\\. Name of the application: Base quality recalibration (BQSR)** \r +Function: Base quality recalibration \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data: Sorted bam with marked (or removed) duplicates \r +Output data: Sorted bam with marked duplicates & base quality recalculated \r +\r +**5\\. Name of the application: Variant calling** \r +Function: variant calling \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data:Sorted bam with marked duplicates & base quality recalculated \r +Output data: unannotated gvcf per sample \r +\r +**6\\. (OPTIONAL)Name of the application: Quality of the fastq** \r +Function: report on the sequencing quality \r +Container: [fastqc 0.11.8](https://hub.docker.com/r/cnag/fastqc)\r +Resources: current container size 173MB \r +Input data: raw fastq \r +Output data: QC report \r +\r +## Licensing\r +\r +GATK declares that archived packages are made available for free to academic researchers under a limited license for non-commercial use. If you need to use one of these packages for commercial use. 
https://software.broadinstitute.org/gatk/download/archive """ ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/106?version=1" ; + schema1:isBasedOn "https://github.com/inab/Wetlab2Variations/tree/eosc-life/nextflow" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for VariantCaller_GATK3.6" ; + schema1:sdDatePublished "2024-07-12 13:37:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/106/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 21588 ; + schema1:creator ; + schema1:dateCreated "2021-02-18T14:20:09Z" ; + schema1:dateModified "2021-04-19T15:39:56Z" ; + schema1:description """Rare disease researchers workflow is that they submit their raw data (fastq), run the mapping and variant calling RD-Connect pipeline and obtain unannotated gvcf files to further submit to the RD-Connect GPAP or analyse on their own.\r +\r +This demonstrator focuses on the variant calling pipeline. The raw genomic data is processed using the RD-Connect pipeline ([Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/27604516)) running on the standards (GA4GH) compliant, interoperable container orchestration platform.\r +\r +This demonstrator will be aligned with the current implementation study on [Development of Architecture for Software Containers at ELIXIR and its use by EXCELERATE use-case communities](docs/Appendix%201%20-%20Project%20Plan%202018-biocontainers%2020171117.pdf) \r +\r +For this implementation, different steps are required:\r +\r +1. Adapt the pipeline to CWL and dockerise elements \r +2. Align with IS efforts on software containers to package the different components (Nextflow) \r +3. Submit trio of Illumina NA12878 Platinum Genome or Exome to the GA4GH platform cloud (by Aspera or ftp server)\r +4. 
Run the RD-Connect pipeline on the container platform\r +5. Return corresponding gvcf files\r +6. OPTIONAL: annotate and update to RD-Connect playground instance\r +\r +N.B: The demonstrator might have some manual steps, which will not be in production. \r +\r +## RD-Connect pipeline\r +\r +Detailed information about the RD-Connect pipeline can be found in [Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/?term=27604516)\r +\r +![alt text](https://raw.githubusercontent.com/inab/Wetlab2Variations/eosc-life/docs/RD-Connect_pipeline.jpg)\r +\r +## The applications\r +\r +**1\\. Name of the application: Adaptor removal**\r +Function: remove sequencing adaptors \r +Container (readiness status, location, version): [cutadapt (v.1.18)](https://hub.docker.com/r/cnag/cutadapt) \r +Required resources in cores and RAM: current container size 169MB \r +Input data (amount, format, directory..): raw fastq \r +Output data: paired fastq without adaptors \r +\r +**2\\. Name of the application: Mapping and bam sorting**\r +Function: align data to reference genome \r +Container : [bwa-mem (v.0.7.17)](https://hub.docker.com/r/cnag/bwa) / [Sambamba (v. 0.6.8 )](https://hub.docker.com/r/cnag/sambamba)(or samtools) \r +Resources :current container size 111MB / 32MB \r +Input data: paired fastq without adaptors \r +Output data: sorted bam \r +\r +**3\\. Name of the application: MarkDuplicates** \r +Function: Mark (and remove) duplicates \r +Container: [Picard (v.2.18.25)](https://hub.docker.com/r/cnag/picard)\r +Resources: current container size 261MB \r +Input data:sorted bam \r +Output data: Sorted bam with marked (or removed) duplicates \r +\r +**4\\. 
Name of the application: Base quality recalibration (BQSR)** \r +Function: Base quality recalibration \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data: Sorted bam with marked (or removed) duplicates \r +Output data: Sorted bam with marked duplicates & base quality recalculated \r +\r +**5\\. Name of the application: Variant calling** \r +Function: variant calling \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data:Sorted bam with marked duplicates & base quality recalculated \r +Output data: unannotated gvcf per sample \r +\r +**6\\. (OPTIONAL)Name of the application: Quality of the fastq** \r +Function: report on the sequencing quality \r +Container: [fastqc 0.11.8](https://hub.docker.com/r/cnag/fastqc)\r +Resources: current container size 173MB \r +Input data: raw fastq \r +Output data: QC report \r +\r +## Licensing\r +\r +GATK declares that archived packages are made available for free to academic researchers under a limited license for non-commercial use. If you need to use one of these packages for commercial use. https://software.broadinstitute.org/gatk/download/archive """ ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/106?version=2" ; + schema1:keywords "Nextflow, variant_calling" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "VariantCaller_GATK3.6" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/106?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 83411 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# ![sanger-tol/ensemblgenedownload](docs/images/sanger-tol-ensemblgenedownload_logo.png)\r +\r +[![GitHub Actions CI Status](https://github.com/sanger-tol/ensemblgenedownload/workflows/nf-core%20CI/badge.svg)](https://github.com/sanger-tol/ensemblgenedownload/actions?query=workflow%3A%22nf-core+CI%22)\r +\r +\r +\r +[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.7183206-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.7183206)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.04.0-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +\r +[![Get help on Slack](http://img.shields.io/badge/slack-SangerTreeofLife%20%23pipelines-4A154B?labelColor=000000&logo=slack)](https://SangerTreeofLife.slack.com/channels/pipelines)\r +[![Follow on Twitter](http://img.shields.io/badge/twitter-%40sangertol-1DA1F2?labelColor=000000&logo=twitter)](https://twitter.com/sangertol)\r +[![Watch on YouTube](http://img.shields.io/badge/youtube-tree--of--life-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/channel/UCFeDpvjU58SA9V0ycRXejhA)\r +\r +## Introduction\r +\r +**sanger-tol/ensemblgenedownload** is a pipeline that downloads gene annotations from Ensembl into the Tree of Life directory structure.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. 
It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +On release, automated continuous integration tests run the pipeline on a full-sized dataset on the GitHub CI infrastructure. This ensures that the pipeline runs in a third-party environment, and has sensible resource allocation defaults set to run on real-world datasets.\r +\r +## Pipeline summary\r +\r +## Overview\r +\r +The pipeline takes a CSV file that contains assembly accession number, Ensembl species names (as they may differ from Tree of Life ones !), output directories, and geneset versions.\r +Assembly accession numbers are optional. If missing, the pipeline assumes it can be retrieved from files named `ACCESSION` in the standard location on disk.\r +The pipeline downloads the Fasta files of the genes (cdna, cds, and protein sequences) as well as the GFF3 file.\r +All files are compressed with `bgzip`, and indexed with `samtools faidx` or `tabix`.\r +\r +Steps involved:\r +\r +- Download from Ensembl the GFF3 file, and the sequences of the genes in\r + Fasta format.\r +- Compress and index all Fasta files with `bgzip`, `samtools faidx`, and\r + `samtools dict`.\r +- Compress and index the GFF3 file with `bgzip` and `tabix`.\r +\r +## Quick Start\r +\r +1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=22.04.0`)\r +\r +2. 
Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) (you can follow [this tutorial](https://singularity-tutorial.github.io/01-installation/)), [`Podman`](https://podman.io/), [`Shifter`](https://nersc.gitlab.io/development/shifter/how-to-use/) or [`Charliecloud`](https://hpc.github.io/charliecloud/) for full pipeline reproducibility _(you can use [`Conda`](https://conda.io/miniconda.html) both to install Nextflow itself and also to manage software within pipelines. Please only use it within pipelines as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_.\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```bash\r + nextflow run sanger-tol/ensemblgenedownload -profile test,YOURPROFILE --outdir \r + ```\r +\r + Note that some form of configuration will be needed so that Nextflow knows how to fetch the required software. This is usually done in the form of a config profile (`YOURPROFILE` in the example command above). You can chain multiple config profiles in a comma-separated string.\r +\r + > - The pipeline comes with config profiles called `docker`, `singularity`, `podman`, `shifter`, `charliecloud` and `conda` which instruct the pipeline to use the named tool for software management. For example, `-profile test,docker`.\r + > - Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment.\r + > - If you are using `singularity`, please use the [`nf-core download`](https://nf-co.re/tools/#downloading-pipelines-for-offline-use) command to download images first, before running the pipeline. 
Setting the [`NXF_SINGULARITY_CACHEDIR` or `singularity.cacheDir`](https://www.nextflow.io/docs/latest/singularity.html?#singularity-docker-hub) Nextflow options enables you to store and re-use the images from a central location for future pipeline runs.\r + > - If you are using `conda`, it is highly recommended to use the [`NXF_CONDA_CACHEDIR` or `conda.cacheDir`](https://www.nextflow.io/docs/latest/conda.html) settings to store the environments in a central location for future pipeline runs.\r +\r +4. Start running your own analysis!\r +\r + ```console\r + nextflow run sanger-tol/ensemblgenedownload --input $PWD/assets/samplesheet.csv --outdir -profile \r + ```\r +\r +## Documentation\r +\r +The sanger-tol/ensemblgenedownload pipeline comes with documentation about the pipeline [usage](docs/usage.md) and [output](docs/output.md).\r +\r +## Credits\r +\r +sanger-tol/ensemblgenedownload was originally written by @muffato.\r +\r +## Contributions and Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +For further information or help, don't hesitate to get in touch on the [Slack `#pipelines` channel](https://sangertreeoflife.slack.com/channels/pipelines). 
Please [create an issue](https://github.com/sanger-tol/ensemblgenedownload/issues/new/choose) on GitHub if you are not on the Sanger slack channel.\r +\r +## Citations\r +\r +If you use sanger-tol/ensemblgenedownload for your analysis, please cite it using the following doi: [10.5281/zenodo.7183206](https://doi.org/10.5281/zenodo.7183206)\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +This pipeline uses code and infrastructure developed and maintained by the [nf-core](https://nf-co.re) community, reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/666?version=1" ; + schema1:isBasedOn "https://github.com/sanger-tol/ensemblgenedownload" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for sanger-tol/insdcdownload v1.0.1 - Hefty mûmakil" ; + schema1:sdDatePublished "2024-07-12 13:26:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/666/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1705 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-14T12:03:59Z" ; + schema1:dateModified "2023-11-14T12:03:59Z" ; + schema1:description """# ![sanger-tol/ensemblgenedownload](docs/images/sanger-tol-ensemblgenedownload_logo.png)\r +\r +[![GitHub Actions CI Status](https://github.com/sanger-tol/ensemblgenedownload/workflows/nf-core%20CI/badge.svg)](https://github.com/sanger-tol/ensemblgenedownload/actions?query=workflow%3A%22nf-core+CI%22)\r +\r +\r +\r +[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.7183206-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.7183206)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.04.0-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +\r +[![Get help on Slack](http://img.shields.io/badge/slack-SangerTreeofLife%20%23pipelines-4A154B?labelColor=000000&logo=slack)](https://SangerTreeofLife.slack.com/channels/pipelines)\r +[![Follow on Twitter](http://img.shields.io/badge/twitter-%40sangertol-1DA1F2?labelColor=000000&logo=twitter)](https://twitter.com/sangertol)\r +[![Watch on YouTube](http://img.shields.io/badge/youtube-tree--of--life-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/channel/UCFeDpvjU58SA9V0ycRXejhA)\r +\r +## Introduction\r +\r +**sanger-tol/ensemblgenedownload** is a pipeline that downloads gene annotations from Ensembl into the Tree of Life directory structure.\r +\r +The 
pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +On release, automated continuous integration tests run the pipeline on a full-sized dataset on the GitHub CI infrastructure. This ensures that the pipeline runs in a third-party environment, and has sensible resource allocation defaults set to run on real-world datasets.\r +\r +## Pipeline summary\r +\r +## Overview\r +\r +The pipeline takes a CSV file that contains assembly accession number, Ensembl species names (as they may differ from Tree of Life ones !), output directories, and geneset versions.\r +Assembly accession numbers are optional. If missing, the pipeline assumes it can be retrieved from files named `ACCESSION` in the standard location on disk.\r +The pipeline downloads the Fasta files of the genes (cdna, cds, and protein sequences) as well as the GFF3 file.\r +All files are compressed with `bgzip`, and indexed with `samtools faidx` or `tabix`.\r +\r +Steps involved:\r +\r +- Download from Ensembl the GFF3 file, and the sequences of the genes in\r + Fasta format.\r +- Compress and index all Fasta files with `bgzip`, `samtools faidx`, and\r + `samtools dict`.\r +- Compress and index the GFF3 file with `bgzip` and `tabix`.\r +\r +## Quick Start\r +\r +1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=22.04.0`)\r +\r +2. 
Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) (you can follow [this tutorial](https://singularity-tutorial.github.io/01-installation/)), [`Podman`](https://podman.io/), [`Shifter`](https://nersc.gitlab.io/development/shifter/how-to-use/) or [`Charliecloud`](https://hpc.github.io/charliecloud/) for full pipeline reproducibility _(you can use [`Conda`](https://conda.io/miniconda.html) both to install Nextflow itself and also to manage software within pipelines. Please only use it within pipelines as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_.\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```bash\r + nextflow run sanger-tol/ensemblgenedownload -profile test,YOURPROFILE --outdir \r + ```\r +\r + Note that some form of configuration will be needed so that Nextflow knows how to fetch the required software. This is usually done in the form of a config profile (`YOURPROFILE` in the example command above). You can chain multiple config profiles in a comma-separated string.\r +\r + > - The pipeline comes with config profiles called `docker`, `singularity`, `podman`, `shifter`, `charliecloud` and `conda` which instruct the pipeline to use the named tool for software management. For example, `-profile test,docker`.\r + > - Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment.\r + > - If you are using `singularity`, please use the [`nf-core download`](https://nf-co.re/tools/#downloading-pipelines-for-offline-use) command to download images first, before running the pipeline. 
Setting the [`NXF_SINGULARITY_CACHEDIR` or `singularity.cacheDir`](https://www.nextflow.io/docs/latest/singularity.html?#singularity-docker-hub) Nextflow options enables you to store and re-use the images from a central location for future pipeline runs.\r + > - If you are using `conda`, it is highly recommended to use the [`NXF_CONDA_CACHEDIR` or `conda.cacheDir`](https://www.nextflow.io/docs/latest/conda.html) settings to store the environments in a central location for future pipeline runs.\r +\r +4. Start running your own analysis!\r +\r + ```console\r + nextflow run sanger-tol/ensemblgenedownload --input $PWD/assets/samplesheet.csv --outdir -profile \r + ```\r +\r +## Documentation\r +\r +The sanger-tol/ensemblgenedownload pipeline comes with documentation about the pipeline [usage](docs/usage.md) and [output](docs/output.md).\r +\r +## Credits\r +\r +sanger-tol/ensemblgenedownload was originally written by @muffato.\r +\r +## Contributions and Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +For further information or help, don't hesitate to get in touch on the [Slack `#pipelines` channel](https://sangertreeoflife.slack.com/channels/pipelines). 
Please [create an issue](https://github.com/sanger-tol/ensemblgenedownload/issues/new/choose) on GitHub if you are not on the Sanger slack channel.\r +\r +## Citations\r +\r +If you use sanger-tol/ensemblgenedownload for your analysis, please cite it using the following doi: [10.5281/zenodo.7183206](https://doi.org/10.5281/zenodo.7183206)\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +This pipeline uses code and infrastructure developed and maintained by the [nf-core](https://nf-co.re) community, reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "sanger-tol/insdcdownload v1.0.1 - Hefty mûmakil" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/666?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# BridgeDb tutorial: Gene HGNC name to Ensembl identifier\r +\r +This tutorial explains how to use the BridgeDb identifier mapping service to translate HGNC names to Ensembl identifiers. 
This step is part of the OpenRiskNet use case to link Adverse Outcome Pathways to [WikiPathways](https://wikipathways.org/).\r +\r +First we need to load the Python library to allow calls to the [BridgeDb REST webservice](http://bridgedb.prod.openrisknet.org/swagger/):\r +\r +\r +```python\r +import requests\r +```\r +\r +Let's assume we're interested in the gene with HGNC MECP2 (FIXME: look up a gene in AOPWiki), the API call to make mappings is given below as `callUrl`. Here, the `H` indicates that the query (`MECP2`) is an HGNC symbol:\r +\r +\r +```python\r +callUrl = 'http://bridgedb.prod.openrisknet.org/Human/xrefs/H/MECP2'\r +```\r +\r +The default call returns all identifiers, not just for Ensembl:\r +\r +\r +```python\r +response = requests.get(callUrl)\r +response.text\r +```\r +\r +\r +\r +\r + 'GO:0001964\\tGeneOntology\\nuc065cav.1\\tUCSC Genome Browser\\n312750\\tOMIM\\nGO:0042551\\tGeneOntology\\nuc065car.1\\tUCSC Genome Browser\\nA0A087X1U4\\tUniprot-TrEMBL\\n4204\\tWikiGenes\\nGO:0043524\\tGeneOntology\\nILMN_1702715\\tIllumina\\n34355_at\\tAffy\\nGO:0007268\\tGeneOntology\\nMECP2\\tHGNC\\nuc065caz.1\\tUCSC Genome Browser\\nA_33_P3339036\\tAgilent\\nGO:0006576\\tGeneOntology\\nuc065cbg.1\\tUCSC Genome Browser\\nGO:0006342\\tGeneOntology\\n300496\\tOMIM\\nGO:0035176\\tGeneOntology\\nuc065cbc.1\\tUCSC Genome Browser\\nGO:0033555\\tGeneOntology\\nGO:0045892\\tGeneOntology\\nA_23_P114361\\tAgilent\\nGO:0045893\\tGeneOntology\\nENSG00000169057\\tEnsembl\\nGO:0090063\\tGeneOntology\\nGO:0005515\\tGeneOntology\\nGO:0002087\\tGeneOntology\\nGO:0005634\\tGeneOntology\\nGO:0007416\\tGeneOntology\\nGO:0008104\\tGeneOntology\\nGO:0042826\\tGeneOntology\\nGO:0007420\\tGeneOntology\\nGO:0035067\\tGeneOntology\\n300005\\tOMIM\\nNP_001104262\\tRefSeq\\nA0A087WVW7\\tUniprot-TrEMBL\\nNP_004983\\tRefSeq\\nGO:0046470\\tGeneOntology\\nGO:0010385\\tGeneOntology\\n11722682_at\\tAffy\\nGO:0051965\\tGeneOntology\\nNM_001316337\\tRefSeq\\nuc065caw.1\\tUCSC Genome 
Browser\\nA0A0D9SFX7\\tUniprot-TrEMBL\\nA0A140VKC4\\tUniprot-TrEMBL\\nGO:0003723\\tGeneOntology\\nGO:0019233\\tGeneOntology\\nGO:0001666\\tGeneOntology\\nGO:0003729\\tGeneOntology\\nGO:0021591\\tGeneOntology\\nuc065cas.1\\tUCSC Genome Browser\\nGO:0019230\\tGeneOntology\\nGO:0003682\\tGeneOntology\\nGO:0001662\\tGeneOntology\\nuc065cbh.1\\tUCSC Genome Browser\\nX99687_at\\tAffy\\nGO:0008344\\tGeneOntology\\nGO:0009791\\tGeneOntology\\nuc065cbd.1\\tUCSC Genome Browser\\nGO:0019904\\tGeneOntology\\nGO:0030182\\tGeneOntology\\nGO:0035197\\tGeneOntology\\n8175998\\tAffy\\nGO:0016358\\tGeneOntology\\nNM_004992\\tRefSeq\\nGO:0003714\\tGeneOntology\\nGO:0005739\\tGeneOntology\\nGO:0005615\\tGeneOntology\\nGO:0005737\\tGeneOntology\\nuc004fjv.3\\tUCSC Genome Browser\\n202617_s_at\\tAffy\\nGO:0050905\\tGeneOntology\\nGO:0008327\\tGeneOntology\\nD3YJ43\\tUniprot-TrEMBL\\nGO:0003677\\tGeneOntology\\nGO:0006541\\tGeneOntology\\nGO:0040029\\tGeneOntology\\nA_33_P3317211\\tAgilent\\nNP_001303266\\tRefSeq\\n11722683_a_at\\tAffy\\nGO:0008211\\tGeneOntology\\nGO:0051151\\tGeneOntology\\nNM_001110792\\tRefSeq\\nX89430_at\\tAffy\\nGO:2000820\\tGeneOntology\\nuc065cat.1\\tUCSC Genome Browser\\nGO:0003700\\tGeneOntology\\nGO:0047485\\tGeneOntology\\n4204\\tEntrez Gene\\nGO:0009405\\tGeneOntology\\nA0A0D9SEX1\\tUniprot-TrEMBL\\nGO:0098794\\tGeneOntology\\n3C2I\\tPDB\\nHs.200716\\tUniGene\\nGO:0000792\\tGeneOntology\\nuc065cax.1\\tUCSC Genome Browser\\n300055\\tOMIM\\n5BT2\\tPDB\\nGO:0006020\\tGeneOntology\\nGO:0031175\\tGeneOntology\\nuc065cbe.1\\tUCSC Genome Browser\\nGO:0008284\\tGeneOntology\\nuc065cba.1\\tUCSC Genome Browser\\nGO:0060291\\tGeneOntology\\n202618_s_at\\tAffy\\nGO:0016573\\tGeneOntology\\n17115453\\tAffy\\nA0A1B0GTV0\\tUniprot-TrEMBL\\nuc065cbi.1\\tUCSC Genome Browser\\nGO:0048167\\tGeneOntology\\nGO:0007616\\tGeneOntology\\nGO:0016571\\tGeneOntology\\nuc004fjw.3\\tUCSC Genome 
Browser\\nGO:0007613\\tGeneOntology\\nGO:0007612\\tGeneOntology\\nGO:0021549\\tGeneOntology\\n11722684_a_at\\tAffy\\nGO:0001078\\tGeneOntology\\nX94628_rna1_s_at\\tAffy\\nGO:0007585\\tGeneOntology\\nGO:0010468\\tGeneOntology\\nGO:0031061\\tGeneOntology\\nA_24_P237486\\tAgilent\\nGO:0050884\\tGeneOntology\\nGO:0000930\\tGeneOntology\\nGO:0005829\\tGeneOntology\\nuc065cau.1\\tUCSC Genome Browser\\nH7BY72\\tUniprot-TrEMBL\\n202616_s_at\\tAffy\\nGO:0006355\\tGeneOntology\\nuc065cay.1\\tUCSC Genome Browser\\nGO:0010971\\tGeneOntology\\n300673\\tOMIM\\nGO:0008542\\tGeneOntology\\nGO:0060079\\tGeneOntology\\nuc065cbf.1\\tUCSC Genome Browser\\nGO:0006122\\tGeneOntology\\nuc065cbb.1\\tUCSC Genome Browser\\nGO:0007052\\tGeneOntology\\nC9JH89\\tUniprot-TrEMBL\\nB5MCB4\\tUniprot-TrEMBL\\nGO:0032048\\tGeneOntology\\nGO:0050432\\tGeneOntology\\nGO:0001976\\tGeneOntology\\nI6LM39\\tUniprot-TrEMBL\\nGO:0005813\\tGeneOntology\\nILMN_1682091\\tIllumina\\nP51608\\tUniprot-TrEMBL\\n1QK9\\tPDB\\nGO:0006349\\tGeneOntology\\nGO:1900114\\tGeneOntology\\nGO:0000122\\tGeneOntology\\nGO:0006351\\tGeneOntology\\nGO:0008134\\tGeneOntology\\nILMN_1824898\\tIllumina\\n300260\\tOMIM\\n0006510725\\tIllumina\\n'\r +\r +\r +\r +You can also see the results are returned as a TSV file, consisting of two columns, the identifier and the matching database.\r +\r +We will want to convert this reply into a Python dictionary (with the identifier as key, as one database may have multiple identifiers):\r +\r +\r +```python\r +lines = response.text.split("\\n")\r +mappings = {}\r +for line in lines:\r + if ('\\t' in line):\r + tuple = line.split('\\t')\r + identifier = tuple[0]\r + database = tuple[1]\r + if (database == "Ensembl"):\r + mappings[identifier] = database\r +\r +print(mappings)\r +```\r +\r + {'ENSG00000169057': 'Ensembl'}\r +\r +\r +Alternatively, we can restrivct the return values from the BridgeDb webservice to just return Ensembl identifiers (system code `En`). 
For this, we add the `?dataSource=En` call parameter:\r +\r +\r +```python\r +callUrl = 'http://bridgedb-swagger.prod.openrisknet.org/Human/xrefs/H/MECP2?dataSource=En'\r +response = requests.get(callUrl)\r +response.text\r +```\r +\r +\r +\r +\r + 'ENSG00000169057\\tEnsembl\\n'\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/326?version=1" ; + schema1:isBasedOn "https://github.com/OpenRiskNet/notebooks.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for BridgeDb tutorial: Gene HGNC name to Ensembl identifier" ; + schema1:sdDatePublished "2024-07-12 13:35:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/326/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8122 ; + schema1:creator , + ; + schema1:dateCreated "2022-04-06T13:02:33Z" ; + schema1:dateModified "2022-04-06T13:02:33Z" ; + schema1:description """# BridgeDb tutorial: Gene HGNC name to Ensembl identifier\r +\r +This tutorial explains how to use the BridgeDb identifier mapping service to translate HGNC names to Ensembl identifiers. This step is part of the OpenRiskNet use case to link Adverse Outcome Pathways to [WikiPathways](https://wikipathways.org/).\r +\r +First we need to load the Python library to allow calls to the [BridgeDb REST webservice](http://bridgedb.prod.openrisknet.org/swagger/):\r +\r +\r +```python\r +import requests\r +```\r +\r +Let's assume we're interested in the gene with HGNC MECP2 (FIXME: look up a gene in AOPWiki), the API call to make mappings is given below as `callUrl`. 
Here, the `H` indicates that the query (`MECP2`) is an HGNC symbol:\r +\r +\r +```python\r +callUrl = 'http://bridgedb.prod.openrisknet.org/Human/xrefs/H/MECP2'\r +```\r +\r +The default call returns all identifiers, not just for Ensembl:\r +\r +\r +```python\r +response = requests.get(callUrl)\r +response.text\r +```\r +\r +\r +\r +\r + 'GO:0001964\\tGeneOntology\\nuc065cav.1\\tUCSC Genome Browser\\n312750\\tOMIM\\nGO:0042551\\tGeneOntology\\nuc065car.1\\tUCSC Genome Browser\\nA0A087X1U4\\tUniprot-TrEMBL\\n4204\\tWikiGenes\\nGO:0043524\\tGeneOntology\\nILMN_1702715\\tIllumina\\n34355_at\\tAffy\\nGO:0007268\\tGeneOntology\\nMECP2\\tHGNC\\nuc065caz.1\\tUCSC Genome Browser\\nA_33_P3339036\\tAgilent\\nGO:0006576\\tGeneOntology\\nuc065cbg.1\\tUCSC Genome Browser\\nGO:0006342\\tGeneOntology\\n300496\\tOMIM\\nGO:0035176\\tGeneOntology\\nuc065cbc.1\\tUCSC Genome Browser\\nGO:0033555\\tGeneOntology\\nGO:0045892\\tGeneOntology\\nA_23_P114361\\tAgilent\\nGO:0045893\\tGeneOntology\\nENSG00000169057\\tEnsembl\\nGO:0090063\\tGeneOntology\\nGO:0005515\\tGeneOntology\\nGO:0002087\\tGeneOntology\\nGO:0005634\\tGeneOntology\\nGO:0007416\\tGeneOntology\\nGO:0008104\\tGeneOntology\\nGO:0042826\\tGeneOntology\\nGO:0007420\\tGeneOntology\\nGO:0035067\\tGeneOntology\\n300005\\tOMIM\\nNP_001104262\\tRefSeq\\nA0A087WVW7\\tUniprot-TrEMBL\\nNP_004983\\tRefSeq\\nGO:0046470\\tGeneOntology\\nGO:0010385\\tGeneOntology\\n11722682_at\\tAffy\\nGO:0051965\\tGeneOntology\\nNM_001316337\\tRefSeq\\nuc065caw.1\\tUCSC Genome Browser\\nA0A0D9SFX7\\tUniprot-TrEMBL\\nA0A140VKC4\\tUniprot-TrEMBL\\nGO:0003723\\tGeneOntology\\nGO:0019233\\tGeneOntology\\nGO:0001666\\tGeneOntology\\nGO:0003729\\tGeneOntology\\nGO:0021591\\tGeneOntology\\nuc065cas.1\\tUCSC Genome Browser\\nGO:0019230\\tGeneOntology\\nGO:0003682\\tGeneOntology\\nGO:0001662\\tGeneOntology\\nuc065cbh.1\\tUCSC Genome Browser\\nX99687_at\\tAffy\\nGO:0008344\\tGeneOntology\\nGO:0009791\\tGeneOntology\\nuc065cbd.1\\tUCSC Genome 
Browser\\nGO:0019904\\tGeneOntology\\nGO:0030182\\tGeneOntology\\nGO:0035197\\tGeneOntology\\n8175998\\tAffy\\nGO:0016358\\tGeneOntology\\nNM_004992\\tRefSeq\\nGO:0003714\\tGeneOntology\\nGO:0005739\\tGeneOntology\\nGO:0005615\\tGeneOntology\\nGO:0005737\\tGeneOntology\\nuc004fjv.3\\tUCSC Genome Browser\\n202617_s_at\\tAffy\\nGO:0050905\\tGeneOntology\\nGO:0008327\\tGeneOntology\\nD3YJ43\\tUniprot-TrEMBL\\nGO:0003677\\tGeneOntology\\nGO:0006541\\tGeneOntology\\nGO:0040029\\tGeneOntology\\nA_33_P3317211\\tAgilent\\nNP_001303266\\tRefSeq\\n11722683_a_at\\tAffy\\nGO:0008211\\tGeneOntology\\nGO:0051151\\tGeneOntology\\nNM_001110792\\tRefSeq\\nX89430_at\\tAffy\\nGO:2000820\\tGeneOntology\\nuc065cat.1\\tUCSC Genome Browser\\nGO:0003700\\tGeneOntology\\nGO:0047485\\tGeneOntology\\n4204\\tEntrez Gene\\nGO:0009405\\tGeneOntology\\nA0A0D9SEX1\\tUniprot-TrEMBL\\nGO:0098794\\tGeneOntology\\n3C2I\\tPDB\\nHs.200716\\tUniGene\\nGO:0000792\\tGeneOntology\\nuc065cax.1\\tUCSC Genome Browser\\n300055\\tOMIM\\n5BT2\\tPDB\\nGO:0006020\\tGeneOntology\\nGO:0031175\\tGeneOntology\\nuc065cbe.1\\tUCSC Genome Browser\\nGO:0008284\\tGeneOntology\\nuc065cba.1\\tUCSC Genome Browser\\nGO:0060291\\tGeneOntology\\n202618_s_at\\tAffy\\nGO:0016573\\tGeneOntology\\n17115453\\tAffy\\nA0A1B0GTV0\\tUniprot-TrEMBL\\nuc065cbi.1\\tUCSC Genome Browser\\nGO:0048167\\tGeneOntology\\nGO:0007616\\tGeneOntology\\nGO:0016571\\tGeneOntology\\nuc004fjw.3\\tUCSC Genome Browser\\nGO:0007613\\tGeneOntology\\nGO:0007612\\tGeneOntology\\nGO:0021549\\tGeneOntology\\n11722684_a_at\\tAffy\\nGO:0001078\\tGeneOntology\\nX94628_rna1_s_at\\tAffy\\nGO:0007585\\tGeneOntology\\nGO:0010468\\tGeneOntology\\nGO:0031061\\tGeneOntology\\nA_24_P237486\\tAgilent\\nGO:0050884\\tGeneOntology\\nGO:0000930\\tGeneOntology\\nGO:0005829\\tGeneOntology\\nuc065cau.1\\tUCSC Genome Browser\\nH7BY72\\tUniprot-TrEMBL\\n202616_s_at\\tAffy\\nGO:0006355\\tGeneOntology\\nuc065cay.1\\tUCSC Genome 
Browser\\nGO:0010971\\tGeneOntology\\n300673\\tOMIM\\nGO:0008542\\tGeneOntology\\nGO:0060079\\tGeneOntology\\nuc065cbf.1\\tUCSC Genome Browser\\nGO:0006122\\tGeneOntology\\nuc065cbb.1\\tUCSC Genome Browser\\nGO:0007052\\tGeneOntology\\nC9JH89\\tUniprot-TrEMBL\\nB5MCB4\\tUniprot-TrEMBL\\nGO:0032048\\tGeneOntology\\nGO:0050432\\tGeneOntology\\nGO:0001976\\tGeneOntology\\nI6LM39\\tUniprot-TrEMBL\\nGO:0005813\\tGeneOntology\\nILMN_1682091\\tIllumina\\nP51608\\tUniprot-TrEMBL\\n1QK9\\tPDB\\nGO:0006349\\tGeneOntology\\nGO:1900114\\tGeneOntology\\nGO:0000122\\tGeneOntology\\nGO:0006351\\tGeneOntology\\nGO:0008134\\tGeneOntology\\nILMN_1824898\\tIllumina\\n300260\\tOMIM\\n0006510725\\tIllumina\\n'\r +\r +\r +\r +You can also see the results are returned as a TSV file, consisting of two columns, the identifier and the matching database.\r +\r +We will want to convert this reply into a Python dictionary (with the identifier as key, as one database may have multiple identifiers):\r +\r +\r +```python\r +lines = response.text.split("\\n")\r +mappings = {}\r +for line in lines:\r + if ('\\t' in line):\r + tuple = line.split('\\t')\r + identifier = tuple[0]\r + database = tuple[1]\r + if (database == "Ensembl"):\r + mappings[identifier] = database\r +\r +print(mappings)\r +```\r +\r + {'ENSG00000169057': 'Ensembl'}\r +\r +\r +Alternatively, we can restrivct the return values from the BridgeDb webservice to just return Ensembl identifiers (system code `En`). 
For this, we add the `?dataSource=En` call parameter:\r +\r +\r +```python\r +callUrl = 'http://bridgedb-swagger.prod.openrisknet.org/Human/xrefs/H/MECP2?dataSource=En'\r +response = requests.get(callUrl)\r +response.text\r +```\r +\r +\r +\r +\r + 'ENSG00000169057\\tEnsembl\\n'\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/326?version=2" ; + schema1:keywords "Toxicology, jupyter" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "BridgeDb tutorial: Gene HGNC name to Ensembl identifier" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/326?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2023-09-07T08:42:48.614290" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/polish-with-long-reads" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "polish-with-long-reads/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.10" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=17" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-07-12 13:22:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=17" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9564 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:12Z" ; + schema1:dateModified "2024-06-11T12:55:12Z" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=17" ; + schema1:version 17 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-07-12 13:20:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9902 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:58Z" ; + schema1:dateModified "2024-06-11T12:54:58Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + ; + schema1:description "Implementation of the IMPaCT-Data quality control workflow for WES data in nf-core/sarek workflow. Sarek is an analysis pipeline to detect germline or somatic variants (pre-processing, variant calling and annotation) from WGS / targeted sequencing." ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.1030.1" ; + schema1:isBasedOn "https://github.com/EGA-archive/sarek-IMPaCT-data-QC.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for IMPaCT-Data quality control workflow implementation in nf-core/Sarek" ; + schema1:sdDatePublished "2024-07-12 13:17:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1030/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 19394 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-05T15:05:57Z" ; + schema1:dateModified "2024-06-12T13:08:47Z" ; + schema1:description "Implementation of the IMPaCT-Data quality control workflow for WES data in nf-core/sarek workflow. Sarek is an analysis pipeline to detect germline or somatic variants (pre-processing, variant calling and annotation) from WGS / targeted sequencing." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1030?version=2" ; + schema1:keywords "Bioinformatics, Nextflow, variant calling, wes, WGS, NGS, EGA-archive, quality control" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "IMPaCT-Data quality control workflow implementation in nf-core/Sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1030?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.120.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:34:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/120/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 53050 ; + schema1:creator , + ; + schema1:dateCreated "2021-07-01T14:51:52Z" ; + schema1:dateModified "2022-09-15T07:42:03Z" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/120?version=5" ; + schema1:isPartOf , + , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/120?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=24" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-07-12 13:19:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=24" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 21667 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:17Z" ; + schema1:dateModified "2024-06-11T12:55:17Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=24" ; + schema1:version 24 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-23T13:33:07.970641" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/gromacs-mmgbsa" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "gromacs-mmgbsa/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-07-12 13:18:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=10" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9999 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:02Z" ; + schema1:dateModified "2024-06-11T12:55:02Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=10" ; + schema1:version 10 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """Assembly polishing subworkflow: Racon polishing with short reads\r +\r +Inputs: short reads and assembly (usually pre-polished with other tools first, e.g. Racon + long reads; Medaka)\r +\r +Workflow steps: \r +* minimap2: short reads (R1 only) are mapped to the assembly => overlaps.paf. 
Minimap2 setting is for short reads.\r +* overlaps + short reads + assembly => Racon => polished assembly 1\r +* using polished assembly 1 as input; repeat minimap2 + racon => polished assembly 2\r +* Racon short-read polished assembly => Fasta statistics\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.228.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Racon polish with Illumina reads, x2" ; + schema1:sdDatePublished "2024-07-12 13:36:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/228/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13895 ; + schema1:creator ; + schema1:dateCreated "2021-11-08T05:50:40Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """Assembly polishing subworkflow: Racon polishing with short reads\r +\r +Inputs: short reads and assembly (usually pre-polished with other tools first, e.g. Racon + long reads; Medaka)\r +\r +Workflow steps: \r +* minimap2: short reads (R1 only) are mapped to the assembly => overlaps.paf. 
Minimap2 setting is for short reads.\r +* overlaps + short reads + assembly => Racon => polished assembly 1\r +* using polished assembly 1 as input; repeat minimap2 + racon => polished assembly 2\r +* Racon short-read polished assembly => Fasta statistics\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "Large-genome-assembly" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Racon polish with Illumina reads, x2" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/228?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 226509 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.127.5" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (Cluster90)" ; + schema1:sdDatePublished "2024-07-12 13:34:01 
+0100" ; + schema1:url "https://workflowhub.eu/workflows/127/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 39368 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-07-26T09:27:52Z" ; + schema1:dateModified "2023-07-26T09:28:39Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in 
Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/127?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (Cluster90)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_virtual-screening/blob/master/biobb_wf_virtual-screening/notebooks/clusterBindingSite/wf_vs_clusterBindingSite.ipynb" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-07-12 13:18:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=9" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8364 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:39Z" ; + schema1:dateModified "2024-06-11T12:54:39Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, related to the Galaxy training tutorial "[OBIS marine indicators](https://training.galaxyproject.org/training-material/topics/ecology/tutorials/obisindicators/tutorial.html)" .\r +\r +This workflow allows to compute and visualize marine biodiversity indicators from OBIS data.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/662?version=1" ; + schema1:isBasedOn "https://ecology.usegalaxy.eu/u/ylebras/w/workflow-constructed-from-history-imported-tuto-obis-asian-pacific" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Obis biodiversity indicator on Asian pacific" ; + schema1:sdDatePublished "2024-07-12 13:26:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/662/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6936 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-10T09:00:00Z" ; + schema1:dateModified "2023-11-10T09:00:00Z" ; + schema1:description """Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, related to the Galaxy training tutorial "[OBIS marine indicators](https://training.galaxyproject.org/training-material/topics/ecology/tutorials/obisindicators/tutorial.html)" .\r +\r +This workflow allows to compute and visualize marine biodiversity indicators from OBIS data.\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Obis biodiversity indicator on Asian pacific" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/662?version=1" ; + schema1:version 1 ; + ns1:input . + + a schema1:Dataset ; + schema1:datePublished "2024-02-14T16:08:01.432692" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Purge-duplicates-one-haplotype-VGP6b/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Alternative splicing analysis using RNA-seq." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1018?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/rnasplice" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnasplice" ; + schema1:sdDatePublished "2024-07-12 13:19:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1018/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13873 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:14Z" ; + schema1:dateModified "2024-06-11T12:55:14Z" ; + schema1:description "Alternative splicing analysis using RNA-seq." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1018?version=4" ; + schema1:keywords "alternative-splicing, rna, rna-seq, splicing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnasplice" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1018?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Basic processing of a QC-filtered Anndata Object. UMAP, clustering e.t.c " ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/468?version=3" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq_QCtoBasicProcessing" ; + schema1:sdDatePublished "2024-07-12 13:22:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/468/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 33510 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-05-30T05:52:35Z" ; + schema1:dateModified "2024-05-30T05:52:35Z" ; + schema1:description "Basic processing of a QC-filtered Anndata Object. 
UMAP, clustering e.t.c " ; + schema1:isBasedOn "https://workflowhub.eu/workflows/468?version=2" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "scRNAseq_QCtoBasicProcessing" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/468?version=3" ; + schema1:version 3 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2023-10-30T06:18:00.536806" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Purge-duplicate-contigs-VGP6/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicate-contigs-VGP6/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:version "0.1" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/772?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for How Usable Are Published Permeability Data?" ; + schema1:sdDatePublished "2024-07-12 13:24:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/772/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 437141 ; + schema1:creator ; + schema1:dateCreated "2024-03-01T08:58:59Z" ; + schema1:dateModified "2024-03-18T09:40:50Z" ; + schema1:description "" ; + schema1:image ; + schema1:keywords "Cheminformatics, Databases, Permeability, Knime" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "How Usable Are Published Permeability Data?" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/772?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 39634 . + + a schema1:Dataset ; + schema1:datePublished "2024-04-22T12:09:22.942162" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-only-VGP3/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:Dataset ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:mainEntity ; + schema1:name "main (v0.1)" ; + schema1:sdDatePublished "2021-07-23 10:18:19 +0100" ; + schema1:softwareVersion "v0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 14243 ; + schema1:name "main" ; + schema1:programmingLanguage . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-12-01T20:48:14.125372" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + , + ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-atac-cutandrun" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.4" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-pe" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.4" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """hutch-workflow executes rquest in one shot.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.471.1" ; + schema1:isBasedOn "https://github.com/HDRUK/rquest-omop-worker-workflows" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for hutch-workflow (x86)" ; + schema1:sdDatePublished "2024-07-12 13:27:10 +0100" ; + schema1:url "https://workflowhub.eu/workflows/471/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 9092 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 854 ; + schema1:creator ; + schema1:dateCreated "2023-05-15T16:15:53Z" ; + schema1:dateModified "2023-10-10T14:56:47Z" ; + schema1:description """hutch-workflow executes rquest in one shot.\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/471?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "hutch-workflow (x86)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/471?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + ; + ns1:output . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """The project allowed us to manage and build structured code scripts on the Jupyter Notebook, a simple web application which is user-friendly, flexible to use in the research community. The script is developed to address the specific needs of research between different platforms of dataset.\r +These stakeholders have developed their own platforms for the annotation and standardisation of both data and metadata produced within their respective field.\r +-The INFRAFRONTIER - European Mutant Mouse Archive (EMMA) comprises over 7200 mutant mouse lines that are extensively integrated and enriched with other public dataset.\r +-The EU-OpenScreen offers compound screening protocols containing several metadata and will contribute to the development of tools for linking to the chemical entity database.\r +-The IDR Image Data Resource is a public repository of reference image datasets from published scientific studies, where the community can submit, search and access high-quality bio-image data. 
\r +-The CIM-XNAT is an XNAT deployment of the Molecular Imaging Center at UniTo that offers a suite of tools for uploading preclinical images.\r +To address the challenges of integrating several EU-RI datasets with focus on preclinical and discovery research bioimaging, our aim is to develop cross researching queries through a web based interface to combine the resources of the RIs for integrating the information associated with data belonging to the involved RIs. Furthermore, the open-source tool provides users with free, open access to collections of datasets distributed over multiple sources that result from searches by specific keywords. \r +The script allows the cross research in different fields of research as: Species, Strain, Gene, Cell line, Disease model, Chemical Compound.\r +The novel aspects of this tool are mainly:\r +a) user friendly, e.g. the user has the flexibility to research among the dataset easily with a simple API, intuitive for researchers and biomedical users. \r +b) the possibility of making a research between different platforms and repositories, from a unique simple way. \r +c) the workflow project follows the FAIR principles in the treatment of data and datasets. \r +The access to Notebook Jupyter needs the installation of Anaconda, which consents to open the web application. \r +Inside the Jupyter, the script was built using Python. The query code is also easy to download and share in a .ipynb file.\r +A visual representation of the detailed results (dataset, metadata, information, query results) of the workflow can be printed immediately after the query run. 
\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/516?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Life Science cross-RI (Research Infrastructure) project" ; + schema1:sdDatePublished "2024-07-12 13:32:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/516/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 532334 ; + schema1:creator ; + schema1:dateCreated "2023-06-27T07:39:45Z" ; + schema1:dateModified "2023-06-27T09:25:04Z" ; + schema1:description """The project allowed us to manage and build structured code scripts on the Jupyter Notebook, a simple web application which is user-friendly, flexible to use in the research community. The script is developed to address the specific needs of research between different platforms of dataset.\r +These stakeholders have developed their own platforms for the annotation and standardisation of both data and metadata produced within their respective field.\r +-The INFRAFRONTIER - European Mutant Mouse Archive (EMMA) comprises over 7200 mutant mouse lines that are extensively integrated and enriched with other public dataset.\r +-The EU-OpenScreen offers compound screening protocols containing several metadata and will contribute to the development of tools for linking to the chemical entity database.\r +-The IDR Image Data Resource is a public repository of reference image datasets from published scientific studies, where the community can submit, search and access high-quality bio-image data. 
\r +-The CIM-XNAT is an XNAT deployment of the Molecular Imaging Center at UniTo that offers a suite of tools for uploading preclinical images.\r +To address the challenges of integrating several EU-RI datasets with focus on preclinical and discovery research bioimaging, our aim is to develop cross researching queries through a web based interface to combine the resources of the RIs for integrating the information associated with data belonging to the involved RIs. Furthermore, the open-source tool provides users with free, open access to collections of datasets distributed over multiple sources that result from searches by specific keywords. \r +The script allows the cross research in different fields of research as: Species, Strain, Gene, Cell line, Disease model, Chemical Compound.\r +The novel aspects of this tool are mainly:\r +a) user friendly, e.g. the user has the flexibility to research among the dataset easily with a simple API, intuitive for researchers and biomedical users. \r +b) the possibility of making a research between different platforms and repositories, from a unique simple way. \r +c) the workflow project follows the FAIR principles in the treatment of data and datasets. \r +The access to Notebook Jupyter needs the installation of Anaconda, which consents to open the web application. \r +Inside the Jupyter, the script was built using Python. The query code is also easy to download and share in a .ipynb file.\r +A visual representation of the detailed results (dataset, metadata, information, query results) of the workflow can be printed immediately after the query run. \r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Life Science cross-RI (Research Infrastructure) project" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/516?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for the analysis of CRISPR data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/975?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/crisprseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/crisprseq" ; + schema1:sdDatePublished "2024-07-12 13:18:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/975/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11160 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:46Z" ; + schema1:dateModified "2024-06-11T12:54:46Z" ; + schema1:description "Pipeline for the analysis of CRISPR data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/975?version=4" ; + schema1:keywords "crispr, crispr-analysis, crispr-cas, NGS" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/crisprseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/975?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "The workflow takes trimmed HiC forward and reverse reads, and Hap1/Hap2 assemblies to produce Hap1 and Hap2 scaffolded assemblies using YaHS. It also runs all the QC analyses (gfastats, BUSCO, Merqury and Pretext)." 
; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.608.1" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/galaxy" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ERGA HiC Hap1Hap2 Scaffolding+QC YaHS v2309 (WF4)" ; + schema1:sdDatePublished "2024-07-12 13:26:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/608/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 84357 ; + schema1:creator , + ; + schema1:dateCreated "2023-10-11T15:06:56Z" ; + schema1:dateModified "2024-03-13T09:55:11Z" ; + schema1:description "The workflow takes trimmed HiC forward and reverse reads, and Hap1/Hap2 assemblies to produce Hap1 and Hap2 scaffolded assemblies using YaHS. It also runs all the QC analyses (gfastats, BUSCO, Merqury and Pretext)." ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "ERGA, Assembly+QC, Hi-C" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ERGA HiC Hap1Hap2 Scaffolding+QC YaHS v2309 (WF4)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/4.Scaffolding/Galaxy-Workflow-ERGA_HiC_Hap1Hap2_Scaffolding_QC_YaHS_v2309_(WF4).ga" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 592516 ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/4.Scaffolding/pics/Scaf_yahs_h1h2_2309.png" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:description """Workflow for quality assessment of paired reads and classification using NGTax 2.0 and functional annotation using picrust2. \r +In addition files are exported to their respective subfolders for easier data management in a later stage.\r +Steps: \r + - FastQC (read quality control)\r + - NGTax 2.0\r + - Picrust 2\r + - Export module\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/154?version=1" ; + schema1:isBasedOn "https://git.wur.nl/unlock/cwl/-/raw/master/cwl/workflows/workflow_ngtax_picrust2.cwl" ; + schema1:license "CC0-1.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Quality assessment, amplicon classification and functional prediction" ; + schema1:sdDatePublished "2024-07-12 13:36:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/154/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 29547 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5820 ; + schema1:creator , + ; + schema1:dateCreated "2021-08-30T07:18:44Z" ; + schema1:dateModified "2021-08-30T07:18:44Z" ; + schema1:description """Workflow for quality assessment of paired reads and classification using NGTax 2.0 and functional annotation using picrust2. 
\r +In addition files are exported to their respective subfolders for easier data management in a later stage.\r +Steps: \r + - FastQC (read quality control)\r + - NGTax 2.0\r + - Picrust 2\r + - Export module\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/154?version=1" ; + schema1:keywords "Amplicon, Classification, CWL" ; + schema1:license "https://spdx.org/licenses/CC0-1.0" ; + schema1:name "Quality assessment, amplicon classification and functional prediction" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/154?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=27" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-07-12 13:21:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=27" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13557 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:51Z" ; + schema1:dateModified "2024-06-11T12:54:51Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=27" ; + schema1:version 27 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """[![Development](https://img.shields.io/badge/development-active-blue.svg)](https://img.shields.io/badge/development-active-blue.svg)\r +[![Reads2Map](https://circleci.com/gh/Cristianetaniguti/Reads2Map.svg?style=svg)](https://app.circleci.com/pipelines/github/Cristianetaniguti/Reads2Map)\r +\r +## Reads2Map \r +\r +Reads2Map presents a collection of [WDL workflows](https://openwdl.org/) to build linkage maps from sequencing reads. Each workflow release is described in the [Read2Map releases page](https://github.com/Cristianetaniguti/Reads2Map/releases). \r +\r +The main workflows are the `EmpiricalReads2Map.wdl` and the `SimulatedReads2Map.wdl`. The `EmpiricalReads2Map.wdl` is composed by the `EmpiricalSNPCalling.wdl` that performs the SNP calling, and the `EmpiricalMaps.wdl` that performs the genotype calling and map building in empirical reads. 
The `SimulatedReads2Map.wdl` simulates Illumina reads for RADseq, exome, or WGS data and performs the SNP and genotype calling and genetic map building.\r +\r +By now, [GATK](https://github.com/broadinstitute/gatk), [Freebayes](https://github.com/ekg/freebayes) are included for SNP calling; [updog](https://github.com/dcgerard/updog), [polyRAD](https://github.com/lvclark/polyRAD), [SuperMASSA](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0030906) for dosage calling; and [OneMap](https://github.com/augusto-garcia/onemap), and [GUSMap](https://github.com/tpbilton/GUSMap) for linkage map build.\r +\r +![math_meth2](https://user-images.githubusercontent.com/7572527/203172239-e4d2d857-84e2-48c5-bb88-01052a287004.png)\r +\r +## How to use\r +\r +Multiple systems are available to run WDL workflows such as Cromwell, miniWDL, and dxWDL. See further information in the [openwdl documentation](https://github.com/openwdl/wdl#execution-engines).\r +\r +To run a pipeline, first navigate to [Reads2Map releases page](https://github.com/Cristianetaniguti/Reads2Map/releases), search for the pipeline tag you which to run, and download the pipeline’s assets (the WDL workflow, the JSON, and the ZIP with accompanying dependencies).\r +\r +## Documentation\r +\r +Check the description of the inputs for the pipelines:\r +\r +* [EmpiricalReads2Map (EmpiricalSNPCalling and EmpiricalMaps)](https://cristianetaniguti.github.io/Tutorials/Reads2Map/EmpiricalReads.html)\r +\r +* [SimulatedReads2Map](https://cristianetaniguti.github.io/Tutorials/Reads2Map/simulatedreads.html)\r +\r +Check how to evaluate the workflows results in Reads2MapApp Shiny:\r +\r +* [Reads2MapApp](https://github.com/Cristianetaniguti/Reads2MapApp)\r +\r +Once you selected the best pipeline using a subset of your data, you can build a complete high-density linkage map:\r +\r +* [A Guide to Build High-Density Linkage 
Maps](https://cristianetaniguti.github.io/Tutorials/onemap/Quick_HighDens/High_density_maps.html)\r +\r +Check more information and examples of usage in:\r +\r +* [Taniguti, C. H., Taniguti, L. M., Amadeu, R. R., Mollinari, M., Da, G., Pereira, S., Riera-Lizarazu, O., Lau, J., Byrne, D., de Siqueira Gesteira, G., De, T., Oliveira, P., Ferreira, G. C., & Franco Garcia, A. A. Developing best practices for genotyping-by-sequencing analysis using linkage maps as benchmarks. BioRxiv. https://doi.org/10.1101/2022.11.24.517847](https://www.biorxiv.org/content/10.1101/2022.11.24.517847v2)\r +\r +## Third-party software and images\r +\r +- [BWA](https://github.com/lh3/bwa) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Used to align simulated reads to reference;\r +- [cutadapt](https://github.com/marcelm/cutadapt) in [cristaniguti/ pirs-ddrad-cutadapt:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/pirs-ddrad-cutadapt): Trim simulated reads;\r +- [ddRADseqTools](https://github.com/GGFHF/ddRADseqTools) in [cristaniguti/ pirs-ddrad-cutadapt:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/pirs-ddrad-cutadapt): Set of applications useful to in silico design and testing of double digest RADseq (ddRADseq) experiments;\r +- [Freebayes](https://github.com/ekg/freebayes) in [Cristaniguti/freebayes:0.0.1](): Variant call step;\r +- [GATK](https://github.com/broadinstitute/gatk) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Variant call step using Haplotype Caller, GenomicsDBImport and GenotypeGVCFs;\r +- [PedigreeSim](https://github.com/PBR/pedigreeSim?files=1) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Simulates progeny genotypes from parents genotypes for different 
types of populations;\r +- [picard](https://github.com/broadinstitute/picard) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Process alignment files;\r +- [pirs](https://github.com/galaxy001/pirs) in [cristaniguti/ pirs-ddrad-cutadapt:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/pirs-ddrad-cutadapt): To generate simulates paired-end reads from a reference genome;\r +- [samtools](https://github.com/samtools/samtools) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Process alignment files;\r +- [SimuSCoP](https://github.com/qasimyu/simuscop) in [cristaniguti/simuscopr:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/simuscopr): Exome and WGS Illumina reads simulations;\r +- [RADinitio](http://catchenlab.life.illinois.edu/radinitio/) in [ cristaniguti/radinitio:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/radinitio): RADseq Illumina reads simulation;\r +- [SuperMASSA](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0030906) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Efficient Exact Maximum a Posteriori Computation for Bayesian SNP Genotyping in Polyploids;\r +- [bcftools](https://github.com/samtools/bcftools) in [lifebitai/bcftools:1.10.2](https://hub.docker.com/r/lifebitai/bcftools): utilities for variant calling and manipulating VCFs and BCFs;\r +- [vcftools](http://vcftools.sourceforge.net/) in [cristaniguti/split_markers:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/split_markers): program package designed for working with VCF files.\r +- [MCHap](https://github.com/PlantandFoodResearch/MCHap) in [cristaniguti/mchap:0.7.0](https://hub.docker.com/repository/docker/cristaniguti/mchap): Polyploid 
micro-haplotype assembly using Markov chain Monte Carlo simulation.\r +\r +### R packages\r +\r +- [OneMap](https://github.com/augusto-garcia/onemap) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Is a software for constructing genetic maps in experimental crosses: full-sib, RILs, F2 and backcrosses;\r +- [Reads2MapTools](https://github.com/Cristianetaniguti/Reads2MapTools) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Support package to perform mapping populations simulations and genotyping for OneMap genetic map building\r +- [GUSMap](https://github.com/tpbilton/GUSMap): Genotyping Uncertainty with Sequencing data and linkage MAPping\r +- [updog](https://github.com/dcgerard/updog) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Flexible Genotyping of Polyploids using Next Generation Sequencing Data\r +- [polyRAD](https://github.com/lvclark/polyRAD) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Genotype Calling with Uncertainty from Sequencing Data in Polyploids\r +- [Reads2MapApp](https://github.com/Cristianetaniguti/Reads2MapApp) in [cristaniguti/reads2mapApp:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Shiny app to evaluate Reads2Map workflows results\r +- [simuscopR](https://github.com/Cristianetaniguti/simuscopR) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Wrap-up R package for SimusCop simulations.""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.410.1" ; + schema1:isBasedOn "https://github.com/Cristianetaniguti/Reads2Map" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for SimulatedReads2Map" ; + schema1:sdDatePublished "2024-07-12 13:34:44 +0100" ; + schema1:url 
"https://workflowhub.eu/workflows/410/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2579 ; + schema1:creator ; + schema1:dateCreated "2022-11-29T20:17:01Z" ; + schema1:dateModified "2023-01-16T14:04:54Z" ; + schema1:description """[![Development](https://img.shields.io/badge/development-active-blue.svg)](https://img.shields.io/badge/development-active-blue.svg)\r +[![Reads2Map](https://circleci.com/gh/Cristianetaniguti/Reads2Map.svg?style=svg)](https://app.circleci.com/pipelines/github/Cristianetaniguti/Reads2Map)\r +\r +## Reads2Map \r +\r +Reads2Map presents a collection of [WDL workflows](https://openwdl.org/) to build linkage maps from sequencing reads. Each workflow release is described in the [Read2Map releases page](https://github.com/Cristianetaniguti/Reads2Map/releases). \r +\r +The main workflows are the `EmpiricalReads2Map.wdl` and the `SimulatedReads2Map.wdl`. The `EmpiricalReads2Map.wdl` is composed by the `EmpiricalSNPCalling.wdl` that performs the SNP calling, and the `EmpiricalMaps.wdl` that performs the genotype calling and map building in empirical reads. 
The `SimulatedReads2Map.wdl` simulates Illumina reads for RADseq, exome, or WGS data and performs the SNP and genotype calling and genetic map building.\r +\r +By now, [GATK](https://github.com/broadinstitute/gatk), [Freebayes](https://github.com/ekg/freebayes) are included for SNP calling; [updog](https://github.com/dcgerard/updog), [polyRAD](https://github.com/lvclark/polyRAD), [SuperMASSA](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0030906) for dosage calling; and [OneMap](https://github.com/augusto-garcia/onemap), and [GUSMap](https://github.com/tpbilton/GUSMap) for linkage map build.\r +\r +![math_meth2](https://user-images.githubusercontent.com/7572527/203172239-e4d2d857-84e2-48c5-bb88-01052a287004.png)\r +\r +## How to use\r +\r +Multiple systems are available to run WDL workflows such as Cromwell, miniWDL, and dxWDL. See further information in the [openwdl documentation](https://github.com/openwdl/wdl#execution-engines).\r +\r +To run a pipeline, first navigate to [Reads2Map releases page](https://github.com/Cristianetaniguti/Reads2Map/releases), search for the pipeline tag you which to run, and download the pipeline’s assets (the WDL workflow, the JSON, and the ZIP with accompanying dependencies).\r +\r +## Documentation\r +\r +Check the description of the inputs for the pipelines:\r +\r +* [EmpiricalReads2Map (EmpiricalSNPCalling and EmpiricalMaps)](https://cristianetaniguti.github.io/Tutorials/Reads2Map/EmpiricalReads.html)\r +\r +* [SimulatedReads2Map](https://cristianetaniguti.github.io/Tutorials/Reads2Map/simulatedreads.html)\r +\r +Check how to evaluate the workflows results in Reads2MapApp Shiny:\r +\r +* [Reads2MapApp](https://github.com/Cristianetaniguti/Reads2MapApp)\r +\r +Once you selected the best pipeline using a subset of your data, you can build a complete high-density linkage map:\r +\r +* [A Guide to Build High-Density Linkage 
Maps](https://cristianetaniguti.github.io/Tutorials/onemap/Quick_HighDens/High_density_maps.html)\r +\r +Check more information and examples of usage in:\r +\r +* [Taniguti, C. H., Taniguti, L. M., Amadeu, R. R., Mollinari, M., Da, G., Pereira, S., Riera-Lizarazu, O., Lau, J., Byrne, D., de Siqueira Gesteira, G., De, T., Oliveira, P., Ferreira, G. C., & Franco Garcia, A. A. Developing best practices for genotyping-by-sequencing analysis using linkage maps as benchmarks. BioRxiv. https://doi.org/10.1101/2022.11.24.517847](https://www.biorxiv.org/content/10.1101/2022.11.24.517847v2)\r +\r +## Third-party software and images\r +\r +- [BWA](https://github.com/lh3/bwa) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Used to align simulated reads to reference;\r +- [cutadapt](https://github.com/marcelm/cutadapt) in [cristaniguti/ pirs-ddrad-cutadapt:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/pirs-ddrad-cutadapt): Trim simulated reads;\r +- [ddRADseqTools](https://github.com/GGFHF/ddRADseqTools) in [cristaniguti/ pirs-ddrad-cutadapt:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/pirs-ddrad-cutadapt): Set of applications useful to in silico design and testing of double digest RADseq (ddRADseq) experiments;\r +- [Freebayes](https://github.com/ekg/freebayes) in [Cristaniguti/freebayes:0.0.1](): Variant call step;\r +- [GATK](https://github.com/broadinstitute/gatk) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Variant call step using Haplotype Caller, GenomicsDBImport and GenotypeGVCFs;\r +- [PedigreeSim](https://github.com/PBR/pedigreeSim?files=1) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Simulates progeny genotypes from parents genotypes for different 
types of populations;\r +- [picard](https://github.com/broadinstitute/picard) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Process alignment files;\r +- [pirs](https://github.com/galaxy001/pirs) in [cristaniguti/ pirs-ddrad-cutadapt:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/pirs-ddrad-cutadapt): To generate simulates paired-end reads from a reference genome;\r +- [samtools](https://github.com/samtools/samtools) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Process alignment files;\r +- [SimuSCoP](https://github.com/qasimyu/simuscop) in [cristaniguti/simuscopr:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/simuscopr): Exome and WGS Illumina reads simulations;\r +- [RADinitio](http://catchenlab.life.illinois.edu/radinitio/) in [ cristaniguti/radinitio:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/radinitio): RADseq Illumina reads simulation;\r +- [SuperMASSA](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0030906) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Efficient Exact Maximum a Posteriori Computation for Bayesian SNP Genotyping in Polyploids;\r +- [bcftools](https://github.com/samtools/bcftools) in [lifebitai/bcftools:1.10.2](https://hub.docker.com/r/lifebitai/bcftools): utilities for variant calling and manipulating VCFs and BCFs;\r +- [vcftools](http://vcftools.sourceforge.net/) in [cristaniguti/split_markers:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/split_markers): program package designed for working with VCF files.\r +- [MCHap](https://github.com/PlantandFoodResearch/MCHap) in [cristaniguti/mchap:0.7.0](https://hub.docker.com/repository/docker/cristaniguti/mchap): Polyploid 
micro-haplotype assembly using Markov chain Monte Carlo simulation.\r +\r +### R packages\r +\r +- [OneMap](https://github.com/augusto-garcia/onemap) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Is a software for constructing genetic maps in experimental crosses: full-sib, RILs, F2 and backcrosses;\r +- [Reads2MapTools](https://github.com/Cristianetaniguti/Reads2MapTools) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Support package to perform mapping populations simulations and genotyping for OneMap genetic map building\r +- [GUSMap](https://github.com/tpbilton/GUSMap): Genotyping Uncertainty with Sequencing data and linkage MAPping\r +- [updog](https://github.com/dcgerard/updog) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Flexible Genotyping of Polyploids using Next Generation Sequencing Data\r +- [polyRAD](https://github.com/lvclark/polyRAD) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Genotype Calling with Uncertainty from Sequencing Data in Polyploids\r +- [Reads2MapApp](https://github.com/Cristianetaniguti/Reads2MapApp) in [cristaniguti/reads2mapApp:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Shiny app to evaluate Reads2Map workflows results\r +- [simuscopR](https://github.com/Cristianetaniguti/simuscopR) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Wrap-up R package for SimusCop simulations.""" ; + schema1:keywords "linkage_map, variant_calling, WDL, reads_simulation" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "SimulatedReads2Map" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/410?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-04-29T16:50:30.687834" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-only-VGP3/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """MGnify (http://www.ebi.ac.uk/metagenomics) provides a free to use platform for the assembly, analysis and archiving of microbiome data derived from sequencing microbial populations that are present in particular environments. Over the past 2 years, MGnify (formerly EBI Metagenomics) has more than doubled the number of publicly available analysed datasets held within the resource. Recently, an updated approach to data analysis has been unveiled (version 5.0), replacing the previous single pipeline with multiple analysis pipelines that are tailored according to the input data, and that are formally described using the Common Workflow Language, enabling greater provenance, reusability, and reproducibility. MGnify's new analysis pipelines offer additional approaches for taxonomic assertions based on ribosomal internal transcribed spacer regions (ITS1/2) and expanded protein functional annotations. Biochemical pathways and systems predictions have also been added for assembled contigs. MGnify's growing focus on the assembly of metagenomic data has also seen the number of datasets it has assembled and analysed increase six-fold. The non-redundant protein database constructed from the proteins encoded by these assemblies now exceeds 1 billion sequences. 
Meanwhile, a newly developed contig viewer provides fine-grained visualisation of the assembled contigs and their enriched annotations.\r +\r +Documentation: https://docs.mgnify.org/en/latest/analysis.html#amplicon-analysis-pipeline\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/361?version=1" ; + schema1:isBasedOn "https://github.com/EBI-Metagenomics/pipeline-v5.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for MGnify - amplicon analysis pipeline" ; + schema1:sdDatePublished "2024-07-12 13:35:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/361/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 37548 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4228 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2022-06-07T08:28:11Z" ; + schema1:dateModified "2023-01-16T14:01:07Z" ; + schema1:description """MGnify (http://www.ebi.ac.uk/metagenomics) provides a free to use platform for the assembly, analysis and archiving of microbiome data derived from sequencing microbial populations that are present in particular environments. Over the past 2 years, MGnify (formerly EBI Metagenomics) has more than doubled the number of publicly available analysed datasets held within the resource. Recently, an updated approach to data analysis has been unveiled (version 5.0), replacing the previous single pipeline with multiple analysis pipelines that are tailored according to the input data, and that are formally described using the Common Workflow Language, enabling greater provenance, reusability, and reproducibility. 
MGnify's new analysis pipelines offer additional approaches for taxonomic assertions based on ribosomal internal transcribed spacer regions (ITS1/2) and expanded protein functional annotations. Biochemical pathways and systems predictions have also been added for assembled contigs. MGnify's growing focus on the assembly of metagenomic data has also seen the number of datasets it has assembled and analysed increase six-fold. The non-redundant protein database constructed from the proteins encoded by these assemblies now exceeds 1 billion sequences. Meanwhile, a newly developed contig viewer provides fine-grained visualisation of the assembled contigs and their enriched annotations.\r +\r +Documentation: https://docs.mgnify.org/en/latest/analysis.html#amplicon-analysis-pipeline\r +""" ; + schema1:image ; + schema1:keywords "CWL, Metagenomics, rna, workflow" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "MGnify - amplicon analysis pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/361?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-07-12 13:22:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5800 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:11Z" ; + schema1:dateModified "2024-06-11T12:55:11Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=11" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-07-12 13:18:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=11" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10551 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:10Z" ; + schema1:dateModified "2024-06-11T12:55:10Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=11" ; + schema1:version 11 . + + a schema1:Dataset ; + schema1:datePublished "2024-04-22T14:55:25.506908" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Scaffolding-Bionano-VGP7" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Scaffolding-Bionano-VGP7/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache 
License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.132.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Amber Constant pH MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/132/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 54708 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T08:40:22Z" ; + schema1:dateModified "2023-04-14T08:41:23Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under 
the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/132?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Amber Constant pH MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_amber_md_setup/master/biobb_wf_amber_md_setup/notebooks/mdsetup_ph/biobb_amber_CpHMD_notebook.ipynb" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, related to the Galaxy training tutorial "[Sentinel 2 biodiversity](https://training.galaxyproject.org/training-material/topics/ecology/tutorials/species-distribution-modeling/tutorial.html)" .\r +\r +This workflow allows to analyze remote sensing sentinel 2 satellites data to compute spectral indices such as the NDVI and visualizing biodiversity indicators\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/657?version=1" ; + schema1:isBasedOn "https://ecology.usegalaxy.eu/u/ylebras/w/remote-sensing-sentinel-2-data-analysis-to-produce-biodiversity-metrics" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Remote sensing Sentinel 2 data analysis to produce biodiversity metrics" ; + schema1:sdDatePublished "2024-07-12 13:26:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/657/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 20815 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-11-09T12:52:32Z" ; + schema1:dateModified "2023-11-09T20:59:33Z" ; + schema1:description """Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, related to the Galaxy training tutorial "[Sentinel 2 biodiversity](https://training.galaxyproject.org/training-material/topics/ecology/tutorials/species-distribution-modeling/tutorial.html)" .\r +\r +This workflow allows to analyze remote sensing sentinel 2 satellites data to compute spectral indices such as the NDVI and visualizing biodiversity indicators\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Remote sensing Sentinel 2 data analysis to produce biodiversity metrics" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/657?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """![workflow](https://github.com/naturalis/barcode-constrained-phylogeny/actions/workflows/python-package-conda.yml/badge.svg)\r +[![License: Apache-2.0](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.10519081.svg)](https://doi.org/10.5281/zenodo.10519081)\r +\r +![Logo](https://github.com/naturalis/barcode-constrained-phylogeny/blob/main/doc/logo-small.png?raw=true)\r +\r +# Bactria: BarCode TRee Inference and Analysis\r +This repository contains code and data for building very large, topologically-constrained \r +barcode phylogenies through a divide-and-conquer strategy. Such trees are useful as \r +reference materials for curating barcode data by detecting rogue terminals (indicating\r +incorrect taxonomic annotation) and in the comparable calculation of alpha and beta \r +biodiversity metrics across metabarcoding assays. \r +\r +The input data for the approach we develop here currently comes from BOLD data dumps. \r +The international database [BOLD Systems](https://www.boldsystems.org/index.php) \r +contains DNA barcodes for hundreds of thousands of species, with multiple barcodes per \r +species. The data dumps we use here are TSV files whose columns conform to the nascent\r +BCDM (barcode data model) vocabulary. As such, other data sources that conform to this\r +vocabulary could in the future be used as well, such as [UNITE](https://unite.ut.ee/).\r +\r +Theoretically, such data could be filtered and aligned per DNA marker to make \r +phylogenetic trees. 
However, there are two limiting factors: building very large \r +phylogenies is computationally intensive, and barcodes are not considered ideal for \r +building big trees because they are short (providing insufficient signal to resolve large \r +trees) and because they tend to saturate across large patristic distances.\r +\r +![concept](https://github.com/naturalis/barcode-constrained-phylogeny/blob/main/doc/concept.png)\r +\r +Both problems can be mitigated by using the \r +[Open Tree of Life](https://tree.opentreeoflife.org/opentree/argus/opentree13.4@ott93302) \r +as a further source of phylogenetic signal. The BOLD data can be split into chunks that \r +correspond to Open Tree of Life clades. These chunks can be made into alignments and \r +subtrees. The OpenTOL can be used as a constraint in the algorithms to make these. The \r +chunks are then combined in a large synthesis by grafting them on a backbone made from \r +exemplar taxa from the subtrees. Here too, the OpenTOL is a source of phylogenetic \r +constraint.\r +\r +In this repository this concept is developed for both animal species and plant species.\r +\r +## Installation\r +\r +The pipeline and its dependencies are managed using conda. On a linux or osx system, you \r +can follow these steps to set up the `bactria` Conda environment using an `environment.yml` \r +file and a `requirements.txt` file:\r +\r +1. **Clone the Repository:** \r + Clone the repository containing the environment files to your local machine:\r + ```bash\r + git clone https://github.com/naturalis/barcode-constrained-phylogeny.git\r + cd barcode-constrained-phylogeny\r + ```\r +2. **Create the Conda Environment:**\r + Create the bactria Conda environment using the environment.yml file with the following \r + command:\r + ```bash\r + conda env create -f workflow/envs/environment.yml\r + ```\r + This command will create a new Conda environment named bactria with the packages \r + specified in the environment.yml file. 
This step is largely a placeholder because\r + most of the dependency management is handled at the level of individual pipeline\r + steps, which each have their own environment specification.\r +3. **Activate the Environment:**\r + After creating the environment, activate it using the conda activate command:\r + ```bash\r + conda activate bactria\r + ```\r +4. **Verify the Environment:**\r + Verify that the bactria environment was set up correctly and that all packages were \r + installed using the conda list command:\r + ```bash\r + conda list\r + ```\r + This command will list all packages installed in the active conda environment. You should \r + see all the packages specified in the environment.yml file and the requirements.txt file.\r +\r +## How to run\r +\r +The pipeline is implemented using snakemake, which is available within the conda \r +environment that results from the installation. Important before running the snakemake pipeline \r +is to change in [config/config.yaml](config/config.yaml) the number of threads available on your \r +computer. Which marker gene is used in the pipeline is also specified in the config.yaml (default \r +COI-5P). Prior to execution, the BOLD data package to use (we used the \r +[release of 30 December 2022](https://www.boldsystems.org/index.php/datapackage?id=BOLD_Public.30-Dec-2022)) \r +must be downloaded manually and stored in the [resources/](resources/) directory. If a BOLD release \r +from another date is used the file names in config.yaml need to be updated. 
\r +\r +How to run the entire pipeline:\r +\r +```bash \r +snakemake -j {number of threads} --use-conda\r +```\r +\r +Snakemake rules can be performed separately:\r +```bash \r +snakemake -R {Rule} -j {number of threads} --use-conda\r +```\r +\r +Enter the same number at {number of threads} as you filled in previously in src/config.yaml.\r +In {Rule} insert the rule to be performed.\r +\r +Here is an overview of all the rules in the Snakefile:\r +\r +![graphviz (1)](https://github.com/naturalis/barcode-constrained-phylogeny/blob/main/doc/dag.svg)\r +(zoomed view is available [here](https://raw.githubusercontent.com/naturalis/barcode-constrained-phylogeny/main/doc/dag.svg))\r +\r +## Repository layout\r +\r +Below is the top-level layout of the repository. This layout is in line with \r +[community standards](https://snakemake.readthedocs.io/en/stable/snakefiles/deployment.html) and must be adhered to.\r +All of these subfolders contains further explanatory READMEs to explain their contents in more detail.\r +\r +- [config](config/) - configuration files\r +- [doc](doc/) - documentation and background literature\r +- [logs](logs/) - where log files are written during pipeline runtime\r +- [resources](resources/) - external data resources (from BOLD and OpenTree) are downloaded here\r +- [results](results/) - intermediate and final results are generated here\r +- [workflow](workflow/) - script source code and driver snakefile \r +\r +## License\r +\r +© 2023 Naturalis Biodiversity Center\r +\r +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except \r +in compliance with the License. 
You may obtain a copy of the License at\r +\r +[http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)\r + \r +Unless required by applicable law or agreed to in writing, software distributed under the License \r +is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express \r +or implied. See the License for the specific language governing permissions and limitations under \r +the License.""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/706?version=1" ; + schema1:isBasedOn "https://github.com/naturalis/barcode-constrained-phylogeny.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Bactria: BarCode TRee Inference and Analysis" ; + schema1:sdDatePublished "2024-07-12 13:24:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/706/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 131932 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13366 ; + schema1:dateCreated "2024-01-24T10:38:28Z" ; + schema1:dateModified "2024-02-05T10:09:43Z" ; + schema1:description """![workflow](https://github.com/naturalis/barcode-constrained-phylogeny/actions/workflows/python-package-conda.yml/badge.svg)\r +[![License: Apache-2.0](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.10519081.svg)](https://doi.org/10.5281/zenodo.10519081)\r +\r +![Logo](https://github.com/naturalis/barcode-constrained-phylogeny/blob/main/doc/logo-small.png?raw=true)\r +\r +# Bactria: BarCode TRee Inference and Analysis\r +This repository contains code and data for building very large, topologically-constrained \r +barcode phylogenies through a divide-and-conquer strategy. 
Such trees are useful as \r +reference materials for curating barcode data by detecting rogue terminals (indicating\r +incorrect taxonomic annotation) and in the comparable calculation of alpha and beta \r +biodiversity metrics across metabarcoding assays. \r +\r +The input data for the approach we develop here currently comes from BOLD data dumps. \r +The international database [BOLD Systems](https://www.boldsystems.org/index.php) \r +contains DNA barcodes for hundreds of thousands of species, with multiple barcodes per \r +species. The data dumps we use here are TSV files whose columns conform to the nascent\r +BCDM (barcode data model) vocabulary. As such, other data sources that conform to this\r +vocabulary could in the future be used as well, such as [UNITE](https://unite.ut.ee/).\r +\r +Theoretically, such data could be filtered and aligned per DNA marker to make \r +phylogenetic trees. However, there are two limiting factors: building very large \r +phylogenies is computationally intensive, and barcodes are not considered ideal for \r +building big trees because they are short (providing insufficient signal to resolve large \r +trees) and because they tend to saturate across large patristic distances.\r +\r +![concept](https://github.com/naturalis/barcode-constrained-phylogeny/blob/main/doc/concept.png)\r +\r +Both problems can be mitigated by using the \r +[Open Tree of Life](https://tree.opentreeoflife.org/opentree/argus/opentree13.4@ott93302) \r +as a further source of phylogenetic signal. The BOLD data can be split into chunks that \r +correspond to Open Tree of Life clades. These chunks can be made into alignments and \r +subtrees. The OpenTOL can be used as a constraint in the algorithms to make these. The \r +chunks are then combined in a large synthesis by grafting them on a backbone made from \r +exemplar taxa from the subtrees. 
Here too, the OpenTOL is a source of phylogenetic \r +constraint.\r +\r +In this repository this concept is developed for both animal species and plant species.\r +\r +## Installation\r +\r +The pipeline and its dependencies are managed using conda. On a linux or osx system, you \r +can follow these steps to set up the `bactria` Conda environment using an `environment.yml` \r +file and a `requirements.txt` file:\r +\r +1. **Clone the Repository:** \r + Clone the repository containing the environment files to your local machine:\r + ```bash\r + git clone https://github.com/naturalis/barcode-constrained-phylogeny.git\r + cd barcode-constrained-phylogeny\r + ```\r +2. **Create the Conda Environment:**\r + Create the bactria Conda environment using the environment.yml file with the following \r + command:\r + ```bash\r + conda env create -f workflow/envs/environment.yml\r + ```\r + This command will create a new Conda environment named bactria with the packages \r + specified in the environment.yml file. This step is largely a placeholder because\r + most of the dependency management is handled at the level of individual pipeline\r + steps, which each have their own environment specification.\r +3. **Activate the Environment:**\r + After creating the environment, activate it using the conda activate command:\r + ```bash\r + conda activate bactria\r + ```\r +4. **Verify the Environment:**\r + Verify that the bactria environment was set up correctly and that all packages were \r + installed using the conda list command:\r + ```bash\r + conda list\r + ```\r + This command will list all packages installed in the active conda environment. You should \r + see all the packages specified in the environment.yml file and the requirements.txt file.\r +\r +## How to run\r +\r +The pipeline is implemented using snakemake, which is available within the conda \r +environment that results from the installation. 
Important before running the snakemake pipeline \r +is to change in [config/config.yaml](config/config.yaml) the number of threads available on your \r +computer. Which marker gene is used in the pipeline is also specified in the config.yaml (default \r +COI-5P). Prior to execution, the BOLD data package to use (we used the \r +[release of 30 December 2022](https://www.boldsystems.org/index.php/datapackage?id=BOLD_Public.30-Dec-2022)) \r +must be downloaded manually and stored in the [resources/](resources/) directory. If a BOLD release \r +from another date is used the file names in config.yaml need to be updated. \r +\r +How to run the entire pipeline:\r +\r +```bash \r +snakemake -j {number of threads} --use-conda\r +```\r +\r +Snakemake rules can be performed separately:\r +```bash \r +snakemake -R {Rule} -j {number of threads} --use-conda\r +```\r +\r +Enter the same number at {number of threads} as you filled in previously in src/config.yaml.\r +In {Rule} insert the rule to be performed.\r +\r +Here is an overview of all the rules in the Snakefile:\r +\r +![graphviz (1)](https://github.com/naturalis/barcode-constrained-phylogeny/blob/main/doc/dag.svg)\r +(zoomed view is available [here](https://raw.githubusercontent.com/naturalis/barcode-constrained-phylogeny/main/doc/dag.svg))\r +\r +## Repository layout\r +\r +Below is the top-level layout of the repository. 
This layout is in line with \r +[community standards](https://snakemake.readthedocs.io/en/stable/snakefiles/deployment.html) and must be adhered to.\r +All of these subfolders contains further explanatory READMEs to explain their contents in more detail.\r +\r +- [config](config/) - configuration files\r +- [doc](doc/) - documentation and background literature\r +- [logs](logs/) - where log files are written during pipeline runtime\r +- [resources](resources/) - external data resources (from BOLD and OpenTree) are downloaded here\r +- [results](results/) - intermediate and final results are generated here\r +- [workflow](workflow/) - script source code and driver snakefile \r +\r +## License\r +\r +© 2023 Naturalis Biodiversity Center\r +\r +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except \r +in compliance with the License. You may obtain a copy of the License at\r +\r +[http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)\r + \r +Unless required by applicable law or agreed to in writing, software distributed under the License \r +is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express \r +or implied. See the License for the specific language governing permissions and limitations under \r +the License.""" ; + schema1:image ; + schema1:keywords "Bioinformatics, Python, Snakemake, phylogenetics" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Bactria: BarCode TRee Inference and Analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/706?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/986?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/fetchngs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fetchngs" ; + schema1:sdDatePublished "2024-07-12 13:21:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/986/ro_crate?version=10" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7986 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:53Z" ; + schema1:dateModified "2024-06-11T12:54:53Z" ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/986?version=13" ; + schema1:keywords "ddbj, download, ena, FASTQ, geo, sra, synapse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fetchngs" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/986?version=10" ; + schema1:version 10 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=15" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-07-12 13:18:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=15" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11971 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:02Z" ; + schema1:dateModified "2024-06-11T12:55:02Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=15" ; + schema1:version 15 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=18" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-07-12 13:22:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=18" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9566 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:12Z" ; + schema1:dateModified "2024-06-11T12:55:12Z" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=18" ; + schema1:version 18 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """The input to this workflow is a data matrix of gene expression that was collected from a pediatric patient tumor patient from the KidsFirst Common Fund program [1]. The RNA-seq samples are the columns of the matrix, and the rows are the raw expression gene count for all human coding genes (Table 1). This data matrix is fed into TargetRanger [2] to screen for targets which are highly expressed in the tumor but lowly expressed across most healthy human tissues based on gene expression data collected from postmortem patients with RNA-seq by the GTEx Common Fund program [3]. Based on this analysis the gene IMP U3 small nucleolar ribonucleoprotein 3 (IMP3) was selected because it was the top candidate returned from the TargetRanger analysis (Tables 2-3). IMP3 is also commonly called insulin-like growth factor 2 mRNA-binding protein 3 (IGF2BP3). Next, we leverage unique knowledge from various other Common Fund programs to examine various functions and knowledge related to IMP3. First, we queried the LINCS L1000 data [4] from the LINCS program [5] converted into RNA-seq-like LINCS L1000 Signatures [6] using the SigCom LINCS API [7] to identify mimicker or reverser small molecules that maximally impact the expression of IMP3 in human cell lines (Fig. 1, Table 4). In addition, we also queried the LINCS L1000 data to identify single gene CRISPR knockouts that down-regulate the expression of IMP3 (Fig. 1, Table 5). 
These potential drug targets were filtered using the Common Fund IDG program's list of understudied proteins [8] to produce a set of additional targets (Table 6). Next, IMP3 was searched for knowledge provided by the with the Metabolomics Workbench MetGENE tool [9]. MetGENE aggregates knowledge about pathways, reactions, metabolites, and studies from the Metabolomics Workbench Common Fund supported resource [10]. The Metabolomics Workbench was searched to find associated metabolites linked to IMP3 [10]. Furthermore, we leveraged the Linked Data Hub API [11] to list knowledge about regulatory elements associated with IMP3 (Table 6). Finally, the GlyGen database [12] was queried to identify relevant sets of proteins that are the product of the IMP3 genes, as well as known post-translational modifications discovered on IMP3.\r +\r +1. Lonsdale, J. et al. The Genotype-Tissue Expression (GTEx) project. Nature Genetics vol. 45 580–585 (2013). doi:10.1038/ng.2653\r +2. Evangelista, J. E. et al. SigCom LINCS: data and metadata search engine for a million gene expression signatures. Nucleic Acids Research vol. 50 W697–W709 (2022). doi:10.1093/nar/gkac328\r +3. IDG Understudied Proteins, https://druggablegenome.net/AboutIDGProteinList\r +4. MetGENE, https://sc-cfdewebdev.sdsc.edu/MetGENE/metGene.php\r +5. The Metabolomics Workbench, https://www.metabolomicsworkbench.org/\r +6. Linked Data Hub, https://ldh.genome.network/cfde/ldh/\r +7. York, W. S. et al. GlyGen: Computational and Informatics Resources for Glycoscience. Glycobiology vol. 30 72–73 (2019). 
doi:10.1093/glycob/cwz080""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/814?version=1" ; + schema1:license "CC-BY-NC-SA-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Use Case 13: Novel Cell Surface Targets for Individual Cancer Patients Analyzed with Common Fund Datasets" ; + schema1:sdDatePublished "2024-07-12 13:23:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/814/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 32077 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8205 ; + schema1:dateCreated "2024-04-16T21:42:58Z" ; + schema1:dateModified "2024-04-23T15:54:49Z" ; + schema1:description """The input to this workflow is a data matrix of gene expression that was collected from a pediatric patient tumor patient from the KidsFirst Common Fund program [1]. The RNA-seq samples are the columns of the matrix, and the rows are the raw expression gene count for all human coding genes (Table 1). This data matrix is fed into TargetRanger [2] to screen for targets which are highly expressed in the tumor but lowly expressed across most healthy human tissues based on gene expression data collected from postmortem patients with RNA-seq by the GTEx Common Fund program [3]. Based on this analysis the gene IMP U3 small nucleolar ribonucleoprotein 3 (IMP3) was selected because it was the top candidate returned from the TargetRanger analysis (Tables 2-3). IMP3 is also commonly called insulin-like growth factor 2 mRNA-binding protein 3 (IGF2BP3). Next, we leverage unique knowledge from various other Common Fund programs to examine various functions and knowledge related to IMP3. 
First, we queried the LINCS L1000 data [4] from the LINCS program [5] converted into RNA-seq-like LINCS L1000 Signatures [6] using the SigCom LINCS API [7] to identify mimicker or reverser small molecules that maximally impact the expression of IMP3 in human cell lines (Fig. 1, Table 4). In addition, we also queried the LINCS L1000 data to identify single gene CRISPR knockouts that down-regulate the expression of IMP3 (Fig. 1, Table 5). These potential drug targets were filtered using the Common Fund IDG program's list of understudied proteins [8] to produce a set of additional targets (Table 6). Next, IMP3 was searched for knowledge provided by the with the Metabolomics Workbench MetGENE tool [9]. MetGENE aggregates knowledge about pathways, reactions, metabolites, and studies from the Metabolomics Workbench Common Fund supported resource [10]. The Metabolomics Workbench was searched to find associated metabolites linked to IMP3 [10]. Furthermore, we leveraged the Linked Data Hub API [11] to list knowledge about regulatory elements associated with IMP3 (Table 6). Finally, the GlyGen database [12] was queried to identify relevant sets of proteins that are the product of the IMP3 genes, as well as known post-translational modifications discovered on IMP3.\r +\r +1. Lonsdale, J. et al. The Genotype-Tissue Expression (GTEx) project. Nature Genetics vol. 45 580–585 (2013). doi:10.1038/ng.2653\r +2. Evangelista, J. E. et al. SigCom LINCS: data and metadata search engine for a million gene expression signatures. Nucleic Acids Research vol. 50 W697–W709 (2022). doi:10.1093/nar/gkac328\r +3. IDG Understudied Proteins, https://druggablegenome.net/AboutIDGProteinList\r +4. MetGENE, https://sc-cfdewebdev.sdsc.edu/MetGENE/metGene.php\r +5. The Metabolomics Workbench, https://www.metabolomicsworkbench.org/\r +6. Linked Data Hub, https://ldh.genome.network/cfde/ldh/\r +7. York, W. S. et al. GlyGen: Computational and Informatics Resources for Glycoscience. Glycobiology vol. 
30 72–73 (2019). doi:10.1093/glycob/cwz080""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-NC-SA-4.0" ; + schema1:name "Use Case 13: Novel Cell Surface Targets for Individual Cancer Patients Analyzed with Common Fund Datasets" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/814?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Genome assessment post assembly\r +\r +## General usage recommendations\r +\r +Please see the [Genome assessment post assembly](https://australianbiocommons.github.io/how-to-guides/genome_assembly/assembly_qc) guide.\r +\r +## See [change log](./change_log.md)\r +\r +## Attributions\r +\r +The workflow & the [doc_guidelines template](https://github.com/AustralianBioCommons/doc_guidelines) used are supported by the Australian BioCommons via Bioplatforms Australia funding, the Australian Research Data Commons (https://doi.org/10.47486/PL105) and the Queensland Government RICF programme. Bioplatforms Australia and the Australian Research Data Commons are enabled by the National Collaborative Research Infrastructure Strategy (NCRIS).\r +\r +\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.403.1" ; + schema1:isBasedOn "https://github.com/AustralianBioCommons/Genome-assessment-post-assembly.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genome assessment post assembly" ; + schema1:sdDatePublished "2024-07-12 13:23:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/403/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14172 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2022-11-07T07:10:28Z" ; + schema1:dateModified "2023-01-30T18:19:40Z" ; + schema1:description """# Genome assessment post assembly\r +\r +## General usage recommendations\r +\r +Please see the [Genome assessment post assembly](https://australianbiocommons.github.io/how-to-guides/genome_assembly/assembly_qc) guide.\r +\r +## See [change log](./change_log.md)\r +\r +## Attributions\r +\r +The workflow & the [doc_guidelines template](https://github.com/AustralianBioCommons/doc_guidelines) used are supported by the Australian BioCommons via Bioplatforms Australia funding, the Australian Research Data Commons (https://doi.org/10.47486/PL105) and the Queensland Government RICF programme. Bioplatforms Australia and the Australian Research Data Commons are enabled by the National Collaborative Research Infrastructure Strategy (NCRIS).\r +\r +\r +\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/403?version=3" ; + schema1:isPartOf , + ; + schema1:keywords "HiFi, hifiasm, QC, Quast, Meryl, Merqury, BUSCO" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Genome assessment post assembly" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/403?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1025?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/taxprofiler" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/taxprofiler" ; + schema1:sdDatePublished "2024-07-12 13:18:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1025/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15368 ; + schema1:creator , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:19Z" ; + schema1:dateModified "2024-06-11T12:55:19Z" ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1025?version=10" ; + schema1:keywords "Classification, illumina, long-reads, Metagenomics, microbiome, nanopore, pathogen, Profiling, shotgun, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/taxprofiler" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1025?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + schema1:datePublished "2022-11-28T16:36:23.118637" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.2" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/965?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/atacseq" ; + schema1:sdDatePublished "2024-07-12 13:22:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/965/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9720 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:42Z" ; + schema1:dateModified "2024-06-11T12:54:42Z" ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/965?version=8" ; + schema1:keywords "ATAC-seq, chromatin-accessibiity" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/atacseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/965?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. 
The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. 
With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.328.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_pmx_tutorial/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)" ; + schema1:sdDatePublished "2024-07-12 13:35:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/328/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8358 ; + schema1:creator , + ; + schema1:dateCreated "2022-04-07T10:32:20Z" ; + schema1:dateModified "2022-06-10T09:44:01Z" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. 
With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/328?version=3" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_pmx_tutorial/python/workflow.py" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.284.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_complex_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/284/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7128 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T08:38:51Z" ; + schema1:dateModified "2023-04-14T08:39:41Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/284?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_complex_md_setup/python/workflow.py" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """From the R1 and R2 fastq files of a single samples, make a scRNAseq counts matrix, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. Produces a processed AnnData \r +\r +Depreciated: use individual workflows insead for multiple samples""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/465?version=4" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq Single Sample Processing STARSolo" ; + schema1:sdDatePublished "2024-07-12 13:22:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/465/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 126954 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-12-12T03:15:52Z" ; + schema1:dateModified "2024-05-30T05:54:50Z" ; + schema1:description """From the R1 and R2 fastq files of a single samples, make a scRNAseq counts matrix, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. Produces a processed AnnData \r +\r +Depreciated: use individual workflows insead for multiple samples""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/465?version=3" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "scRNAseq Single Sample Processing STARSolo" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/465?version=4" ; + schema1:version 4 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1025?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/taxprofiler" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/taxprofiler" ; + schema1:sdDatePublished "2024-07-12 13:18:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1025/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15373 ; + schema1:creator , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:19Z" ; + schema1:dateModified "2024-06-11T12:55:19Z" ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1025?version=10" ; + schema1:keywords "Classification, illumina, long-reads, Metagenomics, microbiome, nanopore, pathogen, Profiling, shotgun, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/taxprofiler" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1025?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + schema1:datePublished "2024-04-22T12:13:28.281437" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.828.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_protein_complex_md_setup/docker" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Docker Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:23:18 +0100" ; + schema1:url 
"https://workflowhub.eu/workflows/828/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 786 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-22T11:10:41Z" ; + schema1:dateModified "2024-05-22T13:39:56Z" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Docker Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + 
schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_protein_complex_md_setup/docker/Dockerfile" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-03-06T13:04:13.877998" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + , + ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-atac-cutandrun" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.5" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-pe" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.5" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# ![nf-core/ampliseq](docs/images/nf-core-ampliseq_logo.png) + +[![Nextflow](https://img.shields.io/badge/nextflow-%E2%89%A519.10.0-brightgreen.svg)](https://www.nextflow.io/) +[![nf-core](https://img.shields.io/badge/nf--core-pipeline-brightgreen.svg)](https://nf-co.re/) +[![DOI](https://zenodo.org/badge/150448201.svg)](https://zenodo.org/badge/latestdoi/150448201) +[![Cite Preprint](https://img.shields.io/badge/Cite%20Us!-Cite%20Publication-important)](https://doi.org/10.3389/fmicb.2020.550420) + +[![GitHub Actions CI Status](https://github.com/nf-core/ampliseq/workflows/nf-core%20CI/badge.svg)](https://github.com/nf-core/ampliseq/actions) +[![GitHub Actions Linting Status](https://github.com/nf-core/ampliseq/workflows/nf-core%20linting/badge.svg)](https://github.com/nf-core/ampliseq/actions) +[![Nextflow](https://img.shields.io/badge/nextflow-%E2%89%A519.10.0-brightgreen.svg)](https://www.nextflow.io/) + +[![install with bioconda](https://img.shields.io/badge/install%20with-bioconda-brightgreen.svg)](https://bioconda.github.io/) +[![Docker](https://img.shields.io/docker/automated/nfcore/ampliseq.svg)](https://hub.docker.com/r/nfcore/ampliseq) +[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23ampliseq-4A154B?logo=slack)](https://nfcore.slack.com/channels/ampliseq) + +## Introduction + +**nfcore/ampliseq** is a bioinformatics analysis pipeline used for 16S rRNA amplicon sequencing data (currently supported is Illumina paired end data). + +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with docker containers making installation trivial and results highly reproducible. + +## Quick Start + +1. Install [`nextflow`](https://nf-co.re/usage/installation) + +2. 
Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) or [`Podman`](https://podman.io/) for full pipeline reproducibility _(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_ + +3. Download the pipeline and test it on a minimal dataset with a single command: + + ```bash + nextflow run nf-core/ampliseq -profile test, + ``` + + > Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment. + +4. Start running your own analysis! + + ```bash + nextflow run nf-core/ampliseq -profile --input "data" --FW_primer GTGYCAGCMGCCGCGGTAA --RV_primer GGACTACNVGGGTWTCTAAT --metadata "data/Metadata.tsv" + ``` + +See [usage docs](https://nf-co.re/ampliseq/usage) for all of the available options when running the pipeline. + +## Documentation + +The nf-core/ampliseq pipeline comes with documentation about the pipeline: [usage](https://nf-co.re/ampliseq/usage) and [output](https://nf-co.re/ampliseq/output). 
+ +The workflow processes raw data from FastQ inputs ([FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)), trims primer sequences from the reads ([Cutadapt](https://journal.embnet.org/index.php/embnetjournal/article/view/200)), imports data into [QIIME2](https://www.nature.com/articles/s41587-019-0209-9), generates amplicon sequencing variants (ASV, [DADA2](https://www.nature.com/articles/nmeth.3869)), classifies features against the [SILVA](https://www.arb-silva.de/) [v132](https://www.arb-silva.de/documentation/release-132/) database, excludes unwanted taxa, produces absolute and relative feature/taxa count tables and plots, plots alpha rarefaction curves, computes alpha and beta diversity indices and plots thereof, and finally calls differentially abundant taxa ([ANCOM](https://www.ncbi.nlm.nih.gov/pubmed/26028277)). See the [output documentation](docs/output.md) for more details of the results. + +## Credits + +These scripts were originally written for use at the [Quantitative Biology Center (QBiC)](http://www.qbic.life) and [Microbial Ecology, Center for Applied Geosciences](http://www.uni-tuebingen.de/de/104325), part of Eberhard Karls Universität Tübingen (Germany) by Daniel Straub ([@d4straub](https://github.com/d4straub)) and Alexander Peltzer ([@apeltzer](https://github.com/apeltzer)). + +## Contributions and Support + +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md). + +For further information or help, don't hesitate to get in touch on the [Slack `#ampliseq` channel](https://nfcore.slack.com/channels/ampliseq) (you can join with [this invite](https://nf-co.re/join/slack)). 
+ +## Citation + +If you use `nf-core/ampliseq` for your analysis, please cite the `ampliseq` article as follows: +> Daniel Straub, Nia Blackwell, Adrian Langarica-Fuentes, Alexander Peltzer, Sven Nahnsen, Sara Kleindienst **Interpretations of Environmental Microbial Community Studies Are Biased by the Selected 16S rRNA (Gene) Amplicon Sequencing Pipeline** *Frontiers in Microbiology* 2020, 11:2652 [doi: 10.3389/fmicb.2020.550420](https://doi.org/10.3389/fmicb.2020.550420). + +You can cite the `nf-core/ampliseq` zenodo record for a specific version using the following [doi: 10.5281/zenodo.1493841](https://zenodo.org/badge/latestdoi/150448201) + +You can cite the `nf-core` publication as follows: + +> **The nf-core framework for community-curated bioinformatics pipelines.** +> +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen. +> +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x). +> ReadCube: [Full Access Link](https://rdcu.be/b1GjZ) +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-07-12 13:18:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5747 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:38Z" ; + schema1:dateModified "2024-06-11T12:54:38Z" ; + schema1:description """# ![nf-core/ampliseq](docs/images/nf-core-ampliseq_logo.png) + +[![Nextflow](https://img.shields.io/badge/nextflow-%E2%89%A519.10.0-brightgreen.svg)](https://www.nextflow.io/) +[![nf-core](https://img.shields.io/badge/nf--core-pipeline-brightgreen.svg)](https://nf-co.re/) +[![DOI](https://zenodo.org/badge/150448201.svg)](https://zenodo.org/badge/latestdoi/150448201) +[![Cite Preprint](https://img.shields.io/badge/Cite%20Us!-Cite%20Publication-important)](https://doi.org/10.3389/fmicb.2020.550420) + +[![GitHub Actions CI Status](https://github.com/nf-core/ampliseq/workflows/nf-core%20CI/badge.svg)](https://github.com/nf-core/ampliseq/actions) +[![GitHub Actions Linting Status](https://github.com/nf-core/ampliseq/workflows/nf-core%20linting/badge.svg)](https://github.com/nf-core/ampliseq/actions) +[![Nextflow](https://img.shields.io/badge/nextflow-%E2%89%A519.10.0-brightgreen.svg)](https://www.nextflow.io/) + +[![install with bioconda](https://img.shields.io/badge/install%20with-bioconda-brightgreen.svg)](https://bioconda.github.io/) +[![Docker](https://img.shields.io/docker/automated/nfcore/ampliseq.svg)](https://hub.docker.com/r/nfcore/ampliseq) +[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23ampliseq-4A154B?logo=slack)](https://nfcore.slack.com/channels/ampliseq) + +## Introduction + +**nfcore/ampliseq** is a bioinformatics analysis pipeline used for 16S rRNA amplicon sequencing data (currently supported is Illumina paired end data). + +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. 
It comes with docker containers making installation trivial and results highly reproducible. + +## Quick Start + +1. Install [`nextflow`](https://nf-co.re/usage/installation) + +2. Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) or [`Podman`](https://podman.io/) for full pipeline reproducibility _(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_ + +3. Download the pipeline and test it on a minimal dataset with a single command: + + ```bash + nextflow run nf-core/ampliseq -profile test, + ``` + + > Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment. + +4. Start running your own analysis! + + ```bash + nextflow run nf-core/ampliseq -profile --input "data" --FW_primer GTGYCAGCMGCCGCGGTAA --RV_primer GGACTACNVGGGTWTCTAAT --metadata "data/Metadata.tsv" + ``` + +See [usage docs](https://nf-co.re/ampliseq/usage) for all of the available options when running the pipeline. + +## Documentation + +The nf-core/ampliseq pipeline comes with documentation about the pipeline: [usage](https://nf-co.re/ampliseq/usage) and [output](https://nf-co.re/ampliseq/output). 
+ +The workflow processes raw data from FastQ inputs ([FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)), trims primer sequences from the reads ([Cutadapt](https://journal.embnet.org/index.php/embnetjournal/article/view/200)), imports data into [QIIME2](https://www.nature.com/articles/s41587-019-0209-9), generates amplicon sequencing variants (ASV, [DADA2](https://www.nature.com/articles/nmeth.3869)), classifies features against the [SILVA](https://www.arb-silva.de/) [v132](https://www.arb-silva.de/documentation/release-132/) database, excludes unwanted taxa, produces absolute and relative feature/taxa count tables and plots, plots alpha rarefaction curves, computes alpha and beta diversity indices and plots thereof, and finally calls differentially abundant taxa ([ANCOM](https://www.ncbi.nlm.nih.gov/pubmed/26028277)). See the [output documentation](docs/output.md) for more details of the results. + +## Credits + +These scripts were originally written for use at the [Quantitative Biology Center (QBiC)](http://www.qbic.life) and [Microbial Ecology, Center for Applied Geosciences](http://www.uni-tuebingen.de/de/104325), part of Eberhard Karls Universität Tübingen (Germany) by Daniel Straub ([@d4straub](https://github.com/d4straub)) and Alexander Peltzer ([@apeltzer](https://github.com/apeltzer)). + +## Contributions and Support + +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md). + +For further information or help, don't hesitate to get in touch on the [Slack `#ampliseq` channel](https://nfcore.slack.com/channels/ampliseq) (you can join with [this invite](https://nf-co.re/join/slack)). 
+ +## Citation + +If you use `nf-core/ampliseq` for your analysis, please cite the `ampliseq` article as follows: +> Daniel Straub, Nia Blackwell, Adrian Langarica-Fuentes, Alexander Peltzer, Sven Nahnsen, Sara Kleindienst **Interpretations of Environmental Microbial Community Studies Are Biased by the Selected 16S rRNA (Gene) Amplicon Sequencing Pipeline** *Frontiers in Microbiology* 2020, 11:2652 [doi: 10.3389/fmicb.2020.550420](https://doi.org/10.3389/fmicb.2020.550420). + +You can cite the `nf-core/ampliseq` zenodo record for a specific version using the following [doi: 10.5281/zenodo.1493841](https://zenodo.org/badge/latestdoi/150448201) + +You can cite the `nf-core` publication as follows: + +> **The nf-core framework for community-curated bioinformatics pipelines.** +> +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen. +> +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x). +> ReadCube: [Full Access Link](https://rdcu.be/b1GjZ) +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=5" ; + schema1:version 5 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 182054 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """![CoVigator logo](images/CoVigator_logo_txt_nobg.png "CoVigator logo")\r +\r +# CoVigator pipeline: variant detection pipeline for Sars-CoV-2\r +\r +[![DOI](https://zenodo.org/badge/374669617.svg)](https://zenodo.org/badge/latestdoi/374669617)\r +[![Run tests](https://github.com/TRON-Bioinformatics/covigator-ngs-pipeline/actions/workflows/automated_tests.yml/badge.svg?branch=master)](https://github.com/TRON-Bioinformatics/covigator-ngs-pipeline/actions/workflows/automated_tests.yml)\r +[![Powered by NumFOCUS](https://img.shields.io/badge/powered%20by-Nextflow-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://www.nextflow.io/)\r +[![License](https://img.shields.io/badge/license-MIT-green)](https://opensource.org/licenses/MIT)\r +\r +\r +\r +The Covigator pipeline processes SARS-CoV-2 FASTQ or FASTA files into annotated and normalized analysis ready VCF files.\r +It also classifies samples into lineages using pangolin.\r +The pipeline is implemented in the Nextflow framework (Di Tommaso, 2017), it is a stand-alone pipeline that can be\r +used independently of the CoVigator dashboard and knowledge base.\r +\r +Although it is configured by default for SARS-CoV-2 it can be employed for the analysis of other microbial organisms \r +if the required references are provided.\r +\r +The result of the pipeline is one or more annotated VCFs with the list of SNVs and indels ready for analysis.\r +\r +The results from the CoVigator pipeline populate our CoVigator dashboard [https://covigator.tron-mainz.de](https://covigator.tron-mainz.de) \r +\r +**Table of Contents**\r +\r +1. [Two pipelines in one](#id1)\r +2. [Implementation](#id2)\r +3. [How to run](#id3)\r +4. [Understanding the output](#id4)\r +6. [Annotation resources](#id5)\r +7. [Future work](#id6)\r +8. 
[Bibliography](#id7)\r +\r +\r +## Two pipelines in one\r +\r +In CoVigator we analyse samples from two different formats, FASTQ files (e.g.: as provided by the European Nucleotide \r +Archive) and FASTA files containing a consensus assembly. While from the first we get the raw reads, \r +from the second we obtain already assembled genomes. Each of these formats has to be \r +analysed differently. Also, the output data that we can obtain from each of these is different.\r +\r +![CoVigator pipeline](images/pipeline.drawio.png)\r +\r +### Pipeline for FASTQ files\r +\r +When FASTQ files are provided the pipeline includes the following steps:\r +- **Trimming**. `fastp` is used to trim reads with default values. This step also includes QC filtering.\r +- **Alignment**. `BWA mem 2` is used for the alignment of single or paired end samples.\r +- **BAM preprocessing**. BAM files are prepared and duplicate reads are marked using GATK and Sambamba tools.\r +- **Primer trimming**. When a BED with primers is provided, these are trimmed from the reads using iVar. This is applicable to the results from all variant callers.\r +- **Coverage analysis**. `samtools coverage` and `samtools depth` are used to compute the horizontal and vertical \r + coverage respectively.\r +- **Variant calling**. Four different variant callers are employed: BCFtools, LoFreq, iVar and GATK. \r + Subsequent processing of resulting VCF files is independent for each caller.\r +- **Variant normalization**. `bcftools norm` is employed to left align indels, trim variant calls and remove variant duplicates.\r +- **Technical annotation**. `VAFator` is employed to add VAF and coverage annotations from the reads pileup.\r +- **Phasing**. Clonal mutations (ie: VAF >= 0.8) occurring in the same amino acid are merged for its correct functional annotation.\r +- **Biological annotation**. 
`SnpEff` is employed to annotate the variant consequences of variants and\r + `bcftools annotate` is employed to add additional SARS-CoV-2 annotations.\r +- **Lineage determination**. `pangolin` is used for this purpose, this runs over the results from each of the variant callers separately.\r +\r +Both single end and paired end FASTQ files are supported.\r +\r +### Pipeline for FASTA files\r +\r +When a FASTA file is provided with a single assembly sequence the pipeline includes the following steps:\r +- **Variant calling**. A Smith-Waterman global alignment is performed against the reference sequence to call SNVs and \r + indels. Indels longer than 50 bp and at the beginning or end of the assembly sequence are excluded. Any mutation where\r + either reference or assembly contain an N is excluded.\r +- **Variant normalization**. Same as described above.\r +- **Phasing**. mutations occurring in the same amino acid are merged for its correct annotation.\r +- **Biological annotation**. Same as described above.\r +- **Lineage determination**. `pangolin` is used for this purpose.\r +\r +The FASTA file is expected to contain a single assembly sequence. \r +Bear in mind that only clonal variants can be called on the assembly.\r +\r +### Pipeline for VCF files\r +\r +When a VCF file is provided the pipeline includes the following steps:\r +- **Variant normalization**. Same as described above.\r +- **Technical annotation**. Same as described above (optional if BAM is provided)\r +- **Phasing**. mutations occurring in the same amino acid are merged for its correct annotation.\r +- **Biological annotation**. Same as described above\r +- **Lineage determination**. 
`pangolin` is used for this purpose.\r +\r +## Implementation\r +\r +The pipeline is implemented as a Nextflow workflow with its DSL2 syntax.\r +The dependencies are managed through a conda environment to ensure version traceability and reproducibility.\r +The references for SARS-CoV-2 are embedded in the pipeline.\r +The pipeline is based on a number of third-party tools, plus a custom implementation based on biopython (Cock, 2009) \r +for the alignment and subsequent variant calling over a FASTA file.\r +\r +All code is open sourced in GitHub [https://github.com/TRON-Bioinformatics/covigator-ngs-pipeline](https://github.com/TRON-Bioinformatics/covigator-ngs-pipeline)\r +and made available under the MIT license. We welcome any contribution. \r +If you have troubles using the CoVigator pipeline or you find an issue, we will be thankful if you would report a ticket \r +in GitHub.\r +\r +The alignment, BAM preprocessing and variant normalization pipelines are based on the implementations in additional \r +Nextflow pipelines within the TronFlow initiative [https://tronflow-docs.readthedocs.io/](https://tronflow-docs.readthedocs.io/). \r +\r +\r +### Variant annotations\r +\r +The variants derived from a FASTQ file are annotated on the `FILTER` column using the VAFator \r +(https://github.com/TRON-Bioinformatics/vafator) variant allele frequency \r +(VAF) into `LOW_FREQUENCY`, `SUBCLONAL`, `LOW_QUALITY_CLONAL` and finally `PASS` variants correspond to clonal variants. \r +By default, variants with a VAF < 2 % are considered `LOW_FREQUENCY`, variants with a VAF >= 2 % and < 50 % are \r +considered `SUBCLONAL` and variants with a VAF >= 50 % and < 80 % are considered `LOW_QUALITY_CLONAL`. 
\r +This thresholds can be changed with the parameters `--low_frequency_variant_threshold`,\r +`--subclonal_variant_threshold` and `--low_quality_clonal_variant_threshold` respectively.\r +\r +VAFator technical annotations:\r +\r +- `INFO/vafator_af`: variant allele frequency of the mutation \r +- `INFO/vafator_ac`: number of reads supporting the mutation \r +- `INFO/vafator_dp`: total number of reads at the position, in the case of indels this represents the number of reads in the previous position\r +\r +SnpEff provides the functional annotations. And all mutations are additionally annotated with the following SARS-CoV-2 specific annotations:\r +- ConsHMM conservation scores as reported in (Kwon, 2021)\r +- Pfam domains as reported in Ensemble annotations.\r +\r +Biological annotations: \r +\r +- `INFO/ANN` are the SnpEff consequence annotations (eg: overlapping gene, effect of the mutation). \r +This are described in detail here [http://pcingola.github.io/SnpEff/se_inputoutput/](http://pcingola.github.io/SnpEff/se_inputoutput/) \r +- `INFO/CONS_HMM_SARS_COV_2` is the ConsHMM conservation score in SARS-CoV-2\r +- `INFO/CONS_HMM_SARBECOVIRUS` is the ConsHMM conservation score among Sarbecovirus\r +- `INFO/CONS_HMM_VERTEBRATE_COV` is the ConsHMM conservation score among vertebrate Corona virus\r +- `INFO/PFAM_NAME` is the Interpro name for the overlapping Pfam domains\r +- `INFO/PFAM_DESCRIPTION` is the Interpro description for the overlapping Pfam domains\r +- `INFO/problematic` contains the filter provided in DeMaio et al. (2020) for problematic mutations\r +\r +According to DeMaio et al. 
(2020), mutations at the beginning (ie: POS <= 50) and end (ie: POS >= 29,804) of the \r +genome are filtered out\r +\r +This is an example of biological annotations of a missense mutation in the spike protein on the N-terminal subunit 1 domain.\r +```\r +ANN=A|missense_variant|MODERATE|S|gene-GU280_gp02|transcript|TRANSCRIPT_gene-GU280_gp02|protein_coding|1/1|c.118G>A|\r +p.D40N|118/3822|118/3822|40/1273||;CONS_HMM_SARS_COV_2=0.57215;CONS_HMM_SARBECOVIRUS=0.57215;CONS_HMM_VERTEBRATE_COV=0;\r +PFAM_NAME=bCoV_S1_N;PFAM_DESCRIPTION=Betacoronavirus-like spike glycoprotein S1, N-terminal\r +```\r +\r +\r +### Phasing limitations\r +\r +The phasing implementation is applicable only to clonal mutations. It assumes all clonal mutations are in phase and \r +hence it merges those occurring in the same amino acid.\r +In order to phase intrahost mutations we would need to implement a read-backed phasing approach such as in WhatsHap \r +or GATK's ReadBackedPhasing. Unfortunately these tools do not support the scenario of a haploid organism with an\r +undefined number of subclones.\r +For this reason, phasing is implemented with custom Python code at `bin/phasing.py`.\r +\r +### Primers trimming\r +\r +With some library preparation protocols such as ARTIC it is recommended to trim the primers from the reads.\r +We have observed that if primers are not trimmed spurious mutations are being called specially SNVs with lower frequencies and long deletions.\r +Also the variant allele frequencies of clonal mutations are underestimated.\r +\r +The BED files containing the primers for each ARTIC version can be found at https://github.com/artic-network/artic-ncov2019/tree/master/primer_schemes/nCoV-2019.\r +\r +If the adequate BED file is provided to the CoVigator pipeline with `--primers` the primers will be trimmed with iVar. 
\r +This affects the output of every variant caller, not only iVar.\r +\r +### Reference data\r +\r +The default SARS-CoV-2 reference files correspond to Sars_cov_2.ASM985889v3 and were downloaded from Ensembl servers.\r +No additional parameter needs to be provided to use the default SARS-CoV-2 reference genome.\r +\r +#### Using a custom reference genome\r +\r +These references can be customised to use a different SARS-CoV-2 reference or to analyse a different virus.\r +Two files need to be provided:\r +- Use a custom reference genome by providing the parameter `--reference your.fasta`.\r +- Gene annotation file in GFFv3 format `--gff your.gff`. This is only required to run iVar\r +\r +Additionally, the FASTA needs bwa indexes, .fai index and a .dict index.\r +These indexes can be generated with the following two commands:\r +```\r +bwa index reference.fasta\r +samtools faidx reference.fasta\r +gatk CreateSequenceDictionary --REFERENCE your.fasta\r +```\r +\r +**NOTE**: beware that for Nextflow to find these indices the reference needs to be passed as an absolute path.\r +\r +The SARS-CoV-2 specific annotations will be skipped when using a custom genome.\r +\r +In order to have SnpEff functional annotations available you will also need to provide three parameters:\r +- `--snpeff_organism`: organism to annotate with SnpEff (ie: as registered in SnpEff)\r +- `--snpeff_data`: path to the SnpEff data folder\r +- `--snpeff_config`: path to the SnpEff config file\r +\r +### Intrahost mutations\r +\r +Some mutations may be observed in a subset of the virus sample, this may arise through intrahost virus evolution or\r +co-infection. Intrahost mutations can only be detected when analysing the raw reads (ie: the FASTQs) \r +as in the assembly (ie: the FASTA file) a single virus consensus sequence is represented. \r +BCFtools and GATK do not normally capture intrahost mutations; on the other hand LoFreq and iVar both capture\r +mutations that deviate from a clonal-like VAF. 
\r +Nevertheless, mutations with lower variant allele frequency (VAF) are challenging to distinguish from sequencing and\r +analytical errors. \r +\r +Mutations are annotated on the `FILTER` column using the VAF into three categories: \r +- `LOW_FREQUENCY`: subset of intrahost mutations with lowest frequencies, potentially enriched with false positive calls (VAF < 2 %).\r +- `SUBCLONAL`: subset of intrahost mutations with higher frequencies (2 % <= VAF < 50 %).\r +- `LOW_QUALITY_CLONAL`: subset of clonal mutations with lower frequencies (50 % <= VAF < 80 %).\r +- `PASS` clonal mutations (VAF >= 80 %)\r +\r +Other low quality mutations are removed from the output.\r +\r +The VAF thresholds can be changed with the parameters `--low_frequency_variant_threshold`,\r +`--subclonal_variant_threshold` and `--low_quality_clonal_variant_threshold`.\r +\r +## How to run\r +\r +### Requirements\r +\r +- Nextflow >= 19.10.0\r +- Java >= 8\r +- Conda >=4.9\r +\r +### Testing\r +\r +To run the workflow on a test assembly dataset run:\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline -profile conda,test_fasta\r +```\r +\r +Find the output in the folder `covigator_test_fasta`.\r +\r +To run the workflow on a test raw reads dataset run:\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline -profile conda,test_fastq\r +```\r +\r +Find the output in the folder `covigator_test_fastq`.\r +\r +The above commands are useful to create the conda environments beforehand.\r +\r +**NOTE**: pangolin is the most time-consuming step of the whole pipeline. 
To make it faster, locate the conda \r +environment that Nextflow created with pangolin (eg: `find $YOUR_NEXTFOW_CONDA_ENVS_FOLDER -name pangolin`) and run\r +`pangolin --decompress-model`.\r +\r +### Running\r +\r +For paired end reads:\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline \\\r +[-r v0.10.0] \\\r +[-profile conda] \\\r +--fastq1 \\\r +--fastq2 \\\r +--name example_run \\\r +--output \\\r +[--reference /Sars_cov_2.ASM985889v3.fa] \\\r +[--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +\r +For single end reads:\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline \\\r +[-r v0.10.0] \\\r +[-profile conda] \\\r +--fastq1 \\\r +--name example_run \\\r +--output \\\r +[--reference /Sars_cov_2.ASM985889v3.fa] \\\r +[--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +\r +For assembly:\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline \\\r +[-r v0.10.0] \\\r +[-profile conda] \\\r +--fasta \\\r +--name example_run \\\r +--output \\\r +[--reference /Sars_cov_2.ASM985889v3.fa] \\\r +[--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +\r +For VCF:\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline \\\r +[-r v0.10.0] \\\r +[-profile conda] \\\r +--vcf \\\r +--name example_run \\\r +--output \\\r +[--reference /Sars_cov_2.ASM985889v3.fa] \\\r +[--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +\r +As an optional input when processing directly VCF files you can provide BAM files to annotate VAFs:\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline \\\r +[-r v0.10.0] \\\r +[-profile conda] \\\r +--vcf \\\r +--bam \\\r +--bai \\\r +--name example_run \\\r +--output \\\r +[--reference /Sars_cov_2.ASM985889v3.fa] \\\r +[--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +\r +For batch processing of reads use `--input_fastqs_list` and `--name`.\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline [-profile conda] --input_fastqs_list --library --output [--reference /Sars_cov_2.ASM985889v3.fa] [--gff 
/Sars_cov_2.ASM985889v3.gff3]\r +```\r +where the TSV file contains two or three columns tab-separated columns **without header**. Columns: sample name, path to FASTQ 1 and optionally path to FASTQ 2. \r +\r +| Sample | FASTQ 1 | FASTQ 2 (optional column) |\r +|-----------|-------------------------------|-------------------------------|\r +| sample1 | /path/to/sample1_fastq1.fastq | /path/to/sample1_fastq2.fastq |\r +| sample2 | /path/to/sample2_fastq1.fastq | /path/to/sample2_fastq2.fastq |\r +| ... | ... | ... |\r +\r +\r +For batch processing of assemblies use `--input_fastas_list`.\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline [-profile conda] --input_fastas_list --library --output [--reference /Sars_cov_2.ASM985889v3.fa] [--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +where the TSV file contains two columns tab-separated columns **without header**. Columns: sample name and path to FASTA.\r +\r +| Sample | FASTA | \r +|-----------|------------------------|\r +| sample1 | /path/to/sample1.fasta |\r +| sample2 | /path/to/sample2.fasta |\r +| ... | ... |\r +\r +For batch processing of VCFs use `--input_vcfs_list`.\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline [-profile conda] --input_vcfs_list --output [--reference /Sars_cov_2.ASM985889v3.fa] [--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +where the TSV file contains two columns tab-separated columns **without header**. Columns: sample name and path to VCF.\r +\r +| Sample | FASTA |\r +|-----------|------------------------|\r +| sample1 | /path/to/sample1.vcf |\r +| sample2 | /path/to/sample2.vcf |\r +| ... | ... 
|\r +\r +Optionally, provide BAM files for batch processing of VCFs using `--input_bams_list`.\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline [-profile conda] \\\r + --input_vcfs_list \\\r + --input_bams_list \\\r + --output \\\r + [--reference /Sars_cov_2.ASM985889v3.fa] \\\r + [--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +where the BAMs TSV file contains three columns tab-separated columns **without header**. Columns: sample name, \r +path to BAM and path to BAI.\r +\r +| Sample | BAM | BAI |\r +|-----------|----------------------|----------------------|\r +| sample1 | /path/to/sample1.bam | /path/to/sample1.bai |\r +| sample2 | /path/to/sample2.bam | /path/to/sample2.bai |\r +| ... | ... | ... |\r +\r +\r +\r +### Getting help\r +\r +You can always contact us directly or create a GitHub issue, otherwise see all available options using `--help`:\r +```\r +$ nextflow run tron-bioinformatics/covigator-ngs-pipeline -profile conda --help\r +\r +Usage:\r + nextflow run tron-bioinformatics/covigator-ngs-pipeline -profile conda --help\r +\r +Input:\r + * --fastq1: the first input FASTQ file (not compatible with --fasta, nor --vcf)\r + * --fasta: the FASTA file containing the assembly sequence (not compatible with --fastq1, nor --vcf)\r + * --vcf: the VCF file containing mutations to analyze (not compatible with --fastq1, nor --fasta)\r + * --bam: the BAM file containing reads to annotate VAFs on a VCF (not compatible with --fastq1, nor --fasta)\r + * --bai: the BAI index for a BAM file (not compatible with --fastq1, nor --fasta)\r + * --name: the sample name, output files will be named after this name\r + * --output: the folder where to publish output\r + * --input_fastqs_list: alternative to --name and --fastq1 for batch processing\r + * --library: required only when using --input_fastqs\r + * --input_fastas_list: alternative to --name and --fasta for batch processing\r + * --input_vcfs_list: alternative to --name and --vcf for batch processing\r + 
* --input_bams_list: alternative to --name, --vcf, --bam and --bai for batch processing\r +\r +Optional input only required to use a custom reference:\r + * --reference: the reference genome FASTA file, *.fai, *.dict and bwa indexes are required.\r + * --gff: the GFFv3 gene annotations file (required to run iVar and to phase mutations from all variant callers) \r + * --snpeff_data: path to the SnpEff data folder, it will be useful to use the pipeline on other virus than SARS-CoV-2\r + * --snpeff_config: path to the SnpEff config file, it will be useful to use the pipeline on other virus than SARS-CoV-2\r + * --snpeff_organism: organism to annotate with SnpEff, it will be useful to use the pipeline on other virus than SARS-CoV-2\r +\r +Optional input:\r + * --fastq2: the second input FASTQ file\r + * --primers: a BED file containing the primers used during library preparation. If provided primers are trimmed from the reads.\r + * --min_base_quality: minimum base call quality to take a base into account for variant calling (default: 20)\r + * --min_mapping_quality: minimum mapping quality to take a read into account for variant calling (default: 20)\r + * --vafator_min_base_quality: minimum base call quality to take a base into account for VAF annotation (default: 0)\r + * --vafator_min_mapping_quality: minimum mapping quality to take a read into account for VAF annotation (default: 0)\r + * --low_frequency_variant_threshold: VAF threshold to mark a variant as low frequency (default: 0.02)\r + * --subclonal_variant_threshold: VAF superior threshold to mark a variant as subclonal (default: 0.5)\r + * --lq_clonal_variant_threshold: VAF superior threshold to mark a variant as low quality clonal (default: 0.8)\r + * --memory: the amount of memory used by each job (default: 3g)\r + * --cpus: the number of CPUs used by each job (default: 1)\r + * --skip_lofreq: skips calling variants with LoFreq\r + * --skip_gatk: skips calling variants with GATK\r + * --skip_bcftools: 
skips calling variants with BCFTools\r + * --skip_ivar: skips calling variants with iVar\r + * --skip_pangolin: skips lineage determination with pangolin\r + * --match_score: global alignment match score, only applicable for assemblies (default: 2)\r + * --mismatch_score: global alignment mismatch score, only applicable for assemblies (default: -1)\r + * --open_gap_score: global alignment open gap score, only applicable for assemblies (default: -3)\r + * --extend_gap_score: global alignment extend gap score, only applicable for assemblies (default: -0.1)\r + * --skip_sarscov2_annotations: skip some of the SARS-CoV-2 specific annotations (default: false)\r + * --keep_intermediate: keep intermediate files (ie: BAM files and intermediate VCF files)\r + * --args_bcftools_mpileup: additional arguments for bcftools mpileup command (eg: --args_bcftools_mpileup='--ignore-overlaps')\r + * --args_bcftools_call: additional arguments for bcftools call command (eg: --args_bcftools_call='--something')\r + * --args_lofreq: additional arguments for lofreq command (eg: --args_lofreq='--something')\r + * --args_gatk: additional arguments for gatk command (eg: --args_gatk='--something')\r + * --args_ivar_samtools: additional arguments for ivar samtools mpileup command (eg: --args_ivar_samtools='--ignore-overlaps')\r + * --args_ivar: additional arguments for ivar command (eg: --args_ivar='--something')\r +\r +Output:\r + * Output a VCF file for each of BCFtools, GATK, LoFreq and iVar when FASTQ files are\r + provided or a single VCF obtained from a global alignment when a FASTA file is provided.\r + * A pangolin results file for each of the VCF files.\r + * Only when FASTQs are provided:\r + * FASTP statistics\r + * Depth and breadth of coverage analysis results\r + \r +```\r +\r +## Understanding the output\r +\r +Although the VCFs are normalized for both pipelines, the FASTQ pipeline runs four variant callers, while the FASTA\r +pipeline runs a single variant caller. 
Also, there are several metrics in the FASTQ pipeline that are not present\r +in the output of the FASTA pipeline. Here we will describe these outputs.\r +\r +### FASTQ pipeline output\r +\r +Find in the table below a description of each of the expected files and a link to a sample file for the FASTQ pipeline.\r +The VCF files will be described in more detail later.\r +\r +| Name | Description | Sample file |\r +|---------------------------------|----------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------|\r +| $NAME.fastp_stats.json | Output metrics of the fastp trimming process in JSON format | [ERR4145453.fastp_stats.json](_static/covigator_pipeline_sample_output_reads/ERR4145453.fastp_stats.json) |\r +| $NAME.fastp_stats.html | Output metrics of the fastp trimming process in HTML format | [ERR4145453.fastp_stats.html](_static/covigator_pipeline_sample_output_reads/ERR4145453.fastp_stats.html) |\r +| $NAME.deduplication_metrics.txt | Deduplication metrics | [ERR4145453.deduplication_metrics.txt](_static/covigator_pipeline_sample_output_reads/ERR4145453.deduplication_metrics.txt) |\r +| $NAME.coverage.tsv | Coverage metrics (eg: mean depth, % horizontal coverage) | [ERR4145453.coverage.tsv](_static/covigator_pipeline_sample_output_reads/ERR4145453.coverage.tsv) |\r +| $NAME.depth.tsv | Depth of coverage per position | [ERR4145453.depth.tsv](_static/covigator_pipeline_sample_output_reads/ERR4145453.depth.tsv) |\r +| $NAME.bcftools.vcf.gz | Bgzipped, tabix-indexed and annotated output VCF from BCFtools | [ERR4145453.bcftools.normalized.annotated.vcf.gz](_static/covigator_pipeline_sample_output_reads/ERR4145453.bcftools.normalized.annotated.vcf.gz) |\r +| $NAME.gatk.vcf.gz | Bgzipped, tabix-indexed and annotated output VCF from GATK | 
[ERR4145453.gatk.normalized.annotated.vcf.gz](_static/covigator_pipeline_sample_output_reads/ERR4145453.gatk.normalized.annotated.vcf.gz) |\r +| $NAME.lofreq.vcf.gz | Bgzipped, tabix-indexed and annotated output VCF from LoFreq | [ERR4145453.lofreq.normalized.annotated.vcf.gz](_static/covigator_pipeline_sample_output_reads/ERR4145453.lofreq.normalized.annotated.vcf.gz) |\r +| $NAME.ivar.vcf.gz | Bgzipped, tabix-indexed and annotated output VCF from LoFreq | [ERR4145453.ivar.tsv](_static/covigator_pipeline_sample_output_reads/ERR4145453.ivar.tsv) |\r +| $NAME.lofreq.pangolin.csv | Pangolin CSV output file derived from LoFreq mutations | [ERR4145453.lofreq.pangolin.csv](_static/covigator_pipeline_sample_output_reads/ERR4145453.lofreq.pangolin.csv) |\r +\r +\r +### FASTA pipeline output\r +\r +The FASTA pipeline returns a single VCF file. The VCF files will be described in more detail later.\r +\r +| Name | Description | Sample file |\r +|-----------------------------|--------------------------------------------------------------|------------------------------------------------------------------------------------------------------|\r +| $NAME.assembly.vcf.gz | Bgzipped, tabix-indexed and annotated output VCF | [ERR4145453.assembly.normalized.annotated.vcf.gz](_static/covigator_pipeline_sample_output_assembly/hCoV-19_NTXX.assembly.normalized.annotated.vcf.gz) |\r +\r +\r +## Annotations resources\r +\r +SARS-CoV-2 ASM985889v3 references were downloaded from Ensembl on 6th of October 2020:\r +- ftp://ftp.ensemblgenomes.org/pub/viruses/fasta/sars_cov_2/dna/Sars_cov_2.ASM985889v3.dna.toplevel.fa.gz\r +- ftp://ftp.ensemblgenomes.org/pub/viruses/gff3/sars_cov_2/Sars_cov_2.ASM985889v3.101.gff3.gz\r +\r +ConsHMM mutation depletion scores downloaded on 1st of July 2021:\r +- https://github.com/ernstlab/ConsHMM_CoV/blob/master/wuhCor1.mutDepletionConsHMM.bed\r +- https://github.com/ernstlab/ConsHMM_CoV/blob/master/wuhCor1.mutDepletionSarbecovirusConsHMM.bed\r +- 
https://github.com/ernstlab/ConsHMM_CoV/blob/master/wuhCor1.mutDepletionVertebrateCoVConsHMM.bed\r +\r +Gene annotations including Pfam domains downloaded from Ensembl on 25th of February 2021 from:\r +- ftp://ftp.ensemblgenomes.org/pub/viruses/json/sars_cov_2/sars_cov_2.json\r +\r +\r +## Future work\r +\r +- Primer trimming on an arbitrary sequencing library.\r +- Pipeline for Oxford Nanopore technology.\r +- Variant calls from assemblies contain an abnormally high number of deletions of size greater than 3 bp. This\r +is a technical artifact that would need to be avoided.\r +\r +## Bibliography\r +\r +- Di Tommaso, P., Chatzou, M., Floden, E. W., Barja, P. P., Palumbo, E., & Notredame, C. (2017). Nextflow enables reproducible computational workflows. Nature Biotechnology, 35(4), 316–319. https://doi.org/10.1038/nbt.3820\r +- Vasimuddin Md, Sanchit Misra, Heng Li, Srinivas Aluru. Efficient Architecture-Aware Acceleration of BWA-MEM for Multicore Systems. IEEE Parallel and Distributed Processing Symposium (IPDPS), 2019.\r +- Adrian Tan, Gonçalo R. Abecasis and Hyun Min Kang. Unified Representation of Genetic Variants. Bioinformatics (2015) 31(13): 2202-2204](http://bioinformatics.oxfordjournals.org/content/31/13/2202) and uses bcftools [Li, H. (2011). A statistical framework for SNP calling, mutation discovery, association mapping and population genetical parameter estimation from sequencing data. Bioinformatics (Oxford, England), 27(21), 2987–2993. 10.1093/bioinformatics/btr509\r +- Danecek P, Bonfield JK, Liddle J, Marshall J, Ohan V, Pollard MO, Whitwham A, Keane T, McCarthy SA, Davies RM, Li H. Twelve years of SAMtools and BCFtools. Gigascience. 2021 Feb 16;10(2):giab008. doi: 10.1093/gigascience/giab008. PMID: 33590861; PMCID: PMC7931819.\r +- Van der Auwera GA, Carneiro M, Hartl C, Poplin R, del Angel G, Levy-Moonshine A, Jordan T, Shakir K, Roazen D, Thibault J, Banks E, Garimella K, Altshuler D, Gabriel S, DePristo M. (2013). 
From FastQ Data to High-Confidence Variant Calls: The Genome Analysis Toolkit Best Practices Pipeline. Curr Protoc Bioinformatics, 43:11.10.1-11.10.33. DOI: 10.1002/0471250953.bi1110s43.\r +- Martin, M., Patterson, M., Garg, S., O Fischer, S., Pisanti, N., Klau, G., Schöenhuth, A., & Marschall, T. (2016). WhatsHap: fast and accurate read-based phasing. BioRxiv, 085050. https://doi.org/10.1101/085050\r +- Danecek, P., & McCarthy, S. A. (2017). BCFtools/csq: haplotype-aware variant consequences. Bioinformatics, 33(13), 2037–2039. https://doi.org/10.1093/bioinformatics/btx100\r +- Wilm, A., Aw, P. P. K., Bertrand, D., Yeo, G. H. T., Ong, S. H., Wong, C. H., Khor, C. C., Petric, R., Hibberd, M. L., & Nagarajan, N. (2012). LoFreq: A sequence-quality aware, ultra-sensitive variant caller for uncovering cell-population heterogeneity from high-throughput sequencing datasets. Nucleic Acids Research, 40(22), 11189–11201. https://doi.org/10.1093/nar/gks918\r +- Grubaugh, N. D., Gangavarapu, K., Quick, J., Matteson, N. L., De Jesus, J. G., Main, B. J., Tan, A. L., Paul, L. M., Brackney, D. E., Grewal, S., Gurfield, N., Van Rompay, K. K. A., Isern, S., Michael, S. F., Coffey, L. L., Loman, N. J., & Andersen, K. G. (2019). An amplicon-based sequencing framework for accurately measuring intrahost virus diversity using PrimalSeq and iVar. Genome Biology, 20(1), 8. https://doi.org/10.1186/s13059-018-1618-7\r +- Shifu Chen, Yanqing Zhou, Yaru Chen, Jia Gu; fastp: an ultra-fast all-in-one FASTQ preprocessor, Bioinformatics, Volume 34, Issue 17, 1 September 2018, Pages i884–i890, https://doi.org/10.1093/bioinformatics/bty560\r +- Kwon, S. Bin, & Ernst, J. (2021). Single-nucleotide conservation state annotation of the SARS-CoV-2 genome. Communications Biology, 4(1), 1–11. https://doi.org/10.1038/s42003-021-02231-w\r +- Cock, P. J., Antao, T., Chang, J. T., Chapman, B. A., Cox, C. J., Dalke, A., et al. (2009). 
Biopython: freely available Python tools for computational molecular biology and bioinformatics. Bioinformatics, 25(11), 1422–1423.\r +- Artem Tarasov, Albert J. Vilella, Edwin Cuppen, Isaac J. Nijman, Pjotr Prins, Sambamba: fast processing of NGS alignment formats, Bioinformatics, Volume 31, Issue 12, 15 June 2015, Pages 2032–2034, https://doi.org/10.1093/bioinformatics/btv098\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/417?version=1" ; + schema1:isBasedOn "https://github.com/TRON-Bioinformatics/covigator-ngs-pipeline" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CoVigator pipeline: variant detection pipeline for Sars-CoV-2 (and other viruses...)" ; + schema1:sdDatePublished "2024-07-12 13:34:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/417/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 240617 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11134 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-01-17T15:06:13Z" ; + schema1:dateModified "2023-01-17T15:06:13Z" ; + schema1:description """![CoVigator logo](images/CoVigator_logo_txt_nobg.png "CoVigator logo")\r +\r +# CoVigator pipeline: variant detection pipeline for Sars-CoV-2\r +\r +[![DOI](https://zenodo.org/badge/374669617.svg)](https://zenodo.org/badge/latestdoi/374669617)\r +[![Run tests](https://github.com/TRON-Bioinformatics/covigator-ngs-pipeline/actions/workflows/automated_tests.yml/badge.svg?branch=master)](https://github.com/TRON-Bioinformatics/covigator-ngs-pipeline/actions/workflows/automated_tests.yml)\r +[![Powered by NumFOCUS](https://img.shields.io/badge/powered%20by-Nextflow-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://www.nextflow.io/)\r 
+[![License](https://img.shields.io/badge/license-MIT-green)](https://opensource.org/licenses/MIT)\r +\r +\r +\r +The Covigator pipeline processes SARS-CoV-2 FASTQ or FASTA files into annotated and normalized analysis ready VCF files.\r +It also classifies samples into lineages using pangolin.\r +The pipeline is implemented in the Nextflow framework (Di Tommaso, 2017), it is a stand-alone pipeline that can be\r +used independently of the CoVigator dashboard and knowledge base.\r +\r +Although it is configured by default for SARS-CoV-2 it can be employed for the analysis of other microbial organisms \r +if the required references are provided.\r +\r +The result of the pipeline is one or more annotated VCFs with the list of SNVs and indels ready for analysis.\r +\r +The results from the CoVigator pipeline populate our CoVigator dashboard [https://covigator.tron-mainz.de](https://covigator.tron-mainz.de) \r +\r +**Table of Contents**\r +\r +1. [Two pipelines in one](#id1)\r +2. [Implementation](#id2)\r +3. [How to run](#id3)\r +4. [Understanding the output](#id4)\r +6. [Annotation resources](#id5)\r +7. [Future work](#id6)\r +8. [Bibliography](#id7)\r +\r +\r +## Two pipelines in one\r +\r +In CoVigator we analyse samples from two different formats, FASTQ files (e.g.: as provided by the European Nucleotide \r +Archive) and FASTA files containing a consensus assembly. While from the first we get the raw reads, \r +from the second we obtain already assembled genomes. Each of these formats has to be \r +analysed differently. Also, the output data that we can obtain from each of these is different.\r +\r +![CoVigator pipeline](images/pipeline.drawio.png)\r +\r +### Pipeline for FASTQ files\r +\r +When FASTQ files are provided the pipeline includes the following steps:\r +- **Trimming**. `fastp` is used to trim reads with default values. This step also includes QC filtering.\r +- **Alignment**. 
`BWA mem 2` is used for the alignment of single or paired end samples.\r +- **BAM preprocessing**. BAM files are prepared and duplicate reads are marked using GATK and Sambamba tools.\r +- **Primer trimming**. When a BED with primers is provided, these are trimmed from the reads using iVar. This is applicable to the results from all variant callers.\r +- **Coverage analysis**. `samtools coverage` and `samtools depth` are used to compute the horizontal and vertical \r + coverage respectively.\r +- **Variant calling**. Four different variant callers are employed: BCFtools, LoFreq, iVar and GATK. \r + Subsequent processing of resulting VCF files is independent for each caller.\r +- **Variant normalization**. `bcftools norm` is employed to left align indels, trim variant calls and remove variant duplicates.\r +- **Technical annotation**. `VAFator` is employed to add VAF and coverage annotations from the reads pileup.\r +- **Phasing**. Clonal mutations (ie: VAF >= 0.8) occurring in the same amino acid are merged for its correct functional annotation.\r +- **Biological annotation**. `SnpEff` is employed to annotate the variant consequences of variants and\r + `bcftools annotate` is employed to add additional SARS-CoV-2 annotations.\r +- **Lineage determination**. `pangolin` is used for this purpose, this runs over the results from each of the variant callers separately.\r +\r +Both single end and paired end FASTQ files are supported.\r +\r +### Pipeline for FASTA files\r +\r +When a FASTA file is provided with a single assembly sequence the pipeline includes the following steps:\r +- **Variant calling**. A Smith-Waterman global alignment is performed against the reference sequence to call SNVs and \r + indels. Indels longer than 50 bp and at the beginning or end of the assembly sequence are excluded. Any mutation where\r + either reference or assembly contain an N is excluded.\r +- **Variant normalization**. Same as described above.\r +- **Phasing**. 
mutations occurring in the same amino acid are merged for its correct annotation.\r +- **Biological annotation**. Same as described above.\r +- **Lineage determination**. `pangolin` is used for this purpose.\r +\r +The FASTA file is expected to contain a single assembly sequence. \r +Bear in mind that only clonal variants can be called on the assembly.\r +\r +### Pipeline for VCF files\r +\r +When a VCF file is provided the pipeline includes the following steps:\r +- **Variant normalization**. Same as described above.\r +- **Technical annotation**. Same as described above (optional if BAM is provided)\r +- **Phasing**. mutations occurring in the same amino acid are merged for its correct annotation.\r +- **Biological annotation**. Same as described above\r +- **Lineage determination**. `pangolin` is used for this purpose.\r +\r +## Implementation\r +\r +The pipeline is implemented as a Nextflow workflow with its DSL2 syntax.\r +The dependencies are managed through a conda environment to ensure version traceability and reproducibility.\r +The references for SARS-CoV-2 are embedded in the pipeline.\r +The pipeline is based on a number of third-party tools, plus a custom implementation based on biopython (Cock, 2009) \r +for the alignment and subsequent variant calling over a FASTA file.\r +\r +All code is open sourced in GitHub [https://github.com/TRON-Bioinformatics/covigator-ngs-pipeline](https://github.com/TRON-Bioinformatics/covigator-ngs-pipeline)\r +and made available under the MIT license. We welcome any contribution. \r +If you have troubles using the CoVigator pipeline or you find an issue, we will be thankful if you would report a ticket \r +in GitHub.\r +\r +The alignment, BAM preprocessing and variant normalization pipelines are based on the implementations in additional \r +Nextflow pipelines within the TronFlow initiative [https://tronflow-docs.readthedocs.io/](https://tronflow-docs.readthedocs.io/). 
\r +\r +\r +### Variant annotations\r +\r +The variants derived from a FASTQ file are annotated on the `FILTER` column using the VAFator \r +(https://github.com/TRON-Bioinformatics/vafator) variant allele frequency \r +(VAF) into `LOW_FREQUENCY`, `SUBCLONAL`, `LOW_QUALITY_CLONAL` and finally `PASS` variants correspond to clonal variants. \r +By default, variants with a VAF < 2 % are considered `LOW_FREQUENCY`, variants with a VAF >= 2 % and < 50 % are \r +considered `SUBCLONAL` and variants with a VAF >= 50 % and < 80 % are considered `LOW_QUALITY_CLONAL`. \r +These thresholds can be changed with the parameters `--low_frequency_variant_threshold`,\r +`--subclonal_variant_threshold` and `--low_quality_clonal_variant_threshold` respectively.\r +\r +VAFator technical annotations:\r +\r +- `INFO/vafator_af`: variant allele frequency of the mutation \r +- `INFO/vafator_ac`: number of reads supporting the mutation \r +- `INFO/vafator_dp`: total number of reads at the position, in the case of indels this represents the number of reads in the previous position\r +\r +SnpEff provides the functional annotations. And all mutations are additionally annotated with the following SARS-CoV-2 specific annotations:\r +- ConsHMM conservation scores as reported in (Kwon, 2021)\r +- Pfam domains as reported in Ensembl annotations.\r +\r +Biological annotations: \r +\r +- `INFO/ANN` are the SnpEff consequence annotations (eg: overlapping gene, effect of the mutation). 
\r +These are described in detail here [http://pcingola.github.io/SnpEff/se_inputoutput/](http://pcingola.github.io/SnpEff/se_inputoutput/) \r +- `INFO/CONS_HMM_SARS_COV_2` is the ConsHMM conservation score in SARS-CoV-2\r +- `INFO/CONS_HMM_SARBECOVIRUS` is the ConsHMM conservation score among Sarbecovirus\r +- `INFO/CONS_HMM_VERTEBRATE_COV` is the ConsHMM conservation score among vertebrate Corona virus\r +- `INFO/PFAM_NAME` is the Interpro name for the overlapping Pfam domains\r +- `INFO/PFAM_DESCRIPTION` is the Interpro description for the overlapping Pfam domains\r +- `INFO/problematic` contains the filter provided in DeMaio et al. (2020) for problematic mutations\r +\r +According to DeMaio et al. (2020), mutations at the beginning (ie: POS <= 50) and end (ie: POS >= 29,804) of the \r +genome are filtered out\r +\r +This is an example of biological annotations of a missense mutation in the spike protein on the N-terminal subunit 1 domain.\r +```\r +ANN=A|missense_variant|MODERATE|S|gene-GU280_gp02|transcript|TRANSCRIPT_gene-GU280_gp02|protein_coding|1/1|c.118G>A|\r +p.D40N|118/3822|118/3822|40/1273||;CONS_HMM_SARS_COV_2=0.57215;CONS_HMM_SARBECOVIRUS=0.57215;CONS_HMM_VERTEBRATE_COV=0;\r +PFAM_NAME=bCoV_S1_N;PFAM_DESCRIPTION=Betacoronavirus-like spike glycoprotein S1, N-terminal\r +```\r +\r +\r +### Phasing limitations\r +\r +The phasing implementation is applicable only to clonal mutations. It assumes all clonal mutations are in phase and \r +hence it merges those occurring in the same amino acid.\r +In order to phase intrahost mutations we would need to implement a read-backed phasing approach such as in WhatsHap \r +or GATK's ReadBackedPhasing. 
Unfortunately these tools do not support the scenario of a haploid organism with an\r +undefined number of subclones.\r +For this reason, phasing is implemented with custom Python code at `bin/phasing.py`.\r +\r +### Primers trimming\r +\r +With some library preparation protocols such as ARTIC it is recommended to trim the primers from the reads.\r +We have observed that if primers are not trimmed spurious mutations are being called, especially SNVs with lower frequencies and long deletions.\r +Also the variant allele frequencies of clonal mutations are underestimated.\r +\r +The BED files containing the primers for each ARTIC version can be found at https://github.com/artic-network/artic-ncov2019/tree/master/primer_schemes/nCoV-2019.\r +\r +If the adequate BED file is provided to the CoVigator pipeline with `--primers` the primers will be trimmed with iVar. \r +This affects the output of every variant caller, not only iVar.\r +\r +### Reference data\r +\r +The default SARS-CoV-2 reference files correspond to Sars_cov_2.ASM985889v3 and were downloaded from Ensembl servers.\r +No additional parameter needs to be provided to use the default SARS-CoV-2 reference genome.\r +\r +#### Using a custom reference genome\r +\r +These references can be customised to use a different SARS-CoV-2 reference or to analyse a different virus.\r +Two files need to be provided:\r +- Use a custom reference genome by providing the parameter `--reference your.fasta`.\r +- Gene annotation file in GFFv3 format `--gff your.gff`. 
This is only required to run iVar\r +\r +Additionally, the FASTA needs bwa indexes, .fai index and a .dict index.\r +These indexes can be generated with the following two commands:\r +```\r +bwa index reference.fasta\r +samtools faidx reference.fasta\r +gatk CreateSequenceDictionary --REFERENCE your.fasta\r +```\r +\r +**NOTE**: beware that for Nextflow to find these indices the reference needs to be passed as an absolute path.\r +\r +The SARS-CoV-2 specific annotations will be skipped when using a custom genome.\r +\r +In order to have SnpEff functional annotations available you will also need to provide three parameters:\r +- `--snpeff_organism`: organism to annotate with SnpEff (ie: as registered in SnpEff)\r +- `--snpeff_data`: path to the SnpEff data folder\r +- `--snpeff_config`: path to the SnpEff config file\r +\r +### Intrahost mutations\r +\r +Some mutations may be observed in a subset of the virus sample, this may arise through intrahost virus evolution or\r +co-infection. Intrahost mutations can only be detected when analysing the raw reads (ie: the FASTQs) \r +as in the assembly (ie: the FASTA file) a single virus consensus sequence is represented. \r +BCFtools and GATK do not normally capture intrahost mutations; on the other hand LoFreq and iVar both capture\r +mutations that deviate from a clonal-like VAF. \r +Nevertheless, mutations with lower variant allele frequency (VAF) are challenging to distinguish from sequencing and\r +analytical errors. 
\r +\r +Mutations are annotated on the `FILTER` column using the VAF into three categories: \r +- `LOW_FREQUENCY`: subset of intrahost mutations with lowest frequencies, potentially enriched with false positive calls (VAF < 2 %).\r +- `SUBCLONAL`: subset of intrahost mutations with higher frequencies (2 % <= VAF < 50 %).\r +- `LOW_QUALITY_CLONAL`: subset of clonal mutations with lower frequencies (50 % <= VAF < 80 %).\r +- `PASS` clonal mutations (VAF >= 80 %)\r +\r +Other low quality mutations are removed from the output.\r +\r +The VAF thresholds can be changed with the parameters `--low_frequency_variant_threshold`,\r +`--subclonal_variant_threshold` and `--low_quality_clonal_variant_threshold`.\r +\r +## How to run\r +\r +### Requirements\r +\r +- Nextflow >= 19.10.0\r +- Java >= 8\r +- Conda >=4.9\r +\r +### Testing\r +\r +To run the workflow on a test assembly dataset run:\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline -profile conda,test_fasta\r +```\r +\r +Find the output in the folder `covigator_test_fasta`.\r +\r +To run the workflow on a test raw reads dataset run:\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline -profile conda,test_fastq\r +```\r +\r +Find the output in the folder `covigator_test_fastq`.\r +\r +The above commands are useful to create the conda environments beforehand.\r +\r +**NOTE**: pangolin is the most time-consuming step of the whole pipeline. 
To make it faster, locate the conda \r +environment that Nextflow created with pangolin (eg: `find $YOUR_NEXTFOW_CONDA_ENVS_FOLDER -name pangolin`) and run\r +`pangolin --decompress-model`.\r +\r +### Running\r +\r +For paired end reads:\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline \\\r +[-r v0.10.0] \\\r +[-profile conda] \\\r +--fastq1 \\\r +--fastq2 \\\r +--name example_run \\\r +--output \\\r +[--reference /Sars_cov_2.ASM985889v3.fa] \\\r +[--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +\r +For single end reads:\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline \\\r +[-r v0.10.0] \\\r +[-profile conda] \\\r +--fastq1 \\\r +--name example_run \\\r +--output \\\r +[--reference /Sars_cov_2.ASM985889v3.fa] \\\r +[--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +\r +For assembly:\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline \\\r +[-r v0.10.0] \\\r +[-profile conda] \\\r +--fasta \\\r +--name example_run \\\r +--output \\\r +[--reference /Sars_cov_2.ASM985889v3.fa] \\\r +[--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +\r +For VCF:\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline \\\r +[-r v0.10.0] \\\r +[-profile conda] \\\r +--vcf \\\r +--name example_run \\\r +--output \\\r +[--reference /Sars_cov_2.ASM985889v3.fa] \\\r +[--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +\r +As an optional input when processing directly VCF files you can provide BAM files to annotate VAFs:\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline \\\r +[-r v0.10.0] \\\r +[-profile conda] \\\r +--vcf \\\r +--bam \\\r +--bai \\\r +--name example_run \\\r +--output \\\r +[--reference /Sars_cov_2.ASM985889v3.fa] \\\r +[--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +\r +For batch processing of reads use `--input_fastqs_list` and `--name`.\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline [-profile conda] --input_fastqs_list --library --output [--reference /Sars_cov_2.ASM985889v3.fa] [--gff 
/Sars_cov_2.ASM985889v3.gff3]\r +```\r +where the TSV file contains two or three columns tab-separated columns **without header**. Columns: sample name, path to FASTQ 1 and optionally path to FASTQ 2. \r +\r +| Sample | FASTQ 1 | FASTQ 2 (optional column) |\r +|-----------|-------------------------------|-------------------------------|\r +| sample1 | /path/to/sample1_fastq1.fastq | /path/to/sample1_fastq2.fastq |\r +| sample2 | /path/to/sample2_fastq1.fastq | /path/to/sample2_fastq2.fastq |\r +| ... | ... | ... |\r +\r +\r +For batch processing of assemblies use `--input_fastas_list`.\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline [-profile conda] --input_fastas_list --library --output [--reference /Sars_cov_2.ASM985889v3.fa] [--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +where the TSV file contains two columns tab-separated columns **without header**. Columns: sample name and path to FASTA.\r +\r +| Sample | FASTA | \r +|-----------|------------------------|\r +| sample1 | /path/to/sample1.fasta |\r +| sample2 | /path/to/sample2.fasta |\r +| ... | ... |\r +\r +For batch processing of VCFs use `--input_vcfs_list`.\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline [-profile conda] --input_vcfs_list --output [--reference /Sars_cov_2.ASM985889v3.fa] [--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +where the TSV file contains two columns tab-separated columns **without header**. Columns: sample name and path to VCF.\r +\r +| Sample | FASTA |\r +|-----------|------------------------|\r +| sample1 | /path/to/sample1.vcf |\r +| sample2 | /path/to/sample2.vcf |\r +| ... | ... 
|\r +\r +Optionally, provide BAM files for batch processing of VCFs using `--input_bams_list`.\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline [-profile conda] \\\r + --input_vcfs_list \\\r + --input_bams_list \\\r + --output \\\r + [--reference /Sars_cov_2.ASM985889v3.fa] \\\r + [--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +where the BAMs TSV file contains three columns tab-separated columns **without header**. Columns: sample name, \r +path to BAM and path to BAI.\r +\r +| Sample | BAM | BAI |\r +|-----------|----------------------|----------------------|\r +| sample1 | /path/to/sample1.bam | /path/to/sample1.bai |\r +| sample2 | /path/to/sample2.bam | /path/to/sample2.bai |\r +| ... | ... | ... |\r +\r +\r +\r +### Getting help\r +\r +You can always contact us directly or create a GitHub issue, otherwise see all available options using `--help`:\r +```\r +$ nextflow run tron-bioinformatics/covigator-ngs-pipeline -profile conda --help\r +\r +Usage:\r + nextflow run tron-bioinformatics/covigator-ngs-pipeline -profile conda --help\r +\r +Input:\r + * --fastq1: the first input FASTQ file (not compatible with --fasta, nor --vcf)\r + * --fasta: the FASTA file containing the assembly sequence (not compatible with --fastq1, nor --vcf)\r + * --vcf: the VCF file containing mutations to analyze (not compatible with --fastq1, nor --fasta)\r + * --bam: the BAM file containing reads to annotate VAFs on a VCF (not compatible with --fastq1, nor --fasta)\r + * --bai: the BAI index for a BAM file (not compatible with --fastq1, nor --fasta)\r + * --name: the sample name, output files will be named after this name\r + * --output: the folder where to publish output\r + * --input_fastqs_list: alternative to --name and --fastq1 for batch processing\r + * --library: required only when using --input_fastqs\r + * --input_fastas_list: alternative to --name and --fasta for batch processing\r + * --input_vcfs_list: alternative to --name and --vcf for batch processing\r + 
* --input_bams_list: alternative to --name, --vcf, --bam and --bai for batch processing\r +\r +Optional input only required to use a custom reference:\r + * --reference: the reference genome FASTA file, *.fai, *.dict and bwa indexes are required.\r + * --gff: the GFFv3 gene annotations file (required to run iVar and to phase mutations from all variant callers) \r + * --snpeff_data: path to the SnpEff data folder, it will be useful to use the pipeline on other virus than SARS-CoV-2\r + * --snpeff_config: path to the SnpEff config file, it will be useful to use the pipeline on other virus than SARS-CoV-2\r + * --snpeff_organism: organism to annotate with SnpEff, it will be useful to use the pipeline on other virus than SARS-CoV-2\r +\r +Optional input:\r + * --fastq2: the second input FASTQ file\r + * --primers: a BED file containing the primers used during library preparation. If provided primers are trimmed from the reads.\r + * --min_base_quality: minimum base call quality to take a base into account for variant calling (default: 20)\r + * --min_mapping_quality: minimum mapping quality to take a read into account for variant calling (default: 20)\r + * --vafator_min_base_quality: minimum base call quality to take a base into account for VAF annotation (default: 0)\r + * --vafator_min_mapping_quality: minimum mapping quality to take a read into account for VAF annotation (default: 0)\r + * --low_frequency_variant_threshold: VAF threshold to mark a variant as low frequency (default: 0.02)\r + * --subclonal_variant_threshold: VAF superior threshold to mark a variant as subclonal (default: 0.5)\r + * --lq_clonal_variant_threshold: VAF superior threshold to mark a variant as loq quality clonal (default: 0.8)\r + * --memory: the ammount of memory used by each job (default: 3g)\r + * --cpus: the number of CPUs used by each job (default: 1)\r + * --skip_lofreq: skips calling variants with LoFreq\r + * --skip_gatk: skips calling variants with GATK\r + * --skip_bcftools: 
skips calling variants with BCFTools\r + * --skip_ivar: skips calling variants with iVar\r + * --skip_pangolin: skips lineage determination with pangolin\r + * --match_score: global alignment match score, only applicable for assemblies (default: 2)\r + * --mismatch_score: global alignment mismatch score, only applicable for assemblies (default: -1)\r + * --open_gap_score: global alignment open gap score, only applicable for assemblies (default: -3)\r + * --extend_gap_score: global alignment extend gap score, only applicable for assemblies (default: -0.1)\r + * --skip_sarscov2_annotations: skip some of the SARS-CoV-2 specific annotations (default: false)\r + * --keep_intermediate: keep intermediate files (ie: BAM files and intermediate VCF files)\r + * --args_bcftools_mpileup: additional arguments for bcftools mpileup command (eg: --args_bcftools_mpileup='--ignore-overlaps')\r + * --args_bcftools_call: additional arguments for bcftools call command (eg: --args_bcftools_call='--something')\r + * --args_lofreq: additional arguments for lofreq command (eg: --args_lofreq='--something')\r + * --args_gatk: additional arguments for gatk command (eg: --args_gatk='--something')\r + * --args_ivar_samtools: additional arguments for ivar samtools mpileup command (eg: --args_ivar_samtools='--ignore-overlaps')\r + * --args_ivar: additional arguments for ivar command (eg: --args_ivar='--something')\r +\r +Output:\r + * Output a VCF file for each of BCFtools, GATK, LoFreq and iVar when FASTQ files are\r + provided or a single VCF obtained from a global alignment when a FASTA file is provided.\r + * A pangolin results file for each of the VCF files.\r + * Only when FASTQs are provided:\r + * FASTP statistics\r + * Depth and breadth of coverage analysis results\r + \r +```\r +\r +## Understanding the output\r +\r +Although the VCFs are normalized for both pipelines, the FASTQ pipeline runs four variant callers, while the FASTA\r +pipeline runs a single variant caller. 
Also, there are several metrics in the FASTQ pipeline that are not present\r +in the output of the FASTA pipeline. Here we will describe these outputs.\r +\r +### FASTQ pipeline output\r +\r +Find in the table below a description of each of the expected files and a link to a sample file for the FASTQ pipeline.\r +The VCF files will be described in more detail later.\r +\r +| Name | Description | Sample file |\r +|---------------------------------|----------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------|\r +| $NAME.fastp_stats.json | Output metrics of the fastp trimming process in JSON format | [ERR4145453.fastp_stats.json](_static/covigator_pipeline_sample_output_reads/ERR4145453.fastp_stats.json) |\r +| $NAME.fastp_stats.html | Output metrics of the fastp trimming process in HTML format | [ERR4145453.fastp_stats.html](_static/covigator_pipeline_sample_output_reads/ERR4145453.fastp_stats.html) |\r +| $NAME.deduplication_metrics.txt | Deduplication metrics | [ERR4145453.deduplication_metrics.txt](_static/covigator_pipeline_sample_output_reads/ERR4145453.deduplication_metrics.txt) |\r +| $NAME.coverage.tsv | Coverage metrics (eg: mean depth, % horizontal coverage) | [ERR4145453.coverage.tsv](_static/covigator_pipeline_sample_output_reads/ERR4145453.coverage.tsv) |\r +| $NAME.depth.tsv | Depth of coverage per position | [ERR4145453.depth.tsv](_static/covigator_pipeline_sample_output_reads/ERR4145453.depth.tsv) |\r +| $NAME.bcftools.vcf.gz | Bgzipped, tabix-indexed and annotated output VCF from BCFtools | [ERR4145453.bcftools.normalized.annotated.vcf.gz](_static/covigator_pipeline_sample_output_reads/ERR4145453.bcftools.normalized.annotated.vcf.gz) |\r +| $NAME.gatk.vcf.gz | Bgzipped, tabix-indexed and annotated output VCF from GATK | 
[ERR4145453.gatk.normalized.annotated.vcf.gz](_static/covigator_pipeline_sample_output_reads/ERR4145453.gatk.normalized.annotated.vcf.gz) |\r +| $NAME.lofreq.vcf.gz | Bgzipped, tabix-indexed and annotated output VCF from LoFreq | [ERR4145453.lofreq.normalized.annotated.vcf.gz](_static/covigator_pipeline_sample_output_reads/ERR4145453.lofreq.normalized.annotated.vcf.gz) |\r +| $NAME.ivar.vcf.gz | Bgzipped, tabix-indexed and annotated output VCF from LoFreq | [ERR4145453.ivar.tsv](_static/covigator_pipeline_sample_output_reads/ERR4145453.ivar.tsv) |\r +| $NAME.lofreq.pangolin.csv | Pangolin CSV output file derived from LoFreq mutations | [ERR4145453.lofreq.pangolin.csv](_static/covigator_pipeline_sample_output_reads/ERR4145453.lofreq.pangolin.csv) |\r +\r +\r +### FASTA pipeline output\r +\r +The FASTA pipeline returns a single VCF file. The VCF files will be described in more detail later.\r +\r +| Name | Description | Sample file |\r +|-----------------------------|--------------------------------------------------------------|------------------------------------------------------------------------------------------------------|\r +| $NAME.assembly.vcf.gz | Bgzipped, tabix-indexed and annotated output VCF | [ERR4145453.assembly.normalized.annotated.vcf.gz](_static/covigator_pipeline_sample_output_assembly/hCoV-19_NTXX.assembly.normalized.annotated.vcf.gz) |\r +\r +\r +## Annotations resources\r +\r +SARS-CoV-2 ASM985889v3 references were downloaded from Ensembl on 6th of October 2020:\r +- ftp://ftp.ensemblgenomes.org/pub/viruses/fasta/sars_cov_2/dna/Sars_cov_2.ASM985889v3.dna.toplevel.fa.gz\r +- ftp://ftp.ensemblgenomes.org/pub/viruses/gff3/sars_cov_2/Sars_cov_2.ASM985889v3.101.gff3.gz\r +\r +ConsHMM mutation depletion scores downloaded on 1st of July 2021:\r +- https://github.com/ernstlab/ConsHMM_CoV/blob/master/wuhCor1.mutDepletionConsHMM.bed\r +- https://github.com/ernstlab/ConsHMM_CoV/blob/master/wuhCor1.mutDepletionSarbecovirusConsHMM.bed\r +- 
https://github.com/ernstlab/ConsHMM_CoV/blob/master/wuhCor1.mutDepletionVertebrateCoVConsHMM.bed\r +\r +Gene annotations including Pfam domains downloaded from Ensembl on 25th of February 2021 from:\r +- ftp://ftp.ensemblgenomes.org/pub/viruses/json/sars_cov_2/sars_cov_2.json\r +\r +\r +## Future work\r +\r +- Primer trimming on an arbitrary sequencing library.\r +- Pipeline for Oxford Nanopore technology.\r +- Variant calls from assemblies contain an abnormally high number of deletions of size greater than 3 bp. This\r +is a technical artifact that would need to be avoided.\r +\r +## Bibliography\r +\r +- Di Tommaso, P., Chatzou, M., Floden, E. W., Barja, P. P., Palumbo, E., & Notredame, C. (2017). Nextflow enables reproducible computational workflows. Nature Biotechnology, 35(4), 316–319. https://doi.org/10.1038/nbt.3820\r +- Vasimuddin Md, Sanchit Misra, Heng Li, Srinivas Aluru. Efficient Architecture-Aware Acceleration of BWA-MEM for Multicore Systems. IEEE Parallel and Distributed Processing Symposium (IPDPS), 2019.\r +- Adrian Tan, Gonçalo R. Abecasis and Hyun Min Kang. Unified Representation of Genetic Variants. Bioinformatics (2015) 31(13): 2202-2204](http://bioinformatics.oxfordjournals.org/content/31/13/2202) and uses bcftools [Li, H. (2011). A statistical framework for SNP calling, mutation discovery, association mapping and population genetical parameter estimation from sequencing data. Bioinformatics (Oxford, England), 27(21), 2987–2993. 10.1093/bioinformatics/btr509\r +- Danecek P, Bonfield JK, Liddle J, Marshall J, Ohan V, Pollard MO, Whitwham A, Keane T, McCarthy SA, Davies RM, Li H. Twelve years of SAMtools and BCFtools. Gigascience. 2021 Feb 16;10(2):giab008. doi: 10.1093/gigascience/giab008. PMID: 33590861; PMCID: PMC7931819.\r +- Van der Auwera GA, Carneiro M, Hartl C, Poplin R, del Angel G, Levy-Moonshine A, Jordan T, Shakir K, Roazen D, Thibault J, Banks E, Garimella K, Altshuler D, Gabriel S, DePristo M. (2013). 
From FastQ Data to High-Confidence Variant Calls: The Genome Analysis Toolkit Best Practices Pipeline. Curr Protoc Bioinformatics, 43:11.10.1-11.10.33. DOI: 10.1002/0471250953.bi1110s43.\r +- Martin, M., Patterson, M., Garg, S., O Fischer, S., Pisanti, N., Klau, G., Schöenhuth, A., & Marschall, T. (2016). WhatsHap: fast and accurate read-based phasing. BioRxiv, 085050. https://doi.org/10.1101/085050\r +- Danecek, P., & McCarthy, S. A. (2017). BCFtools/csq: haplotype-aware variant consequences. Bioinformatics, 33(13), 2037–2039. https://doi.org/10.1093/bioinformatics/btx100\r +- Wilm, A., Aw, P. P. K., Bertrand, D., Yeo, G. H. T., Ong, S. H., Wong, C. H., Khor, C. C., Petric, R., Hibberd, M. L., & Nagarajan, N. (2012). LoFreq: A sequence-quality aware, ultra-sensitive variant caller for uncovering cell-population heterogeneity from high-throughput sequencing datasets. Nucleic Acids Research, 40(22), 11189–11201. https://doi.org/10.1093/nar/gks918\r +- Grubaugh, N. D., Gangavarapu, K., Quick, J., Matteson, N. L., De Jesus, J. G., Main, B. J., Tan, A. L., Paul, L. M., Brackney, D. E., Grewal, S., Gurfield, N., Van Rompay, K. K. A., Isern, S., Michael, S. F., Coffey, L. L., Loman, N. J., & Andersen, K. G. (2019). An amplicon-based sequencing framework for accurately measuring intrahost virus diversity using PrimalSeq and iVar. Genome Biology, 20(1), 8. https://doi.org/10.1186/s13059-018-1618-7\r +- Shifu Chen, Yanqing Zhou, Yaru Chen, Jia Gu; fastp: an ultra-fast all-in-one FASTQ preprocessor, Bioinformatics, Volume 34, Issue 17, 1 September 2018, Pages i884–i890, https://doi.org/10.1093/bioinformatics/bty560\r +- Kwon, S. Bin, & Ernst, J. (2021). Single-nucleotide conservation state annotation of the SARS-CoV-2 genome. Communications Biology, 4(1), 1–11. https://doi.org/10.1038/s42003-021-02231-w\r +- Cock, P. J., Antao, T., Chang, J. T., Chapman, B. A., Cox, C. J., Dalke, A., et al. (2009). 
Biopython: freely available Python tools for computational molecular biology and bioinformatics. Bioinformatics, 25(11), 1422–1423.\r +- Artem Tarasov, Albert J. Vilella, Edwin Cuppen, Isaac J. Nijman, Pjotr Prins, Sambamba: fast processing of NGS alignment formats, Bioinformatics, Volume 31, Issue 12, 15 June 2015, Pages 2032–2034, https://doi.org/10.1093/bioinformatics/btv098\r +""" ; + schema1:image ; + schema1:keywords "Bioinformatics, SARS-CoV-2, covid-19, variant calling, Nextflow" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "CoVigator pipeline: variant detection pipeline for Sars-CoV-2 (and other viruses...)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/417?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Take an anndata file, and perform basic QC with scanpy. Produces a filtered AnnData object." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/467?version=2" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq_CellQC" ; + schema1:sdDatePublished "2024-07-12 13:22:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/467/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 21389 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-06-21T06:48:26Z" ; + schema1:dateModified "2023-06-22T06:28:07Z" ; + schema1:description "Take an anndata file, and perform basic QC with scanpy. Produces a filtered AnnData object." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/467?version=3" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "scRNAseq_CellQC" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/467?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + ; + ns1:output , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2023-01-13T14:08:15.844414" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "rnaseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.3" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """\r +\r +![Logo](https://cbg-ethz.github.io/V-pipe/img/logo.svg)\r +\r +[![bio.tools](https://img.shields.io/badge/bio-tools-blue.svg)](https://bio.tools/V-Pipe)\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥7.11.0-blue.svg)](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe)\r +[![Deploy Docker image](https://github.com/cbg-ethz/V-pipe/actions/workflows/deploy-docker.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe)\r +[![Tests](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml)\r +[![Mega-Linter](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml)\r +[![License: Apache-2.0](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\r +\r +V-pipe is a workflow designed for the analysis of 
next generation sequencing (NGS) data from viral pathogens. It produces a number of results in a curated format (e.g., consensus sequences, SNV calls, local/global haplotypes).\r +V-pipe is written using the Snakemake workflow management system.\r +\r +## Usage\r +\r +Different ways of initializing V-pipe are presented below. We strongly encourage you to deploy it [using the quick install script](#using-quick-install-script), as this is our preferred method.\r +\r +To configure V-pipe refer to the documentation present in [config/README.md](config/README.md).\r +\r +V-pipe expects the input samples to be organized in a [two-level](config/README.md#samples) directory hierarchy,\r +and the sequencing reads must be provided in a sub-folder named `raw_data`. Further details can be found on the [website](https://cbg-ethz.github.io/V-pipe/usage/).\r +Check the utils subdirectory for [mass-importers tools](utils/README.md#samples-mass-importers) that can assist you in generating this hierarchy.\r +\r +We provide [virus-specific base configuration files](config/README.md#virus-base-config) which contain handy defaults for, e.g., HIV and SARS-CoV-2. 
Set the virus in the general section of the configuration file:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +```\r +\r +Also see [snakemake's documentation](https://snakemake.readthedocs.io/en/stable/executing/cli.html) to learn more about the command-line options available when executing the workflow.\r +\r +Tutorials introducing usage of V-pipe are available in the [docs/](docs/README.md) subdirectory.\r +\r +### Using quick install script\r +\r +To deploy V-pipe, use the [installation script](utils/README.md#quick-installer) with the following parameters:\r +\r +```bash\r +curl -O 'https://raw.githubusercontent.com/cbg-ethz/V-pipe/master/utils/quick_install.sh'\r +./quick_install.sh -w work\r +```\r +\r +This script will download and install miniconda, checkout the V-pipe git repository (use `-b` to specify which branch/tag) and setup a work directory (specified with `-w`) with an executable script that will execute the workflow:\r +\r +```bash\r +cd work\r +# edit config.yaml and provide samples/ directory\r +./vpipe --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Docker\r +\r +Note: the [docker image](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe) is only setup with components to run the workflow for HIV and SARS-CoV-2 virus base configurations.\r +Using V-pipe with other viruses or configurations might require internet connectivity for additional software components.\r +\r +Create `config.yaml` or `vpipe.config` and then populate the `samples/` directory.\r +For example, the following config file could be used:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +\r +output:\r + snv: true\r + local: true\r + global: false\r + visualization: true\r + QA: true\r +```\r +\r +Then execute:\r +\r +```bash\r +docker run --rm -it -v $PWD:/work ghcr.io/cbg-ethz/v-pipe:master --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Snakedeploy\r +\r +First install [mamba](https://github.com/conda-forge/miniforge#mambaforge), 
then create and activate an environment with Snakemake and Snakedeploy:\r +\r +```bash\r +mamba create -c conda-forge -c bioconda --name snakemake snakemake snakedeploy\r +conda activate snakemake\r +```\r +\r +Snakemake's [official workflow installer Snakedeploy](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe) can now be used:\r +\r +```bash\r +snakedeploy deploy-workflow https://github.com/cbg-ethz/V-pipe --tag master .\r +# edit config/config.yaml and provide samples/ directory\r +snakemake --use-conda --jobs 4 --printshellcmds --dry-run\r +```\r +\r +## Dependencies\r +\r +- **[Conda](https://conda.io/docs/index.html)**\r +\r + Conda is a cross-platform package management system and an environment manager application. Snakemake uses mamba as a package manager.\r +\r +- **[Snakemake](https://snakemake.readthedocs.io/)**\r +\r + Snakemake is the central workflow and dependency manager of V-pipe. It determines the order in which individual tools are invoked and checks that programs do not exit unexpectedly.\r +\r +- **[VICUNA](https://www.broadinstitute.org/viral-genomics/vicuna)**\r +\r + VICUNA is a _de novo_ assembly software designed for populations with high mutation rates. It is used to build an initial reference for mapping reads with ngshmmalign aligner when a `references/cohort_consensus.fasta` file is not provided. Further details can be found in the [wiki](https://github.com/cbg-ethz/V-pipe/wiki/getting-started#input-files) pages.\r +\r +### Computational tools\r +\r +Other dependencies are managed by using isolated conda environments per rule, and below we list some of the computational tools integrated in V-pipe:\r +\r +- **[FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)**\r +\r + FastQC gives an overview of the raw sequencing data. 
Flowcells that have been overloaded or otherwise fail during sequencing can easily be determined with FastQC.\r +\r +- **[PRINSEQ](http://prinseq.sourceforge.net/)**\r +\r + Trimming and clipping of reads is performed by PRINSEQ. It is currently the most versatile raw read processor with many customization options.\r +\r +- **[ngshmmalign](https://github.com/cbg-ethz/ngshmmalign)**\r +\r + We perform the alignment of the curated NGS data using our custom ngshmmalign that takes structural variants into account. It produces multiple consensus sequences that include either majority bases or ambiguous bases.\r +\r +- **[bwa](https://github.com/lh3/bwa)**\r +\r + In order to detect specific cross-contaminations with other probes, the Burrows-Wheeler aligner is used. It quickly yields estimates for foreign genomic material in an experiment.\r + Additionally, It can be used as an alternative aligner to ngshmmalign.\r +\r +- **[MAFFT](http://mafft.cbrc.jp/alignment/software/)**\r +\r + To standardise multiple samples to the same reference genome (say HXB2 for HIV-1), the multiple sequence aligner MAFFT is employed. The multiple sequence alignment helps in determining regions of low conservation and thus makes standardisation of alignments more robust.\r +\r +- **[Samtools and bcftools](https://www.htslib.org/)**\r +\r + The Swiss Army knife of alignment postprocessing and diagnostics. 
bcftools is also used to generate consensus sequence with indels.\r +\r +- **[SmallGenomeUtilities](https://github.com/cbg-ethz/smallgenomeutilities)**\r +\r + We perform genomic liftovers to standardised reference genomes using our in-house developed python library of utilities for rewriting alignments.\r +\r +- **[ShoRAH](https://github.com/cbg-ethz/shorah)**\r +\r + ShoRAh performs SNV calling and local haplotype reconstruction by using bayesian clustering.\r +\r +- **[LoFreq](https://csb5.github.io/lofreq/)**\r +\r + LoFreq (version 2) is SNVs and indels caller from next-generation sequencing data, and can be used as an alternative engine for SNV calling.\r +\r +- **[SAVAGE](https://bitbucket.org/jbaaijens/savage) and [Haploclique](https://github.com/cbg-ethz/haploclique)**\r +\r + We use HaploClique or SAVAGE to perform global haplotype reconstruction for heterogeneous viral populations by using an overlap graph.\r +\r +## Citation\r +\r +If you use this software in your research, please cite:\r +\r +Posada-Céspedes S., Seifert D., Topolsky I., Jablonski K.P., Metzner K.J., and Beerenwinkel N. 2021.\r +"V-pipe: a computational pipeline for assessing viral genetic diversity from high-throughput sequencing data."\r +_Bioinformatics_, January. 
doi:[10.1093/bioinformatics/btab015](https://doi.org/10.1093/bioinformatics/btab015).\r +\r +## Contributions\r +\r +- [Ivan Topolsky\\* ![orcid]](https://orcid.org/0000-0002-7561-0810), [![github]](https://github.com/dryak)\r +- [Pelin Icer Baykal ![orcid]](https://orcid.org/0000-0002-9542-5292), [![github]](https://github.com/picerbaykal)\r +- [Kim Philipp Jablonski ![orcid]](https://orcid.org/0000-0002-4166-4343), [![github]](https://github.com/kpj)\r +- [Lara Fuhrmann ![orcid]](https://orcid.org/0000-0001-6405-0654), [![github]](https://github.com/LaraFuhrmann)\r +- [Uwe Schmitt ![orcid]](https://orcid.org/0000-0002-4658-0616), [![github]](https://github.com/uweschmitt)\r +- [Michal Okoniewski ![orcid]](https://orcid.org/0000-0003-4722-4506), [![github]](https://github.com/michalogit)\r +- [Monica Dragan ![orcid]](https://orcid.org/0000-0002-7719-5892), [![github]](https://github.com/monicadragan)\r +- [Susana Posada Céspedes ![orcid]](https://orcid.org/0000-0002-7459-8186), [![github]](https://github.com/sposadac)\r +- [David Seifert ![orcid]](https://orcid.org/0000-0003-4739-5110), [![github]](https://github.com/SoapZA)\r +- Tobias Marschall\r +- [Niko Beerenwinkel\\*\\* ![orcid]](https://orcid.org/0000-0002-0573-6119)\r +\r +\\* software maintainer ;\r +\\** group leader\r +\r +[github]: https://cbg-ethz.github.io/V-pipe/img/mark-github.svg\r +[orcid]: https://cbg-ethz.github.io/V-pipe/img/ORCIDiD_iconvector.svg\r +\r +## Contact\r +\r +We encourage users to use the [issue tracker](https://github.com/cbg-ethz/V-pipe/issues). 
For further enquiries, you can also contact the V-pipe Dev Team .\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/301?version=4" ; + schema1:isBasedOn "https://github.com/cbg-ethz/V-pipe.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for V-pipe (main multi-virus version)" ; + schema1:sdDatePublished "2024-07-12 13:18:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/301/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1582 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-02T09:55:11Z" ; + schema1:dateModified "2023-11-02T09:55:11Z" ; + schema1:description """\r +\r +![Logo](https://cbg-ethz.github.io/V-pipe/img/logo.svg)\r +\r +[![bio.tools](https://img.shields.io/badge/bio-tools-blue.svg)](https://bio.tools/V-Pipe)\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥7.11.0-blue.svg)](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe)\r +[![Deploy Docker image](https://github.com/cbg-ethz/V-pipe/actions/workflows/deploy-docker.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe)\r +[![Tests](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml)\r +[![Mega-Linter](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml)\r +[![License: Apache-2.0](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\r +\r +V-pipe is a workflow designed for the analysis of next generation sequencing (NGS) data from viral pathogens. 
It produces a number of results in a curated format (e.g., consensus sequences, SNV calls, local/global haplotypes).\r +V-pipe is written using the Snakemake workflow management system.\r +\r +## Usage\r +\r +Different ways of initializing V-pipe are presented below. We strongly encourage you to deploy it [using the quick install script](#using-quick-install-script), as this is our preferred method.\r +\r +To configure V-pipe refer to the documentation present in [config/README.md](config/README.md).\r +\r +V-pipe expects the input samples to be organized in a [two-level](config/README.md#samples) directory hierarchy,\r +and the sequencing reads must be provided in a sub-folder named `raw_data`. Further details can be found on the [website](https://cbg-ethz.github.io/V-pipe/usage/).\r +Check the utils subdirectory for [mass-importers tools](utils/README.md#samples-mass-importers) that can assist you in generating this hierarchy.\r +\r +We provide [virus-specific base configuration files](config/README.md#virus-base-config) which contain handy defaults for, e.g., HIV and SARS-CoV-2. 
Set the virus in the general section of the configuration file:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +```\r +\r +Also see [snakemake's documentation](https://snakemake.readthedocs.io/en/stable/executing/cli.html) to learn more about the command-line options available when executing the workflow.\r +\r +Tutorials introducing usage of V-pipe are available in the [docs/](docs/README.md) subdirectory.\r +\r +### Using quick install script\r +\r +To deploy V-pipe, use the [installation script](utils/README.md#quick-installer) with the following parameters:\r +\r +```bash\r +curl -O 'https://raw.githubusercontent.com/cbg-ethz/V-pipe/master/utils/quick_install.sh'\r +./quick_install.sh -w work\r +```\r +\r +This script will download and install miniconda, checkout the V-pipe git repository (use `-b` to specify which branch/tag) and setup a work directory (specified with `-w`) with an executable script that will execute the workflow:\r +\r +```bash\r +cd work\r +# edit config.yaml and provide samples/ directory\r +./vpipe --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Docker\r +\r +Note: the [docker image](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe) is only setup with components to run the workflow for HIV and SARS-CoV-2 virus base configurations.\r +Using V-pipe with other viruses or configurations might require internet connectivity for additional software components.\r +\r +Create `config.yaml` or `vpipe.config` and then populate the `samples/` directory.\r +For example, the following config file could be used:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +\r +output:\r + snv: true\r + local: true\r + global: false\r + visualization: true\r + QA: true\r +```\r +\r +Then execute:\r +\r +```bash\r +docker run --rm -it -v $PWD:/work ghcr.io/cbg-ethz/v-pipe:master --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Snakedeploy\r +\r +First install [mamba](https://github.com/conda-forge/miniforge#mambaforge), 
then create and activate an environment with Snakemake and Snakedeploy:\r +\r +```bash\r +mamba create -c conda-forge -c bioconda --name snakemake snakemake snakedeploy\r +conda activate snakemake\r +```\r +\r +Snakemake's [official workflow installer Snakedeploy](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe) can now be used:\r +\r +```bash\r +snakedeploy deploy-workflow https://github.com/cbg-ethz/V-pipe --tag master .\r +# edit config/config.yaml and provide samples/ directory\r +snakemake --use-conda --jobs 4 --printshellcmds --dry-run\r +```\r +\r +## Dependencies\r +\r +- **[Conda](https://conda.io/docs/index.html)**\r +\r + Conda is a cross-platform package management system and an environment manager application. Snakemake uses mamba as a package manager.\r +\r +- **[Snakemake](https://snakemake.readthedocs.io/)**\r +\r + Snakemake is the central workflow and dependency manager of V-pipe. It determines the order in which individual tools are invoked and checks that programs do not exit unexpectedly.\r +\r +- **[VICUNA](https://www.broadinstitute.org/viral-genomics/vicuna)**\r +\r + VICUNA is a _de novo_ assembly software designed for populations with high mutation rates. It is used to build an initial reference for mapping reads with ngshmmalign aligner when a `references/cohort_consensus.fasta` file is not provided. Further details can be found in the [wiki](https://github.com/cbg-ethz/V-pipe/wiki/getting-started#input-files) pages.\r +\r +### Computational tools\r +\r +Other dependencies are managed by using isolated conda environments per rule, and below we list some of the computational tools integrated in V-pipe:\r +\r +- **[FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)**\r +\r + FastQC gives an overview of the raw sequencing data. 
Flowcells that have been overloaded or otherwise fail during sequencing can easily be determined with FastQC.\r +\r +- **[PRINSEQ](http://prinseq.sourceforge.net/)**\r +\r + Trimming and clipping of reads is performed by PRINSEQ. It is currently the most versatile raw read processor with many customization options.\r +\r +- **[ngshmmalign](https://github.com/cbg-ethz/ngshmmalign)**\r +\r + We perform the alignment of the curated NGS data using our custom ngshmmalign that takes structural variants into account. It produces multiple consensus sequences that include either majority bases or ambiguous bases.\r +\r +- **[bwa](https://github.com/lh3/bwa)**\r +\r + In order to detect specific cross-contaminations with other probes, the Burrows-Wheeler aligner is used. It quickly yields estimates for foreign genomic material in an experiment.\r + Additionally, It can be used as an alternative aligner to ngshmmalign.\r +\r +- **[MAFFT](http://mafft.cbrc.jp/alignment/software/)**\r +\r + To standardise multiple samples to the same reference genome (say HXB2 for HIV-1), the multiple sequence aligner MAFFT is employed. The multiple sequence alignment helps in determining regions of low conservation and thus makes standardisation of alignments more robust.\r +\r +- **[Samtools and bcftools](https://www.htslib.org/)**\r +\r + The Swiss Army knife of alignment postprocessing and diagnostics. 
bcftools is also used to generate consensus sequence with indels.\r +\r +- **[SmallGenomeUtilities](https://github.com/cbg-ethz/smallgenomeutilities)**\r +\r + We perform genomic liftovers to standardised reference genomes using our in-house developed python library of utilities for rewriting alignments.\r +\r +- **[ShoRAH](https://github.com/cbg-ethz/shorah)**\r +\r + ShoRAh performs SNV calling and local haplotype reconstruction by using bayesian clustering.\r +\r +- **[LoFreq](https://csb5.github.io/lofreq/)**\r +\r + LoFreq (version 2) is SNVs and indels caller from next-generation sequencing data, and can be used as an alternative engine for SNV calling.\r +\r +- **[SAVAGE](https://bitbucket.org/jbaaijens/savage) and [Haploclique](https://github.com/cbg-ethz/haploclique)**\r +\r + We use HaploClique or SAVAGE to perform global haplotype reconstruction for heterogeneous viral populations by using an overlap graph.\r +\r +## Citation\r +\r +If you use this software in your research, please cite:\r +\r +Posada-Céspedes S., Seifert D., Topolsky I., Jablonski K.P., Metzner K.J., and Beerenwinkel N. 2021.\r +"V-pipe: a computational pipeline for assessing viral genetic diversity from high-throughput sequencing data."\r +_Bioinformatics_, January. 
doi:[10.1093/bioinformatics/btab015](https://doi.org/10.1093/bioinformatics/btab015).\r +\r +## Contributions\r +\r +- [Ivan Topolsky\\* ![orcid]](https://orcid.org/0000-0002-7561-0810), [![github]](https://github.com/dryak)\r +- [Pelin Icer Baykal ![orcid]](https://orcid.org/0000-0002-9542-5292), [![github]](https://github.com/picerbaykal)\r +- [Kim Philipp Jablonski ![orcid]](https://orcid.org/0000-0002-4166-4343), [![github]](https://github.com/kpj)\r +- [Lara Fuhrmann ![orcid]](https://orcid.org/0000-0001-6405-0654), [![github]](https://github.com/LaraFuhrmann)\r +- [Uwe Schmitt ![orcid]](https://orcid.org/0000-0002-4658-0616), [![github]](https://github.com/uweschmitt)\r +- [Michal Okoniewski ![orcid]](https://orcid.org/0000-0003-4722-4506), [![github]](https://github.com/michalogit)\r +- [Monica Dragan ![orcid]](https://orcid.org/0000-0002-7719-5892), [![github]](https://github.com/monicadragan)\r +- [Susana Posada Céspedes ![orcid]](https://orcid.org/0000-0002-7459-8186), [![github]](https://github.com/sposadac)\r +- [David Seifert ![orcid]](https://orcid.org/0000-0003-4739-5110), [![github]](https://github.com/SoapZA)\r +- Tobias Marschall\r +- [Niko Beerenwinkel\\*\\* ![orcid]](https://orcid.org/0000-0002-0573-6119)\r +\r +\\* software maintainer ;\r +\\** group leader\r +\r +[github]: https://cbg-ethz.github.io/V-pipe/img/mark-github.svg\r +[orcid]: https://cbg-ethz.github.io/V-pipe/img/ORCIDiD_iconvector.svg\r +\r +## Contact\r +\r +We encourage users to use the [issue tracker](https://github.com/cbg-ethz/V-pipe/issues). 
For further enquiries, you can also contact the V-pipe Dev Team .\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/301?version=4" ; + schema1:keywords "Alignment, Assembly, covid-19, Genomics, INDELs, rna, SNPs, variant_calling, workflow" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "V-pipe (main multi-virus version)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/301?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-07-12 13:22:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7214 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:11Z" ; + schema1:dateModified "2024-06-11T12:55:11Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=8" ; + schema1:version 8 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.196.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook ABC MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/196/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 101184 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T08:42:07Z" ; + schema1:dateModified "2023-04-14T08:43:04Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/196?version=4" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook ABC MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_wf_amber_md_setup/blob/master/biobb_wf_amber_md_setup/notebooks/abcsetup/biobb_amber_ABC_setup.ipynb" ; + schema1:version 3 . + + a schema1:Dataset ; + schema1:datePublished "2023-09-26T14:26:12.939323" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/average-bigwig-between-replicates" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "average-bigwig-between-replicates/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.12" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-07-12 13:18:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3742 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:00Z" ; + schema1:dateModified "2024-06-11T12:55:00Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-07-12 13:18:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=10" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8767 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:39Z" ; + schema1:dateModified "2024-06-11T12:54:39Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=10" ; + schema1:version 10 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=22" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-07-12 13:19:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=22" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 19626 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:16Z" ; + schema1:dateModified "2024-06-11T12:55:16Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=22" ; + schema1:version 22 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.56.7" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_protein-complex_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:34:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/56/ro_crate?version=7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 68528 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-04T14:23:59Z" ; + schema1:dateModified "2024-05-14T10:10:00Z" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/56?version=6" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + 
schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_protein-complex_md_setup/blob/main/biobb_wf_protein-complex_md_setup/notebooks/biobb_Protein-Complex_MDsetup_tutorial.ipynb" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=18" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-07-12 13:21:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=18" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17866 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:59Z" ; + schema1:dateModified "2024-06-11T12:54:59Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=18" ; + schema1:version 18 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Macromolecular Coarse-Grained Flexibility (FlexServ) tutorial using BioExcel Building Blocks (biobb)\r +\r +This tutorial aims to illustrate the process of generating protein conformational ensembles from 3D structures and analysing its molecular flexibility, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.551.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_flexserv" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Macromolecular Coarse-Grained Flexibility tutorial" ; + schema1:sdDatePublished "2024-07-12 13:32:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/551/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 109981 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-04T15:24:02Z" ; + schema1:dateModified "2024-05-14T10:14:52Z" ; + schema1:description """# Macromolecular Coarse-Grained Flexibility (FlexServ) tutorial using BioExcel Building Blocks (biobb)\r +\r +This tutorial aims to illustrate the process of generating protein conformational ensembles from 3D structures and analysing its molecular flexibility, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/551?version=1" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Macromolecular Coarse-Grained Flexibility tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_flexserv/blob/main/biobb_wf_flexserv/notebooks/biobb_wf_flexserv.ipynb" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/986?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/fetchngs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fetchngs" ; + schema1:sdDatePublished "2024-07-12 13:21:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/986/ro_crate?version=7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7284 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:53Z" ; + schema1:dateModified "2024-06-11T12:54:53Z" ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/986?version=13" ; + schema1:keywords "ddbj, download, ena, FASTQ, geo, sra, synapse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fetchngs" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/986?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/383?version=2" ; + schema1:isBasedOn "https://gitlab.bsc.es/lrodrig1/structuralvariants_poc/-/tree/1.1.3/structuralvariants/nextflow" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNV_pipeline" ; + schema1:sdDatePublished "2024-07-12 13:35:13 +0100" ; + schema1:url "https://workflowhub.eu/workflows/383/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4406 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-09-16T14:31:19Z" ; + schema1:dateModified "2022-09-16T14:31:19Z" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/383?version=2" ; + schema1:keywords "CODEX2, TransBioNet, ExomeDepth, variant calling, cancer, manta, GRIDS, structural variants" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CNV_pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/383?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). 
\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.281.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_protein_complex_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:34:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/281/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8792 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T08:07:07Z" ; + schema1:dateModified "2023-04-14T08:08:08Z" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/281?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher 
; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_protein_complex_md_setup/python/workflow.py" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Alternative splicing analysis using RNA-seq." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1018?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/rnasplice" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnasplice" ; + schema1:sdDatePublished "2024-07-12 13:19:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1018/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13851 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:13Z" ; + schema1:dateModified "2024-06-11T12:55:13Z" ; + schema1:description "Alternative splicing analysis using RNA-seq." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1018?version=4" ; + schema1:keywords "alternative-splicing, rna, rna-seq, splicing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnasplice" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1018?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Alignment, assembly and annotation of RNASEQ reads as well as annotation of generated transcripts." 
; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/39?version=1" ; + schema1:isBasedOn "https://usegalaxy.eu/u/ambarishk/w/covid-19-stringtie-assembly-and-annotation" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for StringTie assembly and annotation" ; + schema1:sdDatePublished "2024-07-12 13:37:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/39/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 41223 ; + schema1:dateCreated "2020-06-18T23:13:25Z" ; + schema1:dateModified "2023-02-13T14:06:46Z" ; + schema1:description "Alignment, assembly and annotation of RNASEQ reads as well as annotation of generated transcripts." ; + schema1:image ; + schema1:keywords "Alignment, Assembly, Annotation, RNASEQ, StringTie, covid-19" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "StringTie assembly and annotation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/39?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 201563 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "This project is about the automated quantification of wound healing in high-throughput microscopy scratch assays." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/782?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Wound Healing Scrath Assay Image Analysis" ; + schema1:sdDatePublished "2024-07-12 13:23:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/782/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3367 ; + schema1:creator ; + schema1:dateCreated "2024-03-06T09:42:37Z" ; + schema1:dateModified "2024-03-06T09:56:18Z" ; + schema1:description "This project is about the automated quantification of wound healing in high-throughput microscopy scratch assays." ; + schema1:keywords "imageJ, Bioimage" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Wound Healing Scrath Assay Image Analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/782?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.129.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (Fpocket)" ; + schema1:sdDatePublished "2024-07-12 13:33:53 
+0100" ; + schema1:url "https://workflowhub.eu/workflows/129/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 43078 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-04-14T08:21:56Z" ; + schema1:dateModified "2023-04-14T08:23:50Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in 
Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/129?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (Fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_virtual-screening/blob/master/biobb_wf_virtual-screening/notebooks/fpocket/wf_vs_fpocket.ipynb" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=20" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-07-12 13:20:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=20" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9565 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:04Z" ; + schema1:dateModified "2024-06-11T12:55:04Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=20" ; + schema1:version 20 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-07-12 13:19:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5698 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:14Z" ; + schema1:dateModified "2024-06-11T12:55:14Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/978?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/demultiplex" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/demultiplex" ; + schema1:sdDatePublished "2024-07-12 13:21:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/978/ro_crate?version=7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10118 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:47Z" ; + schema1:dateModified "2024-06-11T12:54:47Z" ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/978?version=7" ; + schema1:keywords "bases2fastq, bcl2fastq, demultiplexing, elementbiosciences, illumina" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/demultiplex" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/978?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Google DeepVariant variant caller as a Nextflow pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/977?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/deepvariant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/deepvariant" ; + schema1:sdDatePublished "2024-07-12 13:21:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/977/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4292 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:47Z" ; + schema1:dateModified "2024-06-11T12:54:47Z" ; + schema1:description "Google DeepVariant variant caller as a Nextflow pipeline" ; + schema1:keywords "deep-variant, DNA, google, variant-calling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/deepvariant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/977?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/amber-protein-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r 
+Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.297.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Amber Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/297/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 83494 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-25T10:10:43Z" ; + schema1:dateModified "2022-11-23T13:28:16Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. 
Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/amber-protein-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/297?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Amber Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_md_setup/galaxy/biobb_wf_amber_md_setup.ga" ; + schema1:version 1 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-12-05T16:19:14.974403" ; + schema1:hasPart , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/baredsc" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + ; + schema1:name "baredsc/baredSC-1d-logNorm" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "baredsc/baredSC-2d-logNorm" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/baredsc" ; + schema1:version "0.2" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=14" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-07-12 13:20:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=14" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5996 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:04Z" ; + schema1:dateModified "2024-06-11T12:55:04Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=14" ; + schema1:version 14 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1023?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/smrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/smrnaseq" ; + schema1:sdDatePublished "2024-07-12 13:18:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1023/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9333 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:18Z" ; + schema1:dateModified "2024-06-11T12:55:18Z" ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/1023?version=10" ; + schema1:keywords "small-rna, smrna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/smrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1023?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/278?version=10" ; + schema1:isBasedOn "https://gitlab.bsc.es/lrodrig1/structuralvariants_poc/-/tree/1.1.3/structuralvariants/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNV_pipeline" ; + schema1:sdDatePublished "2024-07-12 13:35:30 +0100" ; + schema1:url "https://workflowhub.eu/workflows/278/ro_crate?version=10" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9694 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9435 ; + schema1:creator , + ; + schema1:dateCreated "2022-09-09T10:27:12Z" ; + schema1:dateModified "2022-09-09T10:40:03Z" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/278?version=10" ; + schema1:keywords "cancer, CODEX2, ExomeDepth, manta, TransBioNet, variant calling, GRIDSS, structural variants" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CNV_pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/278?version=10" ; + schema1:version 10 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 72914 . + + a schema1:Dataset ; + schema1:datePublished "2023-01-13T10:44:42.687400" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.3" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=15" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-07-12 13:19:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=15" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17571 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:16Z" ; + schema1:dateModified "2024-06-11T12:55:16Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=15" ; + schema1:version 15 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "cccccc" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/456?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Formula" ; + schema1:sdDatePublished "2024-07-12 13:34:09 +0100" ; + schema1:url "https://workflowhub.eu/workflows/456/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6618 ; + schema1:dateCreated "2023-04-14T01:48:47Z" ; + schema1:dateModified "2023-04-14T01:50:33Z" ; + schema1:description "cccccc" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Formula" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/456?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=20" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-07-12 13:22:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=20" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10486 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:13Z" ; + schema1:dateModified "2024-06-11T12:55:13Z" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=20" ; + schema1:version 20 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# CWL-assembly\r +[![Codacy Badge](https://api.codacy.com/project/badge/Grade/684724bbc0134960ab41748f4a4b732f)](https://www.codacy.com/app/mb1069/CWL-assembly?utm_source=github.com&utm_medium=referral&utm_content=EBI-Metagenomics/CWL-assembly&utm_campaign=Badge_Grade)\r +[![Build Status](https://travis-ci.org/EBI-Metagenomics/CWL-assembly.svg?branch=develop)](https://travis-ci.org/EBI-Metagenomics/CWL-assembly)\r +\r +## Description\r +\r +This repository contains two workflows for metagenome and metatranscriptome assembly of short read data. MetaSPAdes is used as default for paired-end data, and MEGAHIT for single-end data and co-assemblies. MEGAHIT can be specified as the default assembler in the yaml file if preferred. Steps include:\r +\r + * _QC_: removal of short reads, low quality regions, adapters and host decontamination\r + * _Assembly_: with metaSPADES or MEGAHIT\r + * _Post-assembly_: Host and PhiX decontamination, contig length filter (500bp), stats generation\r +\r +## Requirements - How to install\r +\r +This pipeline requires a conda environment with cwltool, blastn, and metaspades. If created with `requirements.yml`, the environment will be called `cwl_assembly`. 
\r +\r +```\r +conda env create -f requirements.yml\r +conda activate cwl_assembly\r +pip install cwltool==3.1.20230601100705\r +```\r +\r +## Databases\r +\r +You will need to pre-download fasta files for host decontamination and generate the following databases accordingly:\r + * bwa index\r + * blast index\r + \r +Specify the locations in the yaml file when running the pipeline.\r +\r +## Main pipeline executables\r +\r + * `src/workflows/metagenome_pipeline.cwl`\r + * `src/workflows/metatranscriptome_pipeline.cwl`\r +\r +## Example command\r +\r +```cwltool --singularity --outdir ${OUTDIR} ${CWL} ${YML}```\r +\r +`$CWL` is going to be one of the executables mentioned above\r +`$YML` should be a config yaml file including entries among what follows. \r +You can find a yml template in the `examples` folder.\r +\r +## Example output directory structure\r +```\r +Root directory\r + ├── megahit\r + │ └── 001 -------------------------------- Assembly root directory\r + │ ├── assembly_stats.json ------------ Human-readable assembly stats file\r + │ ├── coverage.tab ------------------- Coverage file\r + │ ├── log ---------------------------- CwlToil+megahit output log\r + | ├── options.json ------------------- Megahit input options\r + │ ├── SRR6257420.fasta.gz ------------ Archived and trimmed assembly\r + │ └── SRR6257420.fasta.gz.md5 -------- MD5 hash of above archive\r + ├── metaspades\r + │ └── 001 -------------------------------- Assembly root directory\r + │ ├── assembly_graph.fastg ----------- Assembly graph\r + │ ├── assembly_stats.json ------------ Human-readable assembly stats file\r + │ ├── coverage.tab ------------------- Coverage file\r + | ├── params.txt --------------------- Metaspades input options\r + │ ├── spades.log --------------------- Metaspades output log\r + │ ├── SRR6257420.fasta.gz ------------ Archived and trimmed assembly\r + │ └── SRR6257420.fasta.gz.md5 -------- MD5 hash of above archive\r + │ \r + └── raw 
------------------------------------ Raw data directory\r + ├── SRR6257420.fastq.qc_stats.tsv ------ Stats for cleaned fastq\r + ├── SRR6257420_fastp_clean_1.fastq.gz -- Cleaned paired-end file_1\r + └── SRR6257420_fastp_clean_2.fastq.gz -- Cleaned paired-end file_2\r +```\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/474?version=2" ; + schema1:isBasedOn "https://github.com/EBI-Metagenomics/CWL-assembly.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Metagenome and metatranscriptome assembly in CWL" ; + schema1:sdDatePublished "2024-07-12 13:33:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/474/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6441 ; + schema1:dateCreated "2023-06-21T10:41:38Z" ; + schema1:dateModified "2023-06-21T10:41:38Z" ; + schema1:description """# CWL-assembly\r +[![Codacy Badge](https://api.codacy.com/project/badge/Grade/684724bbc0134960ab41748f4a4b732f)](https://www.codacy.com/app/mb1069/CWL-assembly?utm_source=github.com&utm_medium=referral&utm_content=EBI-Metagenomics/CWL-assembly&utm_campaign=Badge_Grade)\r +[![Build Status](https://travis-ci.org/EBI-Metagenomics/CWL-assembly.svg?branch=develop)](https://travis-ci.org/EBI-Metagenomics/CWL-assembly)\r +\r +## Description\r +\r +This repository contains two workflows for metagenome and metatranscriptome assembly of short read data. MetaSPAdes is used as default for paired-end data, and MEGAHIT for single-end data and co-assemblies. MEGAHIT can be specified as the default assembler in the yaml file if preferred. 
Steps include:\r +\r + * _QC_: removal of short reads, low quality regions, adapters and host decontamination\r + * _Assembly_: with metaSPADES or MEGAHIT\r + * _Post-assembly_: Host and PhiX decontamination, contig length filter (500bp), stats generation\r +\r +## Requirements - How to install\r +\r +This pipeline requires a conda environment with cwltool, blastn, and metaspades. If created with `requirements.yml`, the environment will be called `cwl_assembly`. \r +\r +```\r +conda env create -f requirements.yml\r +conda activate cwl_assembly\r +pip install cwltool==3.1.20230601100705\r +```\r +\r +## Databases\r +\r +You will need to pre-download fasta files for host decontamination and generate the following databases accordingly:\r + * bwa index\r + * blast index\r + \r +Specify the locations in the yaml file when running the pipeline.\r +\r +## Main pipeline executables\r +\r + * `src/workflows/metagenome_pipeline.cwl`\r + * `src/workflows/metatranscriptome_pipeline.cwl`\r +\r +## Example command\r +\r +```cwltool --singularity --outdir ${OUTDIR} ${CWL} ${YML}```\r +\r +`$CWL` is going to be one of the executables mentioned above\r +`$YML` should be a config yaml file including entries among what follows. 
\r +You can find a yml template in the `examples` folder.\r +\r +## Example output directory structure\r +```\r +Root directory\r + ├── megahit\r + │ └── 001 -------------------------------- Assembly root directory\r + │ ├── assembly_stats.json ------------ Human-readable assembly stats file\r + │ ├── coverage.tab ------------------- Coverage file\r + │ ├── log ---------------------------- CwlToil+megahit output log\r + | ├── options.json ------------------- Megahit input options\r + │ ├── SRR6257420.fasta.gz ------------ Archived and trimmed assembly\r + │ └── SRR6257420.fasta.gz.md5 -------- MD5 hash of above archive\r + ├── metaspades\r + │ └── 001 -------------------------------- Assembly root directory\r + │ ├── assembly_graph.fastg ----------- Assembly graph\r + │ ├── assembly_stats.json ------------ Human-readable assembly stats file\r + │ ├── coverage.tab ------------------- Coverage file\r + | ├── params.txt --------------------- Metaspades input options\r + │ ├── spades.log --------------------- Metaspades output log\r + │ ├── SRR6257420.fasta.gz ------------ Archived and trimmed assembly\r + │ └── SRR6257420.fasta.gz.md5 -------- MD5 hash of above archive\r + │ \r + └── raw ------------------------------------ Raw data directory\r + ├── SRR6257420.fastq.qc_stats.tsv ------ Stats for cleaned fastq\r + ├── SRR6257420_fastp_clean_1.fastq.gz -- Cleaned paired-end file_1\r + └── SRR6257420_fastp_clean_2.fastq.gz -- Cleaned paired-end file_2\r +```\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/474?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Metagenome and metatranscriptome assembly in CWL" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/474?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description "Genome assembly: Flye-based WF for highly repetitive genomes [Schmid et al. NAR 2018]" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/51?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ONT -- Assembly-Flye-AhrensLab" ; + schema1:sdDatePublished "2024-07-12 13:37:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/51/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 18132 ; + schema1:creator ; + schema1:dateCreated "2020-08-05T13:01:18Z" ; + schema1:dateModified "2023-02-13T14:06:46Z" ; + schema1:description "Genome assembly: Flye-based WF for highly repetitive genomes [Schmid et al. NAR 2018]" ; + schema1:keywords "name:ONT, ONT" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ONT -- Assembly-Flye-AhrensLab" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/51?version=1" ; + schema1:version 1 ; + ns1:input <#ont____assembly_flye_ahrenslab-inputs-ftp://biftp.informatik.uni-freiburg.de/pub/T0/Ahrens/SRR6982805.fastq>, + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """Workflow for Creating a large disease network from various datasets and databases for IBM, and applying the active subnetwork identification method MOGAMUN.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/681?version=1" ; + schema1:isBasedOn "https://github.com/jdwijnbergen/IBM_ASI_workflow.git" ; + schema1:license "notspecified" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Inclusion Body Myositis Active Subnetwork Identification Workflow" ; + schema1:sdDatePublished "2024-07-12 13:24:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/681/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 27757 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5790 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-27T12:52:50Z" ; + schema1:dateModified "2023-11-27T15:48:40Z" ; + schema1:description """Workflow for Creating a large disease network from various datasets and databases for IBM, and applying the active subnetwork identification method MOGAMUN.\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/681?version=6" ; + schema1:keywords "Bioinformatics, CWL, Genomics, Transcriptomics, Protein-Protein Interaction" ; + schema1:license "https://choosealicense.com/no-permission/" ; + schema1:name "Inclusion Body Myositis Active Subnetwork Identification Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/681?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """[![Snakemake](https://img.shields.io/badge/snakemake-≥7.0.0-brightgreen.svg?style=flat)](https://snakemake.readthedocs.io)\r +\r +\r +# About SnakeMAGs\r +SnakeMAGs is a workflow to reconstruct prokaryotic genomes from metagenomes. The main purpose of SnakeMAGs is to process Illumina data from raw reads to metagenome-assembled genomes (MAGs).\r +SnakeMAGs is efficient, easy to handle and flexible to different projects. The workflow is CeCILL licensed, implemented in Snakemake (run on multiple cores) and available for Linux.\r +SnakeMAGs performed eight main steps:\r +- Quality filtering of the reads\r +- Adapter trimming\r +- Filtering of the host sequences (optional)\r +- Assembly\r +- Binning\r +- Evaluation of the quality of the bins\r +- Classification of the MAGs\r +- Estimation of the relative abundance of the MAGs\r +\r +\r +![scheme of workflow](SnakeMAGs_schema.jpg?raw=true)\r +\r +# How to use SnakeMAGs\r +## Install conda\r +The easiest way to install and run SnakeMAGs is to use [conda](https://www.anaconda.com/products/distribution). 
These package managers will help you to easily install [Snakemake](https://snakemake.readthedocs.io/en/stable/getting_started/installation.html).\r +\r +## Install and activate Snakemake environment\r +Note: The workflow was developed with Snakemake 7.0.0\r +```\r +conda activate\r +\r +# First, set up your channel priorities\r +conda config --add channels defaults\r +conda config --add channels bioconda\r +conda config --add channels conda-forge\r +\r +# Then, create a new environment for the Snakemake version you require\r +conda create -n snakemake_7.0.0 snakemake=7.0.0\r +\r +# And activate it\r +conda activate snakemake_7.0.0\r +```\r +\r +Alternatively, you can also install Snakemake via mamba:\r +```\r +# If you do not have mamba yet on your machine, you can install it with:\r +conda install -n base -c conda-forge mamba\r +\r +# Then you can install Snakemake\r +conda activate base\r +mamba create -c conda-forge -c bioconda -n snakemake snakemake\r +\r +# And activate it\r +conda activate snakemake\r +\r +```\r +\r +## SnakeMAGs executable\r +The easiest way to procure SnakeMAGs and its related files is to clone the repository using git:\r +```\r +git clone https://github.com/Nachida08/SnakeMAGs.git\r +```\r +Alternatively, you can download the relevant files:\r +```\r +wget https://github.com/Nachida08/SnakeMAGs/blob/main/SnakeMAGs.smk https://github.com/Nachida08/SnakeMAGs/blob/main/config.yaml\r +```\r +\r +## SnakeMAGs input files\r +- Illumina paired-end reads in FASTQ.\r +- Adapter sequence file ([adapter.fa](https://github.com/Nachida08/SnakeMAGs/blob/main/adapters.fa)).\r +- Host genome sequences in FASTA (if host_genome: "yes"), in case you work with host-associated metagenomes (e.g. human gut metagenome).\r +\r +## Download Genome Taxonomy Database (GTDB)\r +GTDB-Tk requires ~66G+ of external data (GTDB) that need to be downloaded and unarchived. 
Because this database is voluminous, we let you decide where you want to store it.\r +SnakeMAGs do not download automatically GTDB, you have to do it:\r +\r +```\r +#Download the latest release (tested with release207)\r +#Note: SnakeMAGs uses GTDBtk v2.1.0 and therefore require release 207 as minimum version. See https://ecogenomics.github.io/GTDBTk/installing/index.html#installing for details.\r +wget https://data.gtdb.ecogenomic.org/releases/latest/auxillary_files/gtdbtk_v2_data.tar.gz\r +#Decompress\r +tar -xzvf *tar.gz\r +#This will create a folder called release207_v2\r +```\r +All you have to do now is to indicate the path to the database folder (in our example, the folder is called release207_v2) in the config file, Classification section.\r +\r +## Download the GUNC database (required if gunc: "yes")\r +GUNC accepts either a progenomes or GTDB based reference database. Both can be downloaded using the ```gunc download_db``` command. For our study we used the default proGenome-derived GUNC database. It requires less resources with similar performance.\r +\r +```\r +conda activate\r +# Install and activate GUNC environment\r +conda create --prefix /path/to/gunc_env\r +conda install -c bioconda metabat2 --prefix /path/to/gunc_env\r +source activate /path/to/gunc_env\r +\r +#Download the proGenome-derived GUNC database (tested with gunc_db_progenomes2.1)\r +#Note: SnakeMAGs uses GUNC v1.0.5\r +gunc download_db -db progenomes /path/to/GUNC_DB\r +```\r +All you have to do now is to indicate the path to the GUNC database file in the config file, Bins quality section.\r +\r +## Edit config file\r +You need to edit the config.yaml file. 
In particular, you need to set the correct paths: for the working directory, to specify where are your fastq files, where you want to place the conda environments (that will be created using the provided .yaml files available in [SnakeMAGs_conda_env directory](https://github.com/Nachida08/SnakeMAGs/tree/main/SnakeMAGs_conda_env)), where are the adapters, where is GTDB and optionally where is the GUNC database and where is your host genome reference.\r +\r +Lastly, you need to allocate the proper computational resources (threads, memory) for each of the main steps. These can be optimized according to your hardware.\r +\r +\r +\r +Here is an example of a config file:\r +\r +```\r +#####################################################################################################\r +##### _____ ___ _ _ _ ______ __ __ _______ _____ #####\r +##### / ___| | \\ | | /\\ | | / / | ____| | \\ / | /\\ / _____| / ___| #####\r +##### | (___ | |\\ \\ | | / \\ | |/ / | |____ | \\/ | / \\ | | __ | (___ #####\r +##### \\___ \\ | | \\ \\| | / /\\ \\ | |\\ \\ | ____| | |\\ /| | / /\\ \\ | | |_ | \\___ \\ #####\r +##### ____) | | | \\ | / /__\\ \\ | | \\ \\ | |____ | | \\/ | | / /__\\ \\ | |____|| ____) | #####\r +##### |_____/ |_| \\__| /_/ \\_\\ |_| \\_\\ |______| |_| |_| /_/ \\_\\ \\______/ |_____/ #####\r +##### #####\r +#####################################################################################################\r +\r +############################\r +### Execution parameters ###\r +############################\r +\r +working_dir: /path/to/working/directory/ #The main directory for the project\r +raw_fastq: /path/to/raw_fastq/ #The directory that contains all the fastq files of all the samples (eg. sample1_R1.fastq & sample1_R2.fastq, sample2_R1.fastq & sample2_R2.fastq...)\r +suffix_1: "_R1.fastq" #Main type of suffix for forward reads file (eg. 
_1.fastq or _R1.fastq or _r1.fastq or _1.fq or _R1.fq or _r1.fq )\r +suffix_2: "_R2.fastq" #Main type of suffix for reverse reads file (eg. _2.fastq or _R2.fastq or _r2.fastq or _2.fq or _R2.fq or _r2.fq )\r +\r +###########################\r +### Conda environnemnts ###\r +###########################\r +\r +conda_env: "/path/to/SnakeMAGs_conda_env/" #Path to the provided SnakeMAGs_conda_env directory which contains the yaml file for each conda environment\r +\r +#########################\r +### Quality filtering ###\r +#########################\r +email: name.surname@your-univ.com #Your e-mail address\r +threads_filter: 10 #The number of threads to run this process. To be adjusted according to your hardware\r +resources_filter: 150 #Memory according to tools need (in GB)\r +\r +########################\r +### Adapter trimming ###\r +########################\r +adapters: /path/to/working/directory/adapters.fa #A fasta file contanning a set of various Illumina adaptors (this file is provided and is also available on github)\r +trim_params: "2:40:15" #For further details, see the Trimmomatic documentation\r +threads_trim: 10 #The number of threads to run this process. To be adjusted according to your hardware\r +resources_trim: 150 #Memory according to tools need (in GB)\r +\r +######################\r +### Host filtering ###\r +######################\r +host_genome: "yes" #yes or no. An optional step for host-associated samples (eg. termite, human, plant...)\r +threads_bowtie2: 50 #The number of threads to run this process. To be adjusted according to your hardware\r +host_genomes_directory: /path/to/working/host_genomes/ #the directory where the host genome is stored\r +host_genomes: /path/to/working/host_genomes/host_genomes.fa #A fasta file containing the DNA sequences of the host genome(s)\r +threads_samtools: 50 #The number of threads to run this process. 
To be adjusted according to your hardware\r +resources_host_filtering: 150 #Memory according to tools need (in GB)\r +\r +################\r +### Assembly ###\r +################\r +threads_megahit: 50 #The number of threads to run this process. To be adjusted according to your hardware\r +min_contig_len: 1000 #Minimum length (in bp) of the assembled contigs\r +k_list: "21,31,41,51,61,71,81,91,99,109,119" #Kmer size (for further details, see the megahit documentation)\r +resources_megahit: 250 #Memory according to tools need (in GB)\r +\r +###############\r +### Binning ###\r +###############\r +threads_bwa: 50 #The number of threads to run this process. To be adjusted according to your hardware\r +resources_bwa: 150 #Memory according to tools need (in GB)\r +threads_samtools: 50 #The number of threads to run this process. To be adjusted according to your hardware\r +resources_samtools: 150 #Memory according to tools need (in GB)\r +seed: 19860615 #Seed number for reproducible results\r +threads_metabat: 50 #The number of threads to run this process. To be adjusted according to your hardware\r +minContig: 2500 #Minimum length (in bp) of the contigs\r +resources_binning: 250 #Memory according to tools need (in GB)\r +\r +####################\r +### Bins quality ###\r +####################\r +#checkM\r +threads_checkm: 50 #The number of threads to run this process. To be adjusted according to your hardware\r +resources_checkm: 250 #Memory according to tools need (in GB)\r +#bins_quality_filtering\r +completion: 50 #The minimum completion rate of bins\r +contamination: 10 #The maximum contamination rate of bins\r +parks_quality_score: "yes" #yes or no. If yes bins are filtered according to the Parks quality score (completion-5*contamination >= 50)\r +#GUNC\r +gunc: "yes" #yes or no. An optional step to detect and discard chimeric and contaminated genomes using the GUNC tool\r +threads_gunc: 50 #The number of threads to run this process. 
To be adjusted according to your hardware\r +resources_gunc: 250 #Memory according to tools need (in GB)\r +GUNC_db: /path/to/GUNC_DB/gunc_db_progenomes2.1.dmnd #Path to the downloaded GUNC database (see the readme file)\r +\r +######################\r +### Classification ###\r +######################\r +GTDB_data_ref: /path/to/downloaded/GTDB #Path to uncompressed GTDB-Tk reference data (GTDB)\r +threads_gtdb: 10 #The number of threads to run this process. To be adjusted according to your hardware\r +resources_gtdb: 250 #Memory according to tools need (in GB)\r +\r +##################\r +### Abundances ###\r +##################\r +threads_coverM: 10 #The number of threads to run this process. To be adjusted according to your hardware\r +resources_coverM: 150 #Memory according to tools need (in GB)\r +```\r +# Run SnakeMAGs\r +If you are using a workstation with Ubuntu (tested on Ubuntu 22.04):\r +```{bash}\r +snakemake --cores 30 --snakefile SnakeMAGs.smk --use-conda --conda-prefix /path/to/SnakeMAGs_conda_env/ --configfile /path/to/config.yaml --keep-going --latency-wait 180\r +```\r +\r +If you are working on a cluster with Slurm (tested with version 18.08.7):\r +```{bash}\r +snakemake --snakefile SnakeMAGs.smk --cluster 'sbatch -p --mem -c -o "cluster_logs/{wildcards}.{rule}.{jobid}.out" -e "cluster_logs/{wildcards}.{rule}.{jobid}.err" ' --jobs --use-conda --conda-frontend conda --conda-prefix /path/to/SnakeMAGs_conda_env/ --jobname "{rule}.{wildcards}.{jobid}" --latency-wait 180 --configfile /path/to/config.yaml --keep-going\r +```\r +\r +If you are working on a cluster with SGE (tested with version 8.1.9):\r +```{bash}\r +snakemake --snakefile SnakeMAGs.smk --cluster "qsub -cwd -V -q -pe thread {threads} -e cluster_logs/{rule}.e{jobid} -o cluster_logs/{rule}.o{jobid}" --jobs --use-conda --conda-frontend conda --conda-prefix /path/to/SnakeMAGs_conda_env/ --jobname "{rule}.{wildcards}.{jobid}" --latency-wait 180 --configfile /path/to/config.yaml --keep-going\r 
+```\r +\r +\r +# Test\r +We provide you a small data set in the [test](https://github.com/Nachida08/SnakeMAGs/tree/main/test) directory which will allow you to validate your instalation and take your first steps with SnakeMAGs. This data set is a subset from [ZymoBiomics Mock Community](https://www.zymoresearch.com/blogs/blog/zymobiomics-microbial-standards-optimize-your-microbiomics-workflow) (250K reads) used in this tutoriel [metagenomics_tutorial](https://github.com/pjtorres/metagenomics_tutorial).\r +\r +1. Before getting started make sure you have cloned the SnakeMAGs repository or you have downloaded all the necessary files (SnakeMAGs.smk, config.yaml, chr19.fa.gz, insub732_2_R1.fastq.gz, insub732_2_R2.fastq.gz). See the [SnakeMAGs executable](#snakemags-executable) section.\r +2. Unzip the fastq files and the host sequences file.\r +```\r +gunzip fastqs/insub732_2_R1.fastq.gz fastqs/insub732_2_R2.fastq.gz host_genomes/chr19.fa.gz\r +```\r +3. For better organisation put all the read files in the same directory (eg. fastqs) and the host sequences file in a separate directory (eg. host_genomes)\r +4. Edit the config file (see [Edit config file](#edit-config-file) section)\r +5. Run the test (see [Run SnakeMAGs](#run-snakemags) section)\r +\r +Note: the analysis of these files took 1159.32 secondes to complete on a Ubuntu 22.04 LTS with an Intel(R) Xeon(R) Silver 4210 CPU @ 2.20GHz x 40 processor, 96GB of RAM.\r +\r +# Genome reference for host reads filtering\r +For host-associated samples, one can remove host sequences from the metagenomic reads by mapping these reads against a reference genome. In the case of termite gut metagenomes, we are providing [here](https://zenodo.org/record/6908287#.YuAdFXZBx8M) the relevant files (fasta and index files) from termite genomes.\r +\r +Upon request, we can help you to generate these files for your own reference genome and make them available to the community.\r +\r +NB. 
These steps of mapping generate voluminous files such as .bam and .sam. Depending on your disk space, you might want to delete these files after use.\r +\r +\r +# Use case\r +During the test phase of the development of SnakeMAGs, we used this workflow to process 10 publicly available termite gut metagenomes generated by Illumina sequencing, to ultimately reconstruct prokaryotic MAGs. These metagenomes were retrieved from the NCBI database using the following accession numbers: SRR10402454; SRR14739927; SRR8296321; SRR8296327; SRR8296329; SRR8296337; SRR8296343; DRR097505; SRR7466794; SRR7466795. They come from five different studies: Waidele et al, 2019; Tokuda et al, 2018; Romero Victorica et al, 2020; Moreira et al, 2021; and Calusinska et al, 2020.\r +\r +## Download the Illumina pair-end reads\r +We use fasterq-dump tool to extract data in FASTQ-format from SRA-accessions. It is a commandline-tool which offers a faster solution for downloading those large files.\r +\r +```\r +# Install and activate sra-tools environment\r +## Note: For this study we used sra-tools 2.11.0\r +\r +conda activate\r +conda install -c bioconda sra-tools\r +conda activate sra-tools\r +\r +# Download fastqs in a single directory\r +mkdir raw_fastq\r +cd raw_fastq\r +fasterq-dump --threads --skip-technical --split-3\r +```\r +\r +## Download Genome reference for host reads filtering\r +```\r +mkdir host_genomes\r +cd host_genomes\r +wget https://zenodo.org/record/6908287/files/termite_genomes.fasta.gz\r +gunzip termite_genomes.fasta.gz\r +```\r +\r +## Edit the config file\r +See [Edit config file](#edit-config-file) section.\r +\r +## Run SnakeMAGs\r +```\r +conda activate snakemake_7.0.0\r +mkdir cluster_logs\r +snakemake --snakefile SnakeMAGs.smk --cluster 'sbatch -p --mem -c -o "cluster_logs/{wildcards}.{rule}.{jobid}.out" -e "cluster_logs/{wildcards}.{rule}.{jobid}.err" ' --jobs --use-conda --conda-frontend conda --conda-prefix /path/to/SnakeMAGs_conda_env/ --jobname 
"{rule}.{wildcards}.{jobid}" --latency-wait 180 --configfile /path/to/config.yaml --keep-going\r +```\r +\r +## Study results\r +The MAGs reconstructed from each metagenome and their taxonomic classification are available in this [repository](https://doi.org/10.5281/zenodo.7661004).\r +\r +# Citations\r +\r +If you use SnakeMAGs, please cite:\r +> Tadrent N, Dedeine F and Hervé V. SnakeMAGs: a simple, efficient, flexible and scalable workflow to reconstruct prokaryotic genomes from metagenomes [version 2; peer review: 2 approved]. F1000Research 2023, 11:1522 (https://doi.org/10.12688/f1000research.128091.2)\r +\r +\r +Please also cite the dependencies:\r +- [Snakemake](https://doi.org/10.12688/f1000research.29032.2) : Mölder, F., Jablonski, K. P., Letcher, B., Hall, M. B., Tomkins-tinch, C. H., Sochat, V., Forster, J., Lee, S., Twardziok, S. O., Kanitz, A., Wilm, A., Holtgrewe, M., Rahmann, S., Nahnsen, S., & Köster, J. (2021) Sustainable data analysis with Snakemake [version 2; peer review: 2 approved]. *F1000Research* 2021, 10:33.\r +- [illumina-utils](https://doi.org/10.1371/journal.pone.0066643) : Murat Eren, A., Vineis, J. H., Morrison, H. G., & Sogin, M. L. (2013). A Filtering Method to Generate High Quality Short Reads Using Illumina Paired-End Technology. *PloS ONE*, 8(6), e66643.\r +- [Trimmomatic](https://doi.org/10.1093/bioinformatics/btu170) : Bolger, A. M., Lohse, M., & Usadel, B. (2014). Genome analysis Trimmomatic: a flexible trimmer for Illumina sequence data. *Bioinformatics*, 30(15), 2114-2120.\r +- [Bowtie2](https://doi.org/10.1038/nmeth.1923) : Langmead, B., & Salzberg, S. L. (2012). Fast gapped-read alignment with Bowtie 2. *Nature Methods*, 9(4), 357–359.\r +- [SAMtools](https://doi.org/10.1093/bioinformatics/btp352) : Li, H., Handsaker, B., Wysoker, A., Fennell, T., Ruan, J., Homer, N., Marth, G., Abecasis, G., & Durbin, R. (2009). The Sequence Alignment/Map format and SAMtools. 
*Bioinformatics*, 25(16), 2078–2079.\r +- [BEDtools](https://doi.org/10.1093/bioinformatics/btq033) : Quinlan, A. R., & Hall, I. M. (2010). BEDTools: A flexible suite of utilities for comparing genomic features. *Bioinformatics*, 26(6), 841–842.\r +- [MEGAHIT](https://doi.org/10.1093/bioinformatics/btv033) : Li, D., Liu, C. M., Luo, R., Sadakane, K., & Lam, T. W. (2015). MEGAHIT: An ultra-fast single-node solution for large and complex metagenomics assembly via succinct de Bruijn graph. *Bioinformatics*, 31(10), 1674–1676.\r +- [bwa](https://doi.org/10.1093/bioinformatics/btp324) : Li, H., & Durbin, R. (2009). Fast and accurate short read alignment with Burrows-Wheeler transform. *Bioinformatics*, 25(14), 1754–1760.\r +- [MetaBAT2](https://doi.org/10.7717/peerj.7359) : Kang, D. D., Li, F., Kirton, E., Thomas, A., Egan, R., An, H., & Wang, Z. (2019). MetaBAT 2: An adaptive binning algorithm for robust and efficient genome reconstruction from metagenome assemblies. *PeerJ*, 2019(7), 1–13.\r +- [CheckM](https://doi.org/10.1101/gr.186072.114) : Parks, D. H., Imelfort, M., Skennerton, C. T., Hugenholtz, P., & Tyson, G. W. (2015). CheckM: Assessing the quality of microbial genomes recovered from isolates, single cells, and metagenomes. *Genome Research*, 25(7), 1043–1055.\r +- [GTDB-Tk](https://doi.org/10.1093/BIOINFORMATICS/BTAC672) : Chaumeil, P.-A., Mussig, A. J., Hugenholtz, P., Parks, D. H. (2022). GTDB-Tk v2: memory friendly classification with the genome taxonomy database. *Bioinformatics*.\r +- [CoverM](https://github.com/wwood/CoverM)\r +- [Waidele et al, 2019](https://doi.org/10.1101/526038) : Waidele, L., Korb, J., Voolstra, C. R., Dedeine, F., & Staubach, F. (2019). Ecological specificity of the metagenome in a set of lower termite species supports contribution of the microbiome to adaptation of the host. 
*Animal Microbiome*, 1(1), 1–13.\r +- [Tokuda et al, 2018](https://doi.org/10.1073/pnas.1810550115) : Tokuda, G., Mikaelyan, A., Fukui, C., Matsuura, Y., Watanabe, H., Fujishima, M., & Brune, A. (2018). Fiber-associated spirochetes are major agents of hemicellulose degradation in the hindgut of wood-feeding higher termites. *Proceedings of the National Academy of Sciences of the United States of America*, 115(51), E11996–E12004.\r +- [Romero Victorica et al, 2020](https://doi.org/10.1038/s41598-020-60850-5) : Romero Victorica, M., Soria, M. A., Batista-García, R. A., Ceja-Navarro, J. A., Vikram, S., Ortiz, M., Ontañon, O., Ghio, S., Martínez-Ávila, L., Quintero García, O. J., Etcheverry, C., Campos, E., Cowan, D., Arneodo, J., & Talia, P. M. (2020). Neotropical termite microbiomes as sources of novel plant cell wall degrading enzymes. *Scientific Reports*, 10(1), 1–14.\r +- [Moreira et al, 2021](https://doi.org/10.3389/fevo.2021.632590) : Moreira, E. A., Persinoti, G. F., Menezes, L. R., Paixão, D. A. A., Alvarez, T. M., Cairo, J. P. L. F., Squina, F. M., Costa-Leonardo, A. M., Rodrigues, A., Sillam-Dussès, D., & Arab, A. (2021). Complementary contribution of Fungi and Bacteria to lignocellulose digestion in the food stored by a neotropical higher termite. *Frontiers in Ecology and Evolution*, 9(April), 1–12.\r +- [Calusinska et al, 2020](https://doi.org/10.1038/s42003-020-1004-3) : Calusinska, M., Marynowska, M., Bertucci, M., Untereiner, B., Klimek, D., Goux, X., Sillam-Dussès, D., Gawron, P., Halder, R., Wilmes, P., Ferrer, P., Gerin, P., Roisin, Y., & Delfosse, P. (2020). Integrative omics analysis of the termite gut system adaptation to Miscanthus diet identifies lignocellulose degradation enzymes. *Communications Biology*, 3(1), 1–12.\r +- [Orakov et al, 2021](https://doi.org/10.1186/s13059-021-02393-0) : Orakov, A., Fullam, A., Coelho, L. P., Khedkar, S., Szklarczyk, D., Mende, D. R., Schmidt, T. S. B., & Bork, P. (2021). 
GUNC: detection of chimerism and contamination in prokaryotic genomes. *Genome Biology*, 22(1).\r +- [Parks et al, 2015](https://doi.org/10.1101/gr.186072.114) : Parks, D. H., Imelfort, M., Skennerton, C. T., Hugenholtz, P., & Tyson, G. W. (2015). CheckM: Assessing the quality of microbial genomes recovered from isolates, single cells, and metagenomes. *Genome Research*, 25(7), 1043–1055.\r +# License\r +This project is licensed under the CeCILL License - see the [LICENSE](https://github.com/Nachida08/SnakeMAGs/blob/main/LICENCE) file for details.\r +\r +Developed by Nachida Tadrent at the Insect Biology Research Institute ([IRBI](https://irbi.univ-tours.fr/)), under the supervision of Franck Dedeine and Vincent Hervé.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/554?version=1" ; + schema1:isBasedOn "https://github.com/Nachida08/SnakeMAGs.git" ; + schema1:license "CECILL-2.1" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for SnakeMAGs: a simple, efficient, flexible and scalable workflow to reconstruct prokaryotic genomes from metagenomes" ; + schema1:sdDatePublished "2024-07-12 13:32:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/554/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17575 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-08-02T11:41:06Z" ; + schema1:dateModified "2023-08-02T11:41:06Z" ; + schema1:description """[![Snakemake](https://img.shields.io/badge/snakemake-≥7.0.0-brightgreen.svg?style=flat)](https://snakemake.readthedocs.io)\r +\r +\r +# About SnakeMAGs\r +SnakeMAGs is a workflow to reconstruct prokaryotic genomes from metagenomes. The main purpose of SnakeMAGs is to process Illumina data from raw reads to metagenome-assembled genomes (MAGs).\r +SnakeMAGs is efficient, easy to handle and flexible to different projects. 
The workflow is CeCILL licensed, implemented in Snakemake (run on multiple cores) and available for Linux.\r +SnakeMAGs performed eight main steps:\r +- Quality filtering of the reads\r +- Adapter trimming\r +- Filtering of the host sequences (optional)\r +- Assembly\r +- Binning\r +- Evaluation of the quality of the bins\r +- Classification of the MAGs\r +- Estimation of the relative abundance of the MAGs\r +\r +\r +![scheme of workflow](SnakeMAGs_schema.jpg?raw=true)\r +\r +# How to use SnakeMAGs\r +## Install conda\r +The easiest way to install and run SnakeMAGs is to use [conda](https://www.anaconda.com/products/distribution). These package managers will help you to easily install [Snakemake](https://snakemake.readthedocs.io/en/stable/getting_started/installation.html).\r +\r +## Install and activate Snakemake environment\r +Note: The workflow was developed with Snakemake 7.0.0\r +```\r +conda activate\r +\r +# First, set up your channel priorities\r +conda config --add channels defaults\r +conda config --add channels bioconda\r +conda config --add channels conda-forge\r +\r +# Then, create a new environment for the Snakemake version you require\r +conda create -n snakemake_7.0.0 snakemake=7.0.0\r +\r +# And activate it\r +conda activate snakemake_7.0.0\r +```\r +\r +Alternatively, you can also install Snakemake via mamba:\r +```\r +# If you do not have mamba yet on your machine, you can install it with:\r +conda install -n base -c conda-forge mamba\r +\r +# Then you can install Snakemake\r +conda activate base\r +mamba create -c conda-forge -c bioconda -n snakemake snakemake\r +\r +# And activate it\r +conda activate snakemake\r +\r +```\r +\r +## SnakeMAGs executable\r +The easiest way to procure SnakeMAGs and its related files is to clone the repository using git:\r +```\r +git clone https://github.com/Nachida08/SnakeMAGs.git\r +```\r +Alternatively, you can download the relevant files:\r +```\r +wget 
https://github.com/Nachida08/SnakeMAGs/blob/main/SnakeMAGs.smk https://github.com/Nachida08/SnakeMAGs/blob/main/config.yaml\r +```\r +\r +## SnakeMAGs input files\r +- Illumina paired-end reads in FASTQ.\r +- Adapter sequence file ([adapter.fa](https://github.com/Nachida08/SnakeMAGs/blob/main/adapters.fa)).\r +- Host genome sequences in FASTA (if host_genome: "yes"), in case you work with host-associated metagenomes (e.g. human gut metagenome).\r +\r +## Download Genome Taxonomy Database (GTDB)\r +GTDB-Tk requires ~66G+ of external data (GTDB) that need to be downloaded and unarchived. Because this database is voluminous, we let you decide where you want to store it.\r +SnakeMAGs does not automatically download GTDB, you have to do it:\r +\r +```\r +#Download the latest release (tested with release207)\r +#Note: SnakeMAGs uses GTDBtk v2.1.0 and therefore requires release 207 as minimum version. See https://ecogenomics.github.io/GTDBTk/installing/index.html#installing for details.\r +wget https://data.gtdb.ecogenomic.org/releases/latest/auxillary_files/gtdbtk_v2_data.tar.gz\r +#Decompress\r +tar -xzvf *tar.gz\r +#This will create a folder called release207_v2\r +```\r +All you have to do now is to indicate the path to the database folder (in our example, the folder is called release207_v2) in the config file, Classification section.\r +\r +## Download the GUNC database (required if gunc: "yes")\r +GUNC accepts either a progenomes or GTDB based reference database. Both can be downloaded using the ```gunc download_db``` command. For our study we used the default proGenome-derived GUNC database. 
It requires less resources with similar performance.\r +\r +```\r +conda activate\r +# Install and activate GUNC environment\r +conda create --prefix /path/to/gunc_env\r +conda install -c bioconda metabat2 --prefix /path/to/gunc_env\r +source activate /path/to/gunc_env\r +\r +#Download the proGenome-derived GUNC database (tested with gunc_db_progenomes2.1)\r +#Note: SnakeMAGs uses GUNC v1.0.5\r +gunc download_db -db progenomes /path/to/GUNC_DB\r +```\r +All you have to do now is to indicate the path to the GUNC database file in the config file, Bins quality section.\r +\r +## Edit config file\r +You need to edit the config.yaml file. In particular, you need to set the correct paths: for the working directory, to specify where are your fastq files, where you want to place the conda environments (that will be created using the provided .yaml files available in [SnakeMAGs_conda_env directory](https://github.com/Nachida08/SnakeMAGs/tree/main/SnakeMAGs_conda_env)), where are the adapters, where is GTDB and optionally where is the GUNC database and where is your host genome reference.\r +\r +Lastly, you need to allocate the proper computational resources (threads, memory) for each of the main steps. 
These can be optimized according to your hardware.\r +\r +\r +\r +Here is an example of a config file:\r +\r +```\r +#####################################################################################################\r +##### _____ ___ _ _ _ ______ __ __ _______ _____ #####\r +##### / ___| | \\ | | /\\ | | / / | ____| | \\ / | /\\ / _____| / ___| #####\r +##### | (___ | |\\ \\ | | / \\ | |/ / | |____ | \\/ | / \\ | | __ | (___ #####\r +##### \\___ \\ | | \\ \\| | / /\\ \\ | |\\ \\ | ____| | |\\ /| | / /\\ \\ | | |_ | \\___ \\ #####\r +##### ____) | | | \\ | / /__\\ \\ | | \\ \\ | |____ | | \\/ | | / /__\\ \\ | |____|| ____) | #####\r +##### |_____/ |_| \\__| /_/ \\_\\ |_| \\_\\ |______| |_| |_| /_/ \\_\\ \\______/ |_____/ #####\r +##### #####\r +#####################################################################################################\r +\r +############################\r +### Execution parameters ###\r +############################\r +\r +working_dir: /path/to/working/directory/ #The main directory for the project\r +raw_fastq: /path/to/raw_fastq/ #The directory that contains all the fastq files of all the samples (eg. sample1_R1.fastq & sample1_R2.fastq, sample2_R1.fastq & sample2_R2.fastq...)\r +suffix_1: "_R1.fastq" #Main type of suffix for forward reads file (eg. _1.fastq or _R1.fastq or _r1.fastq or _1.fq or _R1.fq or _r1.fq )\r +suffix_2: "_R2.fastq" #Main type of suffix for reverse reads file (eg. _2.fastq or _R2.fastq or _r2.fastq or _2.fq or _R2.fq or _r2.fq )\r +\r +###########################\r +### Conda environnemnts ###\r +###########################\r +\r +conda_env: "/path/to/SnakeMAGs_conda_env/" #Path to the provided SnakeMAGs_conda_env directory which contains the yaml file for each conda environment\r +\r +#########################\r +### Quality filtering ###\r +#########################\r +email: name.surname@your-univ.com #Your e-mail address\r +threads_filter: 10 #The number of threads to run this process. 
To be adjusted according to your hardware\r +resources_filter: 150 #Memory according to tools need (in GB)\r +\r +########################\r +### Adapter trimming ###\r +########################\r +adapters: /path/to/working/directory/adapters.fa #A fasta file contanning a set of various Illumina adaptors (this file is provided and is also available on github)\r +trim_params: "2:40:15" #For further details, see the Trimmomatic documentation\r +threads_trim: 10 #The number of threads to run this process. To be adjusted according to your hardware\r +resources_trim: 150 #Memory according to tools need (in GB)\r +\r +######################\r +### Host filtering ###\r +######################\r +host_genome: "yes" #yes or no. An optional step for host-associated samples (eg. termite, human, plant...)\r +threads_bowtie2: 50 #The number of threads to run this process. To be adjusted according to your hardware\r +host_genomes_directory: /path/to/working/host_genomes/ #the directory where the host genome is stored\r +host_genomes: /path/to/working/host_genomes/host_genomes.fa #A fasta file containing the DNA sequences of the host genome(s)\r +threads_samtools: 50 #The number of threads to run this process. To be adjusted according to your hardware\r +resources_host_filtering: 150 #Memory according to tools need (in GB)\r +\r +################\r +### Assembly ###\r +################\r +threads_megahit: 50 #The number of threads to run this process. To be adjusted according to your hardware\r +min_contig_len: 1000 #Minimum length (in bp) of the assembled contigs\r +k_list: "21,31,41,51,61,71,81,91,99,109,119" #Kmer size (for further details, see the megahit documentation)\r +resources_megahit: 250 #Memory according to tools need (in GB)\r +\r +###############\r +### Binning ###\r +###############\r +threads_bwa: 50 #The number of threads to run this process. 
To be adjusted according to your hardware\r +resources_bwa: 150 #Memory according to tools need (in GB)\r +threads_samtools: 50 #The number of threads to run this process. To be adjusted according to your hardware\r +resources_samtools: 150 #Memory according to tools need (in GB)\r +seed: 19860615 #Seed number for reproducible results\r +threads_metabat: 50 #The number of threads to run this process. To be adjusted according to your hardware\r +minContig: 2500 #Minimum length (in bp) of the contigs\r +resources_binning: 250 #Memory according to tools need (in GB)\r +\r +####################\r +### Bins quality ###\r +####################\r +#checkM\r +threads_checkm: 50 #The number of threads to run this process. To be adjusted according to your hardware\r +resources_checkm: 250 #Memory according to tools need (in GB)\r +#bins_quality_filtering\r +completion: 50 #The minimum completion rate of bins\r +contamination: 10 #The maximum contamination rate of bins\r +parks_quality_score: "yes" #yes or no. If yes bins are filtered according to the Parks quality score (completion-5*contamination >= 50)\r +#GUNC\r +gunc: "yes" #yes or no. An optional step to detect and discard chimeric and contaminated genomes using the GUNC tool\r +threads_gunc: 50 #The number of threads to run this process. To be adjusted according to your hardware\r +resources_gunc: 250 #Memory according to tools need (in GB)\r +GUNC_db: /path/to/GUNC_DB/gunc_db_progenomes2.1.dmnd #Path to the downloaded GUNC database (see the readme file)\r +\r +######################\r +### Classification ###\r +######################\r +GTDB_data_ref: /path/to/downloaded/GTDB #Path to uncompressed GTDB-Tk reference data (GTDB)\r +threads_gtdb: 10 #The number of threads to run this process. 
To be adjusted according to your hardware\r +resources_gtdb: 250 #Memory according to tools need (in GB)\r +\r +##################\r +### Abundances ###\r +##################\r +threads_coverM: 10 #The number of threads to run this process. To be adjusted according to your hardware\r +resources_coverM: 150 #Memory according to tools need (in GB)\r +```\r +# Run SnakeMAGs\r +If you are using a workstation with Ubuntu (tested on Ubuntu 22.04):\r +```{bash}\r +snakemake --cores 30 --snakefile SnakeMAGs.smk --use-conda --conda-prefix /path/to/SnakeMAGs_conda_env/ --configfile /path/to/config.yaml --keep-going --latency-wait 180\r +```\r +\r +If you are working on a cluster with Slurm (tested with version 18.08.7):\r +```{bash}\r +snakemake --snakefile SnakeMAGs.smk --cluster 'sbatch -p --mem -c -o "cluster_logs/{wildcards}.{rule}.{jobid}.out" -e "cluster_logs/{wildcards}.{rule}.{jobid}.err" ' --jobs --use-conda --conda-frontend conda --conda-prefix /path/to/SnakeMAGs_conda_env/ --jobname "{rule}.{wildcards}.{jobid}" --latency-wait 180 --configfile /path/to/config.yaml --keep-going\r +```\r +\r +If you are working on a cluster with SGE (tested with version 8.1.9):\r +```{bash}\r +snakemake --snakefile SnakeMAGs.smk --cluster "qsub -cwd -V -q -pe thread {threads} -e cluster_logs/{rule}.e{jobid} -o cluster_logs/{rule}.o{jobid}" --jobs --use-conda --conda-frontend conda --conda-prefix /path/to/SnakeMAGs_conda_env/ --jobname "{rule}.{wildcards}.{jobid}" --latency-wait 180 --configfile /path/to/config.yaml --keep-going\r +```\r +\r +\r +# Test\r +We provide you a small data set in the [test](https://github.com/Nachida08/SnakeMAGs/tree/main/test) directory which will allow you to validate your instalation and take your first steps with SnakeMAGs. 
This data set is a subset from [ZymoBiomics Mock Community](https://www.zymoresearch.com/blogs/blog/zymobiomics-microbial-standards-optimize-your-microbiomics-workflow) (250K reads) used in this tutorial [metagenomics_tutorial](https://github.com/pjtorres/metagenomics_tutorial).\r +\r +1. Before getting started make sure you have cloned the SnakeMAGs repository or you have downloaded all the necessary files (SnakeMAGs.smk, config.yaml, chr19.fa.gz, insub732_2_R1.fastq.gz, insub732_2_R2.fastq.gz). See the [SnakeMAGs executable](#snakemags-executable) section.\r +2. Unzip the fastq files and the host sequences file.\r +```\r +gunzip fastqs/insub732_2_R1.fastq.gz fastqs/insub732_2_R2.fastq.gz host_genomes/chr19.fa.gz\r +```\r +3. For better organisation put all the read files in the same directory (eg. fastqs) and the host sequences file in a separate directory (eg. host_genomes)\r +4. Edit the config file (see [Edit config file](#edit-config-file) section)\r +5. Run the test (see [Run SnakeMAGs](#run-snakemags) section)\r +\r +Note: the analysis of these files took 1159.32 seconds to complete on a Ubuntu 22.04 LTS with an Intel(R) Xeon(R) Silver 4210 CPU @ 2.20GHz x 40 processor, 96GB of RAM.\r +\r +# Genome reference for host reads filtering\r +For host-associated samples, one can remove host sequences from the metagenomic reads by mapping these reads against a reference genome. In the case of termite gut metagenomes, we are providing [here](https://zenodo.org/record/6908287#.YuAdFXZBx8M) the relevant files (fasta and index files) from termite genomes.\r +\r +Upon request, we can help you to generate these files for your own reference genome and make them available to the community.\r +\r +NB. These steps of mapping generate voluminous files such as .bam and .sam. 
Depending on your disk space, you might want to delete these files after use.\r +\r +\r +# Use case\r +During the test phase of the development of SnakeMAGs, we used this workflow to process 10 publicly available termite gut metagenomes generated by Illumina sequencing, to ultimately reconstruct prokaryotic MAGs. These metagenomes were retrieved from the NCBI database using the following accession numbers: SRR10402454; SRR14739927; SRR8296321; SRR8296327; SRR8296329; SRR8296337; SRR8296343; DRR097505; SRR7466794; SRR7466795. They come from five different studies: Waidele et al, 2019; Tokuda et al, 2018; Romero Victorica et al, 2020; Moreira et al, 2021; and Calusinska et al, 2020.\r +\r +## Download the Illumina pair-end reads\r +We use fasterq-dump tool to extract data in FASTQ-format from SRA-accessions. It is a commandline-tool which offers a faster solution for downloading those large files.\r +\r +```\r +# Install and activate sra-tools environment\r +## Note: For this study we used sra-tools 2.11.0\r +\r +conda activate\r +conda install -c bioconda sra-tools\r +conda activate sra-tools\r +\r +# Download fastqs in a single directory\r +mkdir raw_fastq\r +cd raw_fastq\r +fasterq-dump --threads --skip-technical --split-3\r +```\r +\r +## Download Genome reference for host reads filtering\r +```\r +mkdir host_genomes\r +cd host_genomes\r +wget https://zenodo.org/record/6908287/files/termite_genomes.fasta.gz\r +gunzip termite_genomes.fasta.gz\r +```\r +\r +## Edit the config file\r +See [Edit config file](#edit-config-file) section.\r +\r +## Run SnakeMAGs\r +```\r +conda activate snakemake_7.0.0\r +mkdir cluster_logs\r +snakemake --snakefile SnakeMAGs.smk --cluster 'sbatch -p --mem -c -o "cluster_logs/{wildcards}.{rule}.{jobid}.out" -e "cluster_logs/{wildcards}.{rule}.{jobid}.err" ' --jobs --use-conda --conda-frontend conda --conda-prefix /path/to/SnakeMAGs_conda_env/ --jobname "{rule}.{wildcards}.{jobid}" --latency-wait 180 --configfile /path/to/config.yaml 
--keep-going\r +```\r +\r +## Study results\r +The MAGs reconstructed from each metagenome and their taxonomic classification are available in this [repository](https://doi.org/10.5281/zenodo.7661004).\r +\r +# Citations\r +\r +If you use SnakeMAGs, please cite:\r +> Tadrent N, Dedeine F and Hervé V. SnakeMAGs: a simple, efficient, flexible and scalable workflow to reconstruct prokaryotic genomes from metagenomes [version 2; peer review: 2 approved]. F1000Research 2023, 11:1522 (https://doi.org/10.12688/f1000research.128091.2)\r +\r +\r +Please also cite the dependencies:\r +- [Snakemake](https://doi.org/10.12688/f1000research.29032.2) : Mölder, F., Jablonski, K. P., Letcher, B., Hall, M. B., Tomkins-tinch, C. H., Sochat, V., Forster, J., Lee, S., Twardziok, S. O., Kanitz, A., Wilm, A., Holtgrewe, M., Rahmann, S., Nahnsen, S., & Köster, J. (2021) Sustainable data analysis with Snakemake [version 2; peer review: 2 approved]. *F1000Research* 2021, 10:33.\r +- [illumina-utils](https://doi.org/10.1371/journal.pone.0066643) : Murat Eren, A., Vineis, J. H., Morrison, H. G., & Sogin, M. L. (2013). A Filtering Method to Generate High Quality Short Reads Using Illumina Paired-End Technology. *PloS ONE*, 8(6), e66643.\r +- [Trimmomatic](https://doi.org/10.1093/bioinformatics/btu170) : Bolger, A. M., Lohse, M., & Usadel, B. (2014). Genome analysis Trimmomatic: a flexible trimmer for Illumina sequence data. *Bioinformatics*, 30(15), 2114-2120.\r +- [Bowtie2](https://doi.org/10.1038/nmeth.1923) : Langmead, B., & Salzberg, S. L. (2012). Fast gapped-read alignment with Bowtie 2. *Nature Methods*, 9(4), 357–359.\r +- [SAMtools](https://doi.org/10.1093/bioinformatics/btp352) : Li, H., Handsaker, B., Wysoker, A., Fennell, T., Ruan, J., Homer, N., Marth, G., Abecasis, G., & Durbin, R. (2009). The Sequence Alignment/Map format and SAMtools. *Bioinformatics*, 25(16), 2078–2079.\r +- [BEDtools](https://doi.org/10.1093/bioinformatics/btq033) : Quinlan, A. R., & Hall, I. M. (2010). 
BEDTools: A flexible suite of utilities for comparing genomic features. *Bioinformatics*, 26(6), 841–842.\r +- [MEGAHIT](https://doi.org/10.1093/bioinformatics/btv033) : Li, D., Liu, C. M., Luo, R., Sadakane, K., & Lam, T. W. (2015). MEGAHIT: An ultra-fast single-node solution for large and complex metagenomics assembly via succinct de Bruijn graph. *Bioinformatics*, 31(10), 1674–1676.\r +- [bwa](https://doi.org/10.1093/bioinformatics/btp324) : Li, H., & Durbin, R. (2009). Fast and accurate short read alignment with Burrows-Wheeler transform. *Bioinformatics*, 25(14), 1754–1760.\r +- [MetaBAT2](https://doi.org/10.7717/peerj.7359) : Kang, D. D., Li, F., Kirton, E., Thomas, A., Egan, R., An, H., & Wang, Z. (2019). MetaBAT 2: An adaptive binning algorithm for robust and efficient genome reconstruction from metagenome assemblies. *PeerJ*, 2019(7), 1–13.\r +- [CheckM](https://doi.org/10.1101/gr.186072.114) : Parks, D. H., Imelfort, M., Skennerton, C. T., Hugenholtz, P., & Tyson, G. W. (2015). CheckM: Assessing the quality of microbial genomes recovered from isolates, single cells, and metagenomes. *Genome Research*, 25(7), 1043–1055.\r +- [GTDB-Tk](https://doi.org/10.1093/BIOINFORMATICS/BTAC672) : Chaumeil, P.-A., Mussig, A. J., Hugenholtz, P., Parks, D. H. (2022). GTDB-Tk v2: memory friendly classification with the genome taxonomy database. *Bioinformatics*.\r +- [CoverM](https://github.com/wwood/CoverM)\r +- [Waidele et al, 2019](https://doi.org/10.1101/526038) : Waidele, L., Korb, J., Voolstra, C. R., Dedeine, F., & Staubach, F. (2019). Ecological specificity of the metagenome in a set of lower termite species supports contribution of the microbiome to adaptation of the host. *Animal Microbiome*, 1(1), 1–13.\r +- [Tokuda et al, 2018](https://doi.org/10.1073/pnas.1810550115) : Tokuda, G., Mikaelyan, A., Fukui, C., Matsuura, Y., Watanabe, H., Fujishima, M., & Brune, A. (2018). 
Fiber-associated spirochetes are major agents of hemicellulose degradation in the hindgut of wood-feeding higher termites. *Proceedings of the National Academy of Sciences of the United States of America*, 115(51), E11996–E12004.\r +- [Romero Victorica et al, 2020](https://doi.org/10.1038/s41598-020-60850-5) : Romero Victorica, M., Soria, M. A., Batista-García, R. A., Ceja-Navarro, J. A., Vikram, S., Ortiz, M., Ontañon, O., Ghio, S., Martínez-Ávila, L., Quintero García, O. J., Etcheverry, C., Campos, E., Cowan, D., Arneodo, J., & Talia, P. M. (2020). Neotropical termite microbiomes as sources of novel plant cell wall degrading enzymes. *Scientific Reports*, 10(1), 1–14.\r +- [Moreira et al, 2021](https://doi.org/10.3389/fevo.2021.632590) : Moreira, E. A., Persinoti, G. F., Menezes, L. R., Paixão, D. A. A., Alvarez, T. M., Cairo, J. P. L. F., Squina, F. M., Costa-Leonardo, A. M., Rodrigues, A., Sillam-Dussès, D., & Arab, A. (2021). Complementary contribution of Fungi and Bacteria to lignocellulose digestion in the food stored by a neotropical higher termite. *Frontiers in Ecology and Evolution*, 9(April), 1–12.\r +- [Calusinska et al, 2020](https://doi.org/10.1038/s42003-020-1004-3) : Calusinska, M., Marynowska, M., Bertucci, M., Untereiner, B., Klimek, D., Goux, X., Sillam-Dussès, D., Gawron, P., Halder, R., Wilmes, P., Ferrer, P., Gerin, P., Roisin, Y., & Delfosse, P. (2020). Integrative omics analysis of the termite gut system adaptation to Miscanthus diet identifies lignocellulose degradation enzymes. *Communications Biology*, 3(1), 1–12.\r +- [Orakov et al, 2021](https://doi.org/10.1186/s13059-021-02393-0) : Orakov, A., Fullam, A., Coelho, L. P., Khedkar, S., Szklarczyk, D., Mende, D. R., Schmidt, T. S. B., & Bork, P. (2021). GUNC: detection of chimerism and contamination in prokaryotic genomes. *Genome Biology*, 22(1).\r +- [Parks et al, 2015](https://doi.org/10.1101/gr.186072.114) : Parks, D. H., Imelfort, M., Skennerton, C. T., Hugenholtz, P., & Tyson, G. W. 
(2015). CheckM: Assessing the quality of microbial genomes recovered from isolates, single cells, and metagenomes. *Genome Research*, 25(7), 1043–1055.\r +# License\r +This project is licensed under the CeCILL License - see the [LICENSE](https://github.com/Nachida08/SnakeMAGs/blob/main/LICENCE) file for details.\r +\r +Developed by Nachida Tadrent at the Insect Biology Research Institute ([IRBI](https://irbi.univ-tours.fr/)), under the supervision of Franck Dedeine and Vincent Hervé.\r +""" ; + schema1:image ; + schema1:keywords "Bioinformatics, Metagenomics, binning, MAG" ; + schema1:license "https://spdx.org/licenses/CECILL-2.1" ; + schema1:name "SnakeMAGs: a simple, efficient, flexible and scalable workflow to reconstruct prokaryotic genomes from metagenomes" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/554?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 102488 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.292.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Amber Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-07-12 13:35:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/292/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1722 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-17T14:45:57Z" ; + schema1:dateModified "2022-03-23T10:04:51Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/292?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Amber Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/292?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """## Learning Objectives\r +- How to access genomic resource via its Python API\r +- How to access image resource via its Python API\r +- Relate image data to genomic data\r +\r +## Diabetes related genes expressed in pancreas\r +\r +This notebook looks at the question **Which diabetes related genes are expressed in the pancreas?** Tissue and disease can be modified.\r +\r +Steps:\r +\r +- Query [humanmine.org](https://www.humanmine.org/humanmine), an integrated database of Homo sapiens genomic data using the intermine API to find the genes.\r +- Using the list of found genes, search in the [Image Data Resource (IDR)](https://idr.openmicroscopy.org/) for images linked to the genes, tissue and disease.\r +- Analyse the images found.\r +\r +## Launch\r +This notebook uses the [environment.yml](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/environment.yml) file.\r +\r +See [Setup](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/setup.md).\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.494.1" ; + schema1:isBasedOn "https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/PublicResources.ipynb" ; + schema1:license "BSD-2-Clause" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Use Public Resources to answer a biological question" ; + schema1:sdDatePublished "2024-07-12 13:33:18 +0100" ; + schema1:url "https://workflowhub.eu/workflows/494/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 1135634 ; + schema1:url "https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/includes/HumanMineIDR.png" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2884598 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-01T10:14:31Z" ; + schema1:dateModified "2023-06-01T10:53:01Z" ; + schema1:description """## Learning Objectives\r +- How to access genomic resource via its Python API\r +- How to access image resource via its Python API\r +- Relate image data to genomic data\r +\r +## Diabetes related genes expressed in pancreas\r +\r +This notebook looks at the question **Which diabetes related genes are expressed in the pancreas?** Tissue and disease can be modified.\r +\r +Steps:\r +\r +- Query [humanmine.org](https://www.humanmine.org/humanmine), an integrated database of Homo sapiens genomic data using the intermine API to find the genes.\r +- Using the list of found genes, search in the [Image Data Resource (IDR)](https://idr.openmicroscopy.org/) for images linked to the genes, tissue and disease.\r +- Analyse the images found.\r +\r +## Launch\r +This notebook uses the [environment.yml](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/environment.yml) file.\r +\r +See [Setup](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/setup.md).\r +""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "imaging, Python" ; + schema1:license "https://spdx.org/licenses/BSD-2-Clause" ; + schema1:name "Use Public Resources to answer a biological question" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/PublicResources.ipynb" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.196.5" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook ABC MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/196/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 99544 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-04T15:34:43Z" ; + schema1:dateModified "2024-05-14T10:17:33Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/196?version=4" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook ABC MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_wf_amber_md_setup/blob/main/biobb_wf_amber_md_setup/notebooks/abcsetup/biobb_amber_ABC_setup.ipynb" ;
+ schema1:version 5 .
+
+ a schema1:Dataset ;
+ dct:conformsTo ,
+ ;
+ schema1:creativeWorkStatus "Stable" ;
+ schema1:description """A variation of the Cancer variant annotation (hg38 VEP-based) workflow at https://doi.org/10.48546/workflowhub.workflow.607.1.\r
+\r
+Like that other workflow it takes a list of tumor/normal sample pair variants in VCF format (see the other workflow for details about the expected format) and\r
+\r
+1. annotates them using the ENSEMBL Variant Effect Predictor and custom annotation data\r
+2. turns the annotated VCF into a MAF file for import into cBioPortal\r
+3. generates human-readable variant- and gene-centric reports\r
+\r
+In addition, this workflow exports the resulting MAF dataset to a WebDAV-enabled remote folder for subsequent import into cBioPortal.\r
+WebDAV access details can be configured in the Galaxy user preferences.""" ;
+ schema1:hasPart ;
+ schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.629.1" ;
+ schema1:license "GPL-3.0" ;
+ schema1:mainEntity ;
+ schema1:name "Research Object Crate for Cancer variant annotation (hg38 VEP-based) with MAF export" ;
+ schema1:sdDatePublished "2024-07-12 13:26:58 +0100" ;
+ schema1:url "https://workflowhub.eu/workflows/629/ro_crate?version=1" . 
+
+ a schema1:MediaObject,
+ schema1:SoftwareSourceCode,
+ ;
+ dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ;
+ schema1:contentSize 178377 ;
+ schema1:creator ;
+ schema1:dateCreated "2023-10-27T15:07:03Z" ;
+ schema1:dateModified "2023-10-27T15:07:32Z" ;
+ schema1:description """A variation of the Cancer variant annotation (hg38 VEP-based) workflow at https://doi.org/10.48546/workflowhub.workflow.607.1.\r
+\r
+Like that other workflow it takes a list of tumor/normal sample pair variants in VCF format (see the other workflow for details about the expected format) and\r
+\r
+1. annotates them using the ENSEMBL Variant Effect Predictor and custom annotation data\r
+2. turns the annotated VCF into a MAF file for import into cBioPortal\r
+3. generates human-readable variant- and gene-centric reports\r
+\r
+In addition, this workflow exports the resulting MAF dataset to a WebDAV-enabled remote folder for subsequent import into cBioPortal.\r
+WebDAV access details can be configured in the Galaxy user preferences.""" ;
+ schema1:keywords "EOSC4Cancer" ;
+ schema1:license "https://spdx.org/licenses/GPL-3.0" ;
+ schema1:name "Cancer variant annotation (hg38 VEP-based) with MAF export" ;
+ schema1:producer ;
+ schema1:programmingLanguage ;
+ schema1:sdPublisher ;
+ schema1:url "https://usegalaxy.eu/api/workflows/ce1712139b4a4273/download?format=json-download" ;
+ schema1:version 1 ;
+ ns1:input ,
+ ,
+ ,
+ ,
+ ,
+ ;
+ ns1:output ,
+ ,
+ ,
+ ,
+ ,
+ ,
+ ,
+ ,
+ ,
+ ,
+ ,
+ ,
+ ,
+ ,
+ ,
+ ,
+ .
+
+ a ,
+ schema1:ImageObject,
+ schema1:MediaObject ;
+ schema1:about ;
+ schema1:contentSize 18709 ;
+ schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ;
+ schema1:encodingFormat ,
+ "image/svg+xml" ;
+ schema1:name "complete_graph.svg" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "We present an R script that describes the workflow for analysing honey bee (Apis mellifera) wing shape. It is based on a large dataset of wing images and landmark coordinates available at Zenodo: https://doi.org/10.5281/zenodo.7244070. The dataset can be used as a reference for the identification of unknown samples. As unknown samples, we used data from Nawrocka et al. (2018), available at Zenodo: https://doi.org/10.5281/zenodo.7567336. Among others, the script can be used to identify the geographic origin of unknown samples and therefore assist in the monitoring and conservation of honey bee biodiversity in Europe." ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.422.1" ; + schema1:license "CC0-1.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Apis-wings-EU: A workflow for morphometric identification of honey bees from Europe" ; + schema1:sdDatePublished "2024-07-12 13:34:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/422/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 165859 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2784682 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2023-01-25T07:16:25Z" ; + schema1:dateModified "2023-02-28T13:04:32Z" ; + schema1:description "We present an R script that describes the workflow for analysing honey bee (Apis mellifera) wing shape. It is based on a large dataset of wing images and landmark coordinates available at Zenodo: https://doi.org/10.5281/zenodo.7244070. The dataset can be used as a reference for the identification of unknown samples. As unknown samples, we used data from Nawrocka et al. 
(2018), available at Zenodo: https://doi.org/10.5281/zenodo.7567336. Among others, the script can be used to identify the geographic origin of unknown samples and therefore assist in the monitoring and conservation of honey bee biodiversity in Europe." ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC0-1.0" ; + schema1:name "Apis-wings-EU: A workflow for morphometric identification of honey bees from Europe" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/422?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/amber-protein-ligand-complex-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 
2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.298.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_complex_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/298/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 96594 ; + schema1:creator , + ; + schema1:dateCreated "2023-05-03T13:52:05Z" ; + schema1:dateModified "2023-05-03T13:53:54Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. 
Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/amber-protein-ligand-complex-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/298?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_complex_md_setup/galaxy/biobb_wf_amber_complex_md_setup.ga" ; + schema1:version 3 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , 
+ , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# ChIP-Seq pipeline\r +Here we provide the tools to perform paired end or single read ChIP-Seq analysis including raw data quality control, read mapping, peak calling, differential binding analysis and functional annotation. As input files you may use either zipped fastq-files (.fastq.gz) or mapped read data (.bam files). In case of paired end reads, corresponding fastq files should be named using *.R1.fastq.gz* and *.R2.fastq.gz* suffixes.\r +\r +\r +## Pipeline Workflow\r +All analysis steps are illustrated in the pipeline [flowchart](https://www.draw.io/?lightbox=1&highlight=0000ff&edit=_blank&layers=1&nav=1&title=NGSpipe2go_ChIPseq_pipeline.html#R7R1Zc6M489e4KvNgF4fPx0kyzmSPTCbJfLOzLykMsq0NBgLYOX79p5NTYLC5kkyyNRuEAKnV6m712VPPNs8Xruas%2F7YNYPYUyXjuqec9RZGHitLD%2F0nGC22ZDGe0YeVCg3UKG27hK2CNEmvdQgN4sY6%2BbZs%2BdOKNum1ZQPdjbZrr2k%2FxbkvbjH%2FV0VYg1XCra2a69Sc0%2FDVrlcez8MZXAFdr9umpMqE3Fpr%2BsHLtrcW%2BZ9kWoHc2Gn8Nm6O31gz7KdKkfumpZ65t%2B%2FSvzfMZMDFYOcToc%2FOMu8GQXWD5RR44%2F%2B%2FFO%2F%2Fxv8eH1%2FXZ5Hopg0tf6geD8184LICBQMMubddf2yvb0swvYespmS%2FAr5XQ1X%2FbjcP7rzQHtYRP%2FWXbqOFcxt2A77%2Bwlde2vo2a1v7GZHfBM%2FT%2FwS8cjNjVr8id82f2LXLxwi8s332JPIQvf0XvhY%2BRK%2F6c52uuz0Yyxtf21tXBNXDhBvjAvXU0HVqr4FEj0hU9uQJ%2BRlfPd%2B2HAH%2FQ%2Bp6ml4itGv0kbdrpV99vf27%2F2J6%2F%2Fnt3eTf%2F5e3%2B7asM8egHczoO2QLiVYt8gqHABbDRQN0X1MEFpubDXRzhNbZvVkG%2FEIHQHwyHxPj0Vfpjub6a%2BuZufbO4%2Bno7v3%2F92ucbeqeZW%2FapnjI20QxODbjDXzThyiI3xo9bjPqnLtlWwSX6a8X%2BTx5buMkWNDDyLt6awN8IVpnaApinwRY9s03bJZ3UOfkRLtrStjh2yHjNDc1bE2SXY6iPr5bQNKMvlfBv8FJ%2Bh9AE9XTlagZEiJBo1u0N1BkGrUzN8zg2cXIhkc%2F6aPFsDLj%2BTMrDrB1wffCciwns7nSsDpQRfYqRbbR806DtKUIIJ2xZ1xEaOJSGg9HxaCTEavlD49GXMf6tA4%2FgBjGyz55DeahcDSIpUzmGRfJISmHQUJqlMWjM0eoY%2FHn96Z%2FfSf%2F8%2B8f85ou7kY27i5fLvno0V%2Bs8D6uQ1xRkNeqwalZDHv3sutpLpINjQ8v3Im%2B%2Bxg0hug2nihDd5hn91Uluf%2FQHHUGIbcFUCiHg
%2FGasX%2Fxzvr4Bfz3qy18vd9%2FOXvvy6L1jYIyctYKOajfRkZ2FstAx1X%2BYj76jmZTX%2F2j0zVuDLO6b4KzJBsz5YrjPeTK%2B0fcI9n5GHeSh8yxg2GdK73OUSdPXpXh31Z9NzuZsfXndvwWP6Ilr6AATIjaLhnaaOwahlJExAQEoqWxz5AyOg0OfbCnoawv0NNrBfOrocA%2BW0IJEDEXog%2F%2FZM8dqFsRzNKtIm4alCxcsY19a%2Bz5WIXzG%2B0WZr6CPRLmBazoDC9EWZQ43i6WN6Az68%2BriFs9VWdnooo%2FX1rQXeJXADis75hwQiBTMMWp4GDPm%2Bho66K8BvztAEqK9e0nPJrsjh6AmQBHRPLObG8azTqOwaWuGhxU0GOBYHWMbWxMvnqRZBkZxz0OcCWom5VIaOc9XtIsr2IDPQN%2F6ZLiRDYhQFb8WWiuyNU888An9jzQ6GnQBmReZ3YlDbhnAw6ek4rPKPgJVck5IcDI1fUyQBQdNmZ8dKj9mDgVsLgECdHxy8J%2FoeOWggxaePBXCuLAz5Q2BfnAoBFcuny0MQzkFMkUAMd5WTvJJiR79hHpgEn%2BBvVx6mIomFqEC8aNd6Zn%2F%2FSsmSe%2BTnmOycyhKN3B%2Bk5WiEnPVAnPG5vvhAffb4j%2BsZECEGOtOBDKkgDwu7GdMHYlGFdPHhe0awO2jZixrYBBxxCfUU0JYPgruRDXxn4lsooIh%2Fg16OJphBO9W8thPK6IEXT5v4D8LtFjxm3kig4AfcVkuDfFscCZZFdhEByXpaLG9QPsd3pJSvQKoR3op2aD3F7bxkmpMcS7f4C2X17n8LeyYfpS3wOi7ECJt8PwhAZmFBIPI22ChL%2BDG9Ihz5xD9TkXz8LSNk5qD8N11zerq%2BsddhZOynK2ftz6Nz63iRWPza33dMBV1Dp0Fe5gNXrKXkRlVMXDcKCIRhIASMheVZfOlDCXC%2FphYEpE4sFAGdc38zCwNPhYjTrndwQRL%2FBkb9VqaRL%2BOlfi9mEBRhcycUK3LM4EAKJKZuVBQgm2jywjnLiGuKdPf4lpxcY0fMt6MuOYIRYcNGj60qHQgOaGERtv7ZLfge%2BPIPYT0fp%2FtoM9EPrDoqXufLu46PASnDuuF1XBOsm2d6nXQVCkpwDenYqHyNKIq64zmKlCADHaa62XqrbK6pUVQJnXnLECG2iq1LvWsQoJROa69gwbYpwoScavo5E4D3dIVejAGpBhABN%2Blj%2FWxNRYuoR7%2FunRiO%2F7gU9FhiIC5lwEq%2BQywDKt7WkMfYHcY%2FOyTS9yAqmd%2Fw6FI%2FyFgf7PyjgmHcz%2BRsaRRLRL%2FYGfVSMP4KtagRhI6t8lyaiHenVzSlCtbzNx7uPCjFrbujjsi%2FQSyxVzz%2FO9nhKoXETwet4h%2B%2BniEuo3WzDb5acjVnsiwiX0CDxbx5oce07cneP56uxggmpHD7xmv36DBYbYxD%2BwcuBvW76Mbj%2FqANgfsey9roOuUyRpE9CzBLph26C9yda6OM5gE8ZJiL5YqPz6NuOmAn5%2FG0xTlk8dymvSNy3smFWEgQmclfqbLIFPAXNhPrVOo%2BijCsCBF4F5lrft7xK1YQ3b6rcofQ4gie9y0f6MIs%2BfNOoki6rQJFMkXdj4IilS88mlvLDlxIpETRlEmVtGnwjU%2FGoVG1aLQXOS02KoaT4qh0eQgcVndKy5X6KZYmChVjppFfR6Eiyz%2F9o0%2BcpWFYG3quFJskdXjF7nSsK44j5h0X0lfeHerHRE5xnF%2BobCDT6YXcrI%2FO2%2FXyl%2F4mb6bWHkYUu5jOfW5yRelTB1B0KTz32yYj6CJ%2Fspo1gCC5p%2FEfyNoPQLS5F1g6LhiRYAQQ4f5ioC3gaFyHEOVfYy9AyhauXjXEooqx6BoYVX5wn7yIVAK68p5S8R3
8QZoRg8bdx0HmyX2WJaz4hBo04lHXJGwVdWhgSzYLf5T1vN7dPIH2%2BFD1XxohWeQKq%2BfH0awpE79fMXuTDMljsBjtgNj%2BniBKbImfXyu6UkQ%2FbbfM4GEdDD%2FBOkA15dvW5%2B6AeIcJqhDuIUi%2B0d61PM2Vnu%2BLhSN8WYLpgFp1A2NXQnHXcR54XhnvGgMPo%2FJn2sbaGL8%2BwrMHcBv7TXhyjAZTmOor0rjNOrz7XEk6hfE80nH8FxKoboLvK2JOWIXkf0EIRmaGzA%2BEcaiPYTo3qxv0zlcLoEb%2BjDRoZjQ8xsfyjX9tmZZPGNJx3Z3DTt5OBZkTBHtZB75Xf1OnnZ%2BJ5v2qqPbGI2sz7mUbpsmkhZIJCXlWEDT1z3uq%2FcBkHmsFkTmKtjSn7uLe%2Bfs7od%2FtQHj6832yr%2B%2Fajd3SxNOW4efV%2Fnpf%2B95ddaaOSJ33J2hT2mJ2gUOQpuOkqi4C3nN7uI48SI%2BpAYQmXtraL3cswwG97QdHR3vcU92mqWNg5uNkR596n5m%2BGLSsbrLMx6QPujWYA1MB7je4EY8dXHHvBDOdhzi8RohmhTsBbxjXOx7GEtBEJVvCweltTMhxLPh8oWwMh%2FzcH3r%2BXQ%2B3M0%2BTAfhI%2FTy1ohA4CaI3TQtADBHKTyxd39gncgFJQNZqiAt4Fyo%2FT7eobt97XdJaaE25ffw7bpiX1qoJ9snBVXMeIo4axF%2BEEMHP8sJ3FjbYESwFh7dOvEQ49TXaXIV0UeZmFOl8jjTsZtOBc%2BjtPJ42JRzd2TLJFJcAvLTS6a4RHfUsTpTjV4kcWVNvuAqv47Ss5EoTWU9umdxyMrwvZ9%2BEvSs%2B6mYh6M3SyedtWYhkYcoBIsSyu9nWOJCI4C6R6kfpY70X0wcoWVQk5y%2BRoKUjoAPPT%2FonaCVXhat3EMoj4l8Cc1rEQCUp5Kj31QS%2B64LLHQNUkmhHMj1nd2UAw90b5y0JgiOCxK46h11jiVw4NkHLOFTEdr2hXan0Xf41cBakaMhWlg0wN2KiITaagOsiJDYDaJGpT464fK0bNw8LYtRpvKErQJalvQ2GEuTNC1r0NtAHLfzFqL%2FqnPJrijeh5OivTSL56Zv2XNroiY9sY7K4lzcE0vbnGEtE6l%2FU5BK6og6%2BiQBle1uEBnwiDlsAVdPEBNIH8l8D02ELqOx62zs5QnepHmCV4P0Neb5UFuiWD8t9eHp9PHu35%2FX9zfWn456ubv%2FXVyhFKGapgmVEKqV06mjNK0812s3JewDBewmA4hE657rhdW2a3EiIGi4z7U40X%2FEMKZuhoaP8vcR96LCTA1Res2FHkleT20%2BO%2BjhHB2vzE%2BpGYVEfPjl%2Bdq0W4L8coR%2FRYL8mPz0qAkrKuCTn3oE%2FGHL7sRila7SJr%2F8rdLNyRPxFlW6C70wzbs%2Bu0FvO7V93wQW0LFj5pkNlkuoQ8DSEzRA8Rblsw%2FR9fnoqtfhSGBwb91A9e5T8785ajYq6hXYPWoGHQQIqoQtStQc08ZvpSnO%2BcPNkLJwtKUp2kjqNU7RaiBJI255aYkkiYs9vvtae5XKPoVjXqcNUYuCGolO2%2FzegkaicEqTjmok5HIpTZrSSGw03Sse6syCs3TNNIkTRSOMiwyxvBT%2BPmKM1anAz6vtGGNulupMRETays3tPF0MiFjA1c%2BYMYpFazGVnhcLPexuqHEF2J700h6NBTbuuuK3FpOtcWFf%2FeOATf%2F7zcyf6Rfj%2Frs%2FIFbJlKeFmXJbwljusA8iX7VXC7kBJBxaxzbsC2DZsRJeOby5fbpGw55Ov%2F28u%2FxC%2BDPaFc%2FE%2FL6KzSMZKMXvSjjdN46WQnx%2FSyHQdBAOmQC0cTmLaBQ42h36A%2FZCoKWh8mOI2q4Iwjz%2FTVsnoycx
QXgO%2B4qhHFSio2MVN%2BLC00SYpUJw4Jfl2mjNSEBrEoCrt75GQO0%2Bbn0NYaR2q7EKcb1LwNxL6V2CRGQNcPqZwMtWqOjgWv%2BOsPpZdjakztdxDXh9JASZoFL%2BoeajlVClIInnbEL%2FO0FIbgBaVpxFAidDuXlo7YMFffdTeL2EFklFE%2BTvoaVPNIJuosoo1CNDQ0%2B9eNAjgAaON4gsSpEqlAWLcPbKVsQs%2Bf4PMrBeqVKclVbWrEn4mfFSMLkhz8IS9XWFPKdlndZO028gbbcsFw7W6xSTDcbdij6QVx7OT5CiPRkaPlt2UR9Iqlz1wmyDJ6%2FQcUhitpbPlOisy2IlbWuHz47kLEyVlY4GKXNNhTB1J4cGLd0sD27kAYHxYPXa7XEqB4zzTSuAlTjLkkVHT1GFzFlNpIznUO%2BwZQMnFY6ldukQJQtTTxRWkb1p9E3aLxBYBCKXwH4xqk3iOt6A0cHg4ialsJlcUAjjeeG645BGcnXdsDxgRW36DCCYyVP1MWHvaL8%2FGPaT1QsTauFRV2Hsz8sozrON4RmUNvnPovB%2Fuyb%2FybBdk%2F9X6Y%2Fl%2Bmrqm7v1zeLq6%2B38%2FvUrD%2FAS8MUUamHaK%2BQtSZXSUMRC%2FgIrEuUeoC59XYqFLDJZCGa38UWKe0FbaPF7cZdp1sSxgTFo9TTJjTbQMAjFFGFFnIpWwV24byyXjtQUXgxF0pFSHi%2BKIUG6VvgC7%2B5eJMNsT1GfP%2FdoemmTnA9IPjrbwYcHzfyUWq3DdmUV0E0WKRTsuqEAusO6oHuMVXqcFD0jSlgOzbCRwFV0LImlEoskTgwXp3UR80C5sox%2BrgrkmsRdj7nabS9yVSAXCpGrSTNkLnZ%2FXCukECxv0D7F1ONzamjJ2Y0fzRKVC4wPa2jp7MBatABVwGFmk8PEF3WYTRuP4jBt1mOB1AYwtxMatE46yX0AvVgSOQuLP%2BOakDO7xEigXb%2F0ETT1qMt4qHgny7n1MEZJDNeCFNvv9vQ5ies25aIUprbjZ1o3f06jXYleClBHRwNBjOR%2FEjlqEL%2BM97xkcSlWFmiSFOGSVWBPEbrTv%2FuEwAdqnoXAKqp4bsr6v0%2Fv3AFn%2BeuojsJawtXW5U7PXbSPxQuDoFOLlfxqzcVCKJBwO%2B42oJehK3jSnCfsxGEoKv4hmlKz3vtNA9JxgbZBpwUvE4qCHnkgbMcFIQ48kddBqqFmCHP1Y9xARKrdZII6cTcPzN3x%2Fkg1GGBJSdqT7T6YNislbGkrIn%2FumcP%2BdSvR4ALKtLwS3z2pboAIENrWJKUv2AgYLD1z68a0QDjtOy18Y0dlc8nb%2FxFSWgM8Ax3JEW5snXEEjRlbwELzwp7E1CNYdDeM0wnKzQYEIniQVmqKhfCcELPsAwAYJ0PAeEgSgtbK%2BzTYM7zE0TJXasu3o3bA1TXhNjQapuN8hH5D8kgpLQUVMZkKoagIzpx1qdzz5MkPrHKfC%2Bsw5eesfpO5OJo8phTODVp5CpaDMnEkk1dPWMadrEwcqWTX41ECC%2BvJxMGqtxKz61%2B43mzx5KDOC1kPjxROJqVqaU2AQB3KGrlWVOnVlrZjAz1satEsYG89IieTWZGTRODRowh9erKR7c259IzkOIOaSIJQjAZdesR0MM2PWqWD0qAUJUQXQXq6JHWU26OOohgOIfh5cvSOZKjiBfXePzYobaNDrttpV9ChYwnLKkSHhOiktowNeTJW88iQO%2BxCR5mta76c4sRGeIL7eG2IMfSK50U%2F789Kwr44cx5ORoNR%2FOgxkwaiWMk0f1YntWUIfIenkrr89vNKULzVBIGjSaymTmcSBBpwucRFEwsfR%2FADOIERzjTTixZcDOPj95w%2FqigzG2qM%2BQTywgmyEarZo0d19QmqTu%2BdDFIYqTMRyRSeacqTTMGZZh%2
FCdtZdsmZzSYD7xPYRwXfixoZpoXcfbAH%2FWeD3ndErz3jy0Tw7b%2BEGmppLyNYDCLiMR0GFjQ8LomdhCUTwaGi8A8aQMGcq8x9aYt3MSWCjoAFXGPoZ5FKyLfMFK%2FilNLWNLEqF%2FpiBAeEaDZ7iJ8uG0gsSrLCkKonp5YxPZJWoa8xndDhk4DhjW4%2FlcGlktL1SjqX5tWJzeVBnU5EocWahKoqAWYgtNNJkUD4G4je%2FqIRfEEc5dH8fx8js95tn5NClvXT%2BjJB65stEAEwoFmU7uB13QALsgE79z2877%2BcdGv%2FJn9%2F6P%2B%2F2WoDroLUXCNmdXpAVE%2B9Zlm0yxiE7RlXza6O%2FFaqqDAtmy5g0aFRot15Yh8tZCKFVNKtiU6XLC6qq5KPXuHOqqpo0VUVXPTcZTcc0Vcq4XCkLhVVuqltThejwwn7yIVC2FnzcgsIaKxdsEDlHb9%2BgAw7s4wQ7lDW7QDNqUFflJcBITiJPa5WNXM1qrfJKbBaoRFeDdkoYDde2vV0%2Bnmx%2BINY4KUgkeWGgrvDG4%2BWfD80biy57R3mjOixXePpI3lgMJysoMde9nGD7zPaJQpgtEKbKw7QOw9BkmpvRHukt2b8hO%2BPN5nzrFHd6DGQ2Y%2BuYJOTTqE9gE9gXUVf0r7tBn89NVJaNQs3KaOhlUOf0unPymiq3W%2FpczMqlNuW1dhh3jGoeTiNnRbl452r6nmqb72eFqZAoxpzYmcJEqHup0THe1pQKoY895pYpz16jt%2BdZnajpK1KCytzjoC3KMTqectQocCndIiYVy0dpgSaRRXE8m8Rfwaqi06dyJKMJjzLjTEtNZCthNdOTLyodsCJNhQPOHFeiv9qQvu1vrC0rQSy97WajuRhXFen7WS%2FIE12%2Feo2SSaLdO4BQUkf9N0cpkzLWeKp0T8YaZVeOoAb6dPJKl4y0XF6lmO05QZkjUCfYfhrY8CPC75z8CKlcNMXSOEn14v7NCVn7yxj%2FpmRtlltm5WoGBKF3H2uOCvIrU%2FO8pFAvVYM%2Bs6RmQ1SIWFRNRpUqSOIkRpZ9yRJLZDolT23NZIsZhr5rT%2Fh0SYqGCHMJOabtR9OMmCnbduRlgAXxc8uChx3dPT%2BebyrvDZhAhpWCf5zdYgLKy4NKCM%2BeaHaqoKQwCQBkFYUJyWVj0LHlmpSyzNkveSOBFvoSCT2n701PpfQrNV0HnkchY8KFq7kU1LgqxjOF%2BwL%2Fc312g%2F%2B0fd8EFtCJJ5cNlkuow3hu2ryPYZHDcqG%2B3pBn%2BDoyyEaHcnlNpoc20Qq79RV6u%2B7ankfG5VK5hZYPZd9ILYqz1iw%2F8KMrigzEfc0IH5K4Iw3NHvA%2FYOGPGlBDNGQTzo2WGcFHFIQCpuYUXZ1YIdQYtJZA87cuUcMgFHDhYsu63c17UR8X09ZhMDzug5dAxD2jwL13EDylxlDA37wUWqLGNF3Yw0OOzyDGJIUj8ofFtDpRLlEFM%2BCnrCDZpCDhgixgBsqs%2FFkd69Vs24%2FKq2i%2B679tA4uiX%2F4P). Specify the desired analysis details for your data in the *essential.vars.groovy* file (see below) and run the pipeline *chipseq.pipeline.groovy* as described [here](https://gitlab.rlp.net/imbforge/NGSpipe2go/-/blob/master/README.md). A markdown file *ChIPreport.Rmd* will be generated in the output reports folder after running the pipeline. 
Subsequently, the *ChIPreport.Rmd* file can be converted to a final html report using the *knitr* R-package.\r +\r +\r +### The pipelines includes\r +- raw data quality control with FastQC, BamQC and MultiQC\r +- mapping reads or read pairs to the reference genome using bowtie2 (default) or bowtie1\r +- filter out multimapping reads from bowtie2 output with samtools (optional)\r +- identify and remove duplicate reads with Picard MarkDuplicates (optional) \r +- generation of bigWig tracks for visualisation of alignment with deeptools bamCoverage. For single end design, reads are extended to the average fragment size\r +- characterization of insert size using Picard CollectInsertSizeMetrics (for paired end libraries only)\r +- characterize library complexity by PCR Bottleneck Coefficient using the GenomicAlignments R-package (for single read libraries only) \r +- characterize phantom peaks by cross correlation analysis using the spp R-package (for single read libraries only)\r +- peak calling of IP samples vs. corresponding input controls using MACS2\r +- peak annotation using the ChIPseeker R-package (optional)\r +- differential binding analysis using the diffbind R-package (optional). For this, input peak files must be given in *NGSpipe2go/tools/diffbind/targets_diffbind.txt* and contrasts of interest in *NGSpipe2go/tools/diffbind/contrasts_diffbind.txt* (see below)\r +\r +\r +### Pipeline-specific parameter settings\r +- targets.txt: tab-separated txt-file giving information about the analysed samples. The following columns are required: \r + - IP: bam file name of IP sample\r + - IPname: IP sample name to be used in plots and tables \r + - INPUT: bam file name of corresponding input control sample\r + - INPUTname: input sample name to be used in plots and tables \r + - group: variable for sample grouping (e.g. 
by condition)\r
+\r
+- essential.vars.groovy: essential parameter describing the experiment including: \r
+ - ESSENTIAL_PROJECT: your project folder name\r
+ - ESSENTIAL_BOWTIE_REF: full path to bowtie2 indexed reference genome (bowtie1 indexed reference genome if bowtie1 is selected as mapper)\r
+ - ESSENTIAL_BOWTIE_GENOME: full path to the reference genome FASTA file\r
+ - ESSENTIAL_BSGENOME: Bioconductor genome sequence annotation package\r
+ - ESSENTIAL_TXDB: Bioconductor transcript-related annotation package\r
+ - ESSENTIAL_ANNODB: Bioconductor genome annotation package\r
+ - ESSENTIAL_BLACKLIST: files with problematic 'blacklist regions' to be excluded from analysis (optional)\r
+ - ESSENTIAL_PAIRED: either paired end ("yes") or single read ("no") design\r
+ - ESSENTIAL_READLEN: read length of library\r
+ - ESSENTIAL_FRAGLEN: mean length of library inserts and also minimum peak size called by MACS2\r
+ - ESSENTIAL_THREADS: number of threads for parallel tasks\r
+ - ESSENTIAL_USE_BOWTIE1: if true use bowtie1 for read mapping, otherwise bowtie2 by default\r
+\r
+- additional (more specialized) parameter can be given in the var.groovy-files of the individual pipeline modules \r
+\r
+If differential binding analysis is selected it is required additionally:\r
+\r
+- contrasts_diffbind.txt: indicate intended group comparisons for differential binding analysis, e.g. *KOvsWT=(KO-WT)* if targets.txt contains the groups *KO* and *WT*. Give 1 contrast per line. 
\r
+- targets_diffbind.txt: \r
+ - SampleID: IP sample name (as IPname in targets.txt)\r
+ - Condition: variable for sample grouping (as group in targets.txt)\r
+ - Replicate: number of replicate\r
+ - bamReads: bam file name of IP sample (as IP in targets.txt but with path relative to project directory)\r
+ - ControlID: input sample name (as INPUTname in targets.txt)\r
+ - bamControl: bam file name of corresponding input control sample (as INPUT in targets.txt but with path relative to project directory)\r
+ - Peaks: peak file name obtained from peak caller (path relative to project directory)\r
+ - PeakCaller: name of peak caller (e.g. macs)\r
+\r
+## Programs required\r
+- Bedtools\r
+- Bowtie2\r
+- deepTools\r
+- encodeChIPqc (provided by another project from imbforge)\r
+- FastQC\r
+- MACS2\r
+- MultiQC\r
+- Picard\r
+- R with packages ChIPSeeker, diffbind, GenomicAlignments, spp and genome annotation packages\r
+- Samtools\r
+- UCSC utilities\r
+""" ;
+ schema1:hasPart ;
+ schema1:identifier "https://workflowhub.eu/workflows/59?version=1" ;
+ schema1:isBasedOn "https://gitlab.rlp.net/imbforge/NGSpipe2go/-/tree/master/pipelines/ChIPseq" ;
+ schema1:license "GPL-3.0" ;
+ schema1:mainEntity ;
+ schema1:name "Research Object Crate for ChIP-seq" ;
+ schema1:sdDatePublished "2024-07-12 13:36:24 +0100" ;
+ schema1:url "https://workflowhub.eu/workflows/59/ro_crate?version=1" .
+
+ a schema1:MediaObject,
+ schema1:SoftwareSourceCode,
+ ;
+ dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ;
+ schema1:contentSize 2757 ;
+ schema1:creator ;
+ schema1:dateCreated "2020-10-07T07:41:21Z" ;
+ schema1:dateModified "2023-01-16T13:44:52Z" ;
+ schema1:description """# ChIP-Seq pipeline\r
+Here we provide the tools to perform paired end or single read ChIP-Seq analysis including raw data quality control, read mapping, peak calling, differential binding analysis and functional annotation. 
As input files you may use either zipped fastq-files (.fastq.gz) or mapped read data (.bam files). In case of paired end reads, corresponding fastq files should be named using *.R1.fastq.gz* and *.R2.fastq.gz* suffixes.\r +\r +\r +## Pipeline Workflow\r +All analysis steps are illustrated in the pipeline [flowchart](https://www.draw.io/?lightbox=1&highlight=0000ff&edit=_blank&layers=1&nav=1&title=NGSpipe2go_ChIPseq_pipeline.html#R7R1Zc6M489e4KvNgF4fPx0kyzmSPTCbJfLOzLykMsq0NBgLYOX79p5NTYLC5kkyyNRuEAKnV6m712VPPNs8Xruas%2F7YNYPYUyXjuqec9RZGHitLD%2F0nGC22ZDGe0YeVCg3UKG27hK2CNEmvdQgN4sY6%2BbZs%2BdOKNum1ZQPdjbZrr2k%2FxbkvbjH%2FV0VYg1XCra2a69Sc0%2FDVrlcez8MZXAFdr9umpMqE3Fpr%2BsHLtrcW%2BZ9kWoHc2Gn8Nm6O31gz7KdKkfumpZ65t%2B%2FSvzfMZMDFYOcToc%2FOMu8GQXWD5RR44%2F%2B%2FFO%2F%2Fxv8eH1%2FXZ5Hopg0tf6geD8184LICBQMMubddf2yvb0swvYespmS%2FAr5XQ1X%2FbjcP7rzQHtYRP%2FWXbqOFcxt2A77%2Bwlde2vo2a1v7GZHfBM%2FT%2FwS8cjNjVr8id82f2LXLxwi8s332JPIQvf0XvhY%2BRK%2F6c52uuz0Yyxtf21tXBNXDhBvjAvXU0HVqr4FEj0hU9uQJ%2BRlfPd%2B2HAH%2FQ%2Bp6ml4itGv0kbdrpV99vf27%2F2J6%2F%2Fnt3eTf%2F5e3%2B7asM8egHczoO2QLiVYt8gqHABbDRQN0X1MEFpubDXRzhNbZvVkG%2FEIHQHwyHxPj0Vfpjub6a%2BuZufbO4%2Bno7v3%2F92ucbeqeZW%2FapnjI20QxODbjDXzThyiI3xo9bjPqnLtlWwSX6a8X%2BTx5buMkWNDDyLt6awN8IVpnaApinwRY9s03bJZ3UOfkRLtrStjh2yHjNDc1bE2SXY6iPr5bQNKMvlfBv8FJ%2Bh9AE9XTlagZEiJBo1u0N1BkGrUzN8zg2cXIhkc%2F6aPFsDLj%2BTMrDrB1wffCciwns7nSsDpQRfYqRbbR806DtKUIIJ2xZ1xEaOJSGg9HxaCTEavlD49GXMf6tA4%2FgBjGyz55DeahcDSIpUzmGRfJISmHQUJqlMWjM0eoY%2FHn96Z%2FfSf%2F8%2B8f85ou7kY27i5fLvno0V%2Bs8D6uQ1xRkNeqwalZDHv3sutpLpINjQ8v3Im%2B%2Bxg0hug2nihDd5hn91Uluf%2FQHHUGIbcFUCiHg%2FGasX%2Fxzvr4Bfz3qy18vd9%2FOXvvy6L1jYIyctYKOajfRkZ2FstAx1X%2BYj76jmZTX%2F2j0zVuDLO6b4KzJBsz5YrjPeTK%2B0fcI9n5GHeSh8yxg2GdK73OUSdPXpXh31Z9NzuZsfXndvwWP6Ilr6AATIjaLhnaaOwahlJExAQEoqWxz5AyOg0OfbCnoawv0NNrBfOrocA%2BW0IJEDEXog%2F%2FZM8dqFsRzNKtIm4alCxcsY19a%2Bz5WIXzG%2B0WZr6CPRLmBazoDC9EWZQ43i6WN6Az68%2BriFs9VWdnooo%2FX1rQXeJXADis75hwQiBTMMWp4GDPm%2Bho66K8BvztAEqK9e0nPJrsjh6AmQBHRPLObG8azTqOwaWuGhxU0GOBYHWMb
WxMvnqRZBkZxz0OcCWom5VIaOc9XtIsr2IDPQN%2F6ZLiRDYhQFb8WWiuyNU888An9jzQ6GnQBmReZ3YlDbhnAw6ek4rPKPgJVck5IcDI1fUyQBQdNmZ8dKj9mDgVsLgECdHxy8J%2FoeOWggxaePBXCuLAz5Q2BfnAoBFcuny0MQzkFMkUAMd5WTvJJiR79hHpgEn%2BBvVx6mIomFqEC8aNd6Zn%2F%2FSsmSe%2BTnmOycyhKN3B%2Bk5WiEnPVAnPG5vvhAffb4j%2BsZECEGOtOBDKkgDwu7GdMHYlGFdPHhe0awO2jZixrYBBxxCfUU0JYPgruRDXxn4lsooIh%2Fg16OJphBO9W8thPK6IEXT5v4D8LtFjxm3kig4AfcVkuDfFscCZZFdhEByXpaLG9QPsd3pJSvQKoR3op2aD3F7bxkmpMcS7f4C2X17n8LeyYfpS3wOi7ECJt8PwhAZmFBIPI22ChL%2BDG9Ihz5xD9TkXz8LSNk5qD8N11zerq%2BsddhZOynK2ftz6Nz63iRWPza33dMBV1Dp0Fe5gNXrKXkRlVMXDcKCIRhIASMheVZfOlDCXC%2FphYEpE4sFAGdc38zCwNPhYjTrndwQRL%2FBkb9VqaRL%2BOlfi9mEBRhcycUK3LM4EAKJKZuVBQgm2jywjnLiGuKdPf4lpxcY0fMt6MuOYIRYcNGj60qHQgOaGERtv7ZLfge%2BPIPYT0fp%2FtoM9EPrDoqXufLu46PASnDuuF1XBOsm2d6nXQVCkpwDenYqHyNKIq64zmKlCADHaa62XqrbK6pUVQJnXnLECG2iq1LvWsQoJROa69gwbYpwoScavo5E4D3dIVejAGpBhABN%2Blj%2FWxNRYuoR7%2FunRiO%2F7gU9FhiIC5lwEq%2BQywDKt7WkMfYHcY%2FOyTS9yAqmd%2Fw6FI%2FyFgf7PyjgmHcz%2BRsaRRLRL%2FYGfVSMP4KtagRhI6t8lyaiHenVzSlCtbzNx7uPCjFrbujjsi%2FQSyxVzz%2FO9nhKoXETwet4h%2B%2BniEuo3WzDb5acjVnsiwiX0CDxbx5oce07cneP56uxggmpHD7xmv36DBYbYxD%2BwcuBvW76Mbj%2FqANgfsey9roOuUyRpE9CzBLph26C9yda6OM5gE8ZJiL5YqPz6NuOmAn5%2FG0xTlk8dymvSNy3smFWEgQmclfqbLIFPAXNhPrVOo%2BijCsCBF4F5lrft7xK1YQ3b6rcofQ4gie9y0f6MIs%2BfNOoki6rQJFMkXdj4IilS88mlvLDlxIpETRlEmVtGnwjU%2FGoVG1aLQXOS02KoaT4qh0eQgcVndKy5X6KZYmChVjppFfR6Eiyz%2F9o0%2BcpWFYG3quFJskdXjF7nSsK44j5h0X0lfeHerHRE5xnF%2BobCDT6YXcrI%2FO2%2FXyl%2F4mb6bWHkYUu5jOfW5yRelTB1B0KTz32yYj6CJ%2Fspo1gCC5p%2FEfyNoPQLS5F1g6LhiRYAQQ4f5ioC3gaFyHEOVfYy9AyhauXjXEooqx6BoYVX5wn7yIVAK68p5S8R38QZoRg8bdx0HmyX2WJaz4hBo04lHXJGwVdWhgSzYLf5T1vN7dPIH2%2BFD1XxohWeQKq%2BfH0awpE79fMXuTDMljsBjtgNj%2BniBKbImfXyu6UkQ%2FbbfM4GEdDD%2FBOkA15dvW5%2B6AeIcJqhDuIUi%2B0d61PM2Vnu%2BLhSN8WYLpgFp1A2NXQnHXcR54XhnvGgMPo%2FJn2sbaGL8%2BwrMHcBv7TXhyjAZTmOor0rjNOrz7XEk6hfE80nH8FxKoboLvK2JOWIXkf0EIRmaGzA%2BEcaiPYTo3qxv0zlcLoEb%2BjDRoZjQ8xsfyjX9tmZZPGNJx3Z3DTt5OBZkTBHtZB75Xf1OnnZ%2BJ5v2qqPbGI2sz7mUbpsmkhZIJCXlWEDT1z3uq%2FcB
kHmsFkTmKtjSn7uLe%2Bfs7od%2FtQHj6832yr%2B%2Fajd3SxNOW4efV%2Fnpf%2B95ddaaOSJ33J2hT2mJ2gUOQpuOkqi4C3nN7uI48SI%2BpAYQmXtraL3cswwG97QdHR3vcU92mqWNg5uNkR596n5m%2BGLSsbrLMx6QPujWYA1MB7je4EY8dXHHvBDOdhzi8RohmhTsBbxjXOx7GEtBEJVvCweltTMhxLPh8oWwMh%2FzcH3r%2BXQ%2B3M0%2BTAfhI%2FTy1ohA4CaI3TQtADBHKTyxd39gncgFJQNZqiAt4Fyo%2FT7eobt97XdJaaE25ffw7bpiX1qoJ9snBVXMeIo4axF%2BEEMHP8sJ3FjbYESwFh7dOvEQ49TXaXIV0UeZmFOl8jjTsZtOBc%2BjtPJ42JRzd2TLJFJcAvLTS6a4RHfUsTpTjV4kcWVNvuAqv47Ss5EoTWU9umdxyMrwvZ9%2BEvSs%2B6mYh6M3SyedtWYhkYcoBIsSyu9nWOJCI4C6R6kfpY70X0wcoWVQk5y%2BRoKUjoAPPT%2FonaCVXhat3EMoj4l8Cc1rEQCUp5Kj31QS%2B64LLHQNUkmhHMj1nd2UAw90b5y0JgiOCxK46h11jiVw4NkHLOFTEdr2hXan0Xf41cBakaMhWlg0wN2KiITaagOsiJDYDaJGpT464fK0bNw8LYtRpvKErQJalvQ2GEuTNC1r0NtAHLfzFqL%2FqnPJrijeh5OivTSL56Zv2XNroiY9sY7K4lzcE0vbnGEtE6l%2FU5BK6og6%2BiQBle1uEBnwiDlsAVdPEBNIH8l8D02ELqOx62zs5QnepHmCV4P0Neb5UFuiWD8t9eHp9PHu35%2FX9zfWn456ubv%2FXVyhFKGapgmVEKqV06mjNK0812s3JewDBewmA4hE657rhdW2a3EiIGi4z7U40X%2FEMKZuhoaP8vcR96LCTA1Res2FHkleT20%2BO%2BjhHB2vzE%2BpGYVEfPjl%2Bdq0W4L8coR%2FRYL8mPz0qAkrKuCTn3oE%2FGHL7sRila7SJr%2F8rdLNyRPxFlW6C70wzbs%2Bu0FvO7V93wQW0LFj5pkNlkuoQ8DSEzRA8Rblsw%2FR9fnoqtfhSGBwb91A9e5T8785ajYq6hXYPWoGHQQIqoQtStQc08ZvpSnO%2BcPNkLJwtKUp2kjqNU7RaiBJI255aYkkiYs9vvtae5XKPoVjXqcNUYuCGolO2%2FzegkaicEqTjmok5HIpTZrSSGw03Sse6syCs3TNNIkTRSOMiwyxvBT%2BPmKM1anAz6vtGGNulupMRETays3tPF0MiFjA1c%2BYMYpFazGVnhcLPexuqHEF2J700h6NBTbuuuK3FpOtcWFf%2FeOATf%2F7zcyf6Rfj%2Frs%2FIFbJlKeFmXJbwljusA8iX7VXC7kBJBxaxzbsC2DZsRJeOby5fbpGw55Ov%2F28u%2FxC%2BDPaFc%2FE%2FL6KzSMZKMXvSjjdN46WQnx%2FSyHQdBAOmQC0cTmLaBQ42h36A%2FZCoKWh8mOI2q4Iwjz%2FTVsnoycxQXgO%2B4qhHFSio2MVN%2BLC00SYpUJw4Jfl2mjNSEBrEoCrt75GQO0%2Bbn0NYaR2q7EKcb1LwNxL6V2CRGQNcPqZwMtWqOjgWv%2BOsPpZdjakztdxDXh9JASZoFL%2BoeajlVClIInnbEL%2FO0FIbgBaVpxFAidDuXlo7YMFffdTeL2EFklFE%2BTvoaVPNIJuosoo1CNDQ0%2B9eNAjgAaON4gsSpEqlAWLcPbKVsQs%2Bf4PMrBeqVKclVbWrEn4mfFSMLkhz8IS9XWFPKdlndZO028gbbcsFw7W6xSTDcbdij6QVx7OT5CiPRkaPlt2UR9Iqlz1wmyDJ6%2FQcUhitpbPlOisy2IlbWuHz47kLEyVlY4GKXNNhTB1J4cGLd0sD27kAYHxYPXa7XEq
B4zzTSuAlTjLkkVHT1GFzFlNpIznUO%2BwZQMnFY6ldukQJQtTTxRWkb1p9E3aLxBYBCKXwH4xqk3iOt6A0cHg4ialsJlcUAjjeeG645BGcnXdsDxgRW36DCCYyVP1MWHvaL8%2FGPaT1QsTauFRV2Hsz8sozrON4RmUNvnPovB%2Fuyb%2FybBdk%2F9X6Y%2Fl%2Bmrqm7v1zeLq6%2B38%2FvUrD%2FAS8MUUamHaK%2BQtSZXSUMRC%2FgIrEuUeoC59XYqFLDJZCGa38UWKe0FbaPF7cZdp1sSxgTFo9TTJjTbQMAjFFGFFnIpWwV24byyXjtQUXgxF0pFSHi%2BKIUG6VvgC7%2B5eJMNsT1GfP%2FdoemmTnA9IPjrbwYcHzfyUWq3DdmUV0E0WKRTsuqEAusO6oHuMVXqcFD0jSlgOzbCRwFV0LImlEoskTgwXp3UR80C5sox%2BrgrkmsRdj7nabS9yVSAXCpGrSTNkLnZ%2FXCukECxv0D7F1ONzamjJ2Y0fzRKVC4wPa2jp7MBatABVwGFmk8PEF3WYTRuP4jBt1mOB1AYwtxMatE46yX0AvVgSOQuLP%2BOakDO7xEigXb%2F0ETT1qMt4qHgny7n1MEZJDNeCFNvv9vQ5ies25aIUprbjZ1o3f06jXYleClBHRwNBjOR%2FEjlqEL%2BM97xkcSlWFmiSFOGSVWBPEbrTv%2FuEwAdqnoXAKqp4bsr6v0%2Fv3AFn%2BeuojsJawtXW5U7PXbSPxQuDoFOLlfxqzcVCKJBwO%2B42oJehK3jSnCfsxGEoKv4hmlKz3vtNA9JxgbZBpwUvE4qCHnkgbMcFIQ48kddBqqFmCHP1Y9xARKrdZII6cTcPzN3x%2Fkg1GGBJSdqT7T6YNislbGkrIn%2FumcP%2BdSvR4ALKtLwS3z2pboAIENrWJKUv2AgYLD1z68a0QDjtOy18Y0dlc8nb%2FxFSWgM8Ax3JEW5snXEEjRlbwELzwp7E1CNYdDeM0wnKzQYEIniQVmqKhfCcELPsAwAYJ0PAeEgSgtbK%2BzTYM7zE0TJXasu3o3bA1TXhNjQapuN8hH5D8kgpLQUVMZkKoagIzpx1qdzz5MkPrHKfC%2Bsw5eesfpO5OJo8phTODVp5CpaDMnEkk1dPWMadrEwcqWTX41ECC%2BvJxMGqtxKz61%2B43mzx5KDOC1kPjxROJqVqaU2AQB3KGrlWVOnVlrZjAz1satEsYG89IieTWZGTRODRowh9erKR7c259IzkOIOaSIJQjAZdesR0MM2PWqWD0qAUJUQXQXq6JHWU26OOohgOIfh5cvSOZKjiBfXePzYobaNDrttpV9ChYwnLKkSHhOiktowNeTJW88iQO%2BxCR5mta76c4sRGeIL7eG2IMfSK50U%2F789Kwr44cx5ORoNR%2FOgxkwaiWMk0f1YntWUIfIenkrr89vNKULzVBIGjSaymTmcSBBpwucRFEwsfR%2FADOIERzjTTixZcDOPj95w%2FqigzG2qM%2BQTywgmyEarZo0d19QmqTu%2BdDFIYqTMRyRSeacqTTMGZZh%2FCdtZdsmZzSYD7xPYRwXfixoZpoXcfbAH%2FWeD3ndErz3jy0Tw7b%2BEGmppLyNYDCLiMR0GFjQ8LomdhCUTwaGi8A8aQMGcq8x9aYt3MSWCjoAFXGPoZ5FKyLfMFK%2FilNLWNLEqF%2FpiBAeEaDZ7iJ8uG0gsSrLCkKonp5YxPZJWoa8xndDhk4DhjW4%2FlcGlktL1SjqX5tWJzeVBnU5EocWahKoqAWYgtNNJkUD4G4je%2FqIRfEEc5dH8fx8js95tn5NClvXT%2BjJB65stEAEwoFmU7uB13QALsgE79z2877%2BcdGv%2FJn9%2F6P%2B%2F2WoDroLUXCNmdXpAVE%2B9Zlm0yxiE7RlXza6O%2FFaqqDAtmy5g0aFRot15Yh8tZCKFVNKtiU6
XLC6qq5KPXuHOqqpo0VUVXPTcZTcc0Vcq4XCkLhVVuqltThejwwn7yIVC2FnzcgsIaKxdsEDlHb9%2BgAw7s4wQ7lDW7QDNqUFflJcBITiJPa5WNXM1qrfJKbBaoRFeDdkoYDde2vV0%2Bnmx%2BINY4KUgkeWGgrvDG4%2BWfD80biy57R3mjOixXePpI3lgMJysoMde9nGD7zPaJQpgtEKbKw7QOw9BkmpvRHukt2b8hO%2BPN5nzrFHd6DGQ2Y%2BuYJOTTqE9gE9gXUVf0r7tBn89NVJaNQs3KaOhlUOf0unPymiq3W%2FpczMqlNuW1dhh3jGoeTiNnRbl452r6nmqb72eFqZAoxpzYmcJEqHup0THe1pQKoY895pYpz16jt%2BdZnajpK1KCytzjoC3KMTqectQocCndIiYVy0dpgSaRRXE8m8Rfwaqi06dyJKMJjzLjTEtNZCthNdOTLyodsCJNhQPOHFeiv9qQvu1vrC0rQSy97WajuRhXFen7WS%2FIE12%2Feo2SSaLdO4BQUkf9N0cpkzLWeKp0T8YaZVeOoAb6dPJKl4y0XF6lmO05QZkjUCfYfhrY8CPC75z8CKlcNMXSOEn14v7NCVn7yxj%2FpmRtlltm5WoGBKF3H2uOCvIrU%2FO8pFAvVYM%2Bs6RmQ1SIWFRNRpUqSOIkRpZ9yRJLZDolT23NZIsZhr5rT%2Fh0SYqGCHMJOabtR9OMmCnbduRlgAXxc8uChx3dPT%2BebyrvDZhAhpWCf5zdYgLKy4NKCM%2BeaHaqoKQwCQBkFYUJyWVj0LHlmpSyzNkveSOBFvoSCT2n701PpfQrNV0HnkchY8KFq7kU1LgqxjOF%2BwL%2Fc312g%2F%2B0fd8EFtCJJ5cNlkuow3hu2ryPYZHDcqG%2B3pBn%2BDoyyEaHcnlNpoc20Qq79RV6u%2B7ankfG5VK5hZYPZd9ILYqz1iw%2F8KMrigzEfc0IH5K4Iw3NHvA%2FYOGPGlBDNGQTzo2WGcFHFIQCpuYUXZ1YIdQYtJZA87cuUcMgFHDhYsu63c17UR8X09ZhMDzug5dAxD2jwL13EDylxlDA37wUWqLGNF3Yw0OOzyDGJIUj8ofFtDpRLlEFM%2BCnrCDZpCDhgixgBsqs%2FFkd69Vs24%2FKq2i%2B679tA4uiX%2F4P). Specify the desired analysis details for your data in the *essential.vars.groovy* file (see below) and run the pipeline *chipseq.pipeline.groovy* as described [here](https://gitlab.rlp.net/imbforge/NGSpipe2go/-/blob/master/README.md). A markdown file *ChIPreport.Rmd* will be generated in the output reports folder after running the pipeline. 
Subsequently, the *ChIPreport.Rmd* file can be converted to a final html report using the *knitr* R-package.\r +\r +\r +### The pipelines includes\r +- raw data quality control with FastQC, BamQC and MultiQC\r +- mapping reads or read pairs to the reference genome using bowtie2 (default) or bowtie1\r +- filter out multimapping reads from bowtie2 output with samtools (optional)\r +- identify and remove duplicate reads with Picard MarkDuplicates (optional) \r +- generation of bigWig tracks for visualisation of alignment with deeptools bamCoverage. For single end design, reads are extended to the average fragment size\r +- characterization of insert size using Picard CollectInsertSizeMetrics (for paired end libraries only)\r +- characterize library complexity by PCR Bottleneck Coefficient using the GenomicAlignments R-package (for single read libraries only) \r +- characterize phantom peaks by cross correlation analysis using the spp R-package (for single read libraries only)\r +- peak calling of IP samples vs. corresponding input controls using MACS2\r +- peak annotation using the ChIPseeker R-package (optional)\r +- differential binding analysis using the diffbind R-package (optional). For this, input peak files must be given in *NGSpipe2go/tools/diffbind/targets_diffbind.txt* and contrasts of interest in *NGSpipe2go/tools/diffbind/contrasts_diffbind.txt* (see below)\r +\r +\r +### Pipeline-specific parameter settings\r +- targets.txt: tab-separated txt-file giving information about the analysed samples. The following columns are required: \r + - IP: bam file name of IP sample\r + - IPname: IP sample name to be used in plots and tables \r + - INPUT: bam file name of corresponding input control sample\r + - INPUTname: input sample name to be used in plots and tables \r + - group: variable for sample grouping (e.g. 
by condition)\r +\r +- essential.vars.groovy: essential parameter describing the experiment including: \r + - ESSENTIAL_PROJECT: your project folder name\r + - ESSENTIAL_BOWTIE_REF: full path to bowtie2 indexed reference genome (bowtie1 indexed reference genome if bowtie1 is selected as mapper)\r + - ESSENTIAL_BOWTIE_GENOME: full path to the reference genome FASTA file\r + - ESSENTIAL_BSGENOME: Bioconductor genome sequence annotation package\r + - ESSENTIAL_TXDB: Bioconductor transcript-related annotation package\r + - ESSENTIAL_ANNODB: Bioconductor genome annotation package\r + - ESSENTIAL_BLACKLIST: files with problematic 'blacklist regions' to be excluded from analysis (optional)\r + - ESSENTIAL_PAIRED: either paired end ("yes") or single read ("no") design\r + - ESSENTIAL_READLEN: read length of library\r + - ESSENTIAL_FRAGLEN: mean length of library inserts and also minimum peak size called by MACS2\r + - ESSENTIAL_THREADS: number of threads for parallel tasks\r + - ESSENTIAL_USE_BOWTIE1: if true use bowtie1 for read mapping, otherwise bowtie2 by default\r +\r +- additional (more specialized) parameter can be given in the var.groovy-files of the individual pipeline modules \r +\r +If differential binding analysis is selected it is required additionally:\r +\r +- contrasts_diffbind.txt: indicate intended group comparisions for differential binding analysis, e.g. *KOvsWT=(KO-WT)* if targets.txt contains the groups *KO* and *WT*. Give 1 contrast per line. 
\r +- targets_diffbind.txt: \r + - SampleID: IP sample name (as IPname in targets.txt)\r + - Condition: variable for sample grouping (as group in targets.txt)\r + - Replicate: number of replicate\r + - bamReads: bam file name of IP sample (as IP in targets.txt but with path relative to project directory)\r + - ControlID: input sample name (as INPUTname in targets.txt)\r + - bamControl: bam file name of corresponding input control sample (as INPUT in targets.txt but with path relative to project directory)\r + - Peaks: peak file name opbatined from peak caller (path relative to project directory)\r + - PeakCaller: name of peak caller (e.g. macs)\r +\r +## Programs required\r +- Bedtools\r +- Bowtie2\r +- deepTools\r +- encodeChIPqc (provided by another project from imbforge)\r +- FastQC\r +- MACS2\r +- MultiQC\r +- Picard\r +- R with packages ChIPSeeker, diffbind, GenomicAlignments, spp and genome annotation packages\r +- Samtools\r +- UCSC utilities\r +""" ; + schema1:keywords "ChIP-seq, bpipe, groovy" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "ChIP-seq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/59?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.196.4" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook ABC MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/196/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 101184 ; + schema1:creator , + ; + schema1:dateCreated "2023-07-26T09:39:03Z" ; + schema1:dateModified "2023-07-26T09:39:41Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/196?version=4" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook ABC MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_wf_amber_md_setup/blob/master/biobb_wf_amber_md_setup/notebooks/abcsetup/biobb_amber_ABC_setup.ipynb" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Pre-processing of mass spectrometry-based metabolomics data with quantification and identification based on MS1 and MS2 data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/57?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/metaboigniter" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/metaboigniter" ; + schema1:sdDatePublished "2024-07-12 13:22:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/57/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5180 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:00Z" ; + schema1:dateModified "2024-06-11T12:55:00Z" ; + schema1:description "Pre-processing of mass spectrometry-based metabolomics data with quantification and identification based on MS1 and MS2 data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/57?version=3" ; + schema1:keywords "Metabolomics, identification, quantification, mass-spectrometry, ms1, ms2" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/metaboigniter" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/57?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Analysis tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This workflow computes a set of Quality Control (QC) analyses on top of an uploaded MD trajectory. QC analyses include positional divergence (RMSd), change of shape (Radius of Gyration), identification of flexible regions (atomic/residue fluctuations), and identification of different molecular conformations (trajectory clustering).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.287.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_protein_md_analysis/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein MD Analysis tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/287/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5395 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T08:58:27Z" ; + schema1:dateModified "2023-04-14T08:59:26Z" ; + schema1:description """# Protein MD Analysis tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This workflow computes a set of Quality Control (QC) analyses on top of an uploaded MD trajectory. QC analyses include positional divergence (RMSd), change of shape (Radius of Gyration), identification of flexible regions (atomic/residue fluctuations), and identification of different molecular conformations (trajectory clustering).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/287?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein MD Analysis tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_protein_md_analysis/python/workflow.py" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Functional annotation of protein sequences" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/755?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Functional protein annotation using EggNOG-mapper and InterProScan" ; + schema1:sdDatePublished "2024-07-12 13:24:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/755/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8415 ; + schema1:creator ; + schema1:dateCreated "2024-02-15T11:44:05Z" ; + schema1:dateModified "2024-02-15T11:44:05Z" ; + schema1:description "Functional annotation of protein sequences" ; + schema1:isPartOf ; + schema1:keywords "genome-annotation" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Functional protein annotation using EggNOG-mapper and InterProScan" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/755?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "The pangenome graph construction pipeline renders a collection of sequences into a pangenome graph. Its goal is to build a graph that is locally directed and acyclic while preserving large-scale variation. 
Maintaining local linearity is important for interpretation, visualization, mapping, comparative genomics, and reuse of pangenome graphs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1007?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/pangenome" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/pangenome" ; + schema1:sdDatePublished "2024-07-12 13:20:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1007/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11068 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:06Z" ; + schema1:dateModified "2024-06-11T12:55:06Z" ; + schema1:description "The pangenome graph construction pipeline renders a collection of sequences into a pangenome graph. Its goal is to build a graph that is locally directed and acyclic while preserving large-scale variation. Maintaining local linearity is important for interpretation, visualization, mapping, comparative genomics, and reuse of pangenome graphs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1007?version=3" ; + schema1:keywords "pangenome" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/pangenome" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1007?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """A CWL-based pipeline for calling small germline variants, namely SNPs and small INDELs, by processing data from Whole-genome Sequencing (WGS) or Targeted Sequencing (e.g., Whole-exome sequencing; WES) experiments. 
\r +\r +On the respective GitHub folder are available:\r +\r +- The CWL wrappers and subworkflows for the workflow\r +- A pre-configured YAML template, based on validation analysis of publicly available HTS data\r +\r +Briefly, the workflow performs the following steps:\r +\r +1. Quality control of Illumina reads (FastQC)\r +2. Trimming of the reads (e.g., removal of adapter and/or low quality sequences) (Trim galore)\r +3. Mapping to reference genome (BWA-MEM)\r +4. Convertion of mapped reads from SAM (Sequence Alignment Map) to BAM (Binary Alignment Map) format (samtools)\r +5. Sorting mapped reads based on read names (samtools)\r +6. Adding information regarding paired end reads (e.g., CIGAR field information) (samtools)\r +7. Re-sorting mapped reads based on chromosomal coordinates (samtools)\r +8. Adding basic Read-Group information regarding sample name, platform unit, platform (e.g., ILLUMINA), library and identifier (picard AddOrReplaceReadGroups)\r +9. Marking PCR and/or optical duplicate reads (picard MarkDuplicates)\r +10. Collection of summary statistics (samtools) \r +11. Creation of indexes for coordinate-sorted BAM files to enable fast random access (samtools)\r +12. Splitting the reference genome into a predefined number of intervals for parallel processing (GATK SplitIntervals)\r +\r +At this point the application of multi-sample workflow follows, during which multiple samples are concatenated into a single, unified VCF (Variant Calling Format) file, which contains the variant information for all samples:\r +\r +13. Application of Base Quality Score Recalibration (BQSR) (GATK BaseRecalibrator and ApplyBQSR tools)\r +14. Variant calling in gVCF (genomic VCF) mode (-ERC GVCF) (GATK HaplotypeCaller) \r +15. Merging of all genomic interval-split gVCF files for each sample (GATK MergeVCFs)\r +16. Generation of the unified VCF file (GATK CombineGVCFs and GenotypeGVCFs tools)\r +17. 
Separate annotation for SNP and INDEL variants, using the Variant Quality Score Recalibration (VQSR) method (GATK VariantRecalibrator and ApplyVQSR tools)\r +18. Variant filtering based on the information added during VQSR and/or custom filters (bcftools)\r +19. Normalization of INDELs (split multiallelic sites) (bcftools)\r +20. Annotation of the final dataset of filtered variants with genomic, population-related and/or clinical information (ANNOVAR)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.526.1" ; + schema1:isBasedOn "https://github.com/BiodataAnalysisGroup/CWL_HTS_pipelines/tree/main/Germline_Variant_calling/multi-sample_analysis/with_BQSR_VQSR" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL-based (multi-sample) workflow for germline variant calling" ; + schema1:sdDatePublished "2024-07-12 13:32:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/526/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 34531 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-07-05T09:44:42Z" ; + schema1:dateModified "2023-07-05T09:45:12Z" ; + schema1:description """A CWL-based pipeline for calling small germline variants, namely SNPs and small INDELs, by processing data from Whole-genome Sequencing (WGS) or Targeted Sequencing (e.g., Whole-exome sequencing; WES) experiments. \r +\r +On the respective GitHub folder are available:\r +\r +- The CWL wrappers and subworkflows for the workflow\r +- A pre-configured YAML template, based on validation analysis of publicly available HTS data\r +\r +Briefly, the workflow performs the following steps:\r +\r +1. Quality control of Illumina reads (FastQC)\r +2. Trimming of the reads (e.g., removal of adapter and/or low quality sequences) (Trim galore)\r +3. 
Mapping to reference genome (BWA-MEM)\r +4. Convertion of mapped reads from SAM (Sequence Alignment Map) to BAM (Binary Alignment Map) format (samtools)\r +5. Sorting mapped reads based on read names (samtools)\r +6. Adding information regarding paired end reads (e.g., CIGAR field information) (samtools)\r +7. Re-sorting mapped reads based on chromosomal coordinates (samtools)\r +8. Adding basic Read-Group information regarding sample name, platform unit, platform (e.g., ILLUMINA), library and identifier (picard AddOrReplaceReadGroups)\r +9. Marking PCR and/or optical duplicate reads (picard MarkDuplicates)\r +10. Collection of summary statistics (samtools) \r +11. Creation of indexes for coordinate-sorted BAM files to enable fast random access (samtools)\r +12. Splitting the reference genome into a predefined number of intervals for parallel processing (GATK SplitIntervals)\r +\r +At this point the application of multi-sample workflow follows, during which multiple samples are concatenated into a single, unified VCF (Variant Calling Format) file, which contains the variant information for all samples:\r +\r +13. Application of Base Quality Score Recalibration (BQSR) (GATK BaseRecalibrator and ApplyBQSR tools)\r +14. Variant calling in gVCF (genomic VCF) mode (-ERC GVCF) (GATK HaplotypeCaller) \r +15. Merging of all genomic interval-split gVCF files for each sample (GATK MergeVCFs)\r +16. Generation of the unified VCF file (GATK CombineGVCFs and GenotypeGVCFs tools)\r +17. Separate annotation for SNP and INDEL variants, using the Variant Quality Score Recalibration (VQSR) method (GATK VariantRecalibrator and ApplyVQSR tools)\r +18. Variant filtering based on the information added during VQSR and/or custom filters (bcftools)\r +19. Normalization of INDELs (split multiallelic sites) (bcftools)\r +20. 
Annotation of the final dataset of filtered variants with genomic, population-related and/or clinical information (ANNOVAR)\r +""" ; + schema1:image ; + schema1:keywords "CWL, workflow, Germline, variant calling, Genomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "CWL-based (multi-sample) workflow for germline variant calling" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/526?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 237748 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=21" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-07-12 13:21:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=21" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 18600 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-12T03:03:06Z" ; + schema1:dateModified "2024-06-12T03:03:06Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=21" ; + schema1:version 21 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Alternative splicing analysis using RNA-seq." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1018?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/rnasplice" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnasplice" ; + schema1:sdDatePublished "2024-07-12 13:19:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1018/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14104 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:14Z" ; + schema1:dateModified "2024-06-11T12:55:14Z" ; + schema1:description "Alternative splicing analysis using RNA-seq." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/1018?version=4" ; + schema1:keywords "alternative-splicing, rna, rna-seq, splicing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnasplice" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1018?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=16" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-07-12 13:21:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=16" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12436 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:50Z" ; + schema1:dateModified "2024-06-11T12:54:50Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=16" ; + schema1:version 16 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "For integrative analysis of CAKUT multi-omics data DIABLO method of the mixOmics package (version 6.10.9. Singh et. al. 2019) was used with sPLS-DA (sparse Partial Least Squares Discriminant Analysis Discriminant Analysis) and PLS-DA classification." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/330?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for EJP-RD WP13 case-study: CAKUT peptidome and miRNome data analysis using the DIABLO and PLS-DA methods from the mixOmics R package" ; + schema1:sdDatePublished "2024-07-12 13:34:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/330/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 999 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2022-04-20T16:43:51Z" ; + schema1:dateModified "2023-01-16T13:59:42Z" ; + schema1:description "For integrative analysis of CAKUT multi-omics data DIABLO method of the mixOmics package (version 6.10.9. Singh et. al. 2019) was used with sPLS-DA (sparse Partial Least Squares Discriminant Analysis Discriminant Analysis) and PLS-DA classification." ; + schema1:keywords "rare diseases" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "EJP-RD WP13 case-study: CAKUT peptidome and miRNome data analysis using the DIABLO and PLS-DA methods from the mixOmics R package" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/330?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-04-09T10:22:36.835089" ; + schema1:hasPart , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/baredsc" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + ; + schema1:name "baredsc/baredSC-1d-logNorm" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "baredsc/baredSC-2d-logNorm" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/baredsc" ; + schema1:version "0.4" . + + a schema1:Dataset ; + schema1:datePublished "2023-11-16T14:32:18.212018" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Purge-duplicate-contigs-VGP6/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicate-contigs-VGP6/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:version "0.3.2" . + + a schema1:Dataset ; + schema1:datePublished "2022-01-27T14:48:01+00:00" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/crs4/fair-crcc-send-data" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "" . + + a schema1:ImageObject, + schema1:MediaObject ; + schema1:name "Workflow diagram" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "BioTranslator performs sequentially pathway analysis and gene prioritization: A specific operator is executed for each task to translate the input gene set into semantic terms and pinpoint the pivotal-role genes on the derived semantic network. The output consists of the set of statistically significant semantic terms and the associated hub genes (the gene signature), prioritized according to their involvement in the underlying semantic topology." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/189?version=3" ; + schema1:isBasedOn "http://www.biotranslator.gr:8080/u/thodk/w/biotranslator-workflow" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for BioTranslator Workflow" ; + schema1:sdDatePublished "2024-07-12 13:36:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/189/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3960 ; + schema1:dateCreated "2021-09-24T15:16:59Z" ; + schema1:dateModified "2023-01-16T13:52:49Z" ; + schema1:description "BioTranslator performs sequentially pathway analysis and gene prioritization: A specific operator is executed for each task to translate the input gene set into semantic terms and pinpoint the pivotal-role genes on the derived semantic network. The output consists of the set of statistically significant semantic terms and the associated hub genes (the gene signature), prioritized according to their involvement in the underlying semantic topology." 
; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/189?version=2" ; + schema1:keywords "Semantic Network Analysis, Gene Prioritization, Pathway Analysis, Biomedical Ontologies, Semantic Interpretation" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "BioTranslator Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/189?version=3" ; + schema1:version 3 ; + ns1:input ; + ns1:output , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 44291 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Proteogenomics database creation workflow using pypgatk framework. " ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1008?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/pgdb" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/pgdb" ; + schema1:sdDatePublished "2024-07-12 13:20:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1008/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7816 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:07Z" ; + schema1:dateModified "2024-06-11T12:55:07Z" ; + schema1:description "Proteogenomics database creation workflow using pypgatk framework. " ; + schema1:keywords "cosmic, gnomad, protein-databases, proteogenomics, Proteomics, pypgatk" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/pgdb" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1008?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "call and score variants from WGS/WES of rare disease patients" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1014?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/raredisease" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/raredisease" ; + schema1:sdDatePublished "2024-07-12 13:19:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1014/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12171 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:08Z" ; + schema1:dateModified "2024-06-11T12:55:08Z" ; + schema1:description "call and score variants from WGS/WES of rare disease patients" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1014?version=5" ; + schema1:keywords "diagnostics, rare-disease, snv, structural-variants, variant-annotation, variant-calling, wes, WGS" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/raredisease" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1014?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description "Performs scaffolding using Bionano Data. Part of VGP assembly pipeline." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/322?version=1" ; + schema1:isBasedOn "https://github.com/Delphine-L/iwc/tree/VGP/workflows/VGP-assembly-v2" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for VGP hybrid scaffolding with Bionano optical maps" ; + schema1:sdDatePublished "2024-07-12 13:35:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/322/ro_crate?version=1" . 
+ + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 4265 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 26876 ; + schema1:creator ; + schema1:dateCreated "2022-04-05T22:21:34Z" ; + schema1:dateModified "2023-01-16T13:59:34Z" ; + schema1:description "Performs scaffolding using Bionano Data. Part of VGP assembly pipeline." ; + schema1:isPartOf ; + schema1:keywords "vgp, Assembly, Galaxy" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "VGP hybrid scaffolding with Bionano optical maps" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/322?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Correlation between Phenotypic and In Silico Detection of Antimicrobial Resistance in Salmonella enterica in Canada Using Staramr. \r +\r +Doi: [10.3390/microorganisms10020292](https://doi.org/10.3390/microorganisms10020292)\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/470?version=1" ; + schema1:isBasedOn "https://usegalaxy.eu/u/dennisd/w/103390microorganisms10020292" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Workflow 4: Staramr" ; + schema1:sdDatePublished "2024-07-12 13:26:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/470/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 23190 ; + schema1:dateCreated "2023-05-11T08:29:41Z" ; + schema1:dateModified "2023-10-30T16:54:19Z" ; + schema1:description """Correlation between Phenotypic and In Silico Detection of Antimicrobial Resistance in Salmonella enterica in Canada Using Staramr. \r +\r +Doi: [10.3390/microorganisms10020292](https://doi.org/10.3390/microorganisms10020292)\r +""" ; + schema1:keywords "AMR, AMR-detection, 10.3390/microorganisms10020292, Bioinformatics, antimicrobial resistance" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Workflow 4: Staramr" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/470?version=1" ; + schema1:version 1 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Performs phylogenetic placement with EPA-NG" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1009?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/phyloplace" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/phyloplace" ; + schema1:sdDatePublished "2024-07-12 13:20:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1009/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8100 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:07Z" ; + schema1:dateModified "2024-06-11T12:55:07Z" ; + schema1:description "Performs phylogenetic placement with EPA-NG" ; + schema1:keywords "evolution, evolutionary-tree, phylogenetic-placement, phylogenetics, sequence-classification, taxonomy-assignment" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/phyloplace" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1009?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=21" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-07-12 13:22:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=21" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10412 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:13Z" ; + schema1:dateModified "2024-06-11T12:55:13Z" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=21" ; + schema1:version 21 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """## Description\r +\r +The workflow takes an input file with Cancer Driver Genes predictions (i.e. the results provided by a participant), computes a set of metrics, and compares them against the data currently stored in OpenEBench within the TCGA community. Two assessment metrics are provided for that predictions. Also, some plots (which are optional) that allow to visualize the performance of the tool are generated. The workflow consists in three standard steps, defined by OpenEBench. The tools needed to run these steps are containerised in three Docker images, whose recipes are available in the [TCGA_benchmarking_dockers](https://github.com/inab/TCGA_benchmarking_dockers ) repository and the images are stored in the [INB GitLab container registry](https://gitlab.bsc.es/inb/elixir/openebench/workflows/tcga_benchmarking_dockers/container_registry) . Separated instances are spawned from these images for each step:\r +1. **Validation**: the input file format is checked and, if required, the content of the file is validated (e.g check whether the submitted gene IDs exist)\r +2. **Metrics Generation**: the predictions are compared with the 'Gold Standards' provided by the community, which results in two performance metrics - precision (Positive Predictive Value) and recall(True Positive Rate).\r +3. **Consolidation**: the benchmark itself is performed by merging the tool metrics with the rest of TCGA data. 
The results are provided in JSON format and SVG format (scatter plot).\r +\r +![OpenEBench benchmarking workflow](https://raw.githubusercontent.com/inab/TCGA_benchmarking_workflow/1.0.8/workflow_schema.jpg)\r +\r +## Data\r +\r +* [TCGA_sample_data](./TCGA_sample_data) folder contains all the reference data required by the steps. It is derived from the manuscript:\r +[Comprehensive Characterization of Cancer Driver Genes and Mutations](https://www.cell.com/cell/fulltext/S0092-8674%2818%2930237-X?code=cell-site), Bailey et al, 2018, Cell [![doi:10.1016/j.cell.2018.02.060](https://img.shields.io/badge/doi-10.1016%2Fj.cell.2018.02.060-green.svg)](https://doi.org/10.1016/j.cell.2018.02.060) \r +* [TCGA_sample_out](./TCGA_sample_out) folder contains an example output for a worklow run, with two cancer types / challenges selected (ACC, BRCA). Results obtained from the default execution should be similar to those ones available in this directory. Results found in [TCGA_sample_out/results](./TCGA_sample_out/results) can be visualized in the browser using [`benchmarking_workflows_results_visualizer` javascript library](https://github.com/inab/benchmarking_workflows_results_visualizer).\r +\r +## Requirements\r +This workflow depends on three tools that have to be installed before you can run it:\r +* [Git](https://git-scm.com/downloads): Used to download the workflow from GitHub.\r +* [Docker](https://docs.docker.com/get-docker/): The Docker Engine is used under the hood to execute the containerised steps of the benchmarking workflow.\r +* [Nextflow](https://www.nextflow.io/): Is the technology used to write and execute the benchmarking workflow. Note that it depends on Bash (>=3.2) and Java (>=8 , <=17). 
We provide the script [run_local_nextflow.bash](run_local_nextflow.bash) that automates their installation for local testing.\r +\r +Check that these tools are available in your environment:\r +```\r +# Git\r +> which git\r +/usr/bin/git\r +> git --version\r +git version 2.26.2\r +\r +# Docker\r +> which docker\r +/usr/bin/docker\r +> docker --version\r +Docker version 20.10.9-ce, build 79ea9d308018\r +\r +# Nextflow\r +> which nextflow\r +/home/myuser/bin/nextflow\r +> nextflow -version\r +\r + N E X T F L O W\r + version 21.04.1 build 5556\r + created 14-05-2021 15:20 UTC (17:20 CEST)\r + cite doi:10.1038/nbt.3820\r + http://nextflow.io\r +```\r +In the case of docker, apart from being installed the daemon has to be running. On Linux distributions that use `Systemd` for service management, which includes the most popular ones as of 2021 (Ubuntu, Debian, CentOs, Red Hat, OpenSuse), the `systemctl` command can be used to check its status and manage it:\r +\r +```\r +# Check status of docker daemon\r +> sudo systemctl status docker\r +● docker.service - Docker Application Container Engine\r + Loaded: loaded (/usr/lib/systemd/system/docker.service; disabled; vendor preset: disabled)\r + Active: inactive (dead)\r + Docs: http://docs.docker.com\r +\r +# Start docker daemon\r +> sudo systemctl start docker\r +```\r +\r +### Download workflow\r +Simply clone the repository and check out the latest tag (currently `1.0.8`):\r +\r +```\r +# Clone repository\r +> git clone https://github.com/inab/TCGA_benchmarking_dockers.git\r +\r +# Move to new directory\r +cd TCGA_benchmarking_workflow/\r +\r +# Checkout version 1.0.8\r +> git checkout 1.0.8 -b 1.0.8\r +```\r +\r +## Usage\r +The workflow can be run workflow in two different ways:\r +* Standard: `nextflow run main.nf -profile docker`\r +* Using the bash script that installs Java and Nextflow:`./run_local_nextflow.bash run main.nf -profile docker`.\r +\r +Arguments specifications:\r +```\r +Usage:\r +Run the pipeline with 
default parameters:\r +nextflow run main.nf -profile docker\r +\r +Run with user parameters:\r +nextflow run main.nf -profile docker --predictionsFile {driver.genes.file} --public_ref_dir {validation.reference.file} --participant_name {tool.name} --metrics_ref_dir {gold.standards.dir} --cancer_types {analyzed.cancer.types} --assess_dir {benchmark.data.dir} --results_dir {output.dir}\r +\r +Mandatory arguments:\r + --input List of cancer genes prediction\r + --community_id Name or OEB permanent ID for the benchmarking community\r + --public_ref_dir Directory with list of cancer genes used to validate the predictions\r + --participant_id Name of the tool used for prediction\r + --goldstandard_dir Dir that contains metrics reference datasets for all cancer types\r + --challenges_ids List of types of cancer selected by the user, separated by spaces\r + --assess_dir Dir where the data for the benchmark are stored\r +\r +Other options:\r + --validation_result The output directory where the results from validation step will be saved\r + --augmented_assess_dir Dir where the augmented data for the benchmark are stored\r + --assessment_results The output directory where the results from the computed metrics step will be saved\r + --outdir The output directory where the consolidation of the benchmark will be saved\r + --statsdir The output directory with nextflow statistics\r + --data_model_export_dir The output dir where json file with benchmarking data model contents will be saved\r + --otherdir The output directory where custom results will be saved (no directory inside)\r +Flags:\r + --help Display this message\r +```\r +\r +Default input parameters and Docker images to use for each step can be specified in the [config](./nextflow.config) file.\r +\r +**NOTE: In order to make your workflow compatible with the [OpenEBench VRE Nextflow Executor](https://github.com/inab/vre-process_nextflow-executor), please make sure to use the same parameter names in your workflow.**\r 
+""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/244?version=4" ; + schema1:isBasedOn "https://github.com/inab/TCGA_benchmarking_workflow/tree/1.0.8" ; + schema1:license "LGPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for OpenEBench TCGA Cancer Driver Genes benchmarking workflow" ; + schema1:sdDatePublished "2024-07-12 13:36:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/244/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6421 ; + schema1:creator , + , + ; + schema1:dateCreated "2021-11-29T15:21:23Z" ; + schema1:dateModified "2023-01-16T13:55:10Z" ; + schema1:description """## Description\r +\r +The workflow takes an input file with Cancer Driver Genes predictions (i.e. the results provided by a participant), computes a set of metrics, and compares them against the data currently stored in OpenEBench within the TCGA community. Two assessment metrics are provided for that predictions. Also, some plots (which are optional) that allow to visualize the performance of the tool are generated. The workflow consists in three standard steps, defined by OpenEBench. The tools needed to run these steps are containerised in three Docker images, whose recipes are available in the [TCGA_benchmarking_dockers](https://github.com/inab/TCGA_benchmarking_dockers ) repository and the images are stored in the [INB GitLab container registry](https://gitlab.bsc.es/inb/elixir/openebench/workflows/tcga_benchmarking_dockers/container_registry) . Separated instances are spawned from these images for each step:\r +1. **Validation**: the input file format is checked and, if required, the content of the file is validated (e.g check whether the submitted gene IDs exist)\r +2. 
**Metrics Generation**: the predictions are compared with the 'Gold Standards' provided by the community, which results in two performance metrics - precision (Positive Predictive Value) and recall(True Positive Rate).\r +3. **Consolidation**: the benchmark itself is performed by merging the tool metrics with the rest of TCGA data. The results are provided in JSON format and SVG format (scatter plot).\r +\r +![OpenEBench benchmarking workflow](https://raw.githubusercontent.com/inab/TCGA_benchmarking_workflow/1.0.8/workflow_schema.jpg)\r +\r +## Data\r +\r +* [TCGA_sample_data](./TCGA_sample_data) folder contains all the reference data required by the steps. It is derived from the manuscript:\r +[Comprehensive Characterization of Cancer Driver Genes and Mutations](https://www.cell.com/cell/fulltext/S0092-8674%2818%2930237-X?code=cell-site), Bailey et al, 2018, Cell [![doi:10.1016/j.cell.2018.02.060](https://img.shields.io/badge/doi-10.1016%2Fj.cell.2018.02.060-green.svg)](https://doi.org/10.1016/j.cell.2018.02.060) \r +* [TCGA_sample_out](./TCGA_sample_out) folder contains an example output for a worklow run, with two cancer types / challenges selected (ACC, BRCA). Results obtained from the default execution should be similar to those ones available in this directory. Results found in [TCGA_sample_out/results](./TCGA_sample_out/results) can be visualized in the browser using [`benchmarking_workflows_results_visualizer` javascript library](https://github.com/inab/benchmarking_workflows_results_visualizer).\r +\r +## Requirements\r +This workflow depends on three tools that have to be installed before you can run it:\r +* [Git](https://git-scm.com/downloads): Used to download the workflow from GitHub.\r +* [Docker](https://docs.docker.com/get-docker/): The Docker Engine is used under the hood to execute the containerised steps of the benchmarking workflow.\r +* [Nextflow](https://www.nextflow.io/): Is the technology used to write and execute the benchmarking workflow. 
Note that it depends on Bash (>=3.2) and Java (>=8 , <=17). We provide the script [run_local_nextflow.bash](run_local_nextflow.bash) that automates their installation for local testing.\r +\r +Check that these tools are available in your environment:\r +```\r +# Git\r +> which git\r +/usr/bin/git\r +> git --version\r +git version 2.26.2\r +\r +# Docker\r +> which docker\r +/usr/bin/docker\r +> docker --version\r +Docker version 20.10.9-ce, build 79ea9d308018\r +\r +# Nextflow\r +> which nextflow\r +/home/myuser/bin/nextflow\r +> nextflow -version\r +\r + N E X T F L O W\r + version 21.04.1 build 5556\r + created 14-05-2021 15:20 UTC (17:20 CEST)\r + cite doi:10.1038/nbt.3820\r + http://nextflow.io\r +```\r +In the case of docker, apart from being installed the daemon has to be running. On Linux distributions that use `Systemd` for service management, which includes the most popular ones as of 2021 (Ubuntu, Debian, CentOs, Red Hat, OpenSuse), the `systemctl` command can be used to check its status and manage it:\r +\r +```\r +# Check status of docker daemon\r +> sudo systemctl status docker\r +● docker.service - Docker Application Container Engine\r + Loaded: loaded (/usr/lib/systemd/system/docker.service; disabled; vendor preset: disabled)\r + Active: inactive (dead)\r + Docs: http://docs.docker.com\r +\r +# Start docker daemon\r +> sudo systemctl start docker\r +```\r +\r +### Download workflow\r +Simply clone the repository and check out the latest tag (currently `1.0.8`):\r +\r +```\r +# Clone repository\r +> git clone https://github.com/inab/TCGA_benchmarking_dockers.git\r +\r +# Move to new directory\r +cd TCGA_benchmarking_workflow/\r +\r +# Checkout version 1.0.8\r +> git checkout 1.0.8 -b 1.0.8\r +```\r +\r +## Usage\r +The workflow can be run workflow in two different ways:\r +* Standard: `nextflow run main.nf -profile docker`\r +* Using the bash script that installs Java and Nextflow:`./run_local_nextflow.bash run main.nf -profile docker`.\r +\r 
+Arguments specifications:\r +```\r +Usage:\r +Run the pipeline with default parameters:\r +nextflow run main.nf -profile docker\r +\r +Run with user parameters:\r +nextflow run main.nf -profile docker --predictionsFile {driver.genes.file} --public_ref_dir {validation.reference.file} --participant_name {tool.name} --metrics_ref_dir {gold.standards.dir} --cancer_types {analyzed.cancer.types} --assess_dir {benchmark.data.dir} --results_dir {output.dir}\r +\r +Mandatory arguments:\r + --input List of cancer genes prediction\r + --community_id Name or OEB permanent ID for the benchmarking community\r + --public_ref_dir Directory with list of cancer genes used to validate the predictions\r + --participant_id Name of the tool used for prediction\r + --goldstandard_dir Dir that contains metrics reference datasets for all cancer types\r + --challenges_ids List of types of cancer selected by the user, separated by spaces\r + --assess_dir Dir where the data for the benchmark are stored\r +\r +Other options:\r + --validation_result The output directory where the results from validation step will be saved\r + --augmented_assess_dir Dir where the augmented data for the benchmark are stored\r + --assessment_results The output directory where the results from the computed metrics step will be saved\r + --outdir The output directory where the consolidation of the benchmark will be saved\r + --statsdir The output directory with nextflow statistics\r + --data_model_export_dir The output dir where json file with benchmarking data model contents will be saved\r + --otherdir The output directory where custom results will be saved (no directory inside)\r +Flags:\r + --help Display this message\r +```\r +\r +Default input parameters and Docker images to use for each step can be specified in the [config](./nextflow.config) file.\r +\r +**NOTE: In order to make your workflow compatible with the [OpenEBench VRE Nextflow Executor](https://github.com/inab/vre-process_nextflow-executor), 
please make sure to use the same parameter names in your workflow.**\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/244?version=3" ; + schema1:keywords "tcga, openebench, benchmarking" ; + schema1:license "https://spdx.org/licenses/LGPL-3.0" ; + schema1:name "OpenEBench TCGA Cancer Driver Genes benchmarking workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/244?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-protein-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.277.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/277/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 59528 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-14T10:15:55Z" ; + schema1:dateModified "2022-11-22T09:41:33Z" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. 
Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-protein-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/277?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_md_setup/galaxy/biobb_wf_md_setup.ga" ; + schema1:version 1 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-07-12 13:18:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8250 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:39Z" ; + schema1:dateModified "2024-06-11T12:54:39Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + schema1:datePublished "2024-03-06T19:27:22.822246" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Purge-duplicates-one-haplotype-VGP6b/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=20" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-07-12 13:21:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=20" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13448 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:50Z" ; + schema1:dateModified "2024-06-11T12:54:50Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=20" ; + schema1:version 20 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A pipeline for ortholog fetching and analysis" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1041?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/reportho" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/reportho" ; + schema1:sdDatePublished "2024-07-12 13:18:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1041/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11016 ; + schema1:creator ; + schema1:dateCreated "2024-06-12T03:03:07Z" ; + schema1:dateModified "2024-06-12T03:03:07Z" ; + schema1:description "A pipeline for ortholog fetching and analysis" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/reportho" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1041?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). 
\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.56.5" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_protein-complex_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:34:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/56/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 156554 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T08:04:53Z" ; + schema1:dateModified "2023-04-14T08:06:01Z" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/56?version=6" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + 
schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_protein-complex_md_setup/master/biobb_wf_protein-complex_md_setup/notebooks/biobb_Protein-Complex_MDsetup_tutorial.ipynb" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """A porting of the Trinity RNA assembly pipeline, https://trinityrnaseq.github.io, that uses Nextflow to handle the underlying sub-tasks.\r +This enables additional capabilities to better use HPC resources, such as packing of tasks to fill up nodes and use of node-local disks to improve I/O.\r +By design, the pipeline separates the workflow logic (main file) and the cluster-specific configuration (config files), improving portability.\r +\r +Based on a pipeline by Sydney Informatics Hub: https://github.com/Sydney-Informatics-Hub/SIH-Raijin-Trinity""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/114?version=2" ; + schema1:isBasedOn "https://github.com/marcodelapierre/trinity-nf" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Trinity RNA Assembly" ; + schema1:sdDatePublished "2024-07-12 13:37:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/114/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8372 ; + schema1:creator ; + schema1:dateCreated "2021-03-26T12:58:59Z" ; + schema1:dateModified "2023-01-16T13:49:39Z" ; + schema1:description """A porting of the Trinity RNA assembly pipeline, https://trinityrnaseq.github.io, that uses Nextflow to handle the underlying sub-tasks.\r +This enables additional capabilities to better use HPC resources, such as packing of tasks to fill up nodes and use of node-local disks to improve I/O.\r +By design, the pipeline separates the workflow logic (main file) and the cluster-specific configuration (config files), improving portability.\r +\r +Based on a pipeline by Sydney Informatics Hub: https://github.com/Sydney-Informatics-Hub/SIH-Raijin-Trinity""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/114?version=1" ; + schema1:isPartOf ; + schema1:keywords "Assembly, Transcriptomics, RNASEQ, Nextflow" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Trinity RNA Assembly" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/114?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=26" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-07-12 13:22:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=26" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12883 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:13Z" ; + schema1:dateModified "2024-06-11T12:55:13Z" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=26" ; + schema1:version 26 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1023?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/smrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/smrnaseq" ; + schema1:sdDatePublished "2024-07-12 13:18:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1023/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9386 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:18Z" ; + schema1:dateModified "2024-06-11T12:55:18Z" ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/1023?version=10" ; + schema1:keywords "small-rna, smrna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/smrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1023?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """With this galaxy pipeline you can use Salmonella sp. next generation sequencing results to predict bacterial AMR phenotypes and compare the results against gold standard Salmonella sp. phenotypes obtained from food.\r +\r +This pipeline is based on the work of the National Food Agency of Canada. \r +Doi: [10.3389/fmicb.2020.00549](https://doi.org/10.3389/fmicb.2020.00549)""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/407?version=1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Workflow 3: AMR - SeqSero2/SISTR" ; + schema1:sdDatePublished "2024-07-12 13:23:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/407/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 127987 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 37578 ; + schema1:dateCreated "2022-11-24T13:42:42Z" ; + schema1:dateModified "2024-05-07T09:45:19Z" ; + schema1:description """With this galaxy pipeline you can use Salmonella sp. next generation sequencing results to predict bacterial AMR phenotypes and compare the results against gold standard Salmonella sp. phenotypes obtained from food.\r +\r +This pipeline is based on the work of the National Food Agency of Canada. 
\r +Doi: [10.3389/fmicb.2020.00549](https://doi.org/10.3389/fmicb.2020.00549)""" ; + schema1:image ; + schema1:keywords "Bioinformatics, antimicrobial resistance" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Workflow 3: AMR - SeqSero2/SISTR" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/407?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for analysis of Molecular Pixelation assays" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1010?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/pixelator" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/pixelator" ; + schema1:sdDatePublished "2024-07-12 13:20:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1010/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10317 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:07Z" ; + schema1:dateModified "2024-06-11T12:55:07Z" ; + schema1:description "Pipeline for analysis of Molecular Pixelation assays" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1010?version=5" ; + schema1:keywords "molecular-pixelation, pixelator, pixelgen-technologies, proteins, single-cell, single-cell-omics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/pixelator" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1010?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and modern ancient DNA pipeline in Nextflow and with cloud support." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-07-12 13:21:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3623 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:49Z" ; + schema1:dateModified "2024-06-11T12:54:49Z" ; + schema1:description "A fully reproducible and modern ancient DNA pipeline in Nextflow and with cloud support." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1023?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/smrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/smrnaseq" ; + schema1:sdDatePublished "2024-07-12 13:18:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1023/ro_crate?version=10" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12414 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:19Z" ; + schema1:dateModified "2024-06-11T12:55:19Z" ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1023?version=10" ; + schema1:keywords "small-rna, smrna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/smrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1023?version=10" ; + schema1:version 10 . + + a schema1:Dataset ; + schema1:datePublished "2024-07-10T17:38:53.055554" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/bacterial_genome_annotation" ; + schema1:license "GPL-3.0-or-later" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "bacterial_genome_annotation/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-07-12 13:18:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5432 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:01Z" ; + schema1:dateModified "2024-06-11T12:55:01Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Summary\r +\r +This tutorial aims to illustrate the process of setting up a simulation system containing a protein in complex with a ligand, step by step, using the BioExcel Building Blocks library (biobb). The particular example used is the T4 lysozyme L99A/M102Q protein (PDB code 3HTB), in complex with the 2-propylphenol small molecule (3-letter Code JZ4).\r +\r +Workflow engine is a jupyter notebook. It can be run in binder, following the link given, or locally. Auxiliar libraries used are: nb\\_conda_kernels, nglview, ipywidgets, os, plotly, and simpletraj. Environment can be setup using the included environment.yml file.\r +\r +\r +# Parameters\r +\r +## Inputs\r +\r +Parameters needed to configure the workflow:\r +\r +* **pdbCode**: PDB code of the protein-ligand complex structure (e.g. 3HTB)\r +\r +* **ligandCode**: Small molecule 3-letter code for the ligand structure (e.g. 
JZ4)\r +\r +* **mol\\_charge**: Charge of the small molecule, needed to add hydrogen atoms.\r +\r +## Outputs\r +\r +Output files generated (named according to the input parameters given above):\r +\r +* **output\\_md\\_gro**: final structure of the MD setup protocol\r +\r +* **output\\_md\\_trr**: final trajectory of the MD setup protocol\r +\r +* **output\\_md\\_cpt**: final checkpoint file\r +\r +* **output\\_gppmd\\_tpr**: final tpr file\r +\r +* **output\\_genion\\_top\\_zip**: final topology of the MD system""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.56.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_protein-complex_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb) (jupyter notebook)" ; + schema1:sdDatePublished "2024-07-12 13:34:04 +0100" ; + schema1:url "https://workflowhub.eu/workflows/56/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 68187 ; + schema1:creator , + ; + schema1:dateCreated "2021-05-06T15:40:04Z" ; + schema1:dateModified "2021-05-13T08:15:05Z" ; + schema1:description """# Summary\r +\r +This tutorial aims to illustrate the process of setting up a simulation system containing a protein in complex with a ligand, step by step, using the BioExcel Building Blocks library (biobb). The particular example used is the T4 lysozyme L99A/M102Q protein (PDB code 3HTB), in complex with the 2-propylphenol small molecule (3-letter Code JZ4).\r +\r +Workflow engine is a jupyter notebook. It can be run in binder, following the link given, or locally. Auxiliar libraries used are: nb\\_conda_kernels, nglview, ipywidgets, os, plotly, and simpletraj. 
Environment can be setup using the included environment.yml file.\r +\r +\r +# Parameters\r +\r +## Inputs\r +\r +Parameters needed to configure the workflow:\r +\r +* **pdbCode**: PDB code of the protein-ligand complex structure (e.g. 3HTB)\r +\r +* **ligandCode**: Small molecule 3-letter code for the ligand structure (e.g. JZ4)\r +\r +* **mol\\_charge**: Charge of the small molecule, needed to add hydrogen atoms.\r +\r +## Outputs\r +\r +Output files generated (named according to the input parameters given above):\r +\r +* **output\\_md\\_gro**: final structure of the MD setup protocol\r +\r +* **output\\_md\\_trr**: final trajectory of the MD setup protocol\r +\r +* **output\\_md\\_cpt**: final checkpoint file\r +\r +* **output\\_gppmd\\_tpr**: final tpr file\r +\r +* **output\\_genion\\_top\\_zip**: final topology of the MD system""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/56?version=6" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb) (jupyter notebook)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/56?version=2" ; + schema1:version 2 . + + a schema1:MediaObject ; + schema1:about ; + schema1:author , + ; + schema1:encodingFormat "text/markdown" . + + a schema1:Dataset ; + schema1:creator , + , + , + , + , + , + ; + schema1:datePublished "2023-04-13" ; + schema1:description "Study protocol" ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.7551181" ; + schema1:name "BY-COVID - WP5 - Baseline Use Case: SARS-CoV-2 vaccine effectiveness assessment - Study protocol" ; + schema1:url "https://zenodo.org/record/7825979" ; + schema1:version "1.0.3" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.130.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Amber Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:24:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/130/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 66118 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T08:29:40Z" ; + schema1:dateModified "2023-04-14T08:30:44Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/130?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Amber Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://raw.githubusercontent.com/bioexcel/biobb_wf_amber_md_setup/master/biobb_wf_amber_md_setup/notebooks/mdsetup/biobb_amber_setup_notebook.ipynb" ; + schema1:version 3 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 22763 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# A workflow for marine Genomic Observatories data analysis\r +## An EOSC-Life project\r +\r +The workflows developed in the framework of this project are based on `pipeline-v5` of the MGnify resource. \r +This branch is a child of the [`pipeline_5.1`](https://github.com/EBI-Metagenomics/pipeline-v5) branch \r +that contains a part of the CWL descriptions of the MGnify pipeline version 5.1.\r +\r +The following comes from the initial repo and describes how to get the databases required.\r +\r +---\r +![logo](https://raw.githubusercontent.com/hariszaf/metaGOflow-use-case/gh-pages/assets/img/metaGOflow_logo_italics.png)\r +\r +\r +## An EOSC-Life project\r +\r +The workflows developed in the framework of this project are based on `pipeline-v5` of the MGnify resource.\r +\r +> This branch is a child of the [`pipeline_5.1`](https://github.com/hariszaf/pipeline-v5/tree/pipeline_5.1) branch\r +> that contains all CWL descriptions of the MGnify pipeline version 5.1.\r +\r +## Dependencies\r +\r +To run metaGOflow you need to make sure you have the following set on your computing environmnet first:\r +\r +- python3 [v 3.8+]\r +- [Docker](https://www.docker.com) [v 19.+] or [Singularity](https://apptainer.org) [v 3.7.+]/[Apptainer](https://apptainer.org) [v 1.+]\r +- [cwltool](https://github.com/common-workflow-language/cwltool) [v 3.+]\r +- 
[rdflib](https://rdflib.readthedocs.io/en/stable/) [v 6.+]\r +- [rdflib-jsonld](https://pypi.org/project/rdflib-jsonld/) [v 0.6.2]\r +- [ro-crate-py](https://github.com/ResearchObject/ro-crate-py) [v 0.7.0]\r +- [pyyaml](https://pypi.org/project/PyYAML/) [v 6.0]\r +- [Node.js](https://nodejs.org/) [v 10.24.0+]\r +- Available storage ~235GB for databases\r +\r +### Storage while running\r +\r +Depending on the analysis you are about to run, disk requirements vary.\r +Indicatively, you may have a look at the metaGOflow publication for computing resources used in various cases.\r +\r +## Installation\r +\r +### Get the EOSC-Life marine GOs workflow\r +\r +```bash\r +git clone https://github.com/emo-bon/MetaGOflow\r +cd MetaGOflow\r +```\r +\r +### Download necessary databases (~235GB)\r +\r +You can download databases for the EOSC-Life GOs workflow by running the\r +`download_dbs.sh` script under the `Installation` folder.\r +\r +```bash\r +bash Installation/download_dbs.sh -f [Output Directory e.g. ref-dbs] \r +```\r +If you have one or more already in your system, then create a symbolic link pointing\r +at the `ref-dbs` folder or at one of its subfolders/files.\r +\r +The final structure of the DB directory should be like the following:\r +\r +````bash\r +user@server:~/MetaGOflow: ls ref-dbs/\r +db_kofam/ diamond/ eggnog/ GO-slim/ interproscan-5.57-90.0/ kegg_pathways/ kofam_ko_desc.tsv Rfam/ silva_lsu/ silva_ssu/\r +````\r +\r +## How to run\r +\r +### Ensure that `Node.js` is installed on your system before running metaGOflow\r +\r +If you have root access on your system, you can run the commands below to install it:\r +\r +##### DEBIAN/UBUNTU\r +```bash\r +sudo apt-get update -y\r +sudo apt-get install -y nodejs\r +```\r +\r +##### RH/CentOS\r +```bash\r +sudo yum install rh-nodejs (e.g. 
rh-nodejs10)\r +```\r +\r +### Set up the environment\r +\r +#### Run once - Setup environment\r +\r +- ```bash\r + conda create -n EOSC-CWL python=3.8\r + ```\r +\r +- ```bash\r + conda activate EOSC-CWL\r + ```\r +\r +- ```bash\r + pip install cwlref-runner cwltool[all] rdflib-jsonld rocrate pyyaml\r +\r + ```\r +\r +#### Run every time\r +\r +```bash\r +conda activate EOSC-CWL\r +``` \r +\r +### Run the workflow\r +\r +- Edit the `config.yml` file to set the parameter values of your choice. For selecting all the steps, then set to `true` the variables in lines [2-6].\r +\r +#### Using Singularity\r +\r +##### Standalone\r +- run:\r + ```bash\r + ./run_wf.sh -s -n osd-short -d short-test-case -f test_input/wgs-paired-SRR1620013_1.fastq.gz -r test_input/wgs-paired-SRR1620013_2.fastq.gz\r + ``\r +\r +##### Using a cluster with a queueing system (e.g. SLURM)\r +\r +- Create a job file (e.g., SBATCH file)\r +\r +- Enable Singularity, e.g. module load Singularity & all other dependencies \r +\r +- Add the run line to the job file\r +\r +\r +#### Using Docker\r +\r +##### Standalone\r +- run:\r + ``` bash\r + ./run_wf.sh -n osd-short -d short-test-case -f test_input/wgs-paired-SRR1620013_1.fastq.gz -r test_input/wgs-paired-SRR1620013_2.fastq.gz\r + ```\r + HINT: If you are using Docker, you may need to run the above command without the `-s' flag.\r +\r +## Testing samples\r +The samples are available in the `test_input` folder.\r +\r +We provide metaGOflow with partial samples from the Human Metagenome Project ([SRR1620013](https://www.ebi.ac.uk/ena/browser/view/SRR1620013) and [SRR1620014](https://www.ebi.ac.uk/ena/browser/view/SRR1620014))\r +They are partial as only a small part of their sequences have been kept, in terms for the pipeline to test in a fast way. \r +\r +\r +## Hints and tips\r +\r +1. In case you are using Docker, it is strongly recommended to **avoid** installing it through `snap`.\r +\r +2. 
`RuntimeError`: slurm currently does not support shared caching, because it does not support cleaning up a worker\r + after the last job finishes.\r + Set the `--disableCaching` flag if you want to use this batch system.\r +\r +3. In case you are having errors like:\r +\r +```\r +cwltool.errors.WorkflowException: Singularity is not available for this tool\r +```\r +\r +You may run the following command:\r +\r +```\r +singularity pull --force --name debian:stable-slim.sif docker://debian:stable-sli\r +```\r +\r +## Contribution\r +\r +To make contribution to the project a bit easier, all the MGnify `conditionals` and `subworkflows` under\r +the `workflows/` directory that are not used in the metaGOflow framework, have been removed. \r +However, all the MGnify `tools/` and `utils/` are available in this repo, even if they are not invoked in the current\r +version of metaGOflow.\r +This way, we hope we encourage people to implement their own `conditionals` and/or `subworkflows` by exploiting the\r +currently supported `tools` and `utils` as well as by developing new `tools` and/or `utils`.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/384?version=1" ; + schema1:isBasedOn "https://github.com/emo-bon/MetaGOflow.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for A workflow for marine Genomic Observatories data analysis" ; + schema1:sdDatePublished "2024-07-12 13:33:30 +0100" ; + schema1:url "https://workflowhub.eu/workflows/384/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5329 ; + schema1:creator , + ; + schema1:dateCreated "2022-09-19T18:00:20Z" ; + schema1:dateModified "2023-05-16T20:35:49Z" ; + schema1:description """# A workflow for marine Genomic Observatories data analysis\r +## An EOSC-Life project\r +\r +The workflows developed in the framework of this project are based on `pipeline-v5` of the MGnify resource. \r +This branch is a child of the [`pipeline_5.1`](https://github.com/EBI-Metagenomics/pipeline-v5) branch \r +that contains a part of the CWL descriptions of the MGnify pipeline version 5.1.\r +\r +The following comes from the initial repo and describes how to get the databases required.\r +\r +---\r +![logo](https://raw.githubusercontent.com/hariszaf/metaGOflow-use-case/gh-pages/assets/img/metaGOflow_logo_italics.png)\r +\r +\r +## An EOSC-Life project\r +\r +The workflows developed in the framework of this project are based on `pipeline-v5` of the MGnify resource.\r +\r +> This branch is a child of the [`pipeline_5.1`](https://github.com/hariszaf/pipeline-v5/tree/pipeline_5.1) branch\r +> that contains all CWL descriptions of the MGnify pipeline version 5.1.\r +\r +## Dependencies\r +\r +To run metaGOflow you need to make sure you have the following set on your computing environmnet first:\r +\r +- python3 [v 3.8+]\r +- [Docker](https://www.docker.com) [v 19.+] or [Singularity](https://apptainer.org) [v 3.7.+]/[Apptainer](https://apptainer.org) [v 1.+]\r +- [cwltool](https://github.com/common-workflow-language/cwltool) [v 3.+]\r +- [rdflib](https://rdflib.readthedocs.io/en/stable/) [v 6.+]\r +- [rdflib-jsonld](https://pypi.org/project/rdflib-jsonld/) [v 0.6.2]\r +- [ro-crate-py](https://github.com/ResearchObject/ro-crate-py) [v 0.7.0]\r +- [pyyaml](https://pypi.org/project/PyYAML/) [v 6.0]\r +- [Node.js](https://nodejs.org/) [v 10.24.0+]\r +- 
Available storage ~235GB for databases\r +\r +### Storage while running\r +\r +Depending on the analysis you are about to run, disk requirements vary.\r +Indicatively, you may have a look at the metaGOflow publication for computing resources used in various cases.\r +\r +## Installation\r +\r +### Get the EOSC-Life marine GOs workflow\r +\r +```bash\r +git clone https://github.com/emo-bon/MetaGOflow\r +cd MetaGOflow\r +```\r +\r +### Download necessary databases (~235GB)\r +\r +You can download databases for the EOSC-Life GOs workflow by running the\r +`download_dbs.sh` script under the `Installation` folder.\r +\r +```bash\r +bash Installation/download_dbs.sh -f [Output Directory e.g. ref-dbs] \r +```\r +If you have one or more already in your system, then create a symbolic link pointing\r +at the `ref-dbs` folder or at one of its subfolders/files.\r +\r +The final structure of the DB directory should be like the following:\r +\r +````bash\r +user@server:~/MetaGOflow: ls ref-dbs/\r +db_kofam/ diamond/ eggnog/ GO-slim/ interproscan-5.57-90.0/ kegg_pathways/ kofam_ko_desc.tsv Rfam/ silva_lsu/ silva_ssu/\r +````\r +\r +## How to run\r +\r +### Ensure that `Node.js` is installed on your system before running metaGOflow\r +\r +If you have root access on your system, you can run the commands below to install it:\r +\r +##### DEBIAN/UBUNTU\r +```bash\r +sudo apt-get update -y\r +sudo apt-get install -y nodejs\r +```\r +\r +##### RH/CentOS\r +```bash\r +sudo yum install rh-nodejs (e.g. 
rh-nodejs10)\r +```\r +\r +### Set up the environment\r +\r +#### Run once - Setup environment\r +\r +- ```bash\r + conda create -n EOSC-CWL python=3.8\r + ```\r +\r +- ```bash\r + conda activate EOSC-CWL\r + ```\r +\r +- ```bash\r + pip install cwlref-runner cwltool[all] rdflib-jsonld rocrate pyyaml\r +\r + ```\r +\r +#### Run every time\r +\r +```bash\r +conda activate EOSC-CWL\r +``` \r +\r +### Run the workflow\r +\r +- Edit the `config.yml` file to set the parameter values of your choice. For selecting all the steps, then set to `true` the variables in lines [2-6].\r +\r +#### Using Singularity\r +\r +##### Standalone\r +- run:\r + ```bash\r + ./run_wf.sh -s -n osd-short -d short-test-case -f test_input/wgs-paired-SRR1620013_1.fastq.gz -r test_input/wgs-paired-SRR1620013_2.fastq.gz\r + ``\r +\r +##### Using a cluster with a queueing system (e.g. SLURM)\r +\r +- Create a job file (e.g., SBATCH file)\r +\r +- Enable Singularity, e.g. module load Singularity & all other dependencies \r +\r +- Add the run line to the job file\r +\r +\r +#### Using Docker\r +\r +##### Standalone\r +- run:\r + ``` bash\r + ./run_wf.sh -n osd-short -d short-test-case -f test_input/wgs-paired-SRR1620013_1.fastq.gz -r test_input/wgs-paired-SRR1620013_2.fastq.gz\r + ```\r + HINT: If you are using Docker, you may need to run the above command without the `-s' flag.\r +\r +## Testing samples\r +The samples are available in the `test_input` folder.\r +\r +We provide metaGOflow with partial samples from the Human Metagenome Project ([SRR1620013](https://www.ebi.ac.uk/ena/browser/view/SRR1620013) and [SRR1620014](https://www.ebi.ac.uk/ena/browser/view/SRR1620014))\r +They are partial as only a small part of their sequences have been kept, in terms for the pipeline to test in a fast way. \r +\r +\r +## Hints and tips\r +\r +1. In case you are using Docker, it is strongly recommended to **avoid** installing it through `snap`.\r +\r +2. 
`RuntimeError`: slurm currently does not support shared caching, because it does not support cleaning up a worker\r + after the last job finishes.\r + Set the `--disableCaching` flag if you want to use this batch system.\r +\r +3. In case you are having errors like:\r +\r +```\r +cwltool.errors.WorkflowException: Singularity is not available for this tool\r +```\r +\r +You may run the following command:\r +\r +```\r +singularity pull --force --name debian:stable-slim.sif docker://debian:stable-sli\r +```\r +\r +## Contribution\r +\r +To make contribution to the project a bit easier, all the MGnify `conditionals` and `subworkflows` under\r +the `workflows/` directory that are not used in the metaGOflow framework, have been removed. \r +However, all the MGnify `tools/` and `utils/` are available in this repo, even if they are not invoked in the current\r +version of metaGOflow.\r +This way, we hope we encourage people to implement their own `conditionals` and/or `subworkflows` by exploiting the\r +currently supported `tools` and `utils` as well as by developing new `tools` and/or `utils`.\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/384?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "A workflow for marine Genomic Observatories data analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/384?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.291.2" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python GMX OPLS/AA Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-07-12 13:35:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/291/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1758 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-24T14:20:27Z" ; + schema1:dateModified "2023-01-16T13:58:35Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/291?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python GMX OPLS/AA 
Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/291?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + schema1:datePublished "2021-12-03T15:25:30.767225" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/fragment-based-docking-scoring" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "fragment-based-docking-scoring/main" ; + schema1:sdDatePublished "2021-12-04 03:00:49 +0000" ; + schema1:softwareVersion "v0.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible ancient and modern DNA pipeline in Nextflow and with cloud support." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "NOASSERTION" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-07-12 13:21:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=9" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9505 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:50Z" ; + schema1:dateModified "2024-06-11T12:54:50Z" ; + schema1:description "A fully reproducible ancient and modern DNA pipeline in Nextflow and with cloud support." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An nf-core demo pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1055?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/demo" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/demo" ; + schema1:sdDatePublished "2024-07-12 13:17:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1055/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9934 ; + schema1:creator ; + schema1:dateCreated "2024-06-21T03:02:42Z" ; + schema1:dateModified "2024-06-21T03:02:42Z" ; + schema1:description "An nf-core demo pipeline" ; + schema1:keywords "demo, minimal-example, training" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/demo" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1055?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/634?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Workflow 1: AbritAMR" ; + schema1:sdDatePublished "2024-07-12 13:23:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/634/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2073 ; + schema1:dateCreated "2023-10-31T10:42:03Z" ; + schema1:dateModified "2024-05-07T09:44:07Z" ; + schema1:description "" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Workflow 1: AbritAMR" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/634?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """MGnify (http://www.ebi.ac.uk/metagenomics) provides a free to use platform for the assembly, analysis and archiving of microbiome data derived from sequencing microbial populations that are present in particular environments. Over the past 2 years, MGnify (formerly EBI Metagenomics) has more than doubled the number of publicly available analysed datasets held within the resource. Recently, an updated approach to data analysis has been unveiled (version 5.0), replacing the previous single pipeline with multiple analysis pipelines that are tailored according to the input data, and that are formally described using the Common Workflow Language, enabling greater provenance, reusability, and reproducibility. MGnify's new analysis pipelines offer additional approaches for taxonomic assertions based on ribosomal internal transcribed spacer regions (ITS1/2) and expanded protein functional annotations. Biochemical pathways and systems predictions have also been added for assembled contigs. MGnify's growing focus on the assembly of metagenomic data has also seen the number of datasets it has assembled and analysed increase six-fold. 
The non-redundant protein database constructed from the proteins encoded by these assemblies now exceeds 1 billion sequences. Meanwhile, a newly developed contig viewer provides fine-grained visualisation of the assembled contigs and their enriched annotations.\r +\r +Documentation: https://docs.mgnify.org/en/latest/analysis.html#assembly-analysis-pipeline""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/360?version=1" ; + schema1:isBasedOn "https://github.com/EBI-Metagenomics/pipeline-v5.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for MGnify - assembly analysis pipeline" ; + schema1:sdDatePublished "2024-07-12 13:35:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/360/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 67287 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7995 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2022-06-07T07:41:18Z" ; + schema1:dateModified "2022-06-07T08:00:07Z" ; + schema1:description """MGnify (http://www.ebi.ac.uk/metagenomics) provides a free to use platform for the assembly, analysis and archiving of microbiome data derived from sequencing microbial populations that are present in particular environments. Over the past 2 years, MGnify (formerly EBI Metagenomics) has more than doubled the number of publicly available analysed datasets held within the resource. Recently, an updated approach to data analysis has been unveiled (version 5.0), replacing the previous single pipeline with multiple analysis pipelines that are tailored according to the input data, and that are formally described using the Common Workflow Language, enabling greater provenance, reusability, and reproducibility. 
MGnify's new analysis pipelines offer additional approaches for taxonomic assertions based on ribosomal internal transcribed spacer regions (ITS1/2) and expanded protein functional annotations. Biochemical pathways and systems predictions have also been added for assembled contigs. MGnify's growing focus on the assembly of metagenomic data has also seen the number of datasets it has assembled and analysed increase six-fold. The non-redundant protein database constructed from the proteins encoded by these assemblies now exceeds 1 billion sequences. Meanwhile, a newly developed contig viewer provides fine-grained visualisation of the assembled contigs and their enriched annotations.\r +\r +Documentation: https://docs.mgnify.org/en/latest/analysis.html#assembly-analysis-pipeline""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/360?version=1" ; + schema1:keywords "Metagenomics, Annotation, workflow, CWL" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "MGnify - assembly analysis pipeline" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/360?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Snakemake workflow: dna-seq-varlociraptor\r +\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥6.3.0-brightgreen.svg)](https://snakemake.github.io)\r +[![GitHub actions status](https://github.com/snakemake-workflows/dna-seq-varlociraptor/workflows/Tests/badge.svg?branch=master)](https://github.com/snakemake-workflows/dna-seq-varlociraptor/actions?query=branch%3Amaster+workflow%3ATests)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.4675661.svg)](https://doi.org/10.5281/zenodo.4675661)\r +\r +\r +A Snakemake workflow for calling small and structural variants under any kind of scenario (tumor/normal, tumor/normal/relapse, germline, pedigree, populations) via the unified statistical model of [Varlociraptor](https://varlociraptor.github.io).\r +\r +\r +## Usage\r +\r +The usage of this workflow is described in the [Snakemake Workflow Catalog](https://snakemake.github.io/snakemake-workflow-catalog/?usage=snakemake-workflows%2Fdna-seq-varlociraptor).\r +\r +If you use this workflow in a paper, don't forget to give credits to the authors by citing the URL of this (original) repository and its DOI (see above).\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/686?version=1" ; + schema1:isBasedOn "https://github.com/snakemake-workflows/dna-seq-varlociraptor" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for dna-seq-varlociraptor" ; + schema1:sdDatePublished "2024-07-12 13:25:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/686/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1287 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-12-14T08:15:08Z" ; + schema1:dateModified "2023-12-14T08:15:08Z" ; + schema1:description """# Snakemake workflow: dna-seq-varlociraptor\r +\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥6.3.0-brightgreen.svg)](https://snakemake.github.io)\r +[![GitHub actions status](https://github.com/snakemake-workflows/dna-seq-varlociraptor/workflows/Tests/badge.svg?branch=master)](https://github.com/snakemake-workflows/dna-seq-varlociraptor/actions?query=branch%3Amaster+workflow%3ATests)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.4675661.svg)](https://doi.org/10.5281/zenodo.4675661)\r +\r +\r +A Snakemake workflow for calling small and structural variants under any kind of scenario (tumor/normal, tumor/normal/relapse, germline, pedigree, populations) via the unified statistical model of [Varlociraptor](https://varlociraptor.github.io).\r +\r +\r +## Usage\r +\r +The usage of this workflow is described in the [Snakemake Workflow Catalog](https://snakemake.github.io/snakemake-workflow-catalog/?usage=snakemake-workflows%2Fdna-seq-varlociraptor).\r +\r +If you use this workflow in a paper, don't forget to give credits to the authors by citing the URL of this (original) repository and its DOI (see above).\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/686?version=1" ; + schema1:keywords "Bioinformatics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "dna-seq-varlociraptor" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/686?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Image Mass Cytometry analysis pipeline." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/992?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/imcyto" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/imcyto" ; + schema1:sdDatePublished "2024-07-12 13:21:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/992/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4292 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:57Z" ; + schema1:dateModified "2024-06-11T12:54:57Z" ; + schema1:description "Image Mass Cytometry analysis pipeline." ; + schema1:keywords "cytometry, image-analysis, image-processing, image-segmentation" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/imcyto" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/992?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2023-10-19T16:22:19.479397" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-only-VGP3/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.14" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 2070783 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ChIP-seq peak-calling and differential analysis pipeline." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/971?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/chipseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/chipseq" ; + schema1:sdDatePublished "2024-07-12 13:22:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/971/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5458 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:44Z" ; + schema1:dateModified "2024-06-11T12:54:44Z" ; + schema1:description "ChIP-seq peak-calling and differential analysis pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/971?version=5" ; + schema1:keywords "ChIP, ChIP-seq, chromatin-immunoprecipitation, macs2, peak-calling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/chipseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/971?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + schema1:datePublished "2024-03-26T19:39:39.619031" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-Trio-phasing-VGP5/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . 
+ + a schema1:Dataset ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-ont-artic-variant-calling" ; + schema1:mainEntity ; + schema1:name "sars-cov-2-ont-artic-variant-calling/COVID-19-ARTIC-ONT (v0.2.1)" ; + schema1:sdDatePublished "2021-07-27 03:00:55 +0100" ; + schema1:softwareVersion "v0.2.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 45247 ; + schema1:name "COVID-19-ARTIC-ONT" ; + schema1:programmingLanguage . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Conformational Transitions calculations tutorial using BioExcel Building Blocks (biobb) and GOdMD\r +\r +This tutorial aims to illustrate the process of computing a conformational transition between two known structural conformations of a protein, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.549.1" ; + schema1:isBasedOn 
"https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_godmd/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Protein Conformational Transitions calculations tutorial" ; + schema1:sdDatePublished "2024-07-12 13:32:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/549/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 31502 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8991 ; + schema1:creator , + ; + schema1:dateCreated "2023-08-02T09:54:17Z" ; + schema1:dateModified "2023-08-02T10:03:39Z" ; + schema1:description """# Protein Conformational Transitions calculations tutorial using BioExcel Building Blocks (biobb) and GOdMD\r +\r +This tutorial aims to illustrate the process of computing a conformational transition between two known structural conformations of a protein, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license 
"https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Protein Conformational Transitions calculations tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_godmd/cwl/workflow.cwl" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-07-12 13:21:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=10" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11079 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:50Z" ; + schema1:dateModified "2024-06-11T12:54:50Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=10" ; + schema1:version 10 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1023?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/smrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/smrnaseq" ; + schema1:sdDatePublished "2024-07-12 13:18:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1023/ro_crate?version=8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11092 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:18Z" ; + schema1:dateModified "2024-06-11T12:55:18Z" ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1023?version=10" ; + schema1:keywords "small-rna, smrna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/smrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1023?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:description "Dating the most recent common ancestor (MRCA) of SARS-CoV-2. The workflow is used to extract full length sequences of SARS-CoV-2, tidy up their names in FASTA files, produce a multiple sequences alignment and compute a maximum likelihood tree. 
More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/6?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genomics - MRCA analysis" ; + schema1:sdDatePublished "2024-07-12 13:37:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/6/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 3760 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17202 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T12:44:38Z" ; + schema1:dateModified "2023-01-16T13:39:57Z" ; + schema1:description "Dating the most recent common ancestor (MRCA) of SARS-CoV-2. The workflow is used to extract full length sequences of SARS-CoV-2, tidy up their names in FASTA files, produce a multiple sequences alignment and compute a maximum likelihood tree. More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Genomics - MRCA analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/6?version=1" ; + schema1:version 1 ; + ns1:input . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 7247 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Global Run-On sequencing analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1004?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/nascent" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nascent" ; + schema1:sdDatePublished "2024-07-12 13:20:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1004/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8566 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:06Z" ; + schema1:dateModified "2024-06-11T12:55:06Z" ; + schema1:description "Global Run-On sequencing analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1004?version=4" ; + schema1:keywords "gro-seq, nascent, pro-seq, rna, transcription, tss" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nascent" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1004?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Quantitative Mass Spectrometry nf-core workflow" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1013?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/quantms" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/quantms" ; + schema1:sdDatePublished "2024-07-12 13:19:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1013/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12787 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:08Z" ; + schema1:dateModified "2024-06-11T12:55:08Z" ; + schema1:description "Quantitative Mass Spectrometry nf-core workflow" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1013?version=3" ; + schema1:keywords "dia, lfq, mass-spec, mass-spectrometry, openms, proteogenomics, Proteomics, tmt" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/quantms" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1013?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 146760 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "BioTranslator performs sequentially pathway analysis and gene prioritization: A specific operator is executed for each task to translate the input gene set into semantic terms and pinpoint the pivotal-role genes on the derived semantic network. The output consists of the set of statistically significant semantic terms and the associated hub genes (the gene signature), prioritized according to their involvement in the underlying semantic topology." 
; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/189?version=2" ; + schema1:isBasedOn "http://www.biotranslator.gr:8080/u/thodk/w/biotranslator-workflow" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for BioTranslator Workflow" ; + schema1:sdDatePublished "2024-07-12 13:36:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/189/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3955 ; + schema1:dateCreated "2021-09-24T14:38:37Z" ; + schema1:dateModified "2021-09-24T15:15:04Z" ; + schema1:description "BioTranslator performs sequentially pathway analysis and gene prioritization: A specific operator is executed for each task to translate the input gene set into semantic terms and pinpoint the pivotal-role genes on the derived semantic network. The output consists of the set of statistically significant semantic terms and the associated hub genes (the gene signature), prioritized according to their involvement in the underlying semantic topology." ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/189?version=2" ; + schema1:keywords "Semantic Network Analysis, Gene Prioritization, Pathway Analysis, Biomedical Ontologies, Semantic Interpretation" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "BioTranslator Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/189?version=2" ; + schema1:version 2 ; + ns1:input ; + ns1:output , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 44291 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Introduction\r +\r +This repository contains all the custom scripts used in the evaluation and comparison of [Katdetectr](https://github.com/ErasmusMC-CCBC/evaluation_katdetectr/tree/main) as described in the corresponding Technical Note (under submission).\r +\r +# Usage\r +\r +All required files were deposited on [Zenodo](https://zenodo.org/record/6623289#.YqBxHi8Rr0o%5D).\r +These can directly be downloaded using `zen4R` and be used as input.\r +\r +```R\r +# Increase the timeout (due to some large files).\r +options(timeout=5000)\r +\r +# Download the required files into the data/ folder (~1GB).\r +zen4R::download_zenodo(doi = "10.5281/zenodo.6810477", path = 'data/')\r +```""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.500.1" ; + schema1:isBasedOn "https://github.com/ErasmusMC-CCBC/evaluation_katdetectr/" ; + schema1:license "LGPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Performance evaluation of Katdetectr and other kataegis detection packages" ; + schema1:sdDatePublished "2024-07-12 13:33:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/500/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 19301 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-07T17:32:16Z" ; + schema1:dateModified "2023-06-07T17:52:17Z" ; + schema1:description """# Introduction\r +\r +This repository contains all the custom scripts used in the evaluation and comparison of [Katdetectr](https://github.com/ErasmusMC-CCBC/evaluation_katdetectr/tree/main) as described in the corresponding Technical Note (under submission).\r +\r +# Usage\r +\r +All required files were deposited on [Zenodo](https://zenodo.org/record/6623289#.YqBxHi8Rr0o%5D).\r +These can directly be downloaded using `zen4R` and be used as input.\r +\r +```R\r +# Increase the timeout (due to some large files).\r +options(timeout=5000)\r +\r +# Download the required files into the data/ folder (~1GB).\r +zen4R::download_zenodo(doi = "10.5281/zenodo.6810477", path = 'data/')\r +```""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/LGPL-3.0" ; + schema1:name "Performance evaluation of Katdetectr and other kataegis detection packages" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/500?version=1" ; + schema1:version 1 . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 902 ; + schema1:programmingLanguage . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-07-12 13:20:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10237 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:58Z" ; + schema1:dateModified "2024-06-11T12:54:58Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=7" ; + schema1:version 7 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 28082 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 3 ; + schema1:dateModified "2024-01-25T15:45:26" ; + schema1:name "energy.selection" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3 ; + schema1:dateModified "2024-01-25T15:45:26" ; + schema1:name "genion.group" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1044 ; + schema1:dateModified "2024-01-25T15:45:26" ; + schema1:name "ions.mdp" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 971 ; + schema1:dateModified "2024-01-25T15:45:26" ; + schema1:name "minim.mdp" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 109917 ; + schema1:dateModified "2024-01-25T15:45:26" ; + schema1:name "1aki.pdb" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2998215 ; + schema1:dateModified "2024-01-25T15:45:26" ; + schema1:name "1u3m.pdb" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2918025 ; + schema1:dateModified "2024-01-25T15:45:26" ; + schema1:name "1xyw.pdb" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88246 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1aki.gro" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577530 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1aki.top" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1280044 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1aki_em.tpr" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1aki_em_energy.edr" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1279304 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1aki_ions.tpr" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88246 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1aki_newbox.gro" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2123 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1aki_potential.xvg" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1525186 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1aki_solv.gro" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1524475 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1aki_solv_ions.gro" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 82046 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1u3m.gro" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 537864 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1u3m.top" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1416272 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1u3m_em.tpr" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1u3m_em_energy.edr" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1415196 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1u3m_ions.tpr" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 82046 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1u3m_newbox.gro" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2125 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1u3m_potential.xvg" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1837046 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1u3m_solv.gro" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1836965 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1u3m_solv_ions.gro" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 80112 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1xyw.gro" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 523940 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1xyw.top" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1408872 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1xyw_em.tpr" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1xyw_em_energy.edr" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1407844 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1xyw_ions.tpr" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 80112 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1xyw_newbox.gro" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2100 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1xyw_potential.xvg" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1845237 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1xyw_solv.gro" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1845066 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1xyw_solv_ions.gro" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:Dataset ; + schema1:datePublished "2024-04-23T15:13:06.571609" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Scaffolding-HiC-VGP8/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# GBMatch_CNN\r +Work in progress...\r +Predicting TS & risk from glioblastoma whole slide images\r +\r +# Reference\r +Upcoming paper: stay tuned...\r +\r +# Dependencies\r +python 3.7.7\r +\r +randaugment by Khrystyna Faryna: https://github.com/tovaroe/pathology-he-auto-augment\r +\r +tensorflow 2.1.0\r +\r +scikit-survival 0.13.1\r +\r +pandas 1.0.3\r +\r +lifelines 0.25.0\r +\r +# Description\r +The pipeline implemented here predicts transcriptional subtypes and survival of glioblastoma patients based on H&E stained whole slide scans. Sample data is provided in this repository. To test the basic functionality with 5-fold-CV simply run train_model_OS.py (for survival) or train_model_TS.py (for transcriptional subtypes). 
Please note that this will not reproduce the results from the manuscript, as only a small fraction of the image data can be provided in this repository due to size constraints. In order to reproduce the results from the manuscript, please refer to the step by step guide below. The whole dataset can be accessed at https://www.medical-epigenomics.org/papers/GBMatch/.\r +If you wish to adopt this pipeline for your own use, please be sure to set the correct parameters in config.py.\r +\r +Moreover, we provide a fully trained model in gbm_predictor.py for predicting new samples (supported WSI formats are ndpi and svs). To use GBMPredictor, simply initialize by calling \r +`gbm_predictor = GBMPredictor()`\r +and predict your sample by calling\r +`(predicted_TS, risk_group, median_riskscore) = gbm_predictor.predict(*path_to_slidescan*)`\r +Heatmaps and detailed results will be automatically saved in a subfolder in your sample path.\r +\r +# Reproducing the manuscript results - step by step guide\r +\r +## Training the CNN model\r +1. Clone this repository and install the dependencies in your environment. Make sure that the path for randaugment is correctly set in the config.py (should be correct by default).\r +2. Download all included image tiles at https://doi.org/10.5281/zenodo.8358673 and replace the data/training/image_tiles folder with the image_tiles folder from zenodo.\r +3. Run train_model_OS.py and/or train_model_TS.py to reproduce the training with 5-fold cross validation. Models and results will be saved in the data/models folder.\r +4. Run train_final_model_OS.py and/or train_final_model_TS.py to train the final model on the whole training dataset.\r +\r +## Validate the CNN model on TCGA data\r +1. Download scans and clinical data of the TCGA glioblastoma cohort from https://www.cbioportal.org/ and/or https://portal.gdc.cancer.gov/\r +2. Copy tumor segmentations from GBMatch_CNN/data/validation/segmentation into the same folder as the TCGA slide scans\r +3. 
Predict TCGA samples with gbm_predictor (see above).\r +(You can also find all prediction results in GBMatch_CNN/data/validation/TCGA_annotation_prediction.csv.)\r +\r +## Evaluation of the tumor microenvironment\r +1. Install qupath 0.3.0 (newer versions should also work): https://qupath.github.io/.\r +2. Download immunohistochemical slides from https://www.medical-epigenomics.org/papers/GBMatch/.\r +3. Download annotation (IHC_geojsons) from https://doi.org/10.5281/zenodo.8358673.\r +4. Create a new project and import all immunohistochemical slides & annotations.\r +5. Copy the CD34 and HLA-DR thresholder from GBMatch_CNN/qupath into your project.\r +6. Run GBMatch_CNN/qupath/IHC_eval.groovy for all slides - immunohistochemistry results will be saved to a IHC_results-folder.\r +7. Create a new project and import all HE image tiles.\r +8. Run GBMatch_CNN/qupath/cellularity.groovy for all slides - cellularity results will be saved to a HE-results-folder.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.883.1" ; + schema1:isBasedOn "https://github.com/tovaroe/GBMatch_CNN.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Training a CNN model for classification of transcriptional subtypes and survival prediction in glioblastoma" ; + schema1:sdDatePublished "2024-07-12 13:22:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/883/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2551 ; + schema1:creator ; + schema1:dateCreated "2024-05-13T07:10:28Z" ; + schema1:dateModified "2024-05-13T07:11:42Z" ; + schema1:description """# GBMatch_CNN\r +Work in progress...\r +Predicting TS & risk from glioblastoma whole slide images\r +\r +# Reference\r +Upcoming paper: stay tuned...\r +\r +# Dependencies\r +python 3.7.7\r +\r +randaugment by Khrystyna Faryna: https://github.com/tovaroe/pathology-he-auto-augment\r +\r +tensorflow 2.1.0\r +\r +scikit-survival 0.13.1\r +\r +pandas 1.0.3\r +\r +lifelines 0.25.0\r +\r +# Description\r +The pipeline implemented here predicts transcriptional subtypes and survival of glioblastoma patients based on H&E stained whole slide scans. Sample data is provided in this repository. To test the basic functionality with 5-fold-CV simply run train_model_OS.py (for survival) or train_model_TS.py (for transcriptional subtypes). Please note that this will not reproduce the results from the manuscript, as only a small fraction of the image data can be provided in this repository due to size constraints. In order to reproduce the results from the manuscript, please refer to the step by step guide below. The whole dataset can be accessed at https://www.medical-epigenomics.org/papers/GBMatch/.\r +If you wish to adopt this pipeline for your own use, please be sure to set the correct parameters in config.py.\r +\r +Moreover, we provide a fully trained model in gbm_predictor.py for predicting new samples (supported WSI formats are ndpi and svs). 
To use GBMPredictor, simply initialize by calling \r +`gbm_predictor = GBMPredictor()`\r +and predict your sample by calling\r +`(predicted_TS, risk_group, median_riskscore) = gbm_predictor.predict(*path_to_slidescan*)`\r +Heatmaps and detailed results will be automatically saved in a subfolder in your sample path.\r +\r +# Reproducing the manuscript results - step by step guide\r +\r +## Training the CNN model\r +1. Clone this repository and install the dependencies in your environment. Make sure that the path for randaugment is correctly set in the config.py (should be correct by default).\r +2. Download all included image tiles at https://doi.org/10.5281/zenodo.8358673 and replace the data/training/image_tiles folder with the image_tiles folder from zenodo.\r +3. Run train_model_OS.py and/or train_model_TS.py to reproduce the training with 5-fold cross validation. Models and results will be saved in the data/models folder.\r +4. Run train_final_model_OS.py and/or train_final_model_TS.py to train the final model on the whole training dataset.\r +\r +## Validate the CNN model on TCGA data\r +1. Download scans and clinical data of the TCGA glioblastoma cohort from https://www.cbioportal.org/ and/or https://portal.gdc.cancer.gov/\r +2. Copy tumor segmentations from GBMatch_CNN/data/validation/segmentation into the same folder as the TCGA slide scans\r +3. Predict TCGA samples with gbm_predictor (see above).\r +(You can also find all prediction results in GBMatch_CNN/data/validation/TCGA_annotation_prediction.csv.)\r +\r +## Evaluation of the tumor microenvironment\r +1. Install qupath 0.3.0 (newer versions should also work): https://qupath.github.io/.\r +2. Download immunohistochemical slides from https://www.medical-epigenomics.org/papers/GBMatch/.\r +3. Download annotation (IHC_geojsons) from https://doi.org/10.5281/zenodo.8358673.\r +4. Create a new project and import all immunohistochemical slides & annotations.\r +5. 
Copy the CD34 and HLA-DR thresholder from GBMatch_CNN/qupath into your project.\r +6. Run GBMatch_CNN/qupath/IHC_eval.groovy for all slides - immunohistochemistry results will be saved to a IHC_results-folder.\r +7. Create a new project and import all HE image tiles.\r +8. Run GBMatch_CNN/qupath/cellularity.groovy for all slides - cellularity results will be saved to a HE-results-folder.\r +""" ; + schema1:keywords "Bioinformatics, Pathology" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Training a CNN model for classification of transcriptional subtypes and survival prediction in glioblastoma" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/883?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=23" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-07-12 13:20:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=23" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12386 ; + schema1:creator ; + schema1:dateCreated "2024-06-18T03:03:03Z" ; + schema1:dateModified "2024-06-18T03:03:03Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=23" ; + schema1:version 23 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/984?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/epitopeprediction" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/epitopeprediction" ; + schema1:sdDatePublished "2024-07-12 13:21:30 +0100" ; + schema1:url "https://workflowhub.eu/workflows/984/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4453 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:51Z" ; + schema1:dateModified "2024-06-11T12:54:51Z" ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/984?version=7" ; + schema1:keywords "epitope, epitope-prediction, mhc-binding-prediction" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/epitopeprediction" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/984?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2023-10-17T19:27:24.855858" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "cutandrun/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.12" . + + a schema1:Dataset ; + schema1:datePublished "2023-09-15T15:02:21.113507" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/rnaseq-sr" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "rnaseq-sr/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.11" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Basic processing of a QC-filtered Anndata Object. UMAP, clustering e.t.c " ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/468?version=2" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq_QCtoBasicProcessing" ; + schema1:sdDatePublished "2024-07-12 13:22:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/468/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 33517 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-06-22T06:24:41Z" ; + schema1:dateModified "2023-11-09T03:50:40Z" ; + schema1:description "Basic processing of a QC-filtered Anndata Object. UMAP, clustering e.t.c " ; + schema1:isBasedOn "https://workflowhub.eu/workflows/468?version=2" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "scRNAseq_QCtoBasicProcessing" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/468?version=2" ; + schema1:version 2 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=14" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-07-12 13:18:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=14" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12036 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:10Z" ; + schema1:dateModified "2024-06-11T12:55:10Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=14" ; + schema1:version 14 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "This is a Nextflow implementaion of the GATK Somatic Short Variant Calling workflow. This workflow can be used to discover somatic short variants (SNVs and indels) from tumour and matched normal BAM files following GATK's Best Practices Workflow. The workflowis currently optimised to run efficiently and at scale on the National Compute Infrastructure, Gadi." ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.691.1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/Somatic-shortV-nf" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Somatic-ShortV-nf" ; + schema1:sdDatePublished "2024-07-12 13:25:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/691/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3684 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2023-12-20T01:12:30Z" ; + schema1:dateModified "2023-12-20T01:30:37Z" ; + schema1:description "This is a Nextflow implementaion of the GATK Somatic Short Variant Calling workflow. This workflow can be used to discover somatic short variants (SNVs and indels) from tumour and matched normal BAM files following GATK's Best Practices Workflow. 
The workflowis currently optimised to run efficiently and at scale on the National Compute Infrastructure, Gadi." ; + schema1:keywords "Bioinformatics, FAIR workflows, GATK4, INDELs, Nextflow, variant calling, workflow, cancer, Somatic, snv, Genomics, human, WGS, HPC" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Somatic-ShortV-nf" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/691?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=27" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-07-12 13:22:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=27" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12883 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:13Z" ; + schema1:dateModified "2024-06-11T12:55:13Z" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=27" ; + schema1:version 27 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Macromolecular Coarse-Grained Flexibility (FlexServ) tutorial using BioExcel Building Blocks (biobb)\r +\r +This tutorial aims to illustrate the process of generating protein conformational ensembles from 3D structures and analysing its molecular flexibility, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.553.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_flexserv/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Macromolecular Coarse-Grained Flexibility tutorial" ; + schema1:sdDatePublished "2024-07-12 13:32:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/553/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7454 ; + schema1:creator , + ; + schema1:dateCreated "2023-08-02T11:23:28Z" ; + schema1:dateModified "2023-08-02T11:26:04Z" ; + schema1:description """# Macromolecular Coarse-Grained Flexibility (FlexServ) tutorial using BioExcel Building Blocks (biobb)\r +\r +This tutorial aims to illustrate the process of generating protein conformational ensembles from 3D structures and analysing its molecular flexibility, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Macromolecular Coarse-Grained Flexibility tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_flexserv/python/workflow.py" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "SPA workflow using cryosparc processing engine" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1073?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CRYOSPARC: acquire -> motionCorr -> ctf -> report" ; + schema1:sdDatePublished "2024-07-12 13:17:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1073/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1492 ; + schema1:dateCreated "2024-07-10T13:04:36Z" ; + schema1:dateModified "2024-07-10T13:04:36Z" ; + schema1:description "SPA workflow using cryosparc processing engine" ; + schema1:keywords "spa, cryosparc, Glacios, TalosArctica, TitanKrios" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "CRYOSPARC: acquire -> motionCorr -> ctf -> report" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1073?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1027?version=11" ; + schema1:isBasedOn "https://github.com/nf-core/viralrecon" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/viralrecon" ; + schema1:sdDatePublished "2024-07-12 13:18:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1027/ro_crate?version=11" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10888 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:21Z" ; + schema1:dateModified "2024-06-11T12:55:21Z" ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1027?version=10" ; + schema1:keywords "Amplicon, ARTIC, Assembly, covid-19, covid19, illumina, long-read-sequencing, Metagenomics, nanopore, ONT, oxford-nanopore, SARS-CoV-2, variant-calling, viral, Virus" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/viralrecon" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1027?version=11" ; + schema1:version 11 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in 
Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.196.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook ABC MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/196/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 101184 ; + schema1:creator , + ; + schema1:dateCreated "2021-09-28T13:24:33Z" ; + schema1:dateModified "2022-09-14T09:07:13Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* 
(c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/196?version=4" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook ABC MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_amber_md_setup/master/biobb_wf_amber_md_setup/notebooks/abcsetup/biobb_amber_ABC_setup.ipynb" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Take an anndata file, and perform basic QC with scanpy. Produces a filtered AnnData object." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/467?version=1" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq_CellQC" ; + schema1:sdDatePublished "2024-07-12 13:22:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/467/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 21365 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-05-05T06:49:16Z" ; + schema1:dateModified "2023-06-16T04:56:56Z" ; + schema1:description "Take an anndata file, and perform basic QC with scanpy. Produces a filtered AnnData object." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/467?version=3" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "scRNAseq_CellQC" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/467?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + ; + ns1:output , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2023-11-13T09:41:59.582932" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.17" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """RNASeq-DE @ NCI-Gadi processes RNA sequencing data (single, paired and/or multiplexed) for differential expression (raw FASTQ to counts). This pipeline consists of multiple stages and is designed for the National Computational Infrastructure's (NCI) Gadi supercompter, leveraging multiple nodes to run each stage in parallel. \r +\r +Infrastructure\\_deployment\\_metadata: Gadi (NCI)""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.152.1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/RNASeq-DE" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for RNASeq-DE @ NCI-Gadi" ; + schema1:sdDatePublished "2024-07-12 13:35:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/152/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9756 ; + schema1:creator , + , + ; + schema1:dateCreated "2021-08-18T23:24:08Z" ; + schema1:dateModified "2023-01-16T13:51:46Z" ; + schema1:description """RNASeq-DE @ NCI-Gadi processes RNA sequencing data (single, paired and/or multiplexed) for differential expression (raw FASTQ to counts). This pipeline consists of multiple stages and is designed for the National Computational Infrastructure's (NCI) Gadi supercompter, leveraging multiple nodes to run each stage in parallel. \r +\r +Infrastructure\\_deployment\\_metadata: Gadi (NCI)""" ; + schema1:isPartOf ; + schema1:keywords "RNASEQ, differential_expression, DE, Gadi, NCI, illumina, STAR, SAMTools, RSeQC, HTSeq, MultiQC, FastQC, BBduk, rna, expression, differential expression, FASTQ, counts, NCI-Gadi, rna-seq, workflow, bash, PBS, parallel, scalable" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "RNASeq-DE @ NCI-Gadi" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/152?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Simulations and figures supporting the manuscript \"Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake\"" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.511.2" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake" ; + schema1:sdDatePublished "2024-07-12 13:32:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/511/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 305 ; + schema1:creator , + , + , + , + , + ; + schema1:dateCreated "2023-08-18T11:04:44Z" ; + schema1:dateModified "2023-08-18T11:12:51Z" ; + schema1:description "Simulations and figures supporting the manuscript \"Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake\"" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/511?version=4" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/511?version=2" ; + schema1:version 2 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 75460 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-08T13:31:32.629794" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Purge-duplicate-contigs-VGP6/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicate-contigs-VGP6/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:version "0.3.0" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=21" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-07-12 13:20:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=21" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9644 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:04Z" ; + schema1:dateModified "2024-06-11T12:55:04Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=21" ; + schema1:version 21 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """\r +Author: AMBARISH KUMAR er.ambarish@gmail.com & ambari73_sit@jnu.ac.in\r +\r +This is a proposed standard operating procedure for genomic variant detection using GATK4.\r +\r +It is hoped to be effective and useful for getting SARS-CoV-2 genome variants.\r +\r +\r +\r +It uses Illumina RNASEQ reads and genome sequence.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/30?version=1" ; + schema1:isBasedOn "https://github.com/ambarishK/bio-cwl-tools/blob/release/gatk4W.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genomic variants - SNPs and INDELs detection using GATK4." ; + schema1:sdDatePublished "2024-07-12 13:37:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/30/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4306 ; + schema1:creator ; + schema1:dateCreated "2020-06-17T06:11:59Z" ; + schema1:dateModified "2023-01-16T13:42:19Z" ; + schema1:description """\r +Author: AMBARISH KUMAR er.ambarish@gmail.com & ambari73_sit@jnu.ac.in\r +\r +This is a proposed standard operating procedure for genomic variant detection using GATK4.\r +\r +It is hoped to be effective and useful for getting SARS-CoV-2 genome variants.\r +\r +\r +\r +It uses Illumina RNASEQ reads and genome sequence.\r +""" ; + schema1:image ; + schema1:keywords "CWL, GATK4, SNPs, INDELs" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Genomic variants - SNPs and INDELs detection using GATK4." ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/30?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + ; + ns1:output , + . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 65147 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 231159 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 1583 ; + schema1:dateModified "2024-03-13T10:40:28+00:00" ; + schema1:name "kmeans.csv" ; + schema1:sdDatePublished "2024-03-22T17:53:30+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1027?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/viralrecon" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/viralrecon" ; + schema1:sdDatePublished "2024-07-12 13:18:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1027/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10144 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:20Z" ; + schema1:dateModified "2024-06-11T12:55:20Z" ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1027?version=10" ; + schema1:keywords "Amplicon, ARTIC, Assembly, covid-19, covid19, illumina, long-read-sequencing, Metagenomics, nanopore, ONT, oxford-nanopore, SARS-CoV-2, variant-calling, viral, Virus" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/viralrecon" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1027?version=6" ; + schema1:version 6 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 13385 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + schema1:dateModified "2024-01-22T13:48:25" ; + schema1:hasPart , + , + , + ; + schema1:name "dataset_4f_16mb" ; + schema1:sdDatePublished "2024-01-22T16:19:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777334 ; + schema1:dateModified "2024-01-22T13:48:25" ; + schema1:name "file0.txt" ; + schema1:sdDatePublished "2024-01-22T16:19:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777273 ; + schema1:dateModified "2024-01-22T13:48:25" ; + schema1:name "file1.txt" ; + schema1:sdDatePublished "2024-01-22T16:19:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777443 ; + schema1:dateModified "2024-01-22T13:48:25" ; + schema1:name "file2.txt" ; + schema1:sdDatePublished "2024-01-22T16:19:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16777723 ; + schema1:dateModified "2024-01-22T13:48:25" ; + schema1:name "file3.txt" ; + schema1:sdDatePublished "2024-01-22T16:19:50+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +****\r +\r +Workflow information:\r +* Input = genome.fasta.\r +* Outputs = soft_masked_genome.fasta, hard_masked_genome.fasta, and table of repeats found. \r +* Runs RepeatModeler with default settings, uses the output of this (repeat library) as input into RepeatMasker. \r +* Runs RepeatMasker with default settings except for: Skip masking of simple tandem repeats and low complexity regions. (-nolow) : default set to yes. Perform softmasking instead of hardmasking - set to yes. \r +* Converts the soft-masked genome to hard-masked for for use in other tools if required. \r +* Workflow report displays an edited table of repeats found. Note: a known bug is that sometimes the workflow report text resets to default text. To restore, look for an earlier workflow version with correct workflow report text, and copy and paste report text into current version.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.875.3" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Repeat masking - TSI" ; + schema1:sdDatePublished "2024-07-12 13:17:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/875/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10929 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-20T23:41:31Z" ; + schema1:dateModified "2024-06-20T23:48:44Z" ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +****\r +\r +Workflow information:\r +* Input = genome.fasta.\r +* Outputs = soft_masked_genome.fasta, hard_masked_genome.fasta, and table of repeats found. \r +* Runs RepeatModeler with default settings, uses the output of this (repeat library) as input into RepeatMasker. \r +* Runs RepeatMasker with default settings except for: Skip masking of simple tandem repeats and low complexity regions. (-nolow) : default set to yes. Perform softmasking instead of hardmasking - set to yes. \r +* Converts the soft-masked genome to hard-masked for for use in other tools if required. \r +* Workflow report displays an edited table of repeats found. Note: a known bug is that sometimes the workflow report text resets to default text. 
To restore, look for an earlier workflow version with correct workflow report text, and copy and paste report text into current version.\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/875?version=2" ; + schema1:isPartOf ; + schema1:keywords "TSI-annotation" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Repeat masking - TSI" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/875?version=3" ; + schema1:version 3 ; + ns1:input ; + ns1:output , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 651806 . + + a schema1:Dataset ; + schema1:datePublished "2024-06-10T13:43:17.509153" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "parallel-accession-download/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-wgs-variant-calling" ; + schema1:mainEntity ; + schema1:name "COVID-19-PE-WGS-ILLUMINA (v0.2)" ; + schema1:sdDatePublished "2021-06-22 03:00:47 +0100" ; + schema1:softwareVersion "v0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 34539 ; + schema1:name "COVID-19-PE-WGS-ILLUMINA" ; + schema1:programmingLanguage . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.129.5" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (Fpocket)" ; + schema1:sdDatePublished "2024-07-12 13:33:54 
+0100" ; + schema1:url "https://workflowhub.eu/workflows/129/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 43169 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-03-04T14:36:50Z" ; + schema1:dateModified "2024-05-14T10:12:37Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in 
Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/129?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (Fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_virtual-screening/blob/main/biobb_wf_virtual-screening/notebooks/fpocket/wf_vs_fpocket.ipynb" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Simulations and figures supporting the manuscript \"Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake\"" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.511.3" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake" ; + schema1:sdDatePublished "2024-07-12 13:32:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/511/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 305 ; + schema1:creator , + , + , + , + , + ; + schema1:dateCreated "2023-11-24T13:38:57Z" ; + schema1:dateModified "2023-11-24T13:40:58Z" ; + schema1:description "Simulations and figures supporting the manuscript \"Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake\"" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/511?version=4" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/511?version=3" ; + schema1:version 3 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 75460 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Loads a single cell counts matrix into an annData format - adding a column called sample with the sample name. (Input format - matrix.mtx, features.tsv and barcodes.tsv)" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/512?version=1" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "other-closed" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq: Load counts matrix" ; + schema1:sdDatePublished "2024-07-12 13:22:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/512/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14187 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-06-22T06:40:48Z" ; + schema1:dateModified "2023-11-09T03:51:14Z" ; + schema1:description "Loads a single cell counts matrix into an annData format - adding a column called sample with the sample name. (Input format - matrix.mtx, features.tsv and barcodes.tsv)" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/512?version=1" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "" ; + schema1:name "scRNAseq: Load counts matrix" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/512?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + ; + ns1:output . + + a schema1:Dataset ; + schema1:datePublished "2024-05-31T11:30:02.345292" ; + schema1:hasPart , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/baredsc" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + ; + schema1:name "baredsc/baredSC-1d-logNorm" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "baredsc/baredSC-2d-logNorm" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/baredsc" ; + schema1:version "0.5" . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-11-23T13:33:07.963883" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/kmer-profiling-hifi-VGP1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "kmer-profiling-hifi-VGP1/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Molecular Structure Checking using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **checking** a **molecular structure** before using it as an input for a **Molecular Dynamics** simulation. The workflow uses the **BioExcel Building Blocks library (biobb)**. The particular structure used is the crystal structure of **human Adenylate Kinase 1A (AK1A)**, in complex with the **AP5A inhibitor** (PDB code [1Z83](https://www.rcsb.org/structure/1z83)). \r +\r +**Structure checking** is a key step before setting up a protein system for **simulations**. 
A number of **common issues** found in structures at **Protein Data Bank** may compromise the success of the **simulation**, or may suggest that longer **equilibration** procedures are necessary.\r +\r +The **workflow** shows how to:\r +\r +- Run **basic manipulations on structures** (selection of models, chains, alternative locations\r +- Detect and fix **amide assignments** and **wrong chiralities**\r +- Detect and fix **protein backbone** issues (missing fragments, and atoms, capping)\r +- Detect and fix **missing side-chain atoms**\r +- **Add hydrogen atoms** according to several criteria\r +- Detect and classify **atomic clashes**\r +- Detect possible **disulfide bonds (SS)**\r +\r +An implementation of this workflow in a **web-based Graphical User Interface (GUI)** can be found in the [https://mmb.irbbarcelona.org/biobb-wfs/](https://mmb.irbbarcelona.org/biobb-wfs/) server (see [https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check](https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check)).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier 
"https://doi.org/10.48546/workflowhub.workflow.776.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_structure_checking/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Molecular Structure Checking" ; + schema1:sdDatePublished "2024-07-12 13:24:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/776/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 64539 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15044 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-05T08:41:19Z" ; + schema1:dateModified "2024-03-05T08:50:30Z" ; + schema1:description """# Molecular Structure Checking using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **checking** a **molecular structure** before using it as an input for a **Molecular Dynamics** simulation. The workflow uses the **BioExcel Building Blocks library (biobb)**. The particular structure used is the crystal structure of **human Adenylate Kinase 1A (AK1A)**, in complex with the **AP5A inhibitor** (PDB code [1Z83](https://www.rcsb.org/structure/1z83)). \r +\r +**Structure checking** is a key step before setting up a protein system for **simulations**. 
A number of **common issues** found in structures at **Protein Data Bank** may compromise the success of the **simulation**, or may suggest that longer **equilibration** procedures are necessary.\r +\r +The **workflow** shows how to:\r +\r +- Run **basic manipulations on structures** (selection of models, chains, alternative locations\r +- Detect and fix **amide assignments** and **wrong chiralities**\r +- Detect and fix **protein backbone** issues (missing fragments, and atoms, capping)\r +- Detect and fix **missing side-chain atoms**\r +- **Add hydrogen atoms** according to several criteria\r +- Detect and classify **atomic clashes**\r +- Detect possible **disulfide bonds (SS)**\r +\r +An implementation of this workflow in a **web-based Graphical User Interface (GUI)** can be found in the [https://mmb.irbbarcelona.org/biobb-wfs/](https://mmb.irbbarcelona.org/biobb-wfs/) server (see [https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check](https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check)).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Molecular Structure Checking" ; 
+ schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_structure_checking/cwl/workflow.cwl" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/457?version=1" ; + schema1:isBasedOn "https://github.com/tianyao-0315/PyUtils" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PyUtils" ; + schema1:sdDatePublished "2024-07-12 13:34:08 +0100" ; + schema1:url "https://workflowhub.eu/workflows/457/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1337 ; + schema1:dateCreated "2023-04-14T02:26:04Z" ; + schema1:dateModified "2023-04-14T02:26:04Z" ; + schema1:description "" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "PyUtils" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/457?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """The tool provides a calculation of the power spectrum of Stochastic Gravitational Wave Backgorund (SGWB) from a first-order cosmological phase transition based on the parameterisations of Roper Pol et al. (2023). 
The power spectrum includes two components: from the sound waves excited by collisions of bubbles of the new phase and from the turbulence that is induced by these collisions.\r +\r +The cosmological epoch of the phase transition is described by the temperature, T_star and by the number(s) of relativistic degrees of freedom, g_star that should be specified as parameters.\r +\r +The phase transition itself is characterised by phenomenological parameters, alpha, beta_H and epsilon_turb, the latent heat, the ratio of the Hubble radius to the bubble size at percolation and the fraction of the energy otuput of the phase transition that goes into turbulence.\r +\r +The product Model spectrum outputs the power spectrum for fixed values of these parameters. The product Phase transition parameters reproduces the constraints on the phase transition parameters from the Pulsar Timing Array gravitational wave detectors, reported by Boyer & Neronov (2024), including the estimate of the cosmological magnetic field induced by turbulence.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.815.2" ; + schema1:isBasedOn "https://gitlab.renkulab.io/astronomy/mmoda/sgwb" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Stochastic Gravitational Wave Backgorund (SGWB) tool" ; + schema1:sdDatePublished "2024-07-12 13:23:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/815/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3955 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-04-18T12:59:25Z" ; + schema1:dateModified "2024-04-18T13:57:10Z" ; + schema1:description """The tool provides a calculation of the power spectrum of Stochastic Gravitational Wave Backgorund (SGWB) from a first-order cosmological phase transition based on the parameterisations of Roper Pol et al. (2023). The power spectrum includes two components: from the sound waves excited by collisions of bubbles of the new phase and from the turbulence that is induced by these collisions.\r +\r +The cosmological epoch of the phase transition is described by the temperature, T_star and by the number(s) of relativistic degrees of freedom, g_star that should be specified as parameters.\r +\r +The phase transition itself is characterised by phenomenological parameters, alpha, beta_H and epsilon_turb, the latent heat, the ratio of the Hubble radius to the bubble size at percolation and the fraction of the energy otuput of the phase transition that goes into turbulence.\r +\r +The product Model spectrum outputs the power spectrum for fixed values of these parameters. The product Phase transition parameters reproduces the constraints on the phase transition parameters from the Pulsar Timing Array gravitational wave detectors, reported by Boyer & Neronov (2024), including the estimate of the cosmological magnetic field induced by turbulence.\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/815?version=1" ; + schema1:keywords "astronomy" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Stochastic Gravitational Wave Backgorund (SGWB) tool" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/815?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """Inclusion Body Myositis Active Subnetwork Identification Workflow\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/681?version=6" ; + schema1:isBasedOn "https://github.com/jdwijnbergen/IBM_ASI_workflow.git" ; + schema1:license "notspecified" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Inclusion Body Myositis Active Subnetwork Identification Workflow" ; + schema1:sdDatePublished "2024-07-12 13:24:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/681/ro_crate?version=6" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 26584 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7536 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-27T16:10:47Z" ; + schema1:dateModified "2023-11-27T16:10:47Z" ; + schema1:description """Inclusion Body Myositis Active Subnetwork Identification Workflow\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/681?version=6" ; + schema1:keywords "Bioinformatics, CWL, Genomics, Transcriptomics, Protein-Protein Interaction" ; + schema1:license "https://choosealicense.com/no-permission/" ; + schema1:name "Inclusion Body Myositis Active Subnetwork Identification Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/681?version=6" ; + schema1:version 6 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """A hecatomb is a great sacrifice or an extensive loss. 
Heactomb the software empowers an analyst to make data driven decisions to 'sacrifice' false-positive viral reads from metagenomes to enrich for true-positive viral reads. This process frequently results in a great loss of suspected viral sequences / contigs.\r +\r +For information about installation, usage, tutorial etc please refer to the documentation: https://hecatomb.readthedocs.io/en/latest/\r +\r +### Quick start guide\r +\r +Install Hecatomb from Bioconda\r +```bash\r +# create an env called hecatomb and install Hecatomb in it\r +conda create -n hecatomb -c conda-forge -c bioconda hecatomb\r +\r +# activate conda env\r +conda activate hecatomb\r +\r +# check the installation\r +hecatomb -h\r +\r +# download the databases - you only have to do this once\r +hecatomb install\r +\r +# Run the test dataset\r +hecatomb run --test\r +```""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.235.1" ; + schema1:isBasedOn "https://github.com/shandley/hecatomb" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Hecatomb" ; + schema1:sdDatePublished "2024-07-12 13:22:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/235/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10077 ; + schema1:creator , + , + ; + schema1:dateCreated "2021-11-11T03:37:33Z" ; + schema1:dateModified "2024-05-13T02:08:43Z" ; + schema1:description """A hecatomb is a great sacrifice or an extensive loss. Heactomb the software empowers an analyst to make data driven decisions to 'sacrifice' false-positive viral reads from metagenomes to enrich for true-positive viral reads. 
This process frequently results in a great loss of suspected viral sequences / contigs.\r +\r +For information about installation, usage, tutorial etc please refer to the documentation: https://hecatomb.readthedocs.io/en/latest/\r +\r +### Quick start guide\r +\r +Install Hecatomb from Bioconda\r +```bash\r +# create an env called hecatomb and install Hecatomb in it\r +conda create -n hecatomb -c conda-forge -c bioconda hecatomb\r +\r +# activate conda env\r +conda activate hecatomb\r +\r +# check the installation\r +hecatomb -h\r +\r +# download the databases - you only have to do this once\r +hecatomb install\r +\r +# Run the test dataset\r +hecatomb run --test\r +```""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Hecatomb" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/235?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/986?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/fetchngs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fetchngs" ; + schema1:sdDatePublished "2024-07-12 13:21:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/986/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6458 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:52Z" ; + schema1:dateModified "2024-06-11T12:54:52Z" ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/986?version=13" ; + schema1:keywords "ddbj, download, ena, FASTQ, geo, sra, synapse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fetchngs" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/986?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=11" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-07-12 13:20:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=11" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14588 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:59Z" ; + schema1:dateModified "2024-06-11T12:54:59Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=11" ; + schema1:version 11 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:description """Workflow for quality assessment of paired reads and classification using NGTax 2.0 and functional annotation using picrust2. \r +In addition files are exported to their respective subfolders for easier data management in a later stage.\r +Steps: \r + - FastQC (read quality control)\r + - NGTax 2.0\r + - Picrust 2\r + - Export module for ngtax\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.154.2" ; + schema1:isBasedOn "https://git.wur.nl/unlock/cwl/-/raw/master/cwl/workflows/workflow_ngtax_picrust2.cwl" ; + schema1:license "AFL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Quality assessment, amplicon classification and functional prediction" ; + schema1:sdDatePublished "2024-07-12 13:36:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/154/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 37489 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6821 ; + schema1:creator , + ; + schema1:dateCreated "2021-09-30T11:15:19Z" ; + schema1:dateModified "2023-01-16T13:51:47Z" ; + schema1:description """Workflow for quality assessment of paired reads and classification using NGTax 2.0 and functional annotation using picrust2. \r +In addition files are exported to their respective subfolders for easier data management in a later stage.\r +Steps: \r + - FastQC (read quality control)\r + - NGTax 2.0\r + - Picrust 2\r + - Export module for ngtax\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/154?version=1" ; + schema1:keywords "Amplicon, Classification, CWL" ; + schema1:license "https://spdx.org/licenses/AFL-3.0" ; + schema1:name "Quality assessment, amplicon classification and functional prediction" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/154?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/protein-ligand-docking).\r +***\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.296.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_virtual_screening/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Protein-ligand Docking tutorial (Fpocket)" ; + 
schema1:sdDatePublished "2024-07-12 13:33:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/296/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 26851 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-24T14:58:31Z" ; + schema1:dateModified "2022-11-22T10:12:44Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/protein-ligand-docking).\r +***\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 
[675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/296?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Protein-ligand Docking tutorial (Fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_virtual_screening/galaxy/biobb_wf_virtual_screening.ga" ; + schema1:version 1 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# CLAWS (CNAG's Long-read Assembly Workflow in Snakemake)\r + Snakemake Pipeline used for de novo genome assembly @CNAG. It has been developed for Snakemake v6.0.5.\r +\r +It accepts Oxford Nanopore Technologies (ONT) reads, PacBio HFi reads, illumina paired-end data, illumina 10X data and Hi-C reads. It does the preprocessing of the reads, assembly, polishing, purge_dups, scaffodling and different evaluation steps. By default it will preprocess the reads, run Flye + Hypo + purge_dups + yahs and evaluate the resulting assemblies with BUSCO, MERQURY, Nseries and assembly_stats. It needs a config file and a spec file (json file with instructions on which resources should slurm use for each of the jobs). Both files are created by the script "create_config_assembly.py" that is located in the bin directory. 
To check all the options accepted by the script, do:\r +\r +```\r +bin/create_config_assembly.py -h\r +```\r +\r +Once the 2 config files are produced, the pipeline can be launched using snakemake like this:\r +\r +``snakemake --notemp -j 999 --snakefile assembly_pipeline.smk --configfile assembly.config --is --cluster-conf assembly.spec --use-conda --use-envmodules``\r +\r +If you are using an HPC cluster, please check how should you run snakemake to launch the jobs to the cluster. \r +\r +Most of the tools used will be installed via conda using the environments of the "envs" directory after providing the "--use-conda" option to snakemake. However, a few tools cannot be installed via conda and will have to be available in your PATH, or as a module in the cluster. Those tools are:\r +\r +- NextDenovo/2.5.0\r +- NextPolish/1.4.1\r +\r +# How to provide input data:\r +\r +There are several ways of providing the reads.\r +\r +### 1- ONT reads\r +\r +1.1 Using the option ``--ont-dir {DIR}`` in create_config_assembly.py.\r +\r +If you do so, it will look for all the files in the directory that end in '.fastq.gz' and will add the basenames to "ONT_wildcards". These wildcards will be processed by the pipeline that will:\r +\r +- Concatenate all the files into a single file\r +\r +- Run filtlong with the default or specified parameters. \r +\r +- Use the resulting file for assembly, polishing and/or purging.\r +\r +You can also specify the basenames of the files that you want to use with the ``--ont-list `` option. 
In this case, the pipeline will use the wildcards that you're providing instead of merging all the files in the directory.\r +\r +1.2 Using the option ```--ont-reads {FILE}``` in create_config_assembly.py.\r +\r +If you do so, it will consider that you already have all the reads in one file and will: \r +\r +- Run filtlong with the default or specified parameters.\r +\r +- Use the resulting file for assembly, polishing and/or purging.\r +\r +1.3 Using the option ```--ont-filt {FILE}```. It will use this file as the output from filtlong. Hence, it will skip the preprocessing steps and directly use it for assembly, polishing and/or purging. \r +\r +\r +\r +### 2-Illumina 10X-linked data\r +\r +2.1 Using the ```--raw-10X {DIR:list}``` option. \r +\r +Dictionary with 10X raw read directories, it has to be the mkfastq dir. You must specify as well the sampleIDs from this run. Example: '{"mkfastq- dir":"sample1,sample2,sample3"}'...\r +\r +It will take each basename in the list to get the fastqs from the corresponding directory and run longranger on each sample. Afterwards, it will build meryldbs for each "barcoded" file. Finally, it will concatenate all the meryldbs and "barcoded" files. Resulting "barcoded" file will be used for polishing. \r +\r +2.2 Using the ``--processed-10X {DIR}`` parameter. \r +\r +This directory can already be there or be produced by the pipeline as described in step 2.1. Once all the "barcoded" fastq files are there, meryldbs will be built for each "barcoded" file. Finally, it will concatenate all the meryldbs and "barcoded" files. Resulting "barcoded" file will be used for polishing. \r +\r +2.3 Using the ``--10X`` option. \r +\r +The argument to this is the path to the concatenated ".barcoded" file that needs to be used for polishing. If the pre-concatenated files are not given, meryldbs will be directly generated with this file, but it may run out of memory. 
\r +\r +### 3- Illumina short-read data\r +\r +3.1 Using the ``--illumina-dir {DIR}`` option, that will look for all the files in the directory that end in '.1.fastq.gz' and will add the basenames to "illumina_wildcards". These wildcards will be processed by the pipeline that will: \r +\r +- Trim adaptors with Trimgalore\r +\r +- Concatenate all the trimmed *.1.fastq.gz and the *2.fastq.gz in one file per pair. \r +\r +- The resulting reads will be used for building meryldbs and polishing. \r +\r +3.2 Using the ``--processed-illumina`` option. If the directory exists and contains files, the pipeline will look for all the files in the directory that end in '.1.fastq.gz' and will add the basenames to "illumina_wildcards". These wildcards will be processed by the pipeline that will:\r +\r +- Concatenate all the trimmed *.1.fastq.gz and the *2.fastq.gz in one file per pair. \r +\r +- The resulting reads will be used for building meryldbs and polishing. \r +\r +3.3 Using the ``--pe1 {FILE} and --pe2 {FILE}`` options. That will consider that these are the paired files containing all the illumina reads ready to be used and will build meryldbs and polish with them.\r +\r +### 4- Input assemblies\r +\r +If you want to polish an already assembled assembly, you can give it to the pipeline by using the option ``--assembly-in ASSEMBLY_IN [ASSEMBLY_IN ...]\r + Dictionary with assemblies that need to be polished but not assembled and directory where they should\r + be polished. Example: '{"assembly1":"polishing_dir1"}' '{"assembly2"="polishing_dir2"}' ...``\r + \r +If you want to start the pipeline after polishing on an already existing assembly, you can give it to the pipeline by using the option ``--postpolish-assemblies POSTPOLISH_ASSEMBLIES [POSTPOLISH_ASSEMBLIES ...]\r + Dictionary with assemblies for which postpolishing steps need to be run but that are not assembled and\r + base step for the directory where the first postpolishing step should be run. 
Example:\r + '{"assembly1":"s04.1_p03.1"}' '{"assembly2"="s04.2_p03.2"}' ...``\r +\r +To evaluate and produce the final pretext file on a curated assembly, use ``--curated-assemblies CURATED_ASSEMBLIES [CURATED_ASSEMBLIES ...]\r + Dictionary with assemblies that have already been curated. Evaluations and read alignment will be perforder. Example:\r + '{"assembly1":"s04.1_p03.1"}' '{"assembly2":"s04.2_p03.2"}' ...``\r +\r +\r +\r +# Description of implemented rules\r +\r +1- Preprocessing:\r + \r +- **Read concatenation:**\r +\r +``zcat {input.fastqs} | pigz -p {threads} -c > {output.final_fastq}``\r + \r +- **Longranger for 10X reads**: it uses the Longranger version installed in the path specified in the configfile\r +\r +``longranger basic --id={params.sample} --sample={params.sample} --fastqs={input.mkfastq_dir} --localcores={threads}``\r +\r +- **Trimgalore:** By default it gives the ``--max_n 0 --gzip -q 20 --paired --retain_unpaired`` options, but it can be changed with the ``--trim-galore-opts `` argument. \r +\r +``trim_galore -j {threads} {params.opts} {input.read1} {input.read2}``\r +\r +- **Filtlong:** it uses the Filtlong version installed in the path specified in the configfile. By default it gives the min_length and min_mean_q parameters, but extra parameters can be added with the ``--filtlong-opts`` option.\r +\r +``filtlong --min_length {params.minlen} --min_mean_q {params.min_mean_q} {params.opts} {input.reads} | pigz -p {threads} -c > {output.outreads}``\r + \r +- **Build meryldb**: it uses the merqury conda environment specified in the configfile. It takes as argument the `--mery-k` value that needs to be estimated first for the genome size. It can run either on the illumina reads, the ont reads or both, default behaviour is both. 
\r +\r +``meryl k={params.kmer} count output {output.out_dir} {input.fastq}``\r + \r +- Concat meryldbs: with the merqury conda environment specified in the configfile\r +\r +``meryl union-sum output {output.meryl_all} {input.input_run}``\r + \r +- **Align ONT (Minimap2):** it aligns the reads using minimap2 and outputs the alignment either in bam or in paf.gz formats. It uses the minimap2 conda environment specified in the configfile\r +\r +``minimap2 -{params.align_opts} -t {threads} {input.genome} {input.reads} ``\r +\r +- **Align Illumina (BWA-MEM):** it aligns the reads with BWA-mem and outputs a bam file\r +\r +``bwa mem -Y {params.options} -t {threads} {input.genome} {input.reads} | samtools view -Sb - | samtools sort -@ {threads} -o {output.mapping} -``\r +\r +2- Assembly\r +\r +- **Flye (default)**. It is run by default, if you don't want the pipeline to run it, you can give `--no-flye` option when creating the config. It uses the conda environment specified in the config. By default it is set to 2 polishing iterations and gives the genome-size estimate that has been given when creating the config. Extra options can be provided with the `--flye-opts`.\r +\r +``flye --{params.readtype} {input.reads} -o {params.outdir}out -t {threads} -i {params.pol_iterations} {params.other_flye_opts} ``\r + \r +- **Nextdenovo (if ``run-nextdenovo``):** It uses the cluster module specified in the config. If nextdenovo option is turned on, the create_config script will also create the nextdenovo config file. Check the create_config help to see which options can be modified on it. \r +\r +``nextDenovo {input.config}``\r +\r +3- Polishing\r +\r +- **Hypo (default):** It is the polisher that the pipeline uses by default, it can be turned off specifying ``--no-hypo`` when creating the config. If selected, the reads will be aligned in previous rules and then hypo will be run, it requires illumina data. It uses the conda environment specified in the config. 
\r +\r +``hypo -r @short_reads.list.txt -d {input.genome} -b {input.sr_bam} -c {coverage} -s {params.genome_size} -B {input.lr_bam} -t {threads} -o {output.polished} -p {params.proc} {params.opts} ``\r + \r +- **Nextpolish ont (if turned on):** to run nextpolish with ONT reads, specify ``--nextpolish-ont-rounds`` and the number of rounds you want to run of it. \r +\r +``"python /apps/NEXTPOLISH/1.3.1/lib/nextpolish2.py -g {input.genome} -p {threads} -l lgs.fofn -r {params.lrtype} > {output.polished}``\r + \r +- **Nextpolish illumina (if turned on):** to run nextpolish with ONT reads, specify ``--nextpolish-ill-rounds`` and the number of rounds you want to run of it. \r +\r +``"python /apps/NEXTPOLISH/1.3.1/lib/nextpolish1.py -g {input.genome} -p {threads} -s {input.bam} -t {params.task} > {output.polished}``\r +\r +4- Post-assembly\r +\r +- **Purge_dups (by default):** select ``--no-purgedups`` if you don't want to run it. If no manual cutoffs are given, it'll run purgedups with automatic cutoffs and then will rerun it selecting the mean cutoff as 0.75\\*cov. It uses the version installed in the cluster module specified in the config. \r +\r +5- Evaluations\r + \r +- **Merqury:** It runs on each 'terminal' assembly. This is, the base assembly and the resulting assembly from each branch of the pipeline. \r + \r +- **Busco:** It can be run only in the terminal assemblies or on all the assemblies produced by the pipeline. It uses the conda environment specified in the config as well as the parameters specified. \r + \r +- **Nseries:** This is run during the *finalize* on all the assemblies that are evaluated. After it, that rule combines the statistics produced by all the evaluation rules. 
\r +\r +# Description of all options\r +```\r + bin/create_config_assembly.py -h\r +usage: create_configuration_file [-h] [--configFile configFile] [--specFile specFile] [--ndconfFile ndconfFile] [--concat-cores concat_cores]\r + [--genome-size genome_size] [--lr-type lr_type] [--basename base_name] [--species species] [--keep-intermediate]\r + [--preprocess-lr-step PREPROCESS_ONT_STEP] [--preprocess-10X-step PREPROCESS_10X_STEP]\r + [--preprocess-illumina-step PREPROCESS_ILLUMINA_STEP] [--preprocess-hic-step PREPROCESS_HIC_STEP]\r + [--flye-step FLYE_STEP] [--no-flye] [--nextdenovo-step NEXTDENOVO_STEP] [--run-nextdenovo]\r + [--nextpolish-cores nextpolish_cores] [--minimap2-cores minimap2_cores] [--bwa-cores bwa_cores]\r + [--hypo-cores hypo_cores] [--pairtools-cores pairtools_cores] [--busco-cores busco_cores]\r + [--nextpolish-ont-rounds nextpolish_ont_rounds] [--nextpolish-ill-rounds nextpolish_ill_rounds]\r + [--hypo-rounds hypo_rounds] [--longranger-cores longranger_cores] [--longranger-path longranger_path]\r + [--genomescope-opts genomescope_additional] [--no-purgedups] [--ploidy ploidy] [--run-tigmint] [--run-kraken2]\r + [--no-yahs] [--scripts-dir SCRIPTS_DIR] [--ont-reads ONT_READS] [--ont-dir ONT_DIR] [--ont-filt ONT_FILTERED]\r + [--pe1 PE1] [--pe2 PE2] [--processed-illumina PROCESSED_ILLUMINA] [--raw-10X RAW_10X [RAW_10X ...]]\r + [--processed-10X PROCESSED_10X] [--10X R10X] [--illumina-dir ILLUMINA_DIR]\r + [--assembly-in ASSEMBLY_IN [ASSEMBLY_IN ...]]\r + [--postpolish-assemblies POSTPOLISH_ASSEMBLIES [POSTPOLISH_ASSEMBLIES ...]]\r + [--curated-assemblies CURATED_ASSEMBLIES [CURATED_ASSEMBLIES ...]] [--hic-dir HIC_DIR]\r + [--pipeline-workdir PIPELINE_WORKDIR] [--filtlong-dir FILTLONG_DIR] [--concat-hic-dir CONCAT_HIC_DIR]\r + [--flye-dir FLYE_DIR] [--nextdenovo-dir NEXTDENOVO_DIR] [--flye-polishing-dir POLISH_FLYE_DIR]\r + [--nextdenovo-polishing-dir POLISH_NEXTDENOVO_DIR] [--eval-dir eval_dir] [--stats-out stats_out]\r + [--hic-qc-dir hic_qc_dir] 
[--filtlong-minlen filtlong_minlen] [--filtlong-min-mean-q filtlong_min_mean_q]\r + [--filtlong-opts filtlong_opts] [--kraken2-db kraken2_db] [--kraken2-kmer kraken2_kmers]\r + [--kraken2-opts additional_kraken2_opts] [--kraken2-cores kraken2_threads] [--trim-galore-opts trim_galore_opts]\r + [--trim-Illumina-cores Trim_Illumina_cores] [--flye-cores flye_cores] [--flye-polishing-iterations flye_pol_it]\r + [--other-flye-opts other_flye_opts] [--nextdenovo-cores nextdenovo_cores] [--nextdenovo-jobtype nextdenovo_type]\r + [--nextdenovo-task nextdenovo_task] [--nextdenovo-rewrite nextdenovo_rewrite]\r + [--nextdenovo-parallel_jobs nextdenovo_parallel_jobs] [--nextdenovo-minreadlen nextdenovo_minreadlen]\r + [--nextdenovo-seeddepth nextdenovo_seeddepth] [--nextdenovo-seedcutoff nextdenovo_seedcutoff]\r + [--nextdenovo-blocksize nextdenovo_blocksize] [--nextdenovo-pa-correction nextdenovo_pa_correction]\r + [--nextdenovo-minimap_raw nextdenovo_minimap_raw] [--nextdenovo-minimap_cns nextdenovo_minimap_cns]\r + [--nextdenovo-minimap_map nextdenovo_minimap_map] [--nextdenovo-sort nextdenovo_sort]\r + [--nextdenovo-correction_opts nextdenovo_correction_opts] [--nextdenovo-nextgraph_opt nextdenovo_nextgraph_opt]\r + [--sr-cov ill_cov] [--hypo-proc hypo_processes] [--hypo-no-lr] [--hypo-opts hypo_opts]\r + [--purgedups-cores purgedups_cores] [--purgedups-calcuts-opts calcuts_opts] [--tigmint-cores tigmint_cores]\r + [--tigmint-opts tigmint_opts] [--hic-qc] [--no-pretext] [--assembly-qc assembly_qc] [--yahs-cores yahs_cores]\r + [--yahs-mq yahs_mq] [--yahs-opts yahs_opts] [--hic-map-opts hic_map_opts] [--mq mq [mq ...]]\r + [--hic-qc-assemblylen hic_qc_assemblylen] [--blast-cores blast_cores] [--hic-blastdb blastdb]\r + [--hic-readsblast hic_readsblast] [--no-final-evals] [--busco-lin busco_lineage] [--merqury-db merqury_db]\r + [--merqury-plot-opts merqury_plot_opts] [--meryl-k meryl_k] [--meryl-threads meryl_threads]\r + [--meryl-reads meryl_reads [meryl_reads ...]] 
[--ont-list ONT_wildcards] [--illumina-list illumina_wildcards]\r + [--r10X-list r10X_wildcards] [--hic-list hic_wildcards]\r +\r +Create a configuration json file for the assembly pipeline.\r +\r +options:\r + -h, --help show this help message and exit\r +\r +General Parameters:\r + --configFile configFile\r + Configuration JSON to be generated. Default assembly.config\r + --specFile specFile Cluster specifications JSON fileto be generated. Default assembly.spec\r + --ndconfFile ndconfFile\r + Name pf the nextdenovo config file. Default nextdenovo.config\r + --concat-cores concat_cores\r + Number of threads to concatenate reads and to run filtlong. Default 4\r + --genome-size genome_size\r + Approximate genome size. Example: 615m or 2.6g. Default None\r + --lr-type lr_type Type of long reads (options are flye read-type options). Default nano-hq\r + --basename base_name Base name for the project. Default None\r + --species species Name of the species to be assembled. Default None\r + --keep-intermediate Set this to True if you do not want intermediate files to be removed. Default False\r + --preprocess-lr-step PREPROCESS_ONT_STEP\r + Step for preprocessing long-reads. Default 02.1\r + --preprocess-10X-step PREPROCESS_10X_STEP\r + Step for preprocessing 10X reads. Default 02.2\r + --preprocess-illumina-step PREPROCESS_ILLUMINA_STEP\r + Step for preprocessing illumina reads. Default 02.2\r + --preprocess-hic-step PREPROCESS_HIC_STEP\r + Step for preprocessing hic reads. Default 02.3\r + --flye-step FLYE_STEP\r + Step for running flye. Default 03.1\r + --no-flye Give this option if you do not want to run Flye.\r + --nextdenovo-step NEXTDENOVO_STEP\r + Step for running nextdenovo. Default 03.2\r + --run-nextdenovo Give this option if you do want to run Nextdenovo.\r + --nextpolish-cores nextpolish_cores\r + Number of threads to run the nextpolish step. Default 24\r + --minimap2-cores minimap2_cores\r + Number of threads to run the alignment with minimap2. 
Default 32\r + --bwa-cores bwa_cores\r + Number of threads to run the alignments with BWA-Mem2. Default 16\r + --hypo-cores hypo_cores\r + Number of threads to run the hypo step. Default 24\r + --pairtools-cores pairtools_cores\r + Number of threads to run the pairtools step. Default 100\r + --busco-cores busco_cores\r + Number of threads to run BUSCO. Default 32\r + --nextpolish-ont-rounds nextpolish_ont_rounds\r + Number of rounds to run the Nextpolish with ONT step. Default 0\r + --nextpolish-ill-rounds nextpolish_ill_rounds\r + Number of rounds to run the Nextpolish with illumina step. Default 0\r + --hypo-rounds hypo_rounds\r + Number of rounds to run the Hypostep. Default 1\r + --longranger-cores longranger_cores\r + Number of threads to run longranger. Default 16\r + --longranger-path longranger_path\r + Path to longranger executable. Default /scratch/project/devel/aateam/src/10X/longranger-2.2.2\r + --genomescope-opts genomescope_additional\r + Additional options to run Genomescope2 with. Default -m 10000\r + --no-purgedups Give this option if you do not want to run Purgedups.\r + --ploidy ploidy Expected ploidy. Default 2\r + --run-tigmint Give this option if you want to run the scaffolding with 10X reads step.\r + --run-kraken2 Give this option if you want to run Kraken2 on the input reads.\r + --no-yahs Give this option if you do not want to run yahs.\r +\r +Inputs:\r + --scripts-dir SCRIPTS_DIR\r + Directory with the different scripts for the pipeline. Default\r + /software/assembly/pipelines/Assembly_pipeline/CLAWSv2.2/bin/../scripts/\r + --ont-reads ONT_READS\r + File with all the ONT reads. Default None\r + --ont-dir ONT_DIR Directory where the ONT fastqs are stored. Default None\r + --ont-filt ONT_FILTERED\r + File with the ONT reads after running filtlong on them. 
Default None\r + --pe1 PE1 File with the illumina paired-end fastqs, already trimmed, pair 1.\r + --pe2 PE2 File with the illumina paired-end fastqs, already trimmed, pair 2.\r + --processed-illumina PROCESSED_ILLUMINA\r + Directory to Processed illumina reads. Already there or to be produced by the pipeline.\r + --raw-10X RAW_10X [RAW_10X ...]\r + Dictionary with 10X raw read directories, it has to be the mkfastq dir. You must specify as well the sampleIDs from this run.\r + Example: '{"mkfastq-dir":"sample1,sample2,sample3"}'...\r + --processed-10X PROCESSED_10X\r + Directory to Processed 10X reads. Already there or to be produced by the pipeline.\r + --10X R10X File with barcoded 10X reads in fastq.gz format, concatenated.\r + --illumina-dir ILLUMINA_DIR\r + Directory where the raw illumina fastqs are stored. Default None\r + --assembly-in ASSEMBLY_IN [ASSEMBLY_IN ...]\r + Dictionary with assemblies that need to be polished but not assembled and directory where they should be polished. Example:\r + '{"assembly1":"polishing_dir1"}' '{"assembly2"="polishing_dir2"}' ...\r + --postpolish-assemblies POSTPOLISH_ASSEMBLIES [POSTPOLISH_ASSEMBLIES ...]\r + Dictionary with assemblies for which postpolishing steps need to be run but that are not assembled and base step for the\r + directory where the first postpolishing step should be run. Example: '{"assembly1":"s04.1_p03.1"}'\r + '{"assembly2":"s04.2_p03.2"}' ...\r + --curated-assemblies CURATED_ASSEMBLIES [CURATED_ASSEMBLIES ...]\r + Dictionary with assemblies that have already been curated. Evaluations and read alignment will be performed. Example:\r + '{"assembly1":"s04.1_p03.1"}' '{"assembly2":"s04.2_p03.2"}' ...\r + --hic-dir HIC_DIR Directory where the HiC fastqs are stored. Default None\r +\r +Outputs:\r + --pipeline-workdir PIPELINE_WORKDIR\r + Base directory for the pipeline run. 
Default /scratch_isilon/groups/assembly/jgomez/test_CLAWSv2/ilErePala/assembly/\r + --filtlong-dir FILTLONG_DIR\r + Directory to process the ONT reads with filtlong. Default s02.1_p01.1_Filtlong\r + --concat-hic-dir CONCAT_HIC_DIR\r + Directory to concatenate the HiC reads. Default s02.3_p01.1_Concat_HiC\r + --flye-dir FLYE_DIR Directory to run flye. Default s03.1_p02.1_flye/\r + --nextdenovo-dir NEXTDENOVO_DIR\r + Directory to run nextdenovo. Default s03.2_p02.1_nextdenovo/\r + --flye-polishing-dir POLISH_FLYE_DIR\r + Directory to polish the flye assembly. Default s04.1_p03.1_polishing/\r + --nextdenovo-polishing-dir POLISH_NEXTDENOVO_DIR\r + Directory to run nextdenovo. Default s04.2_p03.2_polishing/\r + --eval-dir eval_dir Base directory for the evaluations. Default evaluations/\r + --stats-out stats_out\r + Path to the file with the final statistics.\r + --hic-qc-dir hic_qc_dir\r + Directory to run the hic_qc. Default hic_qc/\r +\r +Filtlong:\r + --filtlong-minlen filtlong_minlen\r + Minimum read length to use with Filtlong. Default 1000\r + --filtlong-min-mean-q filtlong_min_mean_q\r + Minimum mean quality to use with Filtlong. Default 80\r + --filtlong-opts filtlong_opts\r + Extra options to run Filtlong (eg. -t 4000000000)\r +\r +Kraken2:\r + --kraken2-db kraken2_db\r + Database to be used for running Kraken2. Default None\r + --kraken2-kmer kraken2_kmers\r + Database to be used for running Kraken2. Default None\r + --kraken2-opts additional_kraken2_opts\r + Optional parameters for the rule Kraken2. Default\r + --kraken2-cores kraken2_threads\r + Number of threads to run the Kraken2 step. Default 16\r +\r +Trim_Galore:\r + --trim-galore-opts trim_galore_opts\r + Optional parameters for the rule trim_galore. Default --max_n 0 --gzip -q 20 --paired --retain_unpaired\r + --trim-Illumina-cores Trim_Illumina_cores\r + Number of threads to run the Illumina trimming step. Default 8\r +\r +Flye:\r + --flye-cores flye_cores\r + Number of threads to run FLYE. 
Default 128\r + --flye-polishing-iterations flye_pol_it\r + Number of polishing iterations to use with FLYE. Default 2\r + --other-flye-opts other_flye_opts\r + Additional options to run Flye. Default --scaffold\r +\r +Nextdenovo:\r + --nextdenovo-cores nextdenovo_cores\r + Number of threads to run nextdenovo. Default 2\r + --nextdenovo-jobtype nextdenovo_type\r + Job_type for nextdenovo. Default slurm\r + --nextdenovo-task nextdenovo_task\r + Task need to run. Default all\r + --nextdenovo-rewrite nextdenovo_rewrite\r + Overwrite existing directory. Default yes\r + --nextdenovo-parallel_jobs nextdenovo_parallel_jobs\r + Number of tasks used to run in parallel. Default 50\r + --nextdenovo-minreadlen nextdenovo_minreadlen\r + Filter reads with length < minreadlen. Default 1k\r + --nextdenovo-seeddepth nextdenovo_seeddepth\r + Expected seed depth, used to calculate seed_cutoff, co-use with genome_size, you can try to set it 30-45 to get a better\r + assembly result. Default 45\r + --nextdenovo-seedcutoff nextdenovo_seedcutoff\r + Minimum seed length, <=0 means calculate it automatically using bin/seq_stat. Default 0\r + --nextdenovo-blocksize nextdenovo_blocksize\r + Block size for parallel running, split non-seed reads into small files, the maximum size of each file is blocksize. Default 1g\r + --nextdenovo-pa-correction nextdenovo_pa_correction\r + number of corrected tasks used to run in parallel, each corrected task requires ~TOTAL_INPUT_BASES/4 bytes of memory usage,\r + overwrite parallel_jobs only for this step. Default 100\r + --nextdenovo-minimap_raw nextdenovo_minimap_raw\r + minimap2 options, used to find overlaps between raw reads, see minimap2-nd for details. Default -t 30\r + --nextdenovo-minimap_cns nextdenovo_minimap_cns\r + minimap2 options, used to find overlaps between corrected reads. Default -t 30\r + --nextdenovo-minimap_map nextdenovo_minimap_map\r + minimap2 options, used to map reads back to the assembly. 
Default -t 30 --no-kalloc\r + --nextdenovo-sort nextdenovo_sort\r + sort options, see ovl_sort for details. Default -m 400g -t 20\r + --nextdenovo-correction_opts nextdenovo_correction_opts\r + Correction options. Default -p 30 -dbuf\r + --nextdenovo-nextgraph_opt nextdenovo_nextgraph_opt\r + nextgraph options, see nextgraph for details. Default -a 1\r +\r +Hypo:\r + --sr-cov ill_cov Approximate short read coverage for hypo Default 0\r + --hypo-proc hypo_processes\r + Number of contigs to be processed in parallel by HyPo. Default 6\r + --hypo-no-lr Set this to false if you don't want to run hypo with long reads. Default True\r + --hypo-opts hypo_opts\r + Additional options to run Hypo. Default None\r +\r +Purge_dups:\r + --purgedups-cores purgedups_cores\r + Number of threads to run purgedups. Default 8\r + --purgedups-calcuts-opts calcuts_opts\r + Adjusted values to run calcuts for purgedups. Default None\r +\r +Scaffold_with_10X:\r + --tigmint-cores tigmint_cores\r + Number of threads to run the 10X scaffolding step. Default 12\r + --tigmint-opts tigmint_opts\r + Adjusted values to run the scaffolding with 10X reads. Default None\r +\r +HiC:\r + --hic-qc Give this option if only QC of the HiC data needs to be done.\r + --no-pretext Give this option if you do not want to generate the pretext file\r + --assembly-qc assembly_qc\r + Path to the assembly to be used to perform the QC of the HiC reads.\r + --yahs-cores yahs_cores\r + Number of threads to run YAHS. Default 48\r + --yahs-mq yahs_mq Mapping quality to use when running yahs. Default 40\r + --yahs-opts yahs_opts\r + Additional options to give to YAHS. Default\r + --hic-map-opts hic_map_opts\r + Options to use with bwa mem when aligning the HiC reads. Default -5SP -T0\r + --mq mq [mq ...] Mapping qualities to use for processing the hic mappings. 
Default [0, 40]\r + --hic-qc-assemblylen hic_qc_assemblylen\r + Length of the assembly to be used for HiC QC\r + --blast-cores blast_cores\r + Number of threads to run blast with the HiC unmapped reads. Default 8\r + --hic-blastdb blastdb\r + BLAST Database to use to classify the hic unmapped reads. Default /scratch_isilon/groups/assembly/data/blastdbs\r + --hic-readsblast hic_readsblast\r + Number of unmapped hic reads to classify with blast. Default 100\r +\r +Finalize:\r + --no-final-evals If specified, do not run evaluations on final assemblies. Default True\r + --busco-lin busco_lineage\r + Path to the lineage directory to run Busco with. Default None\r + --merqury-db merqury_db\r + Meryl database. Default None\r + --merqury-plot-opts merqury_plot_opts\r + Meryl database. Default None\r + --meryl-k meryl_k Merqury plot additional options, for example " -m 200 -n 6000|". Default None\r + --meryl-threads meryl_threads\r + Number of threads to run meryl and merqury. Default 4\r + --meryl-reads meryl_reads [meryl_reads ...]\r + Type of reads to be used to build the meryldb. Default ont illumina\r +\r +Wildcards:\r + --ont-list ONT_wildcards\r + List with basename of the ONT fastqs that will be used. Default None\r + --illumina-list illumina_wildcards\r + List with basename of the illumina fastqs. Default None\r + --r10X-list r10X_wildcards\r + List with basename of the raw 10X fastqs. Default None\r + --hic-list hic_wildcards\r + List with basename of the raw hic fastqs. Default None\r +```\r +# Changes made to v2.2: \r +\r +1. General: \r +\r + Now default read_type is nano-hq \r +\r +2. Rule trim_galore: \r +\r + "--max_n 0" has been added to the default behaviour of "--trim-galore-opts" \r +\r +3. Meryl: \r +\r + New option "--meryl-reads" has been added to the config. Default is "Illumina ont" to build the meryl database using both type of reads, it can be changed to one or the other \r +\r +4. 
Merqury: \r +\r + Option "--merqury-plot-opts" has been added to config file. It can be used to modify the x and y axis maximum values (eg. --merqury-plot-opts " -m 200 -n 6000") \r +\r +5. Genomescope: \r +\r + "-m 10000" is now part of the default behavior of "--genomescope-opts" \r +\r +6. Hic_statistics: \r +\r + This is now running for each assembly and mq for which a pretext file is generated \r +\r +7. Assembly inputs for different steps: \r +\r + a. "--assembly-in" to start after assembly step (eg. Evaluation, polishing, purging and scaffolding) \r +\r + b. "--postpolish-assemblies" to start after polishing step (eg. Evaluation, purging and scaffolding) \r +\r + c. "--curated-assemblies" to start after scaffolding step (eg. Evaluation and pretext generation) \r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.567.2" ; + schema1:isBasedOn "https://github.com/cnag-aat/assembly_pipeline.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CLAWS (CNAG's long-read assembly workflow in Snakemake)" ; + schema1:sdDatePublished "2024-07-12 13:24:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/567/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3879 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-02-02T12:24:07Z" ; + schema1:dateModified "2024-02-02T12:24:51Z" ; + schema1:description """# CLAWS (CNAG's Long-read Assembly Workflow in Snakemake)\r + Snakemake Pipeline used for de novo genome assembly @CNAG. It has been developed for Snakemake v6.0.5.\r +\r +It accepts Oxford Nanopore Technologies (ONT) reads, PacBio HiFi reads, illumina paired-end data, illumina 10X data and Hi-C reads. It does the preprocessing of the reads, assembly, polishing, purge_dups, scaffolding and different evaluation steps. 
By default it will preprocess the reads, run Flye + Hypo + purge_dups + yahs and evaluate the resulting assemblies with BUSCO, MERQURY, Nseries and assembly_stats. It needs a config file and a spec file (json file with instructions on which resources should slurm use for each of the jobs). Both files are created by the script "create_config_assembly.py" that is located in the bin directory. To check all the options accepted by the script, do:\r +\r +```\r +bin/create_config_assembly.py -h\r +```\r +\r +Once the 2 config files are produced, the pipeline can be launched using snakemake like this:\r +\r +``snakemake --notemp -j 999 --snakefile assembly_pipeline.smk --configfile assembly.config --is --cluster-conf assembly.spec --use-conda --use-envmodules``\r +\r +If you are using an HPC cluster, please check how should you run snakemake to launch the jobs to the cluster. \r +\r +Most of the tools used will be installed via conda using the environments of the "envs" directory after providing the "--use-conda" option to snakemake. However, a few tools cannot be installed via conda and will have to be available in your PATH, or as a module in the cluster. Those tools are:\r +\r +- NextDenovo/2.5.0\r +- NextPolish/1.4.1\r +\r +# How to provide input data:\r +\r +There are several ways of providing the reads.\r +\r +### 1- ONT reads\r +\r +1.1 Using the option ``--ont-dir {DIR}`` in create_config_assembly.py.\r +\r +If you do so, it will look for all the files in the directory that end in '.fastq.gz' and will add the basenames to "ONT_wildcards". These wildcards will be processed by the pipeline that will:\r +\r +- Concatenate all the files into a single file\r +\r +- Run filtlong with the default or specified parameters. \r +\r +- Use the resulting file for assembly, polishing and/or purging.\r +\r +You can also specify the basenames of the files that you want to use with the ``--ont-list `` option. 
In this case, the pipeline will use the wildcards that you're providing instead of merging all the files in the directory.\r +\r +1.2 Using the option ```--ont-reads {FILE}``` in create_config_assembly.py.\r +\r +If you do so, it will consider that you already have all the reads in one file and will: \r +\r +- Run filtlong with the default or specified parameters.\r +\r +- Use the resulting file for assembly, polishing and/or purging.\r +\r +1.3 Using the option ```--ont-filt {FILE}```. It will use this file as the output from filtlong. Hence, it will skip the preprocessing steps and directly use it for assembly, polishing and/or purging. \r +\r +\r +\r +### 2-Illumina 10X-linked data\r +\r +2.1 Using the ```--raw-10X {DIR:list}``` option. \r +\r +Dictionary with 10X raw read directories, it has to be the mkfastq dir. You must specify as well the sampleIDs from this run. Example: '{"mkfastq- dir":"sample1,sample2,sample3"}'...\r +\r +It will take each basename in the list to get the fastqs from the corresponding directory and run longranger on each sample. Afterwards, it will build meryldbs for each "barcoded" file. Finally, it will concatenate all the meryldbs and "barcoded" files. Resulting "barcoded" file will be used for polishing. \r +\r +2.2 Using the ``--processed-10X {DIR}`` parameter. \r +\r +This directory can already be there or be produced by the pipeline as described in step 2.1. Once all the "barcoded" fastq files are there, meryldbs will be built for each "barcoded" file. Finally, it will concatenate all the meryldbs and "barcoded" files. Resulting "barcoded" file will be used for polishing. \r +\r +2.3 Using the ``--10X`` option. \r +\r +The argument to this is the path to the concatenated ".barcoded" file that needs to be used for polishing. If the pre-concatenated files are not given, meryldbs will be directly generated with this file, but it may run out of memory. 
\r +\r +### 3- Illumina short-read data\r +\r +3.1 Using the ``--illumina-dir {DIR}`` option, that will look for all the files in the directory that end in '.1.fastq.gz' and will add the basenames to "illumina_wildcards". These wildcards will be processed by the pipeline that will: \r +\r +- Trim adaptors with Trimgalore\r +\r +- Concatenate all the trimmed *.1.fastq.gz and the *2.fastq.gz in one file per pair. \r +\r +- The resulting reads will be used for building meryldbs and polishing. \r +\r +3.2 Using the ``--processed-illumina`` option. If the directory exists and contains files, the pipeline will look for all the files in the directory that end in '.1.fastq.gz' and will add the basenames to "illumina_wildcards". These wildcards will be processed by the pipeline that will:\r +\r +- Concatenate all the trimmed *.1.fastq.gz and the *2.fastq.gz in one file per pair. \r +\r +- The resulting reads will be used for building meryldbs and polishing. \r +\r +3.3 Using the ``--pe1 {FILE} and --pe2 {FILE}`` options. That will consider that these are the paired files containing all the illumina reads ready to be used and will build meryldbs and polish with them.\r +\r +### 4- Input assemblies\r +\r +If you want to polish an already assembled assembly, you can give it to the pipeline by using the option ``--assembly-in ASSEMBLY_IN [ASSEMBLY_IN ...]\r + Dictionary with assemblies that need to be polished but not assembled and directory where they should\r + be polished. Example: '{"assembly1":"polishing_dir1"}' '{"assembly2"="polishing_dir2"}' ...``\r + \r +If you want to start the pipeline after polishing on an already existing assembly, you can give it to the pipeline by using the option ``--postpolish-assemblies POSTPOLISH_ASSEMBLIES [POSTPOLISH_ASSEMBLIES ...]\r + Dictionary with assemblies for which postpolishing steps need to be run but that are not assembled and\r + base step for the directory where the first postpolishing step should be run. 
Example:\r + '{"assembly1":"s04.1_p03.1"}' '{"assembly2"="s04.2_p03.2"}' ...``\r +\r +To evaluate and produce the final pretext file on a curated assembly, use ``--curated-assemblies CURATED_ASSEMBLIES [CURATED_ASSEMBLIES ...]\r + Dictionary with assemblies that have already been curated. Evaluations and read alignment will be performed. Example:\r + '{"assembly1":"s04.1_p03.1"}' '{"assembly2":"s04.2_p03.2"}' ...``\r +\r +\r +\r +# Description of implemented rules\r +\r +1- Preprocessing:\r + \r +- **Read concatenation:**\r +\r +``zcat {input.fastqs} | pigz -p {threads} -c > {output.final_fastq}``\r + \r +- **Longranger for 10X reads**: it uses the Longranger version installed in the path specified in the configfile\r +\r +``longranger basic --id={params.sample} --sample={params.sample} --fastqs={input.mkfastq_dir} --localcores={threads}``\r +\r +- **Trimgalore:** By default it gives the ``--max_n 0 --gzip -q 20 --paired --retain_unpaired`` options, but it can be changed with the ``--trim-galore-opts `` argument. \r +\r +- **Filtlong:** it uses the Filtlong version installed in the path specified in the configfile. By default it gives the min_length and min_mean_q parameters, but extra parameters can be added with the ``--filtlong-opts`` option.\r +\r +``filtlong --min_length {params.minlen} --min_mean_q {params.min_mean_q} {params.opts} {input.reads} | pigz -p {threads} -c > {output.outreads}``\r + \r +- **Build meryldb**: it uses the merqury conda environment specified in the configfile. It takes as argument the `--meryl-k` value that needs to be estimated first for the genome size. It can run either on the illumina reads, the ont reads or both, default behaviour is both. 
\r +\r +``meryl k={params.kmer} count output {output.out_dir} {input.fastq}``\r + \r +- Concat meryldbs: with the merqury conda environment specified in the configfile\r +\r +``meryl union-sum output {output.meryl_all} {input.input_run}``\r + \r +- **Align ONT (Minimap2):** it aligns the reads using minimap2 and outputs the alignment either in bam or in paf.gz formats. It uses the minimap2 conda environment specified in the configfile\r +\r +``minimap2 -{params.align_opts} -t {threads} {input.genome} {input.reads} ``\r +\r +- **Align Illumina (BWA-MEM):** it aligns the reads with BWA-mem and outputs a bam file\r +\r +``bwa mem -Y {params.options} -t {threads} {input.genome} {input.reads} | samtools view -Sb - | samtools sort -@ {threads} -o {output.mapping} -``\r +\r +2- Assembly\r +\r +- **Flye (default)**. It is run by default, if you don't want the pipeline to run it, you can give `--no-flye` option when creating the config. It uses the conda environment specified in the config. By default it is set to 2 polishing iterations and gives the genome-size estimate that has been given when creating the config. Extra options can be provided with the `--flye-opts`.\r +\r +``flye --{params.readtype} {input.reads} -o {params.outdir}out -t {threads} -i {params.pol_iterations} {params.other_flye_opts} ``\r + \r +- **Nextdenovo (if ``run-nextdenovo``):** It uses the cluster module specified in the config. If nextdenovo option is turned on, the create_config script will also create the nextdenovo config file. Check the create_config help to see which options can be modified on it. \r +\r +``nextDenovo {input.config}``\r +\r +3- Polishing\r +\r +- **Hypo (default):** It is the polisher that the pipeline uses by default, it can be turned off specifying ``--no-hypo`` when creating the config. If selected, the reads will be aligned in previous rules and then hypo will be run, it requires illumina data. It uses the conda environment specified in the config. 
\r +\r +``hypo -r @short_reads.list.txt -d {input.genome} -b {input.sr_bam} -c {coverage} -s {params.genome_size} -B {input.lr_bam} -t {threads} -o {output.polished} -p {params.proc} {params.opts} ``\r + \r +- **Nextpolish ont (if turned on):** to run nextpolish with ONT reads, specify ``--nextpolish-ont-rounds`` and the number of rounds you want to run of it. \r +\r +``"python /apps/NEXTPOLISH/1.3.1/lib/nextpolish2.py -g {input.genome} -p {threads} -l lgs.fofn -r {params.lrtype} > {output.polished}``\r + \r +- **Nextpolish illumina (if turned on):** to run nextpolish with ONT reads, specify ``--nextpolish-ill-rounds`` and the number of rounds you want to run of it. \r +\r +``"python /apps/NEXTPOLISH/1.3.1/lib/nextpolish1.py -g {input.genome} -p {threads} -s {input.bam} -t {params.task} > {output.polished}``\r +\r +4- Post-assembly\r +\r +- **Purge_dups (by default):** select ``--no-purgedups`` if you don't want to run it. If no manual cutoffs are given, it'll run purgedups with automatic cutoffs and then will rerun it selecting the mean cutoff as 0.75\\*cov. It uses the version installed in the cluster module specified in the config. \r +\r +5- Evaluations\r + \r +- **Merqury:** It runs on each 'terminal' assembly. This is, the base assembly and the resulting assembly from each branch of the pipeline. \r + \r +- **Busco:** It can be run only in the terminal assemblies or on all the assemblies produced by the pipeline. It uses the conda environment specified in the config as well as the parameters specified. \r + \r +- **Nseries:** This is run during the *finalize* on all the assemblies that are evaluated. After it, that rule combines the statistics produced by all the evaluation rules. 
\r +\r +# Description of all options\r +```\r + bin/create_config_assembly.py -h\r +usage: create_configuration_file [-h] [--configFile configFile] [--specFile specFile] [--ndconfFile ndconfFile] [--concat-cores concat_cores]\r + [--genome-size genome_size] [--lr-type lr_type] [--basename base_name] [--species species] [--keep-intermediate]\r + [--preprocess-lr-step PREPROCESS_ONT_STEP] [--preprocess-10X-step PREPROCESS_10X_STEP]\r + [--preprocess-illumina-step PREPROCESS_ILLUMINA_STEP] [--preprocess-hic-step PREPROCESS_HIC_STEP]\r + [--flye-step FLYE_STEP] [--no-flye] [--nextdenovo-step NEXTDENOVO_STEP] [--run-nextdenovo]\r + [--nextpolish-cores nextpolish_cores] [--minimap2-cores minimap2_cores] [--bwa-cores bwa_cores]\r + [--hypo-cores hypo_cores] [--pairtools-cores pairtools_cores] [--busco-cores busco_cores]\r + [--nextpolish-ont-rounds nextpolish_ont_rounds] [--nextpolish-ill-rounds nextpolish_ill_rounds]\r + [--hypo-rounds hypo_rounds] [--longranger-cores longranger_cores] [--longranger-path longranger_path]\r + [--genomescope-opts genomescope_additional] [--no-purgedups] [--ploidy ploidy] [--run-tigmint] [--run-kraken2]\r + [--no-yahs] [--scripts-dir SCRIPTS_DIR] [--ont-reads ONT_READS] [--ont-dir ONT_DIR] [--ont-filt ONT_FILTERED]\r + [--pe1 PE1] [--pe2 PE2] [--processed-illumina PROCESSED_ILLUMINA] [--raw-10X RAW_10X [RAW_10X ...]]\r + [--processed-10X PROCESSED_10X] [--10X R10X] [--illumina-dir ILLUMINA_DIR]\r + [--assembly-in ASSEMBLY_IN [ASSEMBLY_IN ...]]\r + [--postpolish-assemblies POSTPOLISH_ASSEMBLIES [POSTPOLISH_ASSEMBLIES ...]]\r + [--curated-assemblies CURATED_ASSEMBLIES [CURATED_ASSEMBLIES ...]] [--hic-dir HIC_DIR]\r + [--pipeline-workdir PIPELINE_WORKDIR] [--filtlong-dir FILTLONG_DIR] [--concat-hic-dir CONCAT_HIC_DIR]\r + [--flye-dir FLYE_DIR] [--nextdenovo-dir NEXTDENOVO_DIR] [--flye-polishing-dir POLISH_FLYE_DIR]\r + [--nextdenovo-polishing-dir POLISH_NEXTDENOVO_DIR] [--eval-dir eval_dir] [--stats-out stats_out]\r + [--hic-qc-dir hic_qc_dir] 
[--filtlong-minlen filtlong_minlen] [--filtlong-min-mean-q filtlong_min_mean_q]\r + [--filtlong-opts filtlong_opts] [--kraken2-db kraken2_db] [--kraken2-kmer kraken2_kmers]\r + [--kraken2-opts additional_kraken2_opts] [--kraken2-cores kraken2_threads] [--trim-galore-opts trim_galore_opts]\r + [--trim-Illumina-cores Trim_Illumina_cores] [--flye-cores flye_cores] [--flye-polishing-iterations flye_pol_it]\r + [--other-flye-opts other_flye_opts] [--nextdenovo-cores nextdenovo_cores] [--nextdenovo-jobtype nextdenovo_type]\r + [--nextdenovo-task nextdenovo_task] [--nextdenovo-rewrite nextdenovo_rewrite]\r + [--nextdenovo-parallel_jobs nextdenovo_parallel_jobs] [--nextdenovo-minreadlen nextdenovo_minreadlen]\r + [--nextdenovo-seeddepth nextdenovo_seeddepth] [--nextdenovo-seedcutoff nextdenovo_seedcutoff]\r + [--nextdenovo-blocksize nextdenovo_blocksize] [--nextdenovo-pa-correction nextdenovo_pa_correction]\r + [--nextdenovo-minimap_raw nextdenovo_minimap_raw] [--nextdenovo-minimap_cns nextdenovo_minimap_cns]\r + [--nextdenovo-minimap_map nextdenovo_minimap_map] [--nextdenovo-sort nextdenovo_sort]\r + [--nextdenovo-correction_opts nextdenovo_correction_opts] [--nextdenovo-nextgraph_opt nextdenovo_nextgraph_opt]\r + [--sr-cov ill_cov] [--hypo-proc hypo_processes] [--hypo-no-lr] [--hypo-opts hypo_opts]\r + [--purgedups-cores purgedups_cores] [--purgedups-calcuts-opts calcuts_opts] [--tigmint-cores tigmint_cores]\r + [--tigmint-opts tigmint_opts] [--hic-qc] [--no-pretext] [--assembly-qc assembly_qc] [--yahs-cores yahs_cores]\r + [--yahs-mq yahs_mq] [--yahs-opts yahs_opts] [--hic-map-opts hic_map_opts] [--mq mq [mq ...]]\r + [--hic-qc-assemblylen hic_qc_assemblylen] [--blast-cores blast_cores] [--hic-blastdb blastdb]\r + [--hic-readsblast hic_readsblast] [--no-final-evals] [--busco-lin busco_lineage] [--merqury-db merqury_db]\r + [--merqury-plot-opts merqury_plot_opts] [--meryl-k meryl_k] [--meryl-threads meryl_threads]\r + [--meryl-reads meryl_reads [meryl_reads ...]] 
[--ont-list ONT_wildcards] [--illumina-list illumina_wildcards]\r + [--r10X-list r10X_wildcards] [--hic-list hic_wildcards]\r +\r +Create a configuration json file for the assembly pipeline.\r +\r +options:\r + -h, --help show this help message and exit\r +\r +General Parameters:\r + --configFile configFile\r + Configuration JSON to be generated. Default assembly.config\r + --specFile specFile Cluster specifications JSON fileto be generated. Default assembly.spec\r + --ndconfFile ndconfFile\r + Name pf the nextdenovo config file. Default nextdenovo.config\r + --concat-cores concat_cores\r + Number of threads to concatenate reads and to run filtlong. Default 4\r + --genome-size genome_size\r + Approximate genome size. Example: 615m or 2.6g. Default None\r + --lr-type lr_type Type of long reads (options are flye read-type options). Default nano-hq\r + --basename base_name Base name for the project. Default None\r + --species species Name of the species to be assembled. Default None\r + --keep-intermediate Set this to True if you do not want intermediate files to be removed. Default False\r + --preprocess-lr-step PREPROCESS_ONT_STEP\r + Step for preprocessing long-reads. Default 02.1\r + --preprocess-10X-step PREPROCESS_10X_STEP\r + Step for preprocessing 10X reads. Default 02.2\r + --preprocess-illumina-step PREPROCESS_ILLUMINA_STEP\r + Step for preprocessing illumina reads. Default 02.2\r + --preprocess-hic-step PREPROCESS_HIC_STEP\r + Step for preprocessing hic reads. Default 02.3\r + --flye-step FLYE_STEP\r + Step for running flye. Default 03.1\r + --no-flye Give this option if you do not want to run Flye.\r + --nextdenovo-step NEXTDENOVO_STEP\r + Step for running nextdenovo. Default 03.2\r + --run-nextdenovo Give this option if you do want to run Nextdenovo.\r + --nextpolish-cores nextpolish_cores\r + Number of threads to run the nextpolish step. Default 24\r + --minimap2-cores minimap2_cores\r + Number of threads to run the alignment with minimap2. 
Default 32\r + --bwa-cores bwa_cores\r + Number of threads to run the alignments with BWA-Mem2. Default 16\r + --hypo-cores hypo_cores\r + Number of threads to run the hypo step. Default 24\r + --pairtools-cores pairtools_cores\r + Number of threads to run the pairtools step. Default 100\r + --busco-cores busco_cores\r + Number of threads to run BUSCO. Default 32\r + --nextpolish-ont-rounds nextpolish_ont_rounds\r + Number of rounds to run the Nextpolish with ONT step. Default 0\r + --nextpolish-ill-rounds nextpolish_ill_rounds\r + Number of rounds to run the Nextpolish with illumina step. Default 0\r + --hypo-rounds hypo_rounds\r + Number of rounds to run the Hypostep. Default 1\r + --longranger-cores longranger_cores\r + Number of threads to run longranger. Default 16\r + --longranger-path longranger_path\r + Path to longranger executable. Default /scratch/project/devel/aateam/src/10X/longranger-2.2.2\r + --genomescope-opts genomescope_additional\r + Additional options to run Genomescope2 with. Default -m 10000\r + --no-purgedups Give this option if you do not want to run Purgedups.\r + --ploidy ploidy Expected ploidy. Default 2\r + --run-tigmint Give this option if you want to run the scaffolding with 10X reads step.\r + --run-kraken2 Give this option if you want to run Kraken2 on the input reads.\r + --no-yahs Give this option if you do not want to run yahs.\r +\r +Inputs:\r + --scripts-dir SCRIPTS_DIR\r + Directory with the different scripts for the pipeline. Default\r + /software/assembly/pipelines/Assembly_pipeline/CLAWSv2.2/bin/../scripts/\r + --ont-reads ONT_READS\r + File with all the ONT reads. Default None\r + --ont-dir ONT_DIR Directory where the ONT fastqs are stored. Default None\r + --ont-filt ONT_FILTERED\r + File with the ONT reads after running filtlong on them. 
Default None\r + --pe1 PE1 File with the illumina paired-end fastqs, already trimmed, pair 1.\r + --pe2 PE2 File with the illumina paired-end fastqs, already trimmed, pair 2.\r + --processed-illumina PROCESSED_ILLUMINA\r + Directory to Processed illumina reads. Already there or to be produced by the pipeline.\r + --raw-10X RAW_10X [RAW_10X ...]\r + Dictionary with 10X raw read directories, it has to be the mkfastq dir. You must specify as well the sampleIDs from this run.\r + Example: '{"mkfastq-dir":"sample1,sample2,sample3"}'...\r + --processed-10X PROCESSED_10X\r + Directory to Processed 10X reads. Already there or to be produced by the pipeline.\r + --10X R10X File with barcoded 10X reads in fastq.gz format, concatenated.\r + --illumina-dir ILLUMINA_DIR\r + Directory where the raw illumina fastqs are stored. Default None\r + --assembly-in ASSEMBLY_IN [ASSEMBLY_IN ...]\r + Dictionary with assemblies that need to be polished but not assembled and directory where they should be polished. Example:\r + '{"assembly1":"polishing_dir1"}' '{"assembly2"="polishing_dir2"}' ...\r + --postpolish-assemblies POSTPOLISH_ASSEMBLIES [POSTPOLISH_ASSEMBLIES ...]\r + Dictionary with assemblies for whic postpolishing steps need to be run but that are not assembled and base step for the\r + directory where the first postpolishing step should be run. Example: '{"assembly1":"s04.1_p03.1"}'\r + '{"assembly2":"s04.2_p03.2"}' ...\r + --curated-assemblies CURATED_ASSEMBLIES [CURATED_ASSEMBLIES ...]\r + Dictionary with assemblies that have already been curated. Evaluations and read alignment will be perforder. Example:\r + '{"assembly1":"s04.1_p03.1"}' '{"assembly2":"s04.2_p03.2"}' ...\r + --hic-dir HIC_DIR Directory where the HiC fastqs are stored. Default None\r +\r +Outputs:\r + --pipeline-workdir PIPELINE_WORKDIR\r + Base directory for the pipeline run. 
Default /scratch_isilon/groups/assembly/jgomez/test_CLAWSv2/ilErePala/assembly/\r + --filtlong-dir FILTLONG_DIR\r + Directory to process the ONT reads with filtlong. Default s02.1_p01.1_Filtlong\r + --concat-hic-dir CONCAT_HIC_DIR\r + Directory to concatenate the HiC reads. Default s02.3_p01.1_Concat_HiC\r + --flye-dir FLYE_DIR Directory to run flye. Default s03.1_p02.1_flye/\r + --nextdenovo-dir NEXTDENOVO_DIR\r + Directory to run nextdenovo. Default s03.2_p02.1_nextdenovo/\r + --flye-polishing-dir POLISH_FLYE_DIR\r + Directory to polish the flye assembly. Default s04.1_p03.1_polishing/\r + --nextdenovo-polishing-dir POLISH_NEXTDENOVO_DIR\r + Directory to run nextdenovo. Default s04.2_p03.2_polishing/\r + --eval-dir eval_dir Base directory for the evaluations. Default evaluations/\r + --stats-out stats_out\r + Path to the file with the final statistics.\r + --hic-qc-dir hic_qc_dir\r + Directory to run the hic_qc. Default hic_qc/\r +\r +Filtlong:\r + --filtlong-minlen filtlong_minlen\r + Minimum read length to use with Filtlong. Default 1000\r + --filtlong-min-mean-q filtlong_min_mean_q\r + Minimum mean quality to use with Filtlong. Default 80\r + --filtlong-opts filtlong_opts\r + Extra options to run Filtlong (eg. -t 4000000000)\r +\r +Kraken2:\r + --kraken2-db kraken2_db\r + Database to be used for running Kraken2. Default None\r + --kraken2-kmer kraken2_kmers\r + Database to be used for running Kraken2. Default None\r + --kraken2-opts additional_kraken2_opts\r + Optional parameters for the rule Kraken2. Default\r + --kraken2-cores kraken2_threads\r + Number of threads to run the Kraken2 step. Default 16\r +\r +Trim_Galore:\r + --trim-galore-opts trim_galore_opts\r + Optional parameters for the rule trim_galore. Default --max_n 0 --gzip -q 20 --paired --retain_unpaired\r + --trim-Illumina-cores Trim_Illumina_cores\r + Number of threads to run the Illumina trimming step. Default 8\r +\r +Flye:\r + --flye-cores flye_cores\r + Number of threads to run FLYE. 
Default 128\r + --flye-polishing-iterations flye_pol_it\r + Number of polishing iterations to use with FLYE. Default 2\r + --other-flye-opts other_flye_opts\r + Additional options to run Flye. Default --scaffold\r +\r +Nextdenovo:\r + --nextdenovo-cores nextdenovo_cores\r + Number of threads to run nextdenovo. Default 2\r + --nextdenovo-jobtype nextdenovo_type\r + Job_type for nextdenovo. Default slurm\r + --nextdenovo-task nextdenovo_task\r + Task need to run. Default all\r + --nextdenovo-rewrite nextdenovo_rewrite\r + Overwrite existing directory. Default yes\r + --nextdenovo-parallel_jobs nextdenovo_parallel_jobs\r + Number of tasks used to run in parallel. Default 50\r + --nextdenovo-minreadlen nextdenovo_minreadlen\r + Filter reads with length < minreadlen. Default 1k\r + --nextdenovo-seeddepth nextdenovo_seeddepth\r + Expected seed depth, used to calculate seed_cutoff, co-use with genome_size, you can try to set it 30-45 to get a better\r + assembly result. Default 45\r + --nextdenovo-seedcutoff nextdenovo_seedcutoff\r + Minimum seed length, <=0 means calculate it automatically using bin/seq_stat. Default 0\r + --nextdenovo-blocksize nextdenovo_blocksize\r + Block size for parallel running, split non-seed reads into small files, the maximum size of each file is blocksize. Default 1g\r + --nextdenovo-pa-correction nextdenovo_pa_correction\r + number of corrected tasks used to run in parallel, each corrected task requires ~TOTAL_INPUT_BASES/4 bytes of memory usage,\r + overwrite parallel_jobs only for this step. Default 100\r + --nextdenovo-minimap_raw nextdenovo_minimap_raw\r + minimap2 options, used to find overlaps between raw reads, see minimap2-nd for details. Default -t 30\r + --nextdenovo-minimap_cns nextdenovo_minimap_cns\r + minimap2 options, used to find overlaps between corrected reads. Default -t 30\r + --nextdenovo-minimap_map nextdenovo_minimap_map\r + minimap2 options, used to map reads back to the assembly. 
Default -t 30 --no-kalloc\r + --nextdenovo-sort nextdenovo_sort\r + sort options, see ovl_sort for details. Default -m 400g -t 20\r + --nextdenovo-correction_opts nextdenovo_correction_opts\r + Correction options. Default -p 30 -dbuf\r + --nextdenovo-nextgraph_opt nextdenovo_nextgraph_opt\r + nextgraph options, see nextgraph for details. Default -a 1\r +\r +Hypo:\r + --sr-cov ill_cov Approximate short read coverage for hypo Default 0\r + --hypo-proc hypo_processes\r + Number of contigs to be processed in parallel by HyPo. Default 6\r + --hypo-no-lr Set this to false if you don¡t want to run hypo with long reads. Default True\r + --hypo-opts hypo_opts\r + Additional options to run Hypo. Default None\r +\r +Purge_dups:\r + --purgedups-cores purgedups_cores\r + Number of threads to run purgedups. Default 8\r + --purgedups-calcuts-opts calcuts_opts\r + Adjusted values to run calcuts for purgedups. Default None\r +\r +Scaffold_with_10X:\r + --tigmint-cores tigmint_cores\r + Number of threads to run the 10X scaffolding step. Default 12\r + --tigmint-opts tigmint_opts\r + Adjusted values to run the scaffolding with 10X reads. Default None\r +\r +HiC:\r + --hic-qc Give this option if only QC of the HiC data needs to be done.\r + --no-pretext Give this option if you do not want to generate the pretext file\r + --assembly-qc assembly_qc\r + Path to the assembly to be used perfom the QC of the HiC reads.\r + --yahs-cores yahs_cores\r + Number of threads to run YAHS. Default 48\r + --yahs-mq yahs_mq Mapping quality to use when running yahs.Default 40\r + --yahs-opts yahs_opts\r + Additional options to give to YAHS.Default\r + --hic-map-opts hic_map_opts\r + Options to use with bwa mem when aligning the HiC reads. Deafault -5SP -T0\r + --mq mq [mq ...] Mapping qualities to use for processing the hic mappings. 
Default [0, 40]\r + --hic-qc-assemblylen hic_qc_assemblylen\r + Lentgh of the assembly to be used for HiC QC\r + --blast-cores blast_cores\r + Number of threads to run blast with the HiC unmapped reads.Default 8\r + --hic-blastdb blastdb\r + BLAST Database to use to classify the hic unmapped reads. Default /scratch_isilon/groups/assembly/data/blastdbs\r + --hic-readsblast hic_readsblast\r + Number of unmapped hic reads to classify with blast. Default 100\r +\r +Finalize:\r + --no-final-evals If specified, do not run evaluations on final assemblies. Default True\r + --busco-lin busco_lineage\r + Path to the lineage directory to run Busco with. Default None\r + --merqury-db merqury_db\r + Meryl database. Default None\r + --merqury-plot-opts merqury_plot_opts\r + Meryl database. Default None\r + --meryl-k meryl_k Merqury plot additional options, for example " -m 200 -n 6000|". Default None\r + --meryl-threads meryl_threads\r + Number of threads to run meryl and merqury. Default 4\r + --meryl-reads meryl_reads [meryl_reads ...]\r + Type of reads to be used to build the meryldb. Default ont illumina\r +\r +Wildcards:\r + --ont-list ONT_wildcards\r + List with basename of the ONT fastqs that will be used. Default None\r + --illumina-list illumina_wildcards\r + List with basename of the illumina fastqs. Default None\r + --r10X-list r10X_wildcards\r + List with basename of the raw 10X fastqs. Default None\r + --hic-list hic_wildcards\r + List with basename of the raw hic fastqs. Default None\r +```\r +# Changes made to v2.2: \r +\r +1. General: \r +\r + Now default read_type is nano-hq \r +\r +2. Rule trim_galore: \r +\r + "--max_n 0" has been added to the default behaviour of "--trim-galore-opts" \r +\r +3. Meryl: \r +\r + New option "--meryl-reads" has been added to the config. Default is "Illumina ont" to build the meryl database using both type of reads, it can be changed to one or the other \r +\r +4. 
Merqury: \r +\r + Option "--merqury-plot-opts" has been added to config file. It can be used to modify the x and y axis maximum values (eg. --merqury-plot-opts " -m 200 -n 6000") \r +\r +5. Genomescope: \r +\r + "-m 10000" is now part of the default behavior of "--genomescope-opts" \r +\r +6. Hic_statistics: \r +\r + This is now running for each assembly and mq for which a pretext file is generated \r +\r +7. Assembly inputs for different steps: \r +\r + a. "--assembly-in" to start after assembly step (eg. Evaluation, polishing, purging and scaffolding) \r +\r + b. "--postpolish-assemblies" to start after polishing step (eg. Evaluation, purging and scaffolding) \r +\r + c. "--curated-assemblies" to start after scaffolding step (eg. Evaluation and pretext generation) \r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/567?version=1" ; + schema1:keywords "Bioinformatics, Genomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "CLAWS (CNAG's long-read assembly workflow in Snakemake)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/567?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """\r +This repository hosts Metabolome Annotation Workflow (MAW). The workflow takes MS2 .mzML format data files as an input in R. It performs spectral database dereplication using R Package Spectra and compound database dereplication using SIRIUS OR MetFrag . 
Final candidate selection is done in Python using RDKit and PubChemPy.""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.510.2" ; + schema1:isBasedOn "https://github.com/zmahnoor14/MAW" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Metabolome Annotation Workflow (MAW)" ; + schema1:sdDatePublished "2024-07-12 13:32:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/510/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1506 ; + schema1:creator , + , + , + , + , + ; + schema1:dateCreated "2023-08-01T14:21:50Z" ; + schema1:dateModified "2023-08-01T14:22:09Z" ; + schema1:description """\r +This repository hosts Metabolome Annotation Workflow (MAW). The workflow takes MS2 .mzML format data files as an input in R. It performs spectral database dereplication using R Package Spectra and compound database dereplication using SIRIUS OR MetFrag . Final candidate selection is done in Python using RDKit and PubChemPy.""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/510?version=1" ; + schema1:keywords "Metabolomics, Annotation, mass-spectrometry, identification, Bioinformatics, FAIR workflows, workflow, gnps, massbank, hmdb, spectra, rdkit, Cheminformatics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Metabolome Annotation Workflow (MAW)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/510?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + ; + ns1:output , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 10427 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-07-12 13:22:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5136 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:11Z" ; + schema1:dateModified "2024-06-11T12:55:11Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/965?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/atacseq" ; + schema1:sdDatePublished "2024-07-12 13:22:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/965/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5755 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:41Z" ; + schema1:dateModified "2024-06-11T12:54:41Z" ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/965?version=8" ; + schema1:keywords "ATAC-seq, chromatin-accessibiity" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/atacseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/965?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-23T13:33:07.958178" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Purge-duplicate-contigs-VGP6/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicate-contigs-VGP6/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:version "0.3.3" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-07-12 13:22:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5725 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:11Z" ; + schema1:dateModified "2024-06-11T12:55:11Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + schema1:datePublished "2021-07-26T10:22:23.309483" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:sdDatePublished "2021-10-27 15:45:10 +0100" ; + schema1:softwareVersion "v0.4.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.6" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Workflow for Spliced RNAseq data\r +**Steps:**\r +\r +* workflow_quality.cwl:\r + * FastQC (Read Quality Control)\r + * fastp (Read Trimming)\r +* STAR (Read mapping)\r +* featurecounts (transcript read counts)\r +* kallisto (transcript [pseudo]counts)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/95?version=1" ; + schema1:isBasedOn "https://git.wur.nl/unlock/cwl/-/blob/master/cwl/workflows/workflow_RNAseq_Spliced.cwl" ; + schema1:license "CC0-1.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Spliced RNAseq workflow" ; + schema1:sdDatePublished "2024-07-12 13:36:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/95/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 33079 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6752 ; + schema1:creator , + ; + schema1:dateCreated "2020-12-22T15:53:49Z" ; + schema1:dateModified "2023-01-16T13:46:30Z" ; + schema1:description """Workflow for Spliced RNAseq data\r +**Steps:**\r +\r +* workflow_quality.cwl:\r + * FastQC (Read Quality Control)\r + * fastp (Read Trimming)\r +* STAR (Read mapping)\r +* featurecounts (transcript read counts)\r +* kallisto (transcript [pseudo]counts)\r +""" ; + schema1:image ; + schema1:keywords "RNASEQ, rna, rna-seq, kallisto, STAR" ; + schema1:license "https://spdx.org/licenses/CC0-1.0" ; + schema1:name "Spliced RNAseq workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/95?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-06-18T14:17:17.498298" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/amr_gene_detection" ; + schema1:license "GPL-3.0-or-later" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "amr_gene_detection/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# metaGOflow: A workflow for marine Genomic Observatories' data analysis\r +\r +![logo](https://raw.githubusercontent.com/hariszaf/metaGOflow-use-case/gh-pages/assets/img/metaGOflow_logo_italics.png)\r +\r +\r +## An EOSC-Life project\r +\r +The workflows developed in the framework of this project are based on `pipeline-v5` of the MGnify resource.\r +\r +> This branch is a child of the [`pipeline_5.1`](https://github.com/hariszaf/pipeline-v5/tree/pipeline_5.1) branch\r +> that contains all CWL descriptions of the MGnify pipeline version 5.1.\r +\r +## Dependencies\r +\r +To run metaGOflow you need to make sure you have the following set on your computing environmnet first:\r +\r +- python3 [v 3.8+]\r +- [Docker](https://www.docker.com) [v 19.+] or [Singularity](https://apptainer.org) [v 3.7.+]/[Apptainer](https://apptainer.org) [v 1.+]\r +- [cwltool](https://github.com/common-workflow-language/cwltool) [v 3.+]\r +- [rdflib](https://rdflib.readthedocs.io/en/stable/) [v 6.+]\r +- [rdflib-jsonld](https://pypi.org/project/rdflib-jsonld/) [v 0.6.2]\r +- [ro-crate-py](https://github.com/ResearchObject/ro-crate-py) [v 0.7.0]\r +- [pyyaml](https://pypi.org/project/PyYAML/) [v 6.0]\r +- [Node.js](https://nodejs.org/) [v 10.24.0+]\r +- Available storage ~235GB for databases\r +\r +### Storage while running\r +\r +Depending on the analysis you are about to run, disk requirements vary.\r +Indicatively, you may have a look 
at the metaGOflow publication for computing resources used in various cases.\r +\r +## Installation\r +\r +### Get the EOSC-Life marine GOs workflow\r +\r +```bash\r +git clone https://github.com/emo-bon/MetaGOflow\r +cd MetaGOflow\r +```\r +\r +### Download necessary databases (~235GB)\r +\r +You can download databases for the EOSC-Life GOs workflow by running the\r +`download_dbs.sh` script under the `Installation` folder.\r +\r +```bash\r +bash Installation/download_dbs.sh -f [Output Directory e.g. ref-dbs] \r +```\r +If you have one or more already in your system, then create a symbolic link pointing\r +at the `ref-dbs` folder or at one of its subfolders/files.\r +\r +The final structure of the DB directory should be like the following:\r +\r +````bash\r +user@server:~/MetaGOflow: ls ref-dbs/\r +db_kofam/ diamond/ eggnog/ GO-slim/ interproscan-5.57-90.0/ kegg_pathways/ kofam_ko_desc.tsv Rfam/ silva_lsu/ silva_ssu/\r +````\r +\r +## How to run\r +\r +### Ensure that `Node.js` is installed on your system before running metaGOflow\r +\r +If you have root access on your system, you can run the commands below to install it:\r +\r +##### DEBIAN/UBUNTU\r +```bash\r +sudo apt-get update -y\r +sudo apt-get install -y nodejs\r +```\r +\r +##### RH/CentOS\r +```bash\r +sudo yum install rh-nodejs (e.g. rh-nodejs10)\r +```\r +\r +### Set up the environment\r +\r +#### Run once - Setup environment\r +\r +- ```bash\r + conda create -n EOSC-CWL python=3.8\r + ```\r +\r +- ```bash\r + conda activate EOSC-CWL\r + ```\r +\r +- ```bash\r + pip install cwlref-runner cwltool[all] rdflib-jsonld rocrate pyyaml\r +\r + ```\r +\r +#### Run every time\r +\r +```bash\r +conda activate EOSC-CWL\r +``` \r +\r +### Run the workflow\r +\r +- Edit the `config.yml` file to set the parameter values of your choice. 
For selecting all the steps, then set to `true` the variables in lines [2-6].\r +\r +#### Using Singularity\r +\r +##### Standalone\r +- run:\r + ```bash\r + ./run_wf.sh -s -n osd-short -d short-test-case -f test_input/wgs-paired-SRR1620013_1.fastq.gz -r test_input/wgs-paired-SRR1620013_2.fastq.gz\r + ``\r +\r +##### Using a cluster with a queueing system (e.g. SLURM)\r +\r +- Create a job file (e.g., SBATCH file)\r +\r +- Enable Singularity, e.g. module load Singularity & all other dependencies \r +\r +- Add the run line to the job file\r +\r +\r +#### Using Docker\r +\r +##### Standalone\r +- run:\r + ``` bash\r + ./run_wf.sh -n osd-short -d short-test-case -f test_input/wgs-paired-SRR1620013_1.fastq.gz -r test_input/wgs-paired-SRR1620013_2.fastq.gz\r + ```\r + HINT: If you are using Docker, you may need to run the above command without the `-s' flag.\r +\r +## Testing samples\r +The samples are available in the `test_input` folder.\r +\r +We provide metaGOflow with partial samples from the Human Metagenome Project ([SRR1620013](https://www.ebi.ac.uk/ena/browser/view/SRR1620013) and [SRR1620014](https://www.ebi.ac.uk/ena/browser/view/SRR1620014))\r +They are partial as only a small part of their sequences have been kept, in terms for the pipeline to test in a fast way. \r +\r +\r +## Hints and tips\r +\r +1. In case you are using Docker, it is strongly recommended to **avoid** installing it through `snap`.\r +\r +2. `RuntimeError`: slurm currently does not support shared caching, because it does not support cleaning up a worker\r + after the last job finishes.\r + Set the `--disableCaching` flag if you want to use this batch system.\r +\r +3. 
In case you are having errors like:\r +\r +```\r +cwltool.errors.WorkflowException: Singularity is not available for this tool\r +```\r +\r +You may run the following command:\r +\r +```\r +singularity pull --force --name debian:stable-slim.sif docker://debian:stable-sli\r +```\r +\r +## Contribution\r +\r +To make contribution to the project a bit easier, all the MGnify `conditionals` and `subworkflows` under\r +the `workflows/` directory that are not used in the metaGOflow framework, have been removed. \r +However, all the MGnify `tools/` and `utils/` are available in this repo, even if they are not invoked in the current\r +version of metaGOflow.\r +This way, we hope we encourage people to implement their own `conditionals` and/or `subworkflows` by exploiting the\r +currently supported `tools` and `utils` as well as by developing new `tools` and/or `utils`.\r +\r +\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.384.3" ; + schema1:isBasedOn "https://github.com/emo-bon/MetaGOflow.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for A workflow for marine Genomic Observatories data analysis" ; + schema1:sdDatePublished "2024-07-12 13:33:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/384/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7006 ; + schema1:creator , + ; + schema1:dateCreated "2023-05-16T20:41:04Z" ; + schema1:dateModified "2023-05-16T22:01:12Z" ; + schema1:description """# metaGOflow: A workflow for marine Genomic Observatories' data analysis\r +\r +![logo](https://raw.githubusercontent.com/hariszaf/metaGOflow-use-case/gh-pages/assets/img/metaGOflow_logo_italics.png)\r +\r +\r +## An EOSC-Life project\r +\r +The workflows developed in the framework of this project are based on `pipeline-v5` of the MGnify resource.\r +\r +> This branch is a child of the [`pipeline_5.1`](https://github.com/hariszaf/pipeline-v5/tree/pipeline_5.1) branch\r +> that contains all CWL descriptions of the MGnify pipeline version 5.1.\r +\r +## Dependencies\r +\r +To run metaGOflow you need to make sure you have the following set on your computing environmnet first:\r +\r +- python3 [v 3.8+]\r +- [Docker](https://www.docker.com) [v 19.+] or [Singularity](https://apptainer.org) [v 3.7.+]/[Apptainer](https://apptainer.org) [v 1.+]\r +- [cwltool](https://github.com/common-workflow-language/cwltool) [v 3.+]\r +- [rdflib](https://rdflib.readthedocs.io/en/stable/) [v 6.+]\r +- [rdflib-jsonld](https://pypi.org/project/rdflib-jsonld/) [v 0.6.2]\r +- [ro-crate-py](https://github.com/ResearchObject/ro-crate-py) [v 0.7.0]\r +- [pyyaml](https://pypi.org/project/PyYAML/) [v 6.0]\r +- [Node.js](https://nodejs.org/) [v 10.24.0+]\r +- Available storage ~235GB for databases\r +\r +### Storage while running\r +\r +Depending on the analysis you are about to run, disk requirements vary.\r +Indicatively, you may have a look at the metaGOflow publication for computing resources used in various cases.\r +\r +## Installation\r +\r +### Get the EOSC-Life marine GOs workflow\r +\r +```bash\r +git clone https://github.com/emo-bon/MetaGOflow\r +cd MetaGOflow\r +```\r 
+\r +### Download necessary databases (~235GB)\r +\r +You can download databases for the EOSC-Life GOs workflow by running the\r +`download_dbs.sh` script under the `Installation` folder.\r +\r +```bash\r +bash Installation/download_dbs.sh -f [Output Directory e.g. ref-dbs] \r +```\r +If you have one or more already in your system, then create a symbolic link pointing\r +at the `ref-dbs` folder or at one of its subfolders/files.\r +\r +The final structure of the DB directory should be like the following:\r +\r +````bash\r +user@server:~/MetaGOflow: ls ref-dbs/\r +db_kofam/ diamond/ eggnog/ GO-slim/ interproscan-5.57-90.0/ kegg_pathways/ kofam_ko_desc.tsv Rfam/ silva_lsu/ silva_ssu/\r +````\r +\r +## How to run\r +\r +### Ensure that `Node.js` is installed on your system before running metaGOflow\r +\r +If you have root access on your system, you can run the commands below to install it:\r +\r +##### DEBIAN/UBUNTU\r +```bash\r +sudo apt-get update -y\r +sudo apt-get install -y nodejs\r +```\r +\r +##### RH/CentOS\r +```bash\r +sudo yum install rh-nodejs (e.g. rh-nodejs10)\r +```\r +\r +### Set up the environment\r +\r +#### Run once - Setup environment\r +\r +- ```bash\r + conda create -n EOSC-CWL python=3.8\r + ```\r +\r +- ```bash\r + conda activate EOSC-CWL\r + ```\r +\r +- ```bash\r + pip install cwlref-runner cwltool[all] rdflib-jsonld rocrate pyyaml\r +\r + ```\r +\r +#### Run every time\r +\r +```bash\r +conda activate EOSC-CWL\r +``` \r +\r +### Run the workflow\r +\r +- Edit the `config.yml` file to set the parameter values of your choice. For selecting all the steps, then set to `true` the variables in lines [2-6].\r +\r +#### Using Singularity\r +\r +##### Standalone\r +- run:\r + ```bash\r + ./run_wf.sh -s -n osd-short -d short-test-case -f test_input/wgs-paired-SRR1620013_1.fastq.gz -r test_input/wgs-paired-SRR1620013_2.fastq.gz\r + ``\r +\r +##### Using a cluster with a queueing system (e.g. 
SLURM)\r +\r +- Create a job file (e.g., SBATCH file)\r +\r +- Enable Singularity, e.g. module load Singularity & all other dependencies \r +\r +- Add the run line to the job file\r +\r +\r +#### Using Docker\r +\r +##### Standalone\r +- run:\r + ``` bash\r + ./run_wf.sh -n osd-short -d short-test-case -f test_input/wgs-paired-SRR1620013_1.fastq.gz -r test_input/wgs-paired-SRR1620013_2.fastq.gz\r + ```\r + HINT: If you are using Docker, you may need to run the above command without the `-s' flag.\r +\r +## Testing samples\r +The samples are available in the `test_input` folder.\r +\r +We provide metaGOflow with partial samples from the Human Metagenome Project ([SRR1620013](https://www.ebi.ac.uk/ena/browser/view/SRR1620013) and [SRR1620014](https://www.ebi.ac.uk/ena/browser/view/SRR1620014))\r +They are partial as only a small part of their sequences have been kept, in terms for the pipeline to test in a fast way. \r +\r +\r +## Hints and tips\r +\r +1. In case you are using Docker, it is strongly recommended to **avoid** installing it through `snap`.\r +\r +2. `RuntimeError`: slurm currently does not support shared caching, because it does not support cleaning up a worker\r + after the last job finishes.\r + Set the `--disableCaching` flag if you want to use this batch system.\r +\r +3. In case you are having errors like:\r +\r +```\r +cwltool.errors.WorkflowException: Singularity is not available for this tool\r +```\r +\r +You may run the following command:\r +\r +```\r +singularity pull --force --name debian:stable-slim.sif docker://debian:stable-sli\r +```\r +\r +## Contribution\r +\r +To make contribution to the project a bit easier, all the MGnify `conditionals` and `subworkflows` under\r +the `workflows/` directory that are not used in the metaGOflow framework, have been removed. 
\r +However, all the MGnify `tools/` and `utils/` are available in this repo, even if they are not invoked in the current\r +version of metaGOflow.\r +This way, we hope we encourage people to implement their own `conditionals` and/or `subworkflows` by exploiting the\r +currently supported `tools` and `utils` as well as by developing new `tools` and/or `utils`.\r +\r +\r +\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/384?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "A workflow for marine Genomic Observatories data analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/384?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# workflow-partial-cstacks-sstacks-gstacks\r +\r +These workflows are part of a set designed to work for RAD-seq data on the Galaxy platform, using the tools from the Stacks program. \r +\r +Galaxy Australia: https://usegalaxy.org.au/\r +\r +Stacks: http://catchenlab.life.illinois.edu/stacks/\r +\r +This workflow takes in ustacks output, and runs cstacks, sstacks and gstacks. \r +\r +To generate ustacks output see https://workflowhub.eu/workflows/349\r +\r +For the full de novo workflow see https://workflowhub.eu/workflows/348\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/350?version=1" ; + schema1:isBasedOn "https://github.com/AnnaSyme/workflow-partial-cstacks-sstacks-gstacks.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Partial de novo workflow: c-s-g-pops only" ; + schema1:sdDatePublished "2024-07-12 13:35:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/350/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 23360 ; + schema1:creator ; + schema1:dateCreated "2022-05-31T07:56:39Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """# workflow-partial-cstacks-sstacks-gstacks\r +\r +These workflows are part of a set designed to work for RAD-seq data on the Galaxy platform, using the tools from the Stacks program. \r +\r +Galaxy Australia: https://usegalaxy.org.au/\r +\r +Stacks: http://catchenlab.life.illinois.edu/stacks/\r +\r +This workflow takes in ustacks output, and runs cstacks, sstacks and gstacks. \r +\r +To generate ustacks output see https://workflowhub.eu/workflows/349\r +\r +For the full de novo workflow see https://workflowhub.eu/workflows/348\r +""" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Partial de novo workflow: c-s-g-pops only" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/350?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A pipeline to demultiplex, QC and map Nanopore data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1002?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/nanoseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nanoseq" ; + schema1:sdDatePublished "2024-07-12 13:20:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1002/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6467 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:05Z" ; + schema1:dateModified "2024-06-11T12:55:05Z" ; + schema1:description "A pipeline to demultiplex, QC and map Nanopore data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1002?version=5" ; + schema1:keywords "Alignment, demultiplexing, nanopore, QC" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nanoseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1002?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.280.4" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_ligand_parameterization/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python GMX Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-07-12 13:34:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/280/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1728 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T07:55:42Z" ; + schema1:dateModified "2023-04-14T07:56:58Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/280?version=3" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python GMX Automatic 
Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_ligand_parameterization/python/workflow.py" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=29" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-07-12 13:22:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=29" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13149 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:13Z" ; + schema1:dateModified "2024-06-11T12:55:13Z" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=29" ; + schema1:version 29 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """## Purge dups\r +\r +This snakemake pipeline is designed to be run using as input a contig-level genome and pacbio reads. This pipeline has been tested with `snakemake v7.32.4`. 
Raw long-read sequencing files and the input contig genome assembly must be given in the `config.yaml` file. To execute the workflow run:\r +\r +`snakemake --use-conda --cores N`\r +\r +Or configure the cluster.json and run using the `./run_cluster` command""" ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.506.2" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/snakemake/3.Purging/purge-dups" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Purge retained haplotypes using Purge-Dups" ; + schema1:sdDatePublished "2024-07-12 13:23:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/506/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1741 ; + schema1:creator ; + schema1:dateCreated "2024-03-16T07:45:11Z" ; + schema1:dateModified "2024-03-20T16:10:05Z" ; + schema1:description """## Purge dups\r +\r +This snakemake pipeline is designed to be run using as input a contig-level genome and pacbio reads. This pipeline has been tested with `snakemake v7.32.4`. Raw long-read sequencing files and the input contig genome assembly must be given in the `config.yaml` file. 
To execute the workflow run:\r +\r +`snakemake --use-conda --cores N`\r +\r +Or configure the cluster.json and run using the `./run_cluster` command""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/506?version=1" ; + schema1:isPartOf ; + schema1:keywords "Assembly, Genomics, Snakemake, Bioinformatics, Genome assembly" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Purge retained haplotypes using Purge-Dups" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/snakemake/3.Purging/purge-dups/Snakefile" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A pipeline to investigate horizontal gene transfer from NGS data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/988?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/hgtseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hgtseq" ; + schema1:sdDatePublished "2024-07-12 13:21:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/988/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8564 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:54Z" ; + schema1:dateModified "2024-06-11T12:54:54Z" ; + schema1:description "A pipeline to investigate horizontal gene transfer from NGS data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/988?version=1" ; + schema1:keywords "BWA-mem, bwa-mem2, FastQC, genomics-visualization, ggbio, horizontal-gene-transfer, kraken2, krona, MultiQC, NGS, SAMTools, taxonomies, tidyverse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hgtseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/988?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# TronFlow alignment pipeline\r +\r +![GitHub tag (latest SemVer)](https://img.shields.io/github/v/release/tron-bioinformatics/tronflow-bwa?sort=semver)\r +[![Run tests](https://github.com/TRON-Bioinformatics/tronflow-bwa/actions/workflows/automated_tests.yml/badge.svg?branch=master)](https://github.com/TRON-Bioinformatics/tronflow-bwa/actions/workflows/automated_tests.yml)\r +[![DOI](https://zenodo.org/badge/327943420.svg)](https://zenodo.org/badge/latestdoi/327943420)\r +[![License](https://img.shields.io/badge/license-MIT-green)](https://opensource.org/licenses/MIT)\r +[![Powered by Nextflow](https://img.shields.io/badge/powered%20by-Nextflow-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://www.nextflow.io/)\r +\r +The TronFlow alignment pipeline is part of a collection of computational workflows for tumor-normal pair \r +somatic variant calling.\r +\r +Find the documentation here [![Documentation 
Status](https://readthedocs.org/projects/tronflow-docs/badge/?version=latest)](https://tronflow-docs.readthedocs.io/en/latest/?badge=latest)\r +\r +This pipeline aligns paired and single end FASTQ files with BWA aln and mem algorithms and with BWA mem 2.\r +For RNA-seq STAR is also supported. To increase sensitivity of novel junctions use `--star_two_pass_mode` (recommended for RNAseq variant calling).\r +It also includes an initial step of read trimming using FASTP.\r +\r +\r +## How to run it\r +\r +Run it from GitHub as follows:\r +```\r +nextflow run tron-bioinformatics/tronflow-alignment -profile conda --input_files $input --output $output --algorithm aln --library paired\r +```\r +\r +Otherwise download the project and run as follows:\r +```\r +nextflow main.nf -profile conda --input_files $input --output $output --algorithm aln --library paired\r +```\r +\r +Find the help as follows:\r +```\r +$ nextflow run tron-bioinformatics/tronflow-alignment --help\r +N E X T F L O W ~ version 19.07.0\r +Launching `main.nf` [intergalactic_shannon] - revision: e707c77d7b\r +\r +Usage:\r + nextflow main.nf --input_files input_files [--reference reference.fasta]\r +\r +Input:\r + * input_fastq1: the path to a FASTQ file (incompatible with --input_files)\r + * input_files: the path to a tab-separated values file containing in each row the sample name and two paired FASTQs (incompatible with --fastq1 and --fastq2)\r + when `--library paired`, or a single FASTQ file when `--library single`\r + Example input file:\r + name1 fastq1.1 fastq1.2\r + name2 fastq2.1 fastq2.2\r + * reference: path to the indexed FASTA genome reference or the star reference folder in case of using star\r +\r +Optional input:\r + * input_fastq2: the path to a second FASTQ file (incompatible with --input_files, incompatible with --library paired)\r + * output: the folder where to publish output (default: output)\r + * algorithm: determines the BWA algorithm, either `aln`, `mem`, `mem2` or `star` 
(default `aln`)\r + * library: determines whether the sequencing library is paired or single end, either `paired` or `single` (default `paired`)\r + * cpus: determines the number of CPUs for each job, with the exception of bwa sampe and samse steps which are not parallelized (default: 8)\r + * memory: determines the memory required by each job (default: 32g)\r + * inception: if enabled it uses an inception, only valid for BWA aln, it requires a fast file system such as flash (default: false)\r + * skip_trimming: skips the read trimming step\r + * star_two_pass_mode: activates STAR two-pass mode, increasing sensitivity of novel junction discovery, recommended for RNA variant calling (default: false)\r + * additional_args: additional alignment arguments, only effective in BWA mem, BWA mem 2 and STAR (default: none) \r +\r +Output:\r + * A BAM file \\${name}.bam and its index\r + * FASTP read trimming stats report in HTML format \\${name.fastp_stats.html}\r + * FASTP read trimming stats report in JSON format \\${name.fastp_stats.json}\r +```\r +\r +### Input tables\r +\r +The table with FASTQ files expects two tab-separated columns without a header\r +\r +| Sample name | FASTQ 1 | FASTQ 2 |\r +|----------------------|---------------------------------|------------------------------|\r +| sample_1 | /path/to/sample_1.1.fastq | /path/to/sample_1.2.fastq |\r +| sample_2 | /path/to/sample_2.1.fastq | /path/to/sample_2.2.fastq |\r +\r +\r +### Reference genome\r +\r +The reference genome has to be provided in FASTA format and it requires two set of indexes:\r +* FAI index. Create with `samtools faidx your.fasta`\r +* BWA indexes. Create with `bwa index your.fasta`\r +\r +For bwa-mem2 a specific index is needed:\r +```\r +bwa-mem2 index your.fasta\r +```\r +\r +For star a reference folder prepared with star has to be provided. In order to prepare it will need the reference\r +genome in FASTA format and the gene annotations in GTF format. 
Run a command as follows:\r +```\r +STAR --runMode genomeGenerate --genomeDir $YOUR_FOLDER --genomeFastaFiles $YOUR_FASTA --sjdbGTFfile $YOUR_GTF\r +```\r +\r +## References\r +\r +* Li H. and Durbin R. (2010) Fast and accurate long-read alignment with Burrows-Wheeler Transform. Bioinformatics, Epub. https://doi.org/10.1093/bioinformatics/btp698 \r +* Shifu Chen, Yanqing Zhou, Yaru Chen, Jia Gu; fastp: an ultra-fast all-in-one FASTQ preprocessor, Bioinformatics, Volume 34, Issue 17, 1 September 2018, Pages i884–i890, https://doi.org/10.1093/bioinformatics/bty560\r +* Vasimuddin Md, Sanchit Misra, Heng Li, Srinivas Aluru. Efficient Architecture-Aware Acceleration of BWA-MEM for Multicore Systems. IEEE Parallel and Distributed Processing Symposium (IPDPS), 2019.\r +* Dobin A, Davis CA, Schlesinger F, Drenkow J, Zaleski C, Jha S, Batut P, Chaisson M, Gingeras TR. STAR: ultrafast universal RNA-seq aligner. Bioinformatics. 2013 Jan 1;29(1):15-21. doi: 10.1093/bioinformatics/bts635. Epub 2012 Oct 25. PMID: 23104886; PMCID: PMC3530905.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/418?version=1" ; + schema1:isBasedOn "https://github.com/TRON-Bioinformatics/tronflow-alignment" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for TronFlow alignment pipeline" ; + schema1:sdDatePublished "2024-07-12 13:34:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/418/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4781 ; + schema1:dateCreated "2023-01-17T16:51:42Z" ; + schema1:dateModified "2023-01-17T16:51:42Z" ; + schema1:description """# TronFlow alignment pipeline\r +\r +![GitHub tag (latest SemVer)](https://img.shields.io/github/v/release/tron-bioinformatics/tronflow-bwa?sort=semver)\r +[![Run tests](https://github.com/TRON-Bioinformatics/tronflow-bwa/actions/workflows/automated_tests.yml/badge.svg?branch=master)](https://github.com/TRON-Bioinformatics/tronflow-bwa/actions/workflows/automated_tests.yml)\r +[![DOI](https://zenodo.org/badge/327943420.svg)](https://zenodo.org/badge/latestdoi/327943420)\r +[![License](https://img.shields.io/badge/license-MIT-green)](https://opensource.org/licenses/MIT)\r +[![Powered by Nextflow](https://img.shields.io/badge/powered%20by-Nextflow-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://www.nextflow.io/)\r +\r +The TronFlow alignment pipeline is part of a collection of computational workflows for tumor-normal pair \r +somatic variant calling.\r +\r +Find the documentation here [![Documentation Status](https://readthedocs.org/projects/tronflow-docs/badge/?version=latest)](https://tronflow-docs.readthedocs.io/en/latest/?badge=latest)\r +\r +This pipeline aligns paired and single end FASTQ files with BWA aln and mem algorithms and with BWA mem 2.\r +For RNA-seq STAR is also supported. 
To increase sensitivity of novel junctions use `--star_two_pass_mode` (recommended for RNAseq variant calling).\r +It also includes an initial step of read trimming using FASTP.\r +\r +\r +## How to run it\r +\r +Run it from GitHub as follows:\r +```\r +nextflow run tron-bioinformatics/tronflow-alignment -profile conda --input_files $input --output $output --algorithm aln --library paired\r +```\r +\r +Otherwise download the project and run as follows:\r +```\r +nextflow main.nf -profile conda --input_files $input --output $output --algorithm aln --library paired\r +```\r +\r +Find the help as follows:\r +```\r +$ nextflow run tron-bioinformatics/tronflow-alignment --help\r +N E X T F L O W ~ version 19.07.0\r +Launching `main.nf` [intergalactic_shannon] - revision: e707c77d7b\r +\r +Usage:\r + nextflow main.nf --input_files input_files [--reference reference.fasta]\r +\r +Input:\r + * input_fastq1: the path to a FASTQ file (incompatible with --input_files)\r + * input_files: the path to a tab-separated values file containing in each row the sample name and two paired FASTQs (incompatible with --fastq1 and --fastq2)\r + when `--library paired`, or a single FASTQ file when `--library single`\r + Example input file:\r + name1 fastq1.1 fastq1.2\r + name2 fastq2.1 fastq2.2\r + * reference: path to the indexed FASTA genome reference or the star reference folder in case of using star\r +\r +Optional input:\r + * input_fastq2: the path to a second FASTQ file (incompatible with --input_files, incompatible with --library paired)\r + * output: the folder where to publish output (default: output)\r + * algorithm: determines the BWA algorithm, either `aln`, `mem`, `mem2` or `star` (default `aln`)\r + * library: determines whether the sequencing library is paired or single end, either `paired` or `single` (default `paired`)\r + * cpus: determines the number of CPUs for each job, with the exception of bwa sampe and samse steps which are not parallelized (default: 8)\r + * 
memory: determines the memory required by each job (default: 32g)\r + * inception: if enabled it uses an inception, only valid for BWA aln, it requires a fast file system such as flash (default: false)\r + * skip_trimming: skips the read trimming step\r + * star_two_pass_mode: activates STAR two-pass mode, increasing sensitivity of novel junction discovery, recommended for RNA variant calling (default: false)\r + * additional_args: additional alignment arguments, only effective in BWA mem, BWA mem 2 and STAR (default: none) \r +\r +Output:\r + * A BAM file \\${name}.bam and its index\r + * FASTP read trimming stats report in HTML format \\${name.fastp_stats.html}\r + * FASTP read trimming stats report in JSON format \\${name.fastp_stats.json}\r +```\r +\r +### Input tables\r +\r +The table with FASTQ files expects two tab-separated columns without a header\r +\r +| Sample name | FASTQ 1 | FASTQ 2 |\r +|----------------------|---------------------------------|------------------------------|\r +| sample_1 | /path/to/sample_1.1.fastq | /path/to/sample_1.2.fastq |\r +| sample_2 | /path/to/sample_2.1.fastq | /path/to/sample_2.2.fastq |\r +\r +\r +### Reference genome\r +\r +The reference genome has to be provided in FASTA format and it requires two set of indexes:\r +* FAI index. Create with `samtools faidx your.fasta`\r +* BWA indexes. Create with `bwa index your.fasta`\r +\r +For bwa-mem2 a specific index is needed:\r +```\r +bwa-mem2 index your.fasta\r +```\r +\r +For star a reference folder prepared with star has to be provided. In order to prepare it will need the reference\r +genome in FASTA format and the gene annotations in GTF format. Run a command as follows:\r +```\r +STAR --runMode genomeGenerate --genomeDir $YOUR_FOLDER --genomeFastaFiles $YOUR_FASTA --sjdbGTFfile $YOUR_GTF\r +```\r +\r +## References\r +\r +* Li H. and Durbin R. (2010) Fast and accurate long-read alignment with Burrows-Wheeler Transform. Bioinformatics, Epub. 
https://doi.org/10.1093/bioinformatics/btp698 \r +* Shifu Chen, Yanqing Zhou, Yaru Chen, Jia Gu; fastp: an ultra-fast all-in-one FASTQ preprocessor, Bioinformatics, Volume 34, Issue 17, 1 September 2018, Pages i884–i890, https://doi.org/10.1093/bioinformatics/bty560\r +* Vasimuddin Md, Sanchit Misra, Heng Li, Srinivas Aluru. Efficient Architecture-Aware Acceleration of BWA-MEM for Multicore Systems. IEEE Parallel and Distributed Processing Symposium (IPDPS), 2019.\r +* Dobin A, Davis CA, Schlesinger F, Drenkow J, Zaleski C, Jha S, Batut P, Chaisson M, Gingeras TR. STAR: ultrafast universal RNA-seq aligner. Bioinformatics. 2013 Jan 1;29(1):15-21. doi: 10.1093/bioinformatics/bts635. Epub 2012 Oct 25. PMID: 23104886; PMCID: PMC3530905.\r +""" ; + schema1:isPartOf ; + schema1:keywords "Alignment, BWA, STAR, Bioinformatics, fastp" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "TronFlow alignment pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/418?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/976?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/cutandrun" ; + schema1:sdDatePublished "2024-07-12 13:21:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/976/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10375 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:46Z" ; + schema1:dateModified "2024-06-11T12:54:46Z" ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/976?version=7" ; + schema1:keywords "cutandrun, cutandrun-seq, cutandtag, cutandtag-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/cutandrun" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/976?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2023-09-01T14:29:11.916146" ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + , + , + ; + schema1:name "hic-hicup-cooler/hic-fastq-to-cool-hicup-cooler" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.10" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "hic-hicup-cooler/chic-fastq-to-cool-hicup-cooler" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:version "0.2.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.10" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.10" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "hic-hicup-cooler/hic-fastq-to-pairs-hicup" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:version "0.2.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.10" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "hic-hicup-cooler/hic-juicermediumtabix-to_cool-cooler" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:version "0.2.1" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/amber-protein-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r 
+[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.297.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Amber Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/297/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 80553 ; + schema1:creator , + ; + schema1:dateCreated "2023-05-03T13:49:59Z" ; + schema1:dateModified "2023-05-03T13:51:37Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. 
Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/amber-protein-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/297?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Amber Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_md_setup/galaxy/biobb_wf_amber_md_setup.ga" ; + schema1:version 3 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-10-17T15:33:20.525539" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-sr/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.12" . + + a schema1:Dataset ; + schema1:datePublished "2024-06-23T17:52:56.378217" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Plot-Nx-Size" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Plot-Nx-Size/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ChIP-seq peak-calling and differential analysis pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/971?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/chipseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/chipseq" ; + schema1:sdDatePublished "2024-07-12 13:22:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/971/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4753 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:44Z" ; + schema1:dateModified "2024-06-11T12:54:44Z" ; + schema1:description "ChIP-seq peak-calling and differential analysis pipeline." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/971?version=5" ; + schema1:keywords "ChIP, ChIP-seq, chromatin-immunoprecipitation, macs2, peak-calling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/chipseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/971?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.120.5" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_md_setup" ; + schema1:license 
"Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:34:17 +0100" ; + schema1:url "https://workflowhub.eu/workflows/120/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 57824 ; + schema1:creator , + ; + schema1:dateCreated "2023-07-26T09:17:16Z" ; + schema1:dateModified "2023-07-26T09:18:43Z" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/120?version=5" ; + schema1:isPartOf , + , + ; + schema1:keywords "" ; + schema1:license 
"https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_md_setup/blob/master/biobb_wf_md_setup/notebooks/biobb_MDsetup_tutorial.ipynb" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 
[Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.282.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_virtual_screening/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein-ligand Docking tutorial (Fpocket)" ; + schema1:sdDatePublished "2024-07-12 13:33:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/282/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2666 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-17T10:16:30Z" ; + schema1:dateModified "2022-04-11T09:29:43Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/282?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein-ligand Docking tutorial (Fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_virtual_screening/python/workflow.py" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2021-07-26T10:22:23.313479" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-se-illumina-wgs-variant-calling" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:sdDatePublished "2021-10-27 15:45:08 +0100" ; + schema1:softwareVersion "v0.1.2" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.6" . + + a schema1:Dataset ; + schema1:datePublished "2024-04-22T10:09:30.195105" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=26" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-07-12 13:21:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=26" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13457 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:51Z" ; + schema1:dateModified "2024-06-11T12:54:51Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=26" ; + schema1:version 26 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, related to the Galaxy training tutorial "Deep learning to predict animal behavior" .\r +\r +This workflow allows to analyze animal behavior data through deep learning.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/659?version=1" ; + schema1:isBasedOn "https://ecology.usegalaxy.eu/u/ylebras/w/workflow-constructed-from-history-imported-animal-dive-prediction-using-deep-learning" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Animal dive prediction using deep learning" ; + schema1:sdDatePublished "2024-07-12 13:26:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/659/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 78381 ; + schema1:dateCreated "2023-11-09T21:12:11Z" ; + schema1:dateModified "2023-11-09T21:12:11Z" ; + schema1:description """Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, related to the Galaxy training tutorial "Deep learning to predict animal behavior" .\r +\r +This workflow allows to analyze animal behavior data through deep learning.\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Animal dive prediction using deep learning" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/659?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/986?version=14" ; + schema1:isBasedOn "https://github.com/nf-core/fetchngs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fetchngs" ; + schema1:sdDatePublished "2024-07-12 13:21:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/986/ro_crate?version=14" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9364 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:53Z" ; + schema1:dateModified "2024-06-11T12:54:53Z" ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/986?version=13" ; + schema1:keywords "ddbj, download, ena, FASTQ, geo, sra, synapse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fetchngs" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/986?version=14" ; + schema1:version 14 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1021?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/scrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/scrnaseq" ; + schema1:sdDatePublished "2024-07-12 13:19:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1021/ro_crate?version=10" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10337 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:17Z" ; + schema1:dateModified "2024-06-11T12:55:17Z" ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:keywords "10x-genomics, 10xgenomics, alevin, bustools, Cellranger, kallisto, rna-seq, single-cell, star-solo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/scrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1021?version=10" ; + schema1:version 10 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +****\r +\r +About this workflow:\r +\r +* Inputs: transdecoder-peptides.fasta, transdecoder-nucleotides.fasta\r +* Runs many steps to convert outputs into the formats required for Fgenesh - .pro, .dat and .cdna""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.880.1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Convert formats - TSI" ; + schema1:sdDatePublished "2024-07-12 13:22:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/880/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 77130 ; + schema1:creator , + ; + schema1:dateCreated "2024-05-08T07:23:51Z" ; + schema1:dateModified "2024-05-09T04:09:41Z" ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +****\r +\r +About this workflow:\r +\r +* Inputs: transdecoder-peptides.fasta, transdecoder-nucleotides.fasta\r +* Runs many steps to convert outputs into the formats required for Fgenesh - .pro, .dat and .cdna""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "TSI-annotation" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Convert formats - TSI" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/880?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 522976 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# mvgwas-nf\r +\r +[![nextflow](https://img.shields.io/badge/nextflow-%E2%89%A520.04.1-blue.svg)](http://nextflow.io)\r +[![CI-checks](https://github.com/guigolab/sqtlseeker2-nf/actions/workflows/ci.yaml/badge.svg)](https://github.com/guigolab/sqtlseeker2-nf/actions/workflows/ci.yaml)\r +\r +A pipeline for multi-trait genome-wide association studies (GWAS) using [MANTA](https://github.com/dgarrimar/manta).\r +\r +The pipeline performs the following analysis steps:\r +\r +* Split genotype file \r +* Preprocess phenotype and covariate data\r +* Test for association between phenotypes and genetic variants\r +* Collect summary statistics\r +\r +The pipeline uses [Nextflow](http://www.nextflow.io) as the execution backend. Please check [Nextflow documentation](http://www.nextflow.io/docs/latest/index.html) for more information.\r +\r +## Requirements\r +\r +- Unix-like operating system (Linux, MacOS, etc.)\r +- Java 8 or later \r +- [Docker](https://www.docker.com/) (v1.10.0 or later) or [Singularity](http://singularity.lbl.gov) (v2.5.0 or later)\r +\r +## Quickstart (~2 min)\r +\r +1. Install Nextflow:\r + ```\r + curl -fsSL get.nextflow.io | bash\r + ```\r +\r +2. Make a test run:\r + ```\r + nextflow run dgarrimar/mvgwas-nf -with-docker\r + ```\r +\r +**Notes**: move the `nextflow` executable to a directory in your `$PATH`. Set `-with-singularity` to use Singularity instead of Docker. 
\r +\r +(*) Alternatively you can clone this repository:\r +```\r +git clone https://github.com/dgarrimar/mvgwas-nf\r +cd mvgwas-nf\r +nextflow run mvgwas.nf -with-docker\r +```\r +\r +## Pipeline usage\r +\r +Launching the pipeline with the `--help` parameter shows the help message:\r +\r +```\r +nextflow run mvgwas.nf --help\r +```\r +\r +```\r +N E X T F L O W ~ version 20.04.1\r +Launching `mvgwas.nf` [amazing_roentgen] - revision: 56125073b7\r +\r +mvgwas-nf: A pipeline for multivariate Genome-Wide Association Studies\r +==============================================================================================\r +Performs multi-trait GWAS using using MANTA (https://github.com/dgarrimar/manta)\r +\r +Usage:\r +nextflow run mvgwas.nf [options]\r +\r +Parameters:\r +--pheno PHENOTYPES phenotype file\r +--geno GENOTYPES indexed genotype VCF file\r +--cov COVARIATES covariate file\r +--l VARIANTS/CHUNK variants tested per chunk (default: 10000)\r +--t TRANSFOMATION phenotype transformation: none, sqrt, log (default: none)\r +--i INTERACTION test for interaction with a covariate: none, (default: none)\r +--ng INDIVIDUALS/GENOTYPE minimum number of individuals per genotype group (default: 10)\r +--dir DIRECTORY output directory (default: result)\r +--out OUTPUT output file (default: mvgwas.tsv)\r +```\r +\r +## Input files and format\r +\r +`mvgwas-nf` requires the following input files:\r +\r +* **Genotypes.** \r +[bgzip](http://www.htslib.org/doc/bgzip.html)-compressed and indexed [VCF](https://samtools.github.io/hts-specs/VCFv4.3.pdf) genotype file.\r +\r +* **Phenotypes.**\r +Tab-separated file with phenotype measurements (quantitative) for each sample (i.e. *n* samples x *q* phenotypes).\r +The first column should contain sample IDs. Columns should be named.\r +\r +* **Covariates.**\r +Tab-separated file with covariate measurements (quantitative or categorical) for each sample (i.e. *n* samples x *k* covariates). 
\r +The first column should contain sample IDs. Columns should be named. \r +\r +Example [data](data) is available for the test run.\r +\r +## Pipeline results\r +\r +An output text file containing the multi-trait GWAS summary statistics (default: `./result/mvgwas.tsv`), with the following information:\r +\r +* `CHR`: chromosome\r +* `POS`: position\r +* `ID`: variant ID\r +* `REF`: reference allele\r +* `ALT`: alternative allele\r +* `F`: pseudo-F statistic\r +* `R2`: fraction of variance explained by the variant\r +* `P`: P-value\r +\r +The output folder and file names can be modified with the `--dir` and `--out` parameters, respectively.\r +\r +## Cite mvgwas-nf\r +\r +If you find `mvgwas-nf` useful in your research please cite the related publication:\r +\r +Garrido-Martín, D., Calvo, M., Reverter, F., Guigó, R. A fast non-parametric test of association for multiple traits. *bioRxiv* (2022). [https://doi.org/10.1101/2022.06.06.493041](https://doi.org/10.1101/2022.06.06.493041)\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/436?version=1" ; + schema1:isBasedOn "https://github.com/dgarrimar/mvgwas-nf.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for mvgwas-nf" ; + schema1:sdDatePublished "2024-07-12 13:34:30 +0100" ; + schema1:url "https://workflowhub.eu/workflows/436/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5754 ; + schema1:creator , + ; + schema1:dateCreated "2023-02-15T11:58:32Z" ; + schema1:dateModified "2023-02-15T12:09:57Z" ; + schema1:description """# mvgwas-nf\r +\r +[![nextflow](https://img.shields.io/badge/nextflow-%E2%89%A520.04.1-blue.svg)](http://nextflow.io)\r +[![CI-checks](https://github.com/guigolab/sqtlseeker2-nf/actions/workflows/ci.yaml/badge.svg)](https://github.com/guigolab/sqtlseeker2-nf/actions/workflows/ci.yaml)\r +\r +A pipeline for multi-trait genome-wide association studies (GWAS) using [MANTA](https://github.com/dgarrimar/manta).\r +\r +The pipeline performs the following analysis steps:\r +\r +* Split genotype file \r +* Preprocess phenotype and covariate data\r +* Test for association between phenotypes and genetic variants\r +* Collect summary statistics\r +\r +The pipeline uses [Nextflow](http://www.nextflow.io) as the execution backend. Please check [Nextflow documentation](http://www.nextflow.io/docs/latest/index.html) for more information.\r +\r +## Requirements\r +\r +- Unix-like operating system (Linux, MacOS, etc.)\r +- Java 8 or later \r +- [Docker](https://www.docker.com/) (v1.10.0 or later) or [Singularity](http://singularity.lbl.gov) (v2.5.0 or later)\r +\r +## Quickstart (~2 min)\r +\r +1. Install Nextflow:\r + ```\r + curl -fsSL get.nextflow.io | bash\r + ```\r +\r +2. Make a test run:\r + ```\r + nextflow run dgarrimar/mvgwas-nf -with-docker\r + ```\r +\r +**Notes**: move the `nextflow` executable to a directory in your `$PATH`. Set `-with-singularity` to use Singularity instead of Docker. 
\r +\r +(*) Alternatively you can clone this repository:\r +```\r +git clone https://github.com/dgarrimar/mvgwas-nf\r +cd mvgwas-nf\r +nextflow run mvgwas.nf -with-docker\r +```\r +\r +## Pipeline usage\r +\r +Launching the pipeline with the `--help` parameter shows the help message:\r +\r +```\r +nextflow run mvgwas.nf --help\r +```\r +\r +```\r +N E X T F L O W ~ version 20.04.1\r +Launching `mvgwas.nf` [amazing_roentgen] - revision: 56125073b7\r +\r +mvgwas-nf: A pipeline for multivariate Genome-Wide Association Studies\r +==============================================================================================\r +Performs multi-trait GWAS using using MANTA (https://github.com/dgarrimar/manta)\r +\r +Usage:\r +nextflow run mvgwas.nf [options]\r +\r +Parameters:\r +--pheno PHENOTYPES phenotype file\r +--geno GENOTYPES indexed genotype VCF file\r +--cov COVARIATES covariate file\r +--l VARIANTS/CHUNK variants tested per chunk (default: 10000)\r +--t TRANSFOMATION phenotype transformation: none, sqrt, log (default: none)\r +--i INTERACTION test for interaction with a covariate: none, (default: none)\r +--ng INDIVIDUALS/GENOTYPE minimum number of individuals per genotype group (default: 10)\r +--dir DIRECTORY output directory (default: result)\r +--out OUTPUT output file (default: mvgwas.tsv)\r +```\r +\r +## Input files and format\r +\r +`mvgwas-nf` requires the following input files:\r +\r +* **Genotypes.** \r +[bgzip](http://www.htslib.org/doc/bgzip.html)-compressed and indexed [VCF](https://samtools.github.io/hts-specs/VCFv4.3.pdf) genotype file.\r +\r +* **Phenotypes.**\r +Tab-separated file with phenotype measurements (quantitative) for each sample (i.e. *n* samples x *q* phenotypes).\r +The first column should contain sample IDs. Columns should be named.\r +\r +* **Covariates.**\r +Tab-separated file with covariate measurements (quantitative or categorical) for each sample (i.e. *n* samples x *k* covariates). 
\r +The first column should contain sample IDs. Columns should be named. \r +\r +Example [data](data) is available for the test run.\r +\r +## Pipeline results\r +\r +An output text file containing the multi-trait GWAS summary statistics (default: `./result/mvgwas.tsv`), with the following information:\r +\r +* `CHR`: chromosome\r +* `POS`: position\r +* `ID`: variant ID\r +* `REF`: reference allele\r +* `ALT`: alternative allele\r +* `F`: pseudo-F statistic\r +* `R2`: fraction of variance explained by the variant\r +* `P`: P-value\r +\r +The output folder and file names can be modified with the `--dir` and `--out` parameters, respectively.\r +\r +## Cite mvgwas-nf\r +\r +If you find `mvgwas-nf` useful in your research please cite the related publication:\r +\r +Garrido-Martín, D., Calvo, M., Reverter, F., Guigó, R. A fast non-parametric test of association for multiple traits. *bioRxiv* (2022). [https://doi.org/10.1101/2022.06.06.493041](https://doi.org/10.1101/2022.06.06.493041)\r +""" ; + schema1:keywords "GWAS, Multivariate, Non-parametric, Nextflow" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "mvgwas-nf" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/436?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2023-10-20T09:57:48.562544" ; + schema1:hasPart , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/baredsc" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + ; + schema1:name "baredsc/baredSC-1d-logNorm" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.14" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.14" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "baredsc/baredSC-2d-logNorm" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/baredsc" ; + schema1:version "0.1" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.130.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Amber Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:24:42 +0100" ; + schema1:url 
"https://workflowhub.eu/workflows/130/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 66118 ; + schema1:creator , + ; + schema1:dateCreated "2022-09-15T12:29:23Z" ; + schema1:dateModified "2023-01-16T13:50:19Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/130?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Amber Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://raw.githubusercontent.com/bioexcel/biobb_wf_amber_md_setup/master/biobb_wf_amber_md_setup/notebooks/mdsetup/biobb_amber_setup_notebook.ipynb" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# Galaxy Workflow Documentation: MS Finder Pipeline\r +\r +This document outlines a MSFinder Galaxy workflow designed for peak annotation. The workflow consists of several steps aimed at preprocessing MS data, filtering, enhancing, and running MSFinder.\r +\r +## Step 1: Data Collection and Preprocessing\r +Collect if the inchi and smiles are missing from the dataset, and subsequently filter out the spectra which are missing inchi and smiles.\r +\r +### 1.1 MSMetaEnhancer: Collect InChi, Isomeric_smiles, and Nominal_mass\r +- Utilizes MSMetaEnhancer to collect InChi and Isomeric_smiles using PubChem and IDSM databases.\r +- Utilizes MSMetaEnhancer to collect MW using RDkit (For GOLM).\r +\r +### 1.2 replace key\r +- replace isomeric_smiles key to smiles using replace text tool\r +- replace MW key to parent_mass using replace text tool (For GOLM)\r +\r +### 1.3 Matchms Filtering\r +- Filters out invalid SMILES and InChi from the dataset using Matchms filtering.\r +\r +## Step 2: Complex Removal and Subsetting Dataset\r +Removes coordination complexes from the dataset.\r +\r +### 2.1 Remove Complexes and Subset Data\r +- Removes complexes from the dataset.\r +- Exports metadata using Matchms metadata export, cuts the SMILES column, removes complexes using Rem_Complex tool, and updates the dataset using Matchms subsetting.\r +\r +## Step 3: Data Key Manipulation\r +Add missing metadata required by the MSFinder for annotation.\r +\r +### 3.1 Matchms Remove Key\r +- Removes existing keys such as adduct, charge, and ionmode from the dataset.\r +\r +### 3.2 Matchms Add Key\r +- Adds necessary keys like charge, ionmode, and adduct to the 
dataset.\r +\r +### 3.3 Matchms Filtering\r +- Derives precursor m/z using parent mass and adduct information using matchms filtering.\r +\r +### 3.4 Matchms Convert\r +- Converts the dataset to Riken format for compatibility with MSFinder using matchms convert.\r +\r +## Step 4: Peak Annotation\r +### 4.1 Recetox-MSFinder\r +- Executes MSFinder with a 0.5 Da tolerance for both MS1 and MS2, including all element checks and an extended range for peak annotation.\r +\r +## Step 5: Error Handling and Refinement\r +Check the MSFinder output to see if the output is the results or the log file. If the output is log file remove the smile from the dataset using matchms subsetting tool and rerun MSFinder.\r +\r +### 5.1 Error Handling\r +- Handles errors in peak annotation by removing SMILES that are not accepted by MSFinder.\r +- Reruns MSFinder after error correction or with different parameter (if applicable).\r +\r +## Step 6: High-res Annotation\r +### 6.1 High-Res Peak Overwriting\r +- Utilizes the Use_Theoretical_mz_Annotations tool to Overwrite experimentally measured mz values for peaks with theoretical values from peak comments.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.888.2" ; + schema1:isBasedOn "https://github.com/RECETOX/workflow-testing/blob/main/msfinder_workflow/Galaxy_Workflow_MsFinder_Workflow_GOLM_V2.ga" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Theoretical fragment substructure generation and in silico mass spectral library high-resolution upcycling workflow" ; + schema1:sdDatePublished "2024-07-12 13:18:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/888/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 41205 ; + schema1:creator , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-06T10:18:12Z" ; + schema1:dateModified "2024-06-19T09:35:34Z" ; + schema1:description """# Galaxy Workflow Documentation: MS Finder Pipeline\r +\r +This document outlines a MSFinder Galaxy workflow designed for peak annotation. The workflow consists of several steps aimed at preprocessing MS data, filtering, enhancing, and running MSFinder.\r +\r +## Step 1: Data Collection and Preprocessing\r +Collect if the inchi and smiles are missing from the dataset, and subsequently filter out the spectra which are missing inchi and smiles.\r +\r +### 1.1 MSMetaEnhancer: Collect InChi, Isomeric_smiles, and Nominal_mass\r +- Utilizes MSMetaEnhancer to collect InChi and Isomeric_smiles using PubChem and IDSM databases.\r +- Utilizes MSMetaEnhancer to collect MW using RDkit (For GOLM).\r +\r +### 1.2 replace key\r +- replace isomeric_smiles key to smiles using replace text tool\r +- replace MW key to parent_mass using replace text tool (For GOLM)\r +\r +### 1.3 Matchms Filtering\r +- Filters out invalid SMILES and InChi from the dataset using Matchms filtering.\r +\r +## Step 2: Complex Removal and Subsetting Dataset\r +Removes coordination complexes from the dataset.\r +\r +### 2.1 Remove Complexes and Subset Data\r +- Removes complexes from the dataset.\r +- Exports metadata using Matchms metadata export, cuts the SMILES column, removes complexes using Rem_Complex tool, and updates the dataset using Matchms subsetting.\r +\r +## Step 3: Data Key Manipulation\r +Add missing metadata required by the MSFinder for annotation.\r +\r +### 3.1 Matchms Remove Key\r +- Removes existing keys such as adduct, charge, and ionmode from the dataset.\r +\r +### 3.2 Matchms Add Key\r +- Adds necessary keys like charge, ionmode, and adduct to 
the dataset.\r +\r +### 3.3 Matchms Filtering\r +- Derives precursor m/z using parent mass and adduct information using matchms filtering.\r +\r +### 3.4 Matchms Convert\r +- Converts the dataset to Riken format for compatibility with MSFinder using matchms convert.\r +\r +## Step 4: Peak Annotation\r +### 4.1 Recetox-MSFinder\r +- Executes MSFinder with a 0.5 Da tolerance for both MS1 and MS2, including all element checks and an extended range for peak annotation.\r +\r +## Step 5: Error Handling and Refinement\r +Check the MSFinder output to see if the output is the results or the log file. If the output is log file remove the smile from the dataset using matchms subsetting tool and rerun MSFinder.\r +\r +### 5.1 Error Handling\r +- Handles errors in peak annotation by removing SMILES that are not accepted by MSFinder.\r +- Reruns MSFinder after error correction or with different parameter (if applicable).\r +\r +## Step 6: High-res Annotation\r +### 6.1 High-Res Peak Overwriting\r +- Utilizes the Use_Theoretical_mz_Annotations tool to Overwrite experimentally measured mz values for peaks with theoretical values from peak comments.\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/888?version=1" ; + schema1:keywords "Bioinformatics, Cheminformatics, Metabolomics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Theoretical fragment substructure generation and in silico mass spectral library high-resolution upcycling workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/888?version=2" ; + schema1:version 2 ; + ns1:input ; + ns1:output , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description "This is a Galaxy workflow for performing molecular dynamics simulations and analysis with flavivirus helicases in the Apo or unbound state. 
The associated input files can be found at: https://zenodo.org/records/7493015 The associated output files can be found at: https://zenodo.org/records/7850935" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.762.1" ; + schema1:isBasedOn "https://zenodo.org/records/7493015" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for flavivirushelicase_apo" ; + schema1:sdDatePublished "2024-07-12 13:24:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/762/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 72163 ; + schema1:creator ; + schema1:dateCreated "2024-02-16T17:30:27Z" ; + schema1:dateModified "2024-03-02T16:24:20Z" ; + schema1:description "This is a Galaxy workflow for performing molecular dynamics simulations and analysis with flavivirus helicases in the Apo or unbound state. The associated input files can be found at: https://zenodo.org/records/7493015 The associated output files can be found at: https://zenodo.org/records/7850935" ; + schema1:keywords "zika, dengue, west nile, helicase, rna virus, molecular dynamics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "flavivirushelicase_apo" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/762?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.196.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook ABC MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/196/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 101184 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-23T08:57:03Z" ; + schema1:dateModified "2023-01-16T13:53:15Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/196?version=4" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook ABC MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_wf_amber_md_setup/blob/master/biobb_wf_amber_md_setup/notebooks/abcsetup/biobb_amber_ABC_setup.ipynb" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1023?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/smrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/smrnaseq" ; + schema1:sdDatePublished "2024-07-12 13:18:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1023/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4039 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:18Z" ; + schema1:dateModified "2024-06-11T12:55:18Z" ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1023?version=10" ; + schema1:keywords "small-rna, smrna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/smrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1023?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-02-29T18:36:07.286557" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/fluorescence-nuclei-segmentation-and-counting" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "fluorescence-nuclei-segmentation-and-counting/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.63.1" ; + schema1:isBasedOn "https://github.com/arvados/bh20-seq-resource" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for COVID-19 PubSeq Pangenome Generate" ; + schema1:sdDatePublished "2024-07-12 13:35:12 +0100" ; + schema1:url "https://workflowhub.eu/workflows/63/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3464 ; + schema1:creator , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-10-07T09:36:13Z" ; + schema1:dateModified "2023-01-16T13:44:54Z" ; + schema1:description "" ; + schema1:image ; + schema1:keywords "covid-19, CWL, pangenome" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "COVID-19 PubSeq Pangenome Generate" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/63?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 30852 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """To discover causal mutations of inherited diseases it’s common practice to do a trio analysis. In a trio analysis DNA is sequenced of both the patient and parents. Using this method, it’s possible to identify multiple inheritance patterns. Some examples of these patterns are autosomal recessive, autosomal dominant, and de-novo variants, which are represented in the figure below. 
To elaborate, the most left tree shows an autosomal dominant inhertitance pattern where the offspring inherits a faulty copy of the gene from one of the parents.\r +\r +To discover these mutations either whole exome sequencing (WES) or whole genome sequencing (WGS) can be used. With these technologies it is possible to uncover the DNA of the parents and offspring to find (shared) mutations in the DNA. These mutations can include insertions/deletions (indels), loss of heterozygosity (LOH), single nucleotide variants (SNVs), copy number variations (CNVs), and fusion genes.\r +\r +In this workflow we will also make use of the HTSGET protocol, which is a program to download our data securely and savely. This protocol has been implemented in the EGA Download Client Tool: toolshed.g2.bx.psu.edu/repos/iuc/ega_download_client/pyega3/4.0.0+galaxy0 tool, so we don’t have to leave Galaxy to retrieve our data.\r +\r +We will not start our analysis from scratch, since the main goal of this tutorial is to use the HTSGET protocol to download variant information from an online archive and to find the causative variant from those variants. If you want to learn how to do the analysis from scratch, using the raw reads, you can have a look at the Exome sequencing data analysis for diagnosing a genetic disease tutorial.""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/363?version=1" ; + schema1:isBasedOn "https://github.com/galaxyproject/training-material/blob/main/topics/variant-analysis/tutorials/trio-analysis/workflows/main_workflow.ga" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Trio Analysis" ; + schema1:sdDatePublished "2024-07-12 13:34:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/363/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 26615 ; + schema1:creator ; + schema1:dateCreated "2022-06-09T08:32:07Z" ; + schema1:dateModified "2023-03-01T15:35:23Z" ; + schema1:description """To discover causal mutations of inherited diseases it’s common practice to do a trio analysis. In a trio analysis DNA is sequenced of both the patient and parents. Using this method, it’s possible to identify multiple inheritance patterns. Some examples of these patterns are autosomal recessive, autosomal dominant, and de-novo variants, which are represented in the figure below. To elaborate, the most left tree shows an autosomal dominant inhertitance pattern where the offspring inherits a faulty copy of the gene from one of the parents.\r +\r +To discover these mutations either whole exome sequencing (WES) or whole genome sequencing (WGS) can be used. With these technologies it is possible to uncover the DNA of the parents and offspring to find (shared) mutations in the DNA. These mutations can include insertions/deletions (indels), loss of heterozygosity (LOH), single nucleotide variants (SNVs), copy number variations (CNVs), and fusion genes.\r +\r +In this workflow we will also make use of the HTSGET protocol, which is a program to download our data securely and savely. This protocol has been implemented in the EGA Download Client Tool: toolshed.g2.bx.psu.edu/repos/iuc/ega_download_client/pyega3/4.0.0+galaxy0 tool, so we don’t have to leave Galaxy to retrieve our data.\r +\r +We will not start our analysis from scratch, since the main goal of this tutorial is to use the HTSGET protocol to download variant information from an online archive and to find the causative variant from those variants. 
If you want to learn how to do the analysis from scratch, using the raw reads, you can have a look at the Exome sequencing data analysis for diagnosing a genetic disease tutorial.""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/363?version=1" ; + schema1:keywords "variant-analysis" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Trio Analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/363?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2022-10-24T13:41:51.909409" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "rnaseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.11" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Dual RNA-seq pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/982?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/dualrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/dualrnaseq" ; + schema1:sdDatePublished "2024-07-12 13:21:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/982/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8839 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:49Z" ; + schema1:dateModified "2024-06-11T12:54:49Z" ; + schema1:description "Dual RNA-seq pipeline" ; + schema1:keywords "dualrna-seq, host-pathogen, quantification, readmapping, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/dualrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/982?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2022-02-11T15:29:50.854615" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-pe-illumina-artic-variant-calling/COVID-19-PE-ARTIC-ILLUMINA" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """This workflow demonstrates the usage of the [Community Earth System Model](https://www.cesm.ucar.edu/) on Galaxy Europe. \r +\r +A fully coupled B1850 compset with resolution f19_g17 is run for 1 month.\r +\r +![](https://nordicesmhub.github.io/GEO4962/fig/newcase.png)""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/364?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Workflow for running the Community Earth System Model in fully coupled mode" ; + schema1:sdDatePublished "2024-07-12 13:35:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/364/ro_crate?version=1" . 
+ + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 2639 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 18008 ; + schema1:dateCreated "2022-06-12T19:01:32Z" ; + schema1:dateModified "2023-01-16T14:01:23Z" ; + schema1:description """This workflow demonstrates the usage of the [Community Earth System Model](https://www.cesm.ucar.edu/) on Galaxy Europe. \r +\r +A fully coupled B1850 compset with resolution f19_g17 is run for 1 month.\r +\r +![](https://nordicesmhub.github.io/GEO4962/fig/newcase.png)""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Workflow for running the Community Earth System Model in fully coupled mode" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/364?version=1" ; + schema1:version 1 ; + ns1:input , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 6568 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/987?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/funcscan" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/funcscan" ; + schema1:sdDatePublished "2024-07-12 13:21:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/987/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16015 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:54Z" ; + schema1:dateModified "2024-06-11T12:54:54Z" ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/987?version=8" ; + schema1:keywords "amp, AMR, antibiotic-resistance, antimicrobial-peptides, antimicrobial-resistance-genes, arg, Assembly, bgc, biosynthetic-gene-clusters, contigs, function, Metagenomics, natural-products, screening, secondary-metabolites" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/funcscan" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/987?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=23" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-07-12 13:22:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=23" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10903 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:13Z" ; + schema1:dateModified "2024-06-11T12:55:13Z" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=23" ; + schema1:version 23 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description "This is a Galaxy workflow for performing molecular dynamics simulations and analysis with coronavirus helicases bound to a ligand/drug molecule. The associated input files can be found at: https://zenodo.org/records/7492987. The associated output files can be found at: https://zenodo.org/records/7851000." ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.763.1" ; + schema1:isBasedOn "https://zenodo.org/records/7492987" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for coronavirushelicase_proteindrugcomplex" ; + schema1:sdDatePublished "2024-07-12 13:24:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/763/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 78464 ; + schema1:creator ; + schema1:dateCreated "2024-02-16T19:04:32Z" ; + schema1:dateModified "2024-03-02T17:06:31Z" ; + schema1:description "This is a Galaxy workflow for performing molecular dynamics simulations and analysis with coronavirus helicases bound to a ligand/drug molecule. The associated input files can be found at: https://zenodo.org/records/7492987. The associated output files can be found at: https://zenodo.org/records/7851000." 
; + schema1:keywords "covid-19, coronavirus, helicase, rna virus, molecular dynamics, SARS-CoV-2, MERS, NSP13, covid19.galaxyproject.org" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "coronavirushelicase_proteindrugcomplex" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/763?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "MNase-seq analysis pipeline using BWA and DANPOS2." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1000?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/mnaseseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mnaseseq" ; + schema1:sdDatePublished "2024-07-12 13:20:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1000/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5436 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:05Z" ; + schema1:dateModified "2024-06-11T12:55:05Z" ; + schema1:description "MNase-seq analysis pipeline using BWA and DANPOS2." ; + schema1:keywords "mnase-seq, nucleosome, nucleosome-maps, nucleosome-positioning" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mnaseseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1000?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/965?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/atacseq" ; + schema1:sdDatePublished "2024-07-12 13:22:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/965/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4439 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:41Z" ; + schema1:dateModified "2024-06-11T12:54:41Z" ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/965?version=8" ; + schema1:keywords "ATAC-seq, chromatin-accessibiity" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/atacseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/965?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for analysis of Molecular Pixelation assays" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1010?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/pixelator" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/pixelator" ; + schema1:sdDatePublished "2024-07-12 13:20:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1010/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10964 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:07Z" ; + schema1:dateModified "2024-06-11T12:55:07Z" ; + schema1:description "Pipeline for analysis of Molecular Pixelation assays" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1010?version=5" ; + schema1:keywords "molecular-pixelation, pixelator, pixelgen-technologies, proteins, single-cell, single-cell-omics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/pixelator" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1010?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Genes and transcripts annotation with Isoseq using uLTRA and TAMA" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/993?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/isoseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/isoseq" ; + schema1:sdDatePublished "2024-07-12 13:21:04 +0100" ; + schema1:url "https://workflowhub.eu/workflows/993/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8438 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:58Z" ; + schema1:dateModified "2024-06-11T12:54:58Z" ; + schema1:description "Genes and transcripts annotation with Isoseq using uLTRA and TAMA" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/993?version=6" ; + schema1:keywords "isoseq, isoseq-3, rna, tama, ultra" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/isoseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/993?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Quantitative Mass Spectrometry nf-core workflow" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1013?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/quantms" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/quantms" ; + schema1:sdDatePublished "2024-07-12 13:19:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1013/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14567 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:08Z" ; + schema1:dateModified "2024-06-11T12:55:08Z" ; + schema1:description "Quantitative Mass Spectrometry nf-core workflow" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1013?version=3" ; + schema1:keywords "dia, lfq, mass-spec, mass-spectrometry, openms, proteogenomics, Proteomics, tmt" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/quantms" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1013?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + schema1:datePublished "2022-10-24T11:08:23.961762" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-consensus-from-variation" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-consensus-from-variation/COVID-19-CONSENSUS-CONSTRUCTION" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.11" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# SNP-Calling\r +GATK Variant calling pipeline for genomic data using Nextflow\r +\r +[![nextflow](https://img.shields.io/badge/nextflow-%E2%89%A522.04.5-brightgreen.svg)](http://nextflow.io)\r +\r +## Quickstart\r +\r +Install Nextflow using the following command: \r +\r + curl -s https://get.nextflow.io | bash\r + \r +Index reference genome:\r +\r + `$ bwa index /path/to/reference/genome.fa`\r + \r + `$ samtools faidx /path/to/reference/genome.fa`\r + \r + `$ gatk CreateSequenceDictionary -R /path/to/genome.fa -O genome.dict`\r +\r +Launch the pipeline execution with the following command:\r +\r + nextflow run jdetras/snp-calling -r main -profile docker\r + \r +## Pipeline Description\r +\r +The variant calling pipeline follows the recommended practices from GATK. The input genomic data are aligned to a reference genome using BWA. The alignemnt files are processed using Picard Tools. Variant calling is done using samtools and GATK. \r +\r +## Input files\r +\r +The input files required to run the pipeline:\r +* Genomic sequence paired reads, `*_{1,2}.fq.gz`\r +* Reference genome, `*.fa`\r +\r +## Pipeline parameters\r +\r +### Usage\r +Usage: `nextflow run jdetras/snp-calling -profile docker [options]`\r +\r +Options:\r +\r +* `--reads` \r +* `--genome`\r +* `--output`\r +\r +Example: \r + `$ nextflow run jdetras/snp-calling -profile docker --reads '/path/to/reads/*_{1,2}.fq.gz' --genome '/path/to/reference/genome.fa' --output '/path/to/output'`\r +\r +#### `--reads`\r +\r +* The path to the FASTQ read files.\r +* Wildcards (*, ?) can be used to declare multiple reads. Use single quotes when wildcards are used. 
\r +* Default parameter: `$projectDir/data/reads/*_{1,2}.fq.gz`\r +\r +Example: \r + `$ nextflow run jdetras/snp-calling -profile docker --reads '/path/to/reads/*_{1,2}.fq.gz'`\r + \r +#### `--genome`\r +\r +* The path to the genome file in fasta format.\r +* The extension is `.fa`.\r +* Default parameter: `$projectDir/data/reference/genome.fa`\r +\r +Example:\r + `$ nextflow run jdetras/snp-calling -profile docker --genome /path/to/reference/genome.fa`\r + \r +#### `--output`\r +\r +* The path to the directory for the output files.\r +* Default parameter: `$projectDir/output`\r +\r +## Software\r +\r +* [BWA 0.7.17](http://bio-bwa.sourceforge.net/)\r +* [Samtools 1.3.1](http://www.htslib.org/)\r +* [GATK 4.2.6.1](https://gatk.broadinstitute.org/) \r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/442?version=1" ; + schema1:isBasedOn "https://github.com/jdetras/SNP-Calling.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for SNP-Calling Workflow" ; + schema1:sdDatePublished "2024-07-12 13:34:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/442/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1737 ; + schema1:dateCreated "2023-03-21T05:30:15Z" ; + schema1:dateModified "2023-03-21T05:30:15Z" ; + schema1:description """# SNP-Calling\r +GATK Variant calling pipeline for genomic data using Nextflow\r +\r +[![nextflow](https://img.shields.io/badge/nextflow-%E2%89%A522.04.5-brightgreen.svg)](http://nextflow.io)\r +\r +## Quickstart\r +\r +Install Nextflow using the following command: \r +\r + curl -s https://get.nextflow.io | bash\r + \r +Index reference genome:\r +\r + `$ bwa index /path/to/reference/genome.fa`\r + \r + `$ samtools faidx /path/to/reference/genome.fa`\r + \r + `$ gatk CreateSequenceDictionary -R /path/to/genome.fa -O genome.dict`\r +\r +Launch the pipeline execution with the following command:\r +\r + nextflow run jdetras/snp-calling -r main -profile docker\r + \r +## Pipeline Description\r +\r +The variant calling pipeline follows the recommended practices from GATK. The input genomic data are aligned to a reference genome using BWA. The alignemnt files are processed using Picard Tools. Variant calling is done using samtools and GATK. \r +\r +## Input files\r +\r +The input files required to run the pipeline:\r +* Genomic sequence paired reads, `*_{1,2}.fq.gz`\r +* Reference genome, `*.fa`\r +\r +## Pipeline parameters\r +\r +### Usage\r +Usage: `nextflow run jdetras/snp-calling -profile docker [options]`\r +\r +Options:\r +\r +* `--reads` \r +* `--genome`\r +* `--output`\r +\r +Example: \r + `$ nextflow run jdetras/snp-calling -profile docker --reads '/path/to/reads/*_{1,2}.fq.gz' --genome '/path/to/reference/genome.fa' --output '/path/to/output'`\r +\r +#### `--reads`\r +\r +* The path to the FASTQ read files.\r +* Wildcards (*, ?) can be used to declare multiple reads. Use single quotes when wildcards are used. 
\r +* Default parameter: `$projectDir/data/reads/*_{1,2}.fq.gz`\r +\r +Example: \r + `$ nextflow run jdetras/snp-calling -profile docker --reads '/path/to/reads/*_{1,2}.fq.gz'`\r + \r +#### `--genome`\r +\r +* The path to the genome file in fasta format.\r +* The extension is `.fa`.\r +* Default parameter: `$projectDir/data/reference/genome.fa`\r +\r +Example:\r + `$ nextflow run jdetras/snp-calling -profile docker --genome /path/to/reference/genome.fa`\r + \r +#### `--output`\r +\r +* The path to the directory for the output files.\r +* Default parameter: `$projectDir/output`\r +\r +## Software\r +\r +* [BWA 0.7.17](http://bio-bwa.sourceforge.net/)\r +* [Samtools 1.3.1](http://www.htslib.org/)\r +* [GATK 4.2.6.1](https://gatk.broadinstitute.org/) \r +""" ; + schema1:keywords "variant calling, GATK4, BWA-mem, rice" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "SNP-Calling Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/442?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """## EBP-Nor Genome Assembly pipeline\r +\r +This repository contains the EBP-Nor genome assembly pipeline. This pipeline is implemented in snakemake.\r +This pipeline is developed to create haplotype-resolved genome assemblies from PacBio HiFi reads and HiC reads,\r +and is primarly designed for diploid eukaryotic organisms. 
The pipeline is designed to work on a linux cluster with slurm as workload manager.\r +\r +## Requirements & Setup\r +\r +Some software need to be configured/installed before the pipeline can be run\r +\r +### Conda setup\r +\r +Most required software, including snakemake itself, can be installed using [conda](https://conda.io/projects/conda/en/latest/user-guide/install/index.html).\r +\r +Once conda is installed, you can create a new environment containing most necessary software from the provided asm_pipeline.yaml file as follows:\r +\r +```shell\r +conda create -n asm_pipeline --file=worfklow/envs/asm_pipeline.yaml\r +```\r +\r +### Other software setup\r +\r +The following software need to be installed manually:\r +\r +- KMC v3.1.1 (https://github.com/tbenavi1/KMC)\r +- HiFiAdapterFilt (https://github.com/sheinasim/HiFiAdapterFilt)\r +- Oatk (https://github.com/c-zhou/oatk)\r +- OatkDB (https://github.com/c-zhou/OatkDB)\r +- NCBI FCS-Adaptor (https://github.com/ncbi/fcs/wiki/FCS-adaptor)\r +- NCBI FCS-GX (https://github.com/ncbi/fcs/wiki/FCS-GX)\r +\r +Please refer to their respective installation instructions to properly install them. 
You will need to privide the installation paths of these software to the config file (see Parameter section).\r +\r +### BUSCO database setup\r +\r +As in general, computing nodes are not connected to the internet, BUSCO lineage datasets need to be downloaded manually before running the pipeline.\r +This can easily be done by running\r +\r +```shell\r +busco --download eukaryota\r +```\r +\r +You will need to specify the folder where you downloaded the busco lineages in the config file (see Parameter section).\r +\r +### Data\r +\r +This pipeline is created for using PacBio HiFi reads together with paired-end Hi-C data.\r +You will need to specify the absolute paths to these files in the config file (see Parameters section).\r +\r +### Parameters\r +\r +The necessary config files for running the pipeline can be found in the config folder.\r +\r +General snakemake and cluster submission parameters are defined in ```config/config.yaml```, \r +data- and software-specfic parameters are defined in ```config/asm_params.yaml```.\r +\r +First, define the paths of the input files you want to use:\r +- pacbio: path to the location of the PacBio HiFi reads (```.fastq.gz```)\r +- hicF and hicR: path to the forward and reverse HiC reads respectively\r +\r +For software not installed by conda, the installation path needs to be provided to the Snakemake pipeline by editing following parameters in the ```config/asm_params.yaml```:\r +\r +- Set the "adapterfilt_install_dir" parameter to the installation path of HiFiAdapterFilt\r +- Set the "KMC_path" parameter to the installation path of KMC\r +- Set the "oatk_dir" parameter to the installation path of oatk\r +- Set the "oatk_db" parameter to the directory where you downloaded the oatk_db files\r +- Set the "fcs_path" parameter to the location of the ```run_fcsadaptor.sh``` and ```fcs.py``` scripts\r +- Set the "fcs_adaptor_image" and "fcs_gx_image" parameters to the paths to the ```fcs-adaptor.sif``` and ```fcs-gx.sif``` files 
respectively\r +- Set the "fcs_gx_db" parameter to the path of the fcs-gx database\r +\r +A couple of other parameters need to be verified as well in the config/asm_params.yaml file before running the pipeline:\r +\r +- The location of the input data (```input_dir```) should be set to the folder containing the input data.\r +- The location of the downloaded busco lineages (```busco_db_dir```) should be set to the folder containing the busco lineages files downloaded earlier\r +- The required BUSCO lineage for running the BUSCO analysis needs to set (```busco_lineage``` parameter). Run ```busco --list-datasets``` to get an overview of all available datasets.\r +- The required oatk lineage for running organelle genome assembly (```oatk_lineage``` parameter). Check https://github.com/c-zhou/OatkDB for an overview of available lineages.\r +- A boolean value wether the species is plant (for plastid prediction) or not (```oatk_isPlant```; set to either True or False)\r +- The NCBI taxid of your species, required for the decontamination step (```taxid``` parameter)\r +\r +## Usage and run modes\r +\r +Before running, make sure to activate the conda environment containing the necessary software: ```conda activate asm_assembly```.\r +To run the pipeline, run the following command:\r +\r +```\r +snakemake --profile config/ --configfile config/asm_params.yaml --snakefile workflow/Snakefile {run_mode}\r +```\r +\r +If you invoke the snakemake command in another directory than the one containing the ```workflow``` and ```config``` folders, \r +or if the config files (```config.yaml``` and ```asm_params.yaml```) are in another location, you need to specify their correct paths on the command line.\r +\r +The workflow parameters can be modified in 3 ways:\r +- Directly modifying the ```config/asm_parameters.yaml``` file\r +- Overriding the default parameters on the command line: ```--config parameter=new_value```\r +- Overriding the default parameters using a different yaml file: 
```--configfile path_to_parameters.yaml```\r +\r +The pipeline has different runing modes, and the run mode should always be the last argument on the command line:\r +\r +- "all" (default): will run the full workflow including pre-assembly (genomescope & smudgeplot), assembly, scaffolding, decontamination, and organelle assembly\r +- "pre_assembly": will run only the pre-assembly steps (genomescope & smudgeplot)\r +- "assembly": will filter the HiFi reads and assemble them using hifiasm (also using the Hi-C reads), and run busco\r +- "scaffolding": will run all steps necessary for scaffolding (filtering, assembly, HiC filtering, scaffolding, busco), but without pre-assembly\r +- "decontamination": will run assembly, scaffolding, and decontamination, but without pre-assembly and busco analyses\r +- "organelles": will run only organnelle genome assembly\r +\r +## Output\r +\r +All generated output will be present in the "results" directory, which will be created in the folder from where you invoke the snakemake command.\r +This results directory contains different subdirectories related to the different steps in the assembly:\r +- results/pre_assembly: genomescope and smudgeplot output (each in its own subfolder)\r +- results/assembly: Hifiasm assembly output and corresponding busco results\r +- results/scaffolding: scaffolding output, separated in two folders:\r + - meryl: meryl databases used for filtering HiC reads\r + - yahs: scaffolding output, including final scaffolds and their corresponding busco results\r +- results/decontamination: decontamination output of the final scaffolded assembly\r +- results/organelles: assembled organellar genomes\r +\r +Additionally, a text file containing all software versions will be created in the specified input directory.\r +The log files of the different steps in the workflow can be found in the ```logs``` directory that will be created.""" ; + schema1:hasPart , + ; + schema1:identifier 
"https://workflowhub.eu/workflows/740?version=1" ; + schema1:isBasedOn "https://github.com/ebp-nor/GenomeAssembly" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for EBP-Nor Genome Assembly Pipeline" ; + schema1:sdDatePublished "2024-07-12 13:24:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/740/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 19252 ; + schema1:dateCreated "2024-02-13T09:44:35Z" ; + schema1:dateModified "2024-02-13T09:44:35Z" ; + schema1:description """## EBP-Nor Genome Assembly pipeline\r +\r +This repository contains the EBP-Nor genome assembly pipeline. This pipeline is implemented in snakemake.\r +This pipeline is developed to create haplotype-resolved genome assemblies from PacBio HiFi reads and HiC reads,\r +and is primarly designed for diploid eukaryotic organisms. The pipeline is designed to work on a linux cluster with slurm as workload manager.\r +\r +## Requirements & Setup\r +\r +Some software need to be configured/installed before the pipeline can be run\r +\r +### Conda setup\r +\r +Most required software, including snakemake itself, can be installed using [conda](https://conda.io/projects/conda/en/latest/user-guide/install/index.html).\r +\r +Once conda is installed, you can create a new environment containing most necessary software from the provided asm_pipeline.yaml file as follows:\r +\r +```shell\r +conda create -n asm_pipeline --file=worfklow/envs/asm_pipeline.yaml\r +```\r +\r +### Other software setup\r +\r +The following software need to be installed manually:\r +\r +- KMC v3.1.1 (https://github.com/tbenavi1/KMC)\r +- HiFiAdapterFilt (https://github.com/sheinasim/HiFiAdapterFilt)\r +- Oatk (https://github.com/c-zhou/oatk)\r +- OatkDB (https://github.com/c-zhou/OatkDB)\r +- NCBI FCS-Adaptor 
(https://github.com/ncbi/fcs/wiki/FCS-adaptor)\r +- NCBI FCS-GX (https://github.com/ncbi/fcs/wiki/FCS-GX)\r +\r +Please refer to their respective installation instructions to properly install them. You will need to privide the installation paths of these software to the config file (see Parameter section).\r +\r +### BUSCO database setup\r +\r +As in general, computing nodes are not connected to the internet, BUSCO lineage datasets need to be downloaded manually before running the pipeline.\r +This can easily be done by running\r +\r +```shell\r +busco --download eukaryota\r +```\r +\r +You will need to specify the folder where you downloaded the busco lineages in the config file (see Parameter section).\r +\r +### Data\r +\r +This pipeline is created for using PacBio HiFi reads together with paired-end Hi-C data.\r +You will need to specify the absolute paths to these files in the config file (see Parameters section).\r +\r +### Parameters\r +\r +The necessary config files for running the pipeline can be found in the config folder.\r +\r +General snakemake and cluster submission parameters are defined in ```config/config.yaml```, \r +data- and software-specfic parameters are defined in ```config/asm_params.yaml```.\r +\r +First, define the paths of the input files you want to use:\r +- pacbio: path to the location of the PacBio HiFi reads (```.fastq.gz```)\r +- hicF and hicR: path to the forward and reverse HiC reads respectively\r +\r +For software not installed by conda, the installation path needs to be provided to the Snakemake pipeline by editing following parameters in the ```config/asm_params.yaml```:\r +\r +- Set the "adapterfilt_install_dir" parameter to the installation path of HiFiAdapterFilt\r +- Set the "KMC_path" parameter to the installation path of KMC\r +- Set the "oatk_dir" parameter to the installation path of oatk\r +- Set the "oatk_db" parameter to the directory where you downloaded the oatk_db files\r +- Set the "fcs_path" parameter to the 
location of the ```run_fcsadaptor.sh``` and ```fcs.py``` scripts\r +- Set the "fcs_adaptor_image" and "fcs_gx_image" parameters to the paths to the ```fcs-adaptor.sif``` and ```fcs-gx.sif``` files respectively\r +- Set the "fcs_gx_db" parameter to the path of the fcs-gx database\r +\r +A couple of other parameters need to be verified as well in the config/asm_params.yaml file before running the pipeline:\r +\r +- The location of the input data (```input_dir```) should be set to the folder containing the input data.\r +- The location of the downloaded busco lineages (```busco_db_dir```) should be set to the folder containing the busco lineages files downloaded earlier\r +- The required BUSCO lineage for running the BUSCO analysis needs to set (```busco_lineage``` parameter). Run ```busco --list-datasets``` to get an overview of all available datasets.\r +- The required oatk lineage for running organelle genome assembly (```oatk_lineage``` parameter). Check https://github.com/c-zhou/OatkDB for an overview of available lineages.\r +- A boolean value wether the species is plant (for plastid prediction) or not (```oatk_isPlant```; set to either True or False)\r +- The NCBI taxid of your species, required for the decontamination step (```taxid``` parameter)\r +\r +## Usage and run modes\r +\r +Before running, make sure to activate the conda environment containing the necessary software: ```conda activate asm_assembly```.\r +To run the pipeline, run the following command:\r +\r +```\r +snakemake --profile config/ --configfile config/asm_params.yaml --snakefile workflow/Snakefile {run_mode}\r +```\r +\r +If you invoke the snakemake command in another directory than the one containing the ```workflow``` and ```config``` folders, \r +or if the config files (```config.yaml``` and ```asm_params.yaml```) are in another location, you need to specify their correct paths on the command line.\r +\r +The workflow parameters can be modified in 3 ways:\r +- Directly modifying the 
```config/asm_parameters.yaml``` file\r +- Overriding the default parameters on the command line: ```--config parameter=new_value```\r +- Overriding the default parameters using a different yaml file: ```--configfile path_to_parameters.yaml```\r +\r +The pipeline has different runing modes, and the run mode should always be the last argument on the command line:\r +\r +- "all" (default): will run the full workflow including pre-assembly (genomescope & smudgeplot), assembly, scaffolding, decontamination, and organelle assembly\r +- "pre_assembly": will run only the pre-assembly steps (genomescope & smudgeplot)\r +- "assembly": will filter the HiFi reads and assemble them using hifiasm (also using the Hi-C reads), and run busco\r +- "scaffolding": will run all steps necessary for scaffolding (filtering, assembly, HiC filtering, scaffolding, busco), but without pre-assembly\r +- "decontamination": will run assembly, scaffolding, and decontamination, but without pre-assembly and busco analyses\r +- "organelles": will run only organnelle genome assembly\r +\r +## Output\r +\r +All generated output will be present in the "results" directory, which will be created in the folder from where you invoke the snakemake command.\r +This results directory contains different subdirectories related to the different steps in the assembly:\r +- results/pre_assembly: genomescope and smudgeplot output (each in its own subfolder)\r +- results/assembly: Hifiasm assembly output and corresponding busco results\r +- results/scaffolding: scaffolding output, separated in two folders:\r + - meryl: meryl databases used for filtering HiC reads\r + - yahs: scaffolding output, including final scaffolds and their corresponding busco results\r +- results/decontamination: decontamination output of the final scaffolded assembly\r +- results/organelles: assembled organellar genomes\r +\r +Additionally, a text file containing all software versions will be created in the specified input directory.\r +The 
log files of the different steps in the workflow can be found in the ```logs``` directory that will be created.""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "EBP-Nor Genome Assembly Pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/740?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 22814 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# deepconsensus 1.2 snakemake pipeline\r +This snakemake-based workflow takes in a subreads.bam and results in a deepconsensus.fastq\r +- no methylation calls !\r +\r +The metadata id of the subreads file needs to be: "m[numeric]_[numeric]_[numeric].subreads.bam"\r +\r +Chunking (how many subjobs) and ccs min quality filter can be adjusted in the config.yaml\r +\r +the checkpoint model for deepconsensus1.2 should be accessible like this:\r +gsutil cp -r gs://brain-genomics-public/research/deepconsensus/models/v1.2/model_checkpoint/* "${QS_DIR}"/model/\r +if that does not work, try to download all at:\r +https://console.cloud.google.com/storage/browser/brain-genomics-public/research/deepconsensus/models?pageState=(%22StorageObjectListTable%22:(%22f%22:%22%255B%255D%22))&prefix=&forceOnObjectsSortingFiltering=false\r +\r +A run example is included in the run_snake.sh\r +\r +Feedback / pull requests welcome!\r +\r +Developed by Daniel Rickert @ WGGC Düsseldorf\r +\r +more to look at:\r +\r +https://www.youtube.com/watch?v=TlWtIao2i9E\r +\r +https://www.nature.com/articles/s41587-022-01435-7\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1075?version=1" ; + schema1:isBasedOn "https://github.com/WestGermanGenomeCenter/deep_snake.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Deepconsensus for Sequel2/2e 
subreads" ; + schema1:sdDatePublished "2024-07-12 13:17:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1075/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4578 ; + schema1:dateCreated "2024-07-12T08:59:51Z" ; + schema1:dateModified "2024-07-12T08:59:51Z" ; + schema1:description """# deepconsensus 1.2 snakemake pipeline\r +This snakemake-based workflow takes in a subreads.bam and results in a deepconsensus.fastq\r +- no methylation calls !\r +\r +The metadata id of the subreads file needs to be: "m[numeric]_[numeric]_[numeric].subreads.bam"\r +\r +Chunking (how many subjobs) and ccs min quality filter can be adjusted in the config.yaml\r +\r +the checkpoint model for deepconsensus1.2 should be accessible like this:\r +gsutil cp -r gs://brain-genomics-public/research/deepconsensus/models/v1.2/model_checkpoint/* "${QS_DIR}"/model/\r +if that does not work, try to download all at:\r +https://console.cloud.google.com/storage/browser/brain-genomics-public/research/deepconsensus/models?pageState=(%22StorageObjectListTable%22:(%22f%22:%22%255B%255D%22))&prefix=&forceOnObjectsSortingFiltering=false\r +\r +A run example is included in the run_snake.sh\r +\r +Feedback / pull requests welcome!\r +\r +Developed by Daniel Rickert @ WGGC Düsseldorf\r +\r +more to look at:\r +\r +https://www.youtube.com/watch?v=TlWtIao2i9E\r +\r +https://www.nature.com/articles/s41587-022-01435-7\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Deepconsensus for Sequel2/2e subreads" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1075?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Automated quantitative analysis of DIA proteomics mass spectrometry measurements." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/980?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/diaproteomics" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/diaproteomics" ; + schema1:sdDatePublished "2024-07-12 13:21:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/980/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6543 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "Automated quantitative analysis of DIA proteomics mass spectrometry measurements." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/980?version=6" ; + schema1:keywords "data-independent-proteomics, dia-proteomics, openms, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/diaproteomics" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/980?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/984?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/epitopeprediction" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/epitopeprediction" ; + schema1:sdDatePublished "2024-07-12 13:21:30 +0100" ; + schema1:url "https://workflowhub.eu/workflows/984/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5704 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:51Z" ; + schema1:dateModified "2024-06-11T12:54:51Z" ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/984?version=7" ; + schema1:keywords "epitope, epitope-prediction, mhc-binding-prediction" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/epitopeprediction" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/984?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "The workflow takes a paired-reads collection (like illumina WGS or HiC), runs FastQC and SeqKit, trims with Fastp, and creates a MultiQC report. The main outputs are a paired collection of trimmed reads, a report with raw and trimmed reads stats, and a table with raw reads stats." ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.601.1" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/galaxy" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ERGA DataQC Illumina v2309 (WF0)" ; + schema1:sdDatePublished "2024-07-12 13:23:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/601/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 19048 ; + schema1:creator , + ; + schema1:dateCreated "2023-10-06T13:03:57Z" ; + schema1:dateModified "2024-03-13T09:02:48Z" ; + schema1:description "The workflow takes a paired-reads collection (like illumina WGS or HiC), runs FastQC and SeqKit, trims with Fastp, and creates a MultiQC report. The main outputs are a paired collection of trimmed reads, a report with raw and trimmed reads stats, and a table with raw reads stats." ; + schema1:image ; + schema1:isPartOf , + , + ; + schema1:keywords "ERGA, DataQC, illumina" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ERGA DataQC Illumina v2309 (WF0)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/0.Data_QC/Galaxy-Workflow-ERGA_DataQC_Illumina_v2309_(WF0).ga" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 169490 ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/0.Data_QC/pics/QC_illu_2309.png" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and modern ancient DNA pipeline in Nextflow and with cloud support." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-07-12 13:21:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3775 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:49Z" ; + schema1:dateModified "2024-06-11T12:54:49Z" ; + schema1:description "A fully reproducible and modern ancient DNA pipeline in Nextflow and with cloud support." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# rquest-omop-worker\r +Executes BC|RQuest Availability queries via an open source implementation of BC|Link against a target PostgreSQL data source containing OMOP CDM 5.3 data.\r +\r +## Inputs\r +\r +### Body\r +Sample input payload:\r +\r +```json\r +{\r + "task_id": "job-2023-01-13-14: 20: 38-",\r + "project": "",\r + "owner": "",\r + "cohort": {\r + "groups": [\r + {\r + "rules": [\r + {\r + "varname": "OMOP",\r + "varcat": "Person",\r + "type": "TEXT",\r + "oper": "=",\r + "value": "8507"\r + }\r + ],\r + "rules_oper": "AND"\r + }\r + ],\r + "groups_oper": "OR"\r + },\r + "collection": "",\r + "protocol_version": "",\r + "char_salt": "",\r + "uuid": ""\r +}\r +```\r +\r +### Database access\r +\r +Currently this workflow requires inputs for connecting to the database it will run queries against using SQL Alchemy compatible options:\r +- `db_host` - the postgres db server hostname\r +- `db_name` - the database name\r +- `db_user` - the username for postgres\r +- 
`db_password` the password for postgres\r +\r +It is not currently possible to use a port other than the postgres default (`5432`)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.471.2" ; + schema1:isBasedOn "https://github.com/HDRUK/rquest-omop-worker-workflows" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for rquest-omop-worker" ; + schema1:sdDatePublished "2024-07-12 13:27:10 +0100" ; + schema1:url "https://workflowhub.eu/workflows/471/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 8282 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 871 ; + schema1:creator ; + schema1:dateCreated "2023-10-10T15:03:58Z" ; + schema1:dateModified "2023-10-10T15:04:32Z" ; + schema1:description """# rquest-omop-worker\r +Executes BC|RQuest Availability queries via an open source implementation of BC|Link against a target PostgreSQL data source containing OMOP CDM 5.3 data.\r +\r +## Inputs\r +\r +### Body\r +Sample input payload:\r +\r +```json\r +{\r + "task_id": "job-2023-01-13-14: 20: 38-",\r + "project": "",\r + "owner": "",\r + "cohort": {\r + "groups": [\r + {\r + "rules": [\r + {\r + "varname": "OMOP",\r + "varcat": "Person",\r + "type": "TEXT",\r + "oper": "=",\r + "value": "8507"\r + }\r + ],\r + "rules_oper": "AND"\r + }\r + ],\r + "groups_oper": "OR"\r + },\r + "collection": "",\r + "protocol_version": "",\r + "char_salt": "",\r + "uuid": ""\r +}\r +```\r +\r +### Database access\r +\r +Currently this workflow requires inputs for connecting to the database it will run queries against using SQL Alchemy compatible options:\r +- `db_host` - the postgres db server hostname\r +- `db_name` - the database name\r +- `db_user` - the username for postgres\r +- `db_password` the password for postgres\r +\r +It is 
not currently possible to use a port other than the postgres default (`5432`)\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/471?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "rquest-omop-worker" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/471?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + ; + ns1:output . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=15" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-07-12 13:20:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=15" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17481 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:59Z" ; + schema1:dateModified "2024-06-11T12:54:59Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=15" ; + schema1:version 15 . 
+ + a schema1:Dataset ; + schema1:datePublished "2022-11-30T12:14:23.298144" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/rnaseq-sr" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "rnaseq-sr/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.3" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "B cell repertoire analysis pipeline with immcantation framework." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/963?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/airrflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bcellmagic" ; + schema1:sdDatePublished "2024-07-12 13:22:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/963/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4117 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:36Z" ; + schema1:dateModified "2024-06-11T12:54:36Z" ; + schema1:description "B cell repertoire analysis pipeline with immcantation framework." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/963?version=13" ; + schema1:keywords "airr, b-cell, immcantation, immunorepertoire, repseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bcellmagic" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/963?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Analyse Bulk RNA-Seq data in preparation for downstream Pathways analysis with MINERVA" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/689?version=1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for mRNA-Seq BY-COVID Pipeline: Analysis" ; + schema1:sdDatePublished "2024-07-12 13:25:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/689/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 31727 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2023-12-19T10:10:54Z" ; + schema1:dateModified "2024-01-24T09:43:21Z" ; + schema1:description "Analyse Bulk RNA-Seq data in preparation for downstream Pathways analysis with MINERVA" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "BY-COVID, covid-19" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "mRNA-Seq BY-COVID Pipeline: Analysis" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/689?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 945135 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """The workflows in this collection are from the '16S Microbial Analysis with mothur' tutorial for analysis of 16S data (Saskia Hiltemann, Bérénice Batut, Dave Clements), adapted for pipeline use on galaxy australia (Ahmed Mehdi). The workflows developed in galaxy use mothur software package developed by Schloss et al https://pubmed.ncbi.nlm.nih.gov/19801464/. 
\r +\r +Please also refer to the 16S tutorials available at Galaxy https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html and [https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop/tutorial.html\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/651?version=1" ; + schema1:isBasedOn "https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Workflow 5: OTU Clustering [16S Microbial Analysis With Mothur]" ; + schema1:sdDatePublished "2024-07-12 13:26:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/651/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17106 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2023-11-09T05:20:20Z" ; + schema1:dateModified "2023-11-09T05:20:20Z" ; + schema1:description """The workflows in this collection are from the '16S Microbial Analysis with mothur' tutorial for analysis of 16S data (Saskia Hiltemann, Bérénice Batut, Dave Clements), adapted for pipeline use on galaxy australia (Ahmed Mehdi). The workflows developed in galaxy use mothur software package developed by Schloss et al https://pubmed.ncbi.nlm.nih.gov/19801464/. 
\r +\r +Please also refer to the 16S tutorials available at Galaxy https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html and [https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop/tutorial.html\r +""" ; + schema1:isPartOf ; + schema1:keywords "Metagenomics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Workflow 5: OTU Clustering [16S Microbial Analysis With Mothur]" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/651?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). 
\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.258.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_protein_complex_md_setup/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:36:15 +0100" ; + schema1:url "https://workflowhub.eu/workflows/258/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 192643 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 36316 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-07T10:54:43Z" ; + schema1:dateModified "2023-06-07T11:04:49Z" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/258?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + 
schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_protein_complex_md_setup/cwl/workflow.cwl" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=19" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-07-12 13:18:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=19" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13451 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:40Z" ; + schema1:dateModified "2024-06-11T12:54:40Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=19" ; + schema1:version 19 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Example workflow which allows the use of Mothra\r +\r +Accepts (e.g.) [these](https://github.com/machine-shop/mothra-data/tree/main/test_images) input files, bundled as a collection.""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/413?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Mothra" ; + schema1:sdDatePublished "2024-07-12 13:34:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/413/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3782 ; + schema1:dateCreated "2022-12-14T16:03:30Z" ; + schema1:dateModified "2023-01-16T14:04:58Z" ; + schema1:description """Example workflow which allows the use of Mothra\r +\r +Accepts (e.g.) 
[these](https://github.com/machine-shop/mothra-data/tree/main/test_images) input files, bundled as a collection.""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Mothra" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/413?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """\r +Author: AMBARISH KUMAR er.ambarish@gmail.com; ambari73_sit@jnu.ac.in\r +\r +This is a proposed standard operating procedure for genomic variant detection using SAMTools.\r +\r +It is hoped to be effective and useful for getting SARS-CoV-2 genome variants.\r +\r +\r +\r +It uses Illumina RNASEQ reads and genome sequence.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/34?version=1" ; + schema1:isBasedOn "https://github.com/ambarishK/bio-cwl-tools/blob/release/samtoolsW.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genomic variants - SNPs and INDELs detection using SAMTools." ; + schema1:sdDatePublished "2024-07-12 13:37:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/34/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 54065 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2761 ; + schema1:dateCreated "2020-06-17T07:41:06Z" ; + schema1:dateModified "2023-01-16T13:42:36Z" ; + schema1:description """\r +Author: AMBARISH KUMAR er.ambarish@gmail.com; ambari73_sit@jnu.ac.in\r +\r +This is a proposed standard operating procedure for genomic variant detection using SAMTools.\r +\r +It is hoped to be effective and useful for getting SARS-CoV-2 genome variants.\r +\r +\r +\r +It uses Illumina RNASEQ reads and genome sequence.\r +""" ; + schema1:image ; + schema1:keywords "CWL, SAMTools, SNPs, INDELs, covid-19" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Genomic variants - SNPs and INDELs detection using SAMTools." ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/34?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + ; + ns1:output , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and modern ancient DNA pipeline in Nextflow and with cloud support." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-07-12 13:21:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3894 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:49Z" ; + schema1:dateModified "2024-06-11T12:54:49Z" ; + schema1:description "A fully reproducible and modern ancient DNA pipeline in Nextflow and with cloud support." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + schema1:datePublished "2022-10-21T10:14:20.880368" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/rnaseq-sr" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "rnaseq-sr/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.11" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/984?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/epitopeprediction" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/epitopeprediction" ; + schema1:sdDatePublished "2024-07-12 13:21:30 +0100" ; + schema1:url "https://workflowhub.eu/workflows/984/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7710 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:51Z" ; + schema1:dateModified "2024-06-11T12:54:51Z" ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/984?version=7" ; + schema1:keywords "epitope, epitope-prediction, mhc-binding-prediction" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/epitopeprediction" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/984?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=25" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-07-12 13:21:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=25" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13457 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:51Z" ; + schema1:dateModified "2024-06-11T12:54:51Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=25" ; + schema1:version 25 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/987?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/funcscan" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/funcscan" ; + schema1:sdDatePublished "2024-07-12 13:21:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/987/ro_crate?version=7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16624 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:54Z" ; + schema1:dateModified "2024-06-11T12:54:54Z" ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/987?version=8" ; + schema1:keywords "amp, AMR, antibiotic-resistance, antimicrobial-peptides, antimicrobial-resistance-genes, arg, Assembly, bgc, biosynthetic-gene-clusters, contigs, function, Metagenomics, natural-products, screening, secondary-metabolites" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/funcscan" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/987?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r 
+* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.262.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_abc_md_setup/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL ABC MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:35:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/262/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 230004 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 46603 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-12T08:27:43Z" ; + schema1:dateModified "2023-06-12T08:35:34Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 
[823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/262?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL ABC MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_abc_md_setup/cwl/workflow.cwl" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 49084 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Summary\r +\r +This tutorial aims to illustrate how to compute a fast-growth mutation free energy calculation, step by step, using the BioExcel Building Blocks library (biobb). The particular example used is the Staphylococcal nuclease protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +Workflow engine is a jupyter notebook. Auxiliary libraries used are nb\\_conda\\_kernels, os, and plotly. Environment setup can be carried out using the environment.yml in the code repository. The tutorial uses docker for running pmx - a local setup can be used instead, see notes in the tutorial.\r +\r +# Parameters\r +\r +## Inputs\r +\r +Workflow Input files needed:\r +\r +* **stateA_traj**: Equilibrium trajectory for the WT protein.\r +\r +* **stateB_traj**: Equilibrium trajectory for the Mutated protein.\r +\r +* **stateA_tpr**: WT protein topology (GROMACS tpr format).\r +\r +* **stateB_tpr**: Mutated protein topology (GROMACS tpr format).\r +\r +Auxiliar force field libraries needed:\r +\r +* **mutff45 (folder)**: pmx mutation force field libraries. \r +\r +\r +## Outputs\r +\r +* **pmx.outputs**: Final free energy estimation. Summary of information got applying the different methods.\r +\r +* **pmx.plots.png**: Final free energy plot of the Mutation free energy pipeline.\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.55.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_pmx_tutorial" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Mutation Free Energy Calculations using BioExcel Building Blocks (biobb) (jupyter notebook)" ; + schema1:sdDatePublished "2024-07-12 13:24:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/55/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 48053 ; + schema1:creator , + ; + schema1:dateCreated "2021-05-07T13:22:40Z" ; + schema1:dateModified "2021-05-13T08:15:28Z" ; + schema1:description """# Summary\r +\r +This tutorial aims to illustrate how to compute a fast-growth mutation free energy calculation, step by step, using the BioExcel Building Blocks library (biobb). The particular example used is the Staphylococcal nuclease protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +Workflow engine is a jupyter notebook. Auxiliary libraries used are nb\\_conda\\_kernels, os, and plotly. Environment setup can be carried out using the environment.yml in the code repository. The tutorial uses docker for running pmx - a local setup can be used instead, see notes in the tutorial.\r +\r +# Parameters\r +\r +## Inputs\r +\r +Workflow Input files needed:\r +\r +* **stateA_traj**: Equilibrium trajectory for the WT protein.\r +\r +* **stateB_traj**: Equilibrium trajectory for the Mutated protein.\r +\r +* **stateA_tpr**: WT protein topology (GROMACS tpr format).\r +\r +* **stateB_tpr**: Mutated protein topology (GROMACS tpr format).\r +\r +Auxiliar force field libraries needed:\r +\r +* **mutff45 (folder)**: pmx mutation force field libraries. \r +\r +\r +## Outputs\r +\r +* **pmx.outputs**: Final free energy estimation. 
Summary of information got applying the different methods.\r +\r +* **pmx.plots.png**: Final free energy plot of the Mutation free energy pipeline.\r +\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/55?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Mutation Free Energy Calculations using BioExcel Building Blocks (biobb) (jupyter notebook)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/55?version=2" ; + schema1:version 2 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 80077 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ChIP-seq peak-calling and differential analysis pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/971?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/chipseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/chipseq" ; + schema1:sdDatePublished "2024-07-12 13:22:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/971/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5685 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:44Z" ; + schema1:dateModified "2024-06-11T12:54:44Z" ; + schema1:description "ChIP-seq peak-calling and differential analysis pipeline." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/971?version=5" ; + schema1:keywords "ChIP, ChIP-seq, chromatin-immunoprecipitation, macs2, peak-calling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/chipseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/971?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 
2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.129.4" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (Fpocket)" ; + schema1:sdDatePublished "2024-07-12 13:33:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/129/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 43078 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-07-26T09:30:15Z" ; + schema1:dateModified "2023-07-26T09:33:11Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/129?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (Fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_wf_virtual-screening/blob/master/biobb_wf_virtual-screening/notebooks/fpocket/wf_vs_fpocket.ipynb" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# ESCALIBUR\r +\r +Escalibur Population Genomic Analysis Pipeline is able to explore key aspects centering the population genetics of organisms, and automates three key bioinformatic components in population genomic analysis using Workflow Definition Language (WDL: https://openwdl.org/), and customised R, Perl, Python and Unix shell scripts. Associated programs are packaged into a platform independent singularity image, for which the definition file is provided.\r +\r +The workflow for analysis using Escalibur consists of three steps - each step can be run in a separate workflow in a sequential manner; step 2 is optional.\r +\r + 1. Trimming and mapping the raw data - selection of the best reference genome;\r + 2. Removing the contamination from mapped data;\r + 3. Recalibration, variant calling and filtering;\r +\r +This implementation runs both locally and in a distributed environment that uses SLURM job scheduler.\r +\r +## Dependencies\r +Following software dependencies are required:\r +\r +* Git\r +* SLURM scheduler required for distributed HPC environment (https://slurm.schedmd.com/documentation.html)\r +* Python3.7: (https://www.python.org/)\r +* Perl 5.26.2: (https://www.perl.org/)\r +* Java 1.8\r +* Singularity 3.7.3: (https://sylabs.io/singularity/)\r +\r +## Step 1: Installation\r +\r +Typically, the installation of Singularity requires root rights. You should therefore contact your administrator to get it correctly installed. 
Minimum Linux kernel version requirement is 3.8, thought >= 3.18 would be preferred (https://sylabs.io/guides/3.5/admin-guide/installation.html).\r +\r +Clone the git repository to a directory on your cluster or stand-alone server.\r +```\r +> git clone --depth 1 -b v0.3-beta https://gitlab.unimelb.edu.au/bioscience/escalibur.git\r +> cd escalibur\r +```\r +\r +### Description of Files\r +* `workflow-main.local.config`: main configuration file for stand alone server runtime environment\r +* `workflow-main.slurm.config`: main configuration file for HPC runtime environment that support Slurm job scheduler\r +* `workflow-mapping.json`: defines location of input files, has behavioral settings and sets resource allocations\r +* `workflow-cleaning.json`: defines location of input files and sets resource allocations\r +* `workflow-variants.json`: defines location of input files, has behavioral settings and sets resource allocations\r +* `workflow-mapping.wdl`: main workflow file to trim and map PE reads into the genome\r +* `workflow-cleaning.wdl`: main workflow file to clean contamination from mapped PE reads against genomes representing putative contamination\r +* `workflow-variants.wdl`: main workflow file to call variants using mapped and cleaned reads\r +* `workflow-mapping.outputs.json`: defines location for resultant outputs and logs from mapping workflow\r +* `workflow-cleaning.outputs.json`: defines location for resultant outputs and logs from cleaning workflow\r +* `workflow-variants.outputs.json`: defines location for resultant outputs and logs from variants workflow\r +* `inputReads.txt`: example input file for fastq read files to mapping step\r +* `cleanup.conf`: example configuration file for putative host contamination to cleaning step\r +* `inputBams.txt`: example input file for resultant BAM files to variant calling step\r +* `references.txt`: contains list of example references genomes\r +* `perl_scripts`: contains Perl scripts used by the pipeline\r +* 
`scripts`: contains Python scripts used by the pipeline\r +* `R_scripts`: contains R scripts used by the pipeline\r +* `sub_workflows`: sub-workflows, one for each of the workflow steps\r +* `tasks`: workflow tasks\r +* `cromwell-50.jar`: java archive file required to run the workflow.\r +\r +Two config files have been created. One for stand alone server (`workflow-runtime.local.config`) and another one for HPC environment that supports Slurm scheduler (`workflow-runtime.slurm.config`).\r +These files have already been optimised. For slurm configuration you only need to define the HPC partition in line 35: "String rt_queue"\r +Change this to the partition you have access to on HPC environment.\r +\r +Files `workflow-mapping.outputs.json`, `workflow-cleaning.outputs.json` and `workflow-variants.outputs.json` define the directories to copy the result files to. Modify if you want to change default output directories `outputMapping`, `outputCleaning` and `outputVariants`. These output directories are generated to the directory `escalibur`.\r +#### NOTE: delete output directories from previous runs. If you have files there already and a name matches during the copy, the workflow may fail.\r +\r +`Singularity` directory contains the definition file for the software used in Escalibur. 
Pre-built singularity image can be downloaded from `library://pakorhon/workflows/escalibur:0.0.1-beta`.\r +```\r +> singularity pull escalibur.sif library://pakorhon/workflows/escalibur:0.0.1-beta\r +```\r +\r +## Step 2: Test run\r +\r +To confirm correct function of the workflows (`mapping`, `cleaning` and `variant calling`), fix the required absolute paths, marked by three dots `...` in `workflow-mapping.json`, `workflow-cleaning.json` and `workflow-variants.json` and configuration files `cleanup.conf` and `inputBams.txt`, and run the workflow with the provided test and configuration files, and parameter settings.\r +```\r +> java -Dconfig.file=./workflow-runtime.local.config -jar ./cromwell-50.jar run workflow-mapping.wdl -i workflow-mapping.json -o workflow-mapping.outputs.json > out.mapping 2> err.mapping\r +> java -Dconfig.file=./workflow-runtime.local.config -jar ./cromwell-50.jar run workflow-cleaning.wdl -i workflow-cleaning.json -o workflow-cleaning.outputs.json > out.cleaning 2> err.cleaning\r +> java -Dconfig.file=./workflow-runtime.local.config -jar ./cromwell-50.jar run workflow-variants.wdl -i workflow-variants.json -o workflow-variants.outputs.json > out.variants 2> err.variants\r +```\r +Slurm file templates `runMapping.slurm`, `runCleaning.slurm` and `runVariants.slurm` are available for each workflow.\r +#### NOTE: default parameter settings for run-times, memory usage and module loading may require adjustment in these files if run in HPC environment using slurm. Current settings should account for the test run.\r +\r +After the runs are complete, the results will be at the output directories: `outputMapping`, `outputCleaning` and `outputVariants`.\r +You can compare the result of `outputVariants/full_genotype_output.vcf` to that or pre-run `TestResults/full_genotype_output.vcf`.\r +\r +## Step 3: Mapping\r +\r +Make a directory for your fastq files e.g. 
`Reads` and copy your paired end raw data in there.\r +```\r +> mkdir Reads\r +```\r +\r +It should look something like below\r +```\r +> ls TestReads/\r +1-1_r1.fastq.gz 32-1_r1.fastq.gz 44-1_r1.fastq.gz\r +1-1_r2.fastq.gz 32-1_r2.fastq.gz 44-1_r2.fastq.gz\r +```\r +Run the python script to create a file of your input samples and edit the resulting file to match your sample identifiers and libraries.\r +```\r +> python3 scripts/inputArgMaker.py -d Reads/ -p -ps 33 -pq 20 -pl ILLUMINA -ml 50 -o inputReads.txt \r +```\r +\r +The edited output file is shown below. The script will automatically sort the files by size.\r +```\r +> cat inputReads.txt\r +# Prefix PE/SE MinLen PhredS Sequencer PhredQ Library Read Group ID Sample Platform Unit First pair of PE reads Second pair of PE reads\r +test1 PE 50 33 ILLUMINA 28 LIB1 CL100082180L1 SM1 CL100082180L1 ./TestReads/1-1_r1.fastq.gz ./TestReads/1-1_r2.fastq.gz\r +test2 PE 50 33 ILLUMINA 20 LIB2 CL100082180L1 SM2 CL100082180L1 ./TestReads/44-1_r1.fastq.gz ./TestReads/44-1_r2.fastq.gz\r +test3 PE 50 33 ILLUMINA 20 LIB3 CL100034574L1 SM2 CL100034574L1 ./TestReads/32-1_r1.fastq.gz ./TestReads/32-1_r2.fastq.gz\r +```\r +#### NOTE: If several libraries are embedded in a single read file, library-specific reads have to be separated into own files before create the inputReads.txt file. In contrast, inputReads.txt file format can accommodate multiple library files to a single sample.\r +\r +* `Prefix`: Prefix for the resultant files from trimming.\r +* `PE/SE`: Paired-End/Single-End reads as input.\r +* `MinLen`: Minimum Length of reads after trimming.\r +* `PhredS`: Used Phred coding by the sequencer (33 or 64).\r +* `Sequencer`: Name of the sequencer.\r +* `PhredQ`: Phred cut-off score used in trimming.\r +* `Library`: Identifier for the library.\r +* `Read Group ID`: Identifier for the read groups required by GATK (inputArgMaker tries to find this from FASTQ reads). 
Refer to (https://gatk.broadinstitute.org/hc/en-us/articles/360035890671-Read-groups).\r +* `Sample`: Identifier for the sample. Defined prefix for resultant sample specific files.\r +* `Platform Unit (optional)`: Information about flow cell, lane and sample. Helps GATK in recalibration (inputArgMaker copies Read Group ID here). Refer to (https://gatk.broadinstitute.org/hc/en-us/articles/360035890671-Read-groups).\r +* `First pair of PE reads`: Relative path to the forward pair of PE reads.\r +* `Second pair of PE reads`: Relative path to the reverse pair of PE reads.\r +\r +Create a file listing reference genomes and configure `workflow-mapping.json` file.\r +An example reference file (`references.txt`) has been created for you. Use this as an example to create your own.\r +Ensure there are no whitespaces at the end of the line or else the cromwell engine will throw an error.\r +Reads are mapped to these reference files and the best matching reference will be selected for variant calling.\r +```\r +> cat references.txt\r +scf00001 ./TestReferences/scf00001.fa\r +scf00013 ./TestReferences/scf00013.fa\r +```\r +#### NOTE: Reference label (e.g. `scf00001`) must be a substring found in the reference fasta file (`scf00001.fa`)\r +\r +The figure below illustrates the flow of the information, and appearance of labels (`Prefix`, `Sample`, `Label`) in file names, as defined in `inputReads.txt` and `references.txt`.\r +![](figures/labelFlow.png)\r +\r +### workflow-mapping.json config file\r +Add the path of your fastq and reference genome input files and change parameters as appropriate, and adjust the absolute paths for singularity image. If `mapping_workflow.readQc` is set to `yes`, reads are trimmed both for quality and the adapters. Adapters to trim are given in `mapping_workflow.pe_filtering_workflow.trimmomatic_pe_task.truseq_pe_adapter`. 
If you want to use custom adapters, copy them to `adapters` directory and instead of default `TruSeq3-PE.fa`, refer to your custom file. If you don't want to use adapters, use `empty.fa` file instead. For BGISEQ adapters, refer to (https://en.mgitech.cn/Download/download_file/id/71).\r +```\r +{\r + "## CONFIG FILE": "WDL",\r + "mapping_workflow.inputSampleFile": "./inputReads.txt",\r + "mapping_workflow.inputReferenceFile": "./references.txt",\r +\r + "## Parameters for samtools read filtering": "-F 4 does filters unmapped reads from resultant files",\r + "mapping_workflow.samtoolsParameters": "-F 4",\r + \r + "## Is read QC required": "yes or no",\r + "mapping_workflow.readQc": "yes",\r + "## What is the ploidy of given genome": "1 for haploid, 2 for diploid, etc.",\r + "mapping_workflow.ploidy": 2,\r + \r + "## Singularity parameters": "absolute paths to the container and the directory to bind visible inside singularity",\r + "mapping_workflow.singularityContainerPath": "/home/.../escalibur/escalibur.sif",\r + "mapping_workflow.singularityBindPath": "/home/.../escalibur/",\r +\r + "## trimmomatic adapters": "",\r + "mapping_workflow.pe_filtering_workflow.trimmomatic_pe_task.truseq_pe_adapter":"./adapters/TruSeq3-PE.fa",\r + "mapping_workflow.pe_filtering_workflow.trimmomatic_se_task.truseq_se_adapter":"./adapters/TruSeq3-SE.fa",\r + \r + "## Indexing sub workflow task parameters": "Samtools index run time parameters",\r + "mapping_workflow.index_sub_workflow.indexing_sam_task.IST_minutes": 300,\r + "mapping_workflow.index_sub_workflow.indexing_sam_task.IST_threads": 16,\r + "mapping_workflow.index_sub_workflow.indexing_sam_task.IST_mem": 30000,\r + .\r + .\r + .\r +}\r +```\r +\r +Run the mapping workflow.\r +```\r +> java -Dconfig.file=./workflow-runtime.local.config -jar ./cromwell-50.jar run workflow-mapping.wdl -i workflow-mapping.json -o workflow-mapping.outputs.json > out.mapping 2> err.mapping\r +```\r +The resultant BAM files will be copied to 
`outputMapping` directory.\r +\r +## Step 4 (optional): Cleaning\r +\r +If you suspect 'host' contamination in your data, you can remove that using the cleaning workflow.\r +Define the file representing the contamination. First column defines the sample identifier, second the resultant BAM file from mapping workflow and third the putative contaminant genome assembly.\r +```\r +> cat cleanup.conf\r +SM1 /home/.../escalibur/outputMapping/SM1.scf00001.MarkDup.bam /home/.../escalibur/Hosts/host1.fa\r +SM2 /home/.../escalibur/outputMapping/SM2.scf00001.MarkDup.bam /home/.../escalibur/Hosts/host1.fa\r +```\r +#### NOTE: you have to use absolute paths both to BAM files and the contaminant reference genome (here `host1.fa` and `host2.fa`).\r +\r +### workflow-cleaning.json config file\r +Add the path of your cleaning config file (here `cleanup.conf`) and adjust the absolute paths for singularity image.\r +```\r +{\r + "## CONFIG FILE": "WDL",\r + "cleaning_workflow.inputContaminantFile": "./cleanup.conf",\r + \r + "## Singularity parameters": "absolute paths to the container and the directory to bind visible inside singularity",\r + "cleaning_workflow.singularityContainerPath": "/home/.../escalibur/escalibur.sif",\r + "cleaning_workflow.singularityBindPath": "/home/.../escalibur/",\r +\r + "cleaning_workflow.indexing_bwa_task.IBT_minutes": 60,\r + "cleaning_workflow.indexing_bwa_task.IBT_threads": 1,\r + "cleaning_workflow.indexing_bwa_task.IBT_mem": 16000,\r +\r + "######################################":"########################################",\r + "CLEANING":"PARAMETERS",\r + "######################################":"########################################",\r + "cleaning_workflow.clean_bams_workflow.cleanBams_task.CLEAN_BAMS_minutes": 600,\r + "cleaning_workflow.clean_bams_workflow.cleanBams_task.CLEAN_BAMS_threads": 4,\r + "cleaning_workflow.clean_bams_workflow.cleanBams_task.CLEAN_BAMS_mem": 32000,\r +\r + 
"cleaning_workflow.create_cleaned_bams_workflow.createCleanedBams_task.CREATE_CLEAN_BAMS_minutes": 300,\r + "cleaning_workflow.create_cleaned_bams_workflow.createCleanedBams_task.CREATE_CLEAN_BAMS_threads": 4,\r + "cleaning_workflow.create_cleaned_bams_workflow.createCleanedBams_task.CREATE_CLEAN_BAMS_mem": 32000,\r +\r + "cleaning_workflow.refsBySample.RBS_minutes": 5,\r + "cleaning_workflow.refsBySample.RBS_threads": 1,\r + "cleaning_workflow.refsBySample.RBS_mem": 4000\r +}\r +```\r +\r +Run the cleaning workflow.\r +```\r +> java -Dconfig.file=./workflow-runtime.local.config -jar ./cromwell-50.jar run workflow-cleaning.wdl -i workflow-cleaning.json -o workflow-cleaning.outputs.json > out.cleaning 2> err.cleaning\r +```\r +The resultant cleaned BAM files will be copied to `outputCleaning` directory. You can repeat the workflow if you suspect that there may be more than one contaminant genomes per each sample. In that case you have to take care of the properly configured `cleanup.conf` file that should describe the BAM files from previous cleaning round but also define new output directory for each round in `workflow-cleaning.outputs.json` file.\r +\r +## Step 5: Variant calling\r +\r +Define the file listing the BAM files used for variant calling. First column defines the sample identifier, and second the resultant BAM file either from mapping of cleaning workflow.\r +```\r +> cat inputBams.txt\r +SM1 /home/.../escalibur/outputMapping/SM1.scf00001.MarkDup.bam\r +SM2 /home/.../escalibur/outputCleaned/SM2.scf00001.MarkDup.cleaned.bam\r +```\r +\r +### workflow-variants.json config file\r +Add the path of your file listing the locations of BAM files (here `inputBams.txt`), and add the location to selected reference genome (found in `outputMapping/best.ref`) and it's label, as defined in `references.txt` file. 
Adjust the absolute paths for singularity image and adjust other parameters, especially define if you want to recalibrate the BAM files by selecting value "independent" to "variants_workflow.call_type".\r +```\r +{\r + "## CONFIG FILE": "WDL",\r + "variants_workflow.inputSampleFile": "./inputBams.txt",\r + "variants_workflow.selectedRefFile": "TestReferences/scf00001.fa",\r + "variants_workflow.selectedRefLabel": "scf00001",\r + \r + "## Singularity parameters": "absolute paths to the container and the directory to bind visible inside singularity",\r + "variants_workflow.singularityContainerPath": "/home/.../escalibur/escalibur.sif",\r + "variants_workflow.singularityBindPath": "/home/.../escalibur/",\r +\r + "## Which variant call workflow to use": "fast or independent",\r + "variants_workflow.call_type": "fast",\r + \r + "## Variant filtering expressions": "For SNPs and INDELs",\r + "variants_workflow.SNP_filt_exp": "QD < 2.0 || FS > 60.0 || MQ < 40.0 || MQRankSum < -12.5 || ReadPosRankSum < -8.0",\r + "variants_workflow.INDEL_filt_exp": "QD < 2.0 || FS > 200.0 || ReadPosRankSum < -20.0",\r +\r + "## Variant Filter params": "Variant filter, indel, snps, report making: Safe to leave as default",\r + "variants_workflow.ploidy": 2,\r + "variants_workflow.maxIndelSize": 60,\r + "variants_workflow.scafNumLim": 95,\r + "variants_workflow.scafNumCo": 2,\r + "variants_workflow.scafLenCutOff": 0,\r + "variants_workflow.ldWinSize": 10,\r + "variants_workflow.ldWinStep": 5,\r + "variants_workflow.ldCutOff": 0.3,\r + "variants_workflow.snp_indel_var_filtering_workflow.indelFilterName": "Indel_filter",\r + "variants_workflow.snp_indel_var_filtering_workflow.indelFilterExpression": "QD < 2.0 || FS > 200.0 || ReadPosRankSum < -20.0",\r + "variants_workflow.snp_indel_var_filtering_workflow.snpFilterName": "Snp_filter",\r + "variants_workflow.snp_indel_var_filtering_workflow.snpFilterExpression": "QD < 2.0 || FS > 60.0 || MQ < 40.0 || MQRankSum < -12.5 || ReadPosRankSum < 
-8.0",\r + "variants_workflow.snp_indel_var_filtering_workflow.vfindel_tk.selectType": "",\r + "variants_workflow.snp_indel_var_filtering_workflow.vfsnp_tk.selectType": "",\r +\r + "## Build chromosome map":"map_def_scf_lim_task",\r + "variants_workflow.snp_indel_var_filtering_workflow.map_def_scf_lim_task.scafLenCutOff": 1000000,\r + "variants_workflow.snp_indel_var_filtering_workflow.map_def_scf_lim_task.scafNumCo": 3,\r +\r + "## Indexing sub workflow task parameters": "Samtools index run time parameters",\r + "variants_workflow.ref_index.IST_minutes": 300,\r + "variants_workflow.ref_index.IST_threads": 2,\r + "variants_workflow.ref_index.IST_mem": 8000,\r + .\r + .\r + .\r +}\r +```\r +\r +Run the variant calling workflow.\r +```\r +> java -Dconfig.file=./workflow-runtime.local.config -jar ./cromwell-50.jar run workflow-variants.wdl -i workflow-variants.json -o workflow-variants.outputs.json > out.variants 2> err.variants\r +```\r +The resultant files will be copied to `outputVariants` directory. That includes filtered variants calls (`full_genotype_output.vcf`) and recalibrated BAM files (if independent call_type is selected).\r +\r +## Other considerations\r +\r +### Resource allocation in HPC environment\r +Wall time, memory usage and thread count (`_minutes`, `_mem`, `_threads`) given in `.json` files for each workflow can vary substantially and may require adjusting in HPC environment and slurm. This may lead to frequent restarting of the workflow after each adjustment. We have automated this task by providing scripts that automatically check the failed resource allocations and double them for each round. 
These scripts are located in `Automation` directory and can be run as follows:\r +```\r +> cd Automation\r +> sh init.sh # Copies the content of ../tasks directory to tasksOrig directory\r +> sbatch runMapping.slurm # Runs runLoopMapping.sh in a worker node\r +> sbatch runCleaning.slurm # Runs runLoopCleaning.sh in a worker node\r +> sbatch runVariants.slurm # Runs runLoopVariants.sh in a worker node\r +```\r +Scripts `runLoop*.sh` copy resource allocations from collective `runtimes.json` file to the files in `../tasks` directory, run the workflow and double the failed resource allocations in `../tasks` files, and reruns the workflow until it succeeds or until ten rounds have passed. Copying of resource allocations directly to the files in `../tasks` directory is necessary to guarantee proper function of call-caching.\r +#### NOTE: automated resource allocation adjustment is experimental, should be monitored when running and may require modifications to scripts to function properly.\r +\r +### Disk usage\r +Cromwell will create duplicate copies of files while running the workflows. It is therefore recommended to remove `cromwell-executions` directory after each workflow is run, if disk space is getting sparse.\r +```\r +> rm -r cromwell-executions\r +```\r +Especially, if there are hundreds of samples that may sum up to terabytes of data, disk space might become an issue if unused files are not removed.\r +\r +### Troubleshooting\r +If the output text does not reveal the error, you can try to find an error message using command(s):\r +```\r +> find cromwell-executions/ -name stderr -exec cat {} \\; | grep -i fatal\r +> find cromwell-executions/ -name stderr -exec cat {} \\; | less\r +```\r +\r +Most commonly encountered error cases:\r +\r +* Singularity is not running correctly. 
Typically you require help from your administrator to get singularity properly installed.\r +* Singularity image `escalibur.sif` was not downloaded\r +* Check that you are using correct runtime configuration file `workflow-runtime.local.config` or `workflow-runtime.slurm.config` when calling `cromwell-50.jar`\r +* Absolute file paths for Singularity/Trimmomatic, input files or contaminant genomes are not updated or are wrong in `workflow-*.json`, `inputBams.txt` or `cleanup.conf` configuration files, respectively.\r +* Defined run-time and memory requirements for some tasks are not sufficient in `.json` configuration files to run the pipeline in HPC environment.\r +* If you are using slurm job scheduler and want to run the pipeline in HPC environment, you have to create the related configuration file yourselves.\r +* Pipeline has not been tested in environments other than Linux and we expect that users encounter challenges if trying to run the pipeline e.g. in Mac environment.\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/335?version=1" ; + schema1:isBasedOn "https://gitlab.unimelb.edu.au/bioscience/escalibur.git" ; + schema1:license "BSD-3-Clause" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Escalibur" ; + schema1:sdDatePublished "2024-07-12 13:35:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/335/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8987 ; + schema1:dateCreated "2022-04-20T23:21:34Z" ; + schema1:dateModified "2023-01-16T13:59:45Z" ; + schema1:description """# ESCALIBUR\r +\r +Escalibur Population Genomic Analysis Pipeline is able to explore key aspects centering the population genetics of organisms, and automates three key bioinformatic components in population genomic analysis using Workflow Definition Language (WDL: https://openwdl.org/), and customised R, Perl, Python and Unix shell scripts. Associated programs are packaged into a platform independent singularity image, for which the definition file is provided.\r +\r +The workflow for analysis using Escalibur consists of three steps - each step can be run in a separate workflow in a sequential manner; step 2 is optional.\r +\r + 1. Trimming and mapping the raw data - selection of the best reference genome;\r + 2. Removing the contamination from mapped data;\r + 3. Recalibration, variant calling and filtering;\r +\r +This implementation runs both locally and in a distributed environment that uses SLURM job scheduler.\r +\r +## Dependencies\r +Following software dependencies are required:\r +\r +* Git\r +* SLURM scheduler required for distributed HPC environment (https://slurm.schedmd.com/documentation.html)\r +* Python3.7: (https://www.python.org/)\r +* Perl 5.26.2: (https://www.perl.org/)\r +* Java 1.8\r +* Singularity 3.7.3: (https://sylabs.io/singularity/)\r +\r +## Step 1: Installation\r +\r +Typically, the installation of Singularity requires root rights. You should therefore contact your administrator to get it correctly installed. 
Minimum Linux kernel version requirement is 3.8, though >= 3.18 would be preferred (https://sylabs.io/guides/3.5/admin-guide/installation.html).\r +\r +Clone the git repository to a directory on your cluster or stand-alone server.\r +```\r +> git clone --depth 1 -b v0.3-beta https://gitlab.unimelb.edu.au/bioscience/escalibur.git\r +> cd escalibur\r +```\r +\r +### Description of Files\r +* `workflow-main.local.config`: main configuration file for stand alone server runtime environment\r +* `workflow-main.slurm.config`: main configuration file for HPC runtime environment that support Slurm job scheduler\r +* `workflow-mapping.json`: defines location of input files, has behavioral settings and sets resource allocations\r +* `workflow-cleaning.json`: defines location of input files and sets resource allocations\r +* `workflow-variants.json`: defines location of input files, has behavioral settings and sets resource allocations\r +* `workflow-mapping.wdl`: main workflow file to trim and map PE reads into the genome\r +* `workflow-cleaning.wdl`: main workflow file to clean contamination from mapped PE reads against genomes representing putative contamination\r +* `workflow-variants.wdl`: main workflow file to call variants using mapped and cleaned reads\r +* `workflow-mapping.outputs.json`: defines location for resultant outputs and logs from mapping workflow\r +* `workflow-cleaning.outputs.json`: defines location for resultant outputs and logs from cleaning workflow\r +* `workflow-variants.outputs.json`: defines location for resultant outputs and logs from variants workflow\r +* `inputReads.txt`: example input file for fastq read files to mapping step\r +* `cleanup.conf`: example configuration file for putative host contamination to cleaning step\r +* `inputBams.txt`: example input file for resultant BAM files to variant calling step\r +* `references.txt`: contains list of example references genomes\r +* `perl_scripts`: contains Perl scripts used by the pipeline\r +* 
`scripts`: contains Python scripts used by the pipeline\r +* `R_scripts`: contains R scripts used by the pipeline\r +* `sub_workflows`: sub-workflows, one for each of the workflow steps\r +* `tasks`: workflow tasks\r +* `cromwell-50.jar`: java archive file required to run the workflow.\r +\r +Two config files have been created. One for stand alone server (`workflow-runtime.local.config`) and another one for HPC environment that supports Slurm scheduler (`workflow-runtime.slurm.config`).\r +These files have already been optimised. For slurm configuration you only need to define the HPC partition in line 35: "String rt_queue"\r +Change this to the partition you have access to on HPC environment.\r +\r +Files `workflow-mapping.outputs.json`, `workflow-cleaning.outputs.json` and `workflow-variants.outputs.json` define the directories to copy the result files to. Modify if you want to change default output directories `outputMapping`, `outputCleaning` and `outputVariants`. These output directories are generated to the directory `escalibur`.\r +#### NOTE: delete output directories from previous runs. If you have files there already and a name matches during the copy, the workflow may fail.\r +\r +`Singularity` directory contains the definition file for the software used in Escalibur. 
Pre-built singularity image can be downloaded from `library://pakorhon/workflows/escalibur:0.0.1-beta`.\r +```\r +> singularity pull escalibur.sif library://pakorhon/workflows/escalibur:0.0.1-beta\r +```\r +\r +## Step 2: Test run\r +\r +To confirm correct function of the workflows (`mapping`, `cleaning` and `variant calling`), fix the required absolute paths, marked by three dots `...` in `workflow-mapping.json`, `workflow-cleaning.json` and `workflow-variants.json` and configuration files `cleanup.conf` and `inputBams.txt`, and run the workflow with the provided test and configuration files, and parameter settings.\r +```\r +> java -Dconfig.file=./workflow-runtime.local.config -jar ./cromwell-50.jar run workflow-mapping.wdl -i workflow-mapping.json -o workflow-mapping.outputs.json > out.mapping 2> err.mapping\r +> java -Dconfig.file=./workflow-runtime.local.config -jar ./cromwell-50.jar run workflow-cleaning.wdl -i workflow-cleaning.json -o workflow-cleaning.outputs.json > out.cleaning 2> err.cleaning\r +> java -Dconfig.file=./workflow-runtime.local.config -jar ./cromwell-50.jar run workflow-variants.wdl -i workflow-variants.json -o workflow-variants.outputs.json > out.variants 2> err.variants\r +```\r +Slurm file templates `runMapping.slurm`, `runCleaning.slurm` and `runVariants.slurm` are available for each workflow.\r +#### NOTE: default parameter settings for run-times, memory usage and module loading may require adjustment in these files if run in HPC environment using slurm. Current settings should account for the test run.\r +\r +After the runs are complete, the results will be at the output directories: `outputMapping`, `outputCleaning` and `outputVariants`.\r +You can compare the result of `outputVariants/full_genotype_output.vcf` to that or pre-run `TestResults/full_genotype_output.vcf`.\r +\r +## Step 3: Mapping\r +\r +Make a directory for your fastq files e.g. 
`Reads` and copy your paired end raw data in there.\r +```\r +> mkdir Reads\r +```\r +\r +It should look something like below\r +```\r +> ls TestReads/\r +1-1_r1.fastq.gz 32-1_r1.fastq.gz 44-1_r1.fastq.gz\r +1-1_r2.fastq.gz 32-1_r2.fastq.gz 44-1_r2.fastq.gz\r +```\r +Run the python script to create a file of your input samples and edit the resulting file to match your sample identifiers and libraries.\r +```\r +> python3 scripts/inputArgMaker.py -d Reads/ -p -ps 33 -pq 20 -pl ILLUMINA -ml 50 -o inputReads.txt \r +```\r +\r +The edited output file is shown below. The script will automatically sort the files by size.\r +```\r +> cat inputReads.txt\r +# Prefix PE/SE MinLen PhredS Sequencer PhredQ Library Read Group ID Sample Platform Unit First pair of PE reads Second pair of PE reads\r +test1 PE 50 33 ILLUMINA 28 LIB1 CL100082180L1 SM1 CL100082180L1 ./TestReads/1-1_r1.fastq.gz ./TestReads/1-1_r2.fastq.gz\r +test2 PE 50 33 ILLUMINA 20 LIB2 CL100082180L1 SM2 CL100082180L1 ./TestReads/44-1_r1.fastq.gz ./TestReads/44-1_r2.fastq.gz\r +test3 PE 50 33 ILLUMINA 20 LIB3 CL100034574L1 SM2 CL100034574L1 ./TestReads/32-1_r1.fastq.gz ./TestReads/32-1_r2.fastq.gz\r +```\r +#### NOTE: If several libraries are embedded in a single read file, library-specific reads have to be separated into own files before create the inputReads.txt file. In contrast, inputReads.txt file format can accommodate multiple library files to a single sample.\r +\r +* `Prefix`: Prefix for the resultant files from trimming.\r +* `PE/SE`: Paired-End/Single-End reads as input.\r +* `MinLen`: Minimum Length of reads after trimming.\r +* `PhredS`: Used Phred coding by the sequencer (33 or 64).\r +* `Sequencer`: Name of the sequencer.\r +* `PhredQ`: Phred cut-off score used in trimming.\r +* `Library`: Identifier for the library.\r +* `Read Group ID`: Identifier for the read groups required by GATK (inputArgMaker tries to find this from FASTQ reads). 
Refer to (https://gatk.broadinstitute.org/hc/en-us/articles/360035890671-Read-groups).\r +* `Sample`: Identifier for the sample. Defined prefix for resultant sample specific files.\r +* `Platform Unit (optional)`: Information about flow cell, lane and sample. Helps GATK in recalibration (inputArgMaker copies Read Group ID here). Refer to (https://gatk.broadinstitute.org/hc/en-us/articles/360035890671-Read-groups).\r +* `First pair of PE reads`: Relative path to the forward pair of PE reads.\r +* `Second pair of PE reads`: Relative path to the reverse pair of PE reads.\r +\r +Create a file listing reference genomes and configure `workflow-mapping.json` file.\r +An example reference file (`references.txt`) has been created for you. Use this as an example to create your own.\r +Ensure there are no whitespaces at the end of the line or else the cromwell engine will throw an error.\r +Reads are mapped to these reference files and the best matching reference will be selected for variant calling.\r +```\r +> cat references.txt\r +scf00001 ./TestReferences/scf00001.fa\r +scf00013 ./TestReferences/scf00013.fa\r +```\r +#### NOTE: Reference label (e.g. `scf00001`) must be a substring found in the reference fasta file (`scf00001.fa`)\r +\r +The figure below illustrates the flow of the information, and appearance of labels (`Prefix`, `Sample`, `Label`) in file names, as defined in `inputReads.txt` and `references.txt`.\r +![](figures/labelFlow.png)\r +\r +### workflow-mapping.json config file\r +Add the path of your fastq and reference genome input files and change parameters as appropriate, and adjust the absolute paths for singularity image. If `mapping_workflow.readQc` is set to `yes`, reads are trimmed both for quality and the adapters. Adapters to trim are given in `mapping_workflow.pe_filtering_workflow.trimmomatic_pe_task.truseq_pe_adapter`. 
If you want to use custom adapters, copy them to `adapters` directory and instead of default `TruSeq3-PE.fa`, refer to your custom file. If you don't want to use adapters, use `empty.fa` file instead. For BGISEQ adapters, refer to (https://en.mgitech.cn/Download/download_file/id/71).\r +```\r +{\r + "## CONFIG FILE": "WDL",\r + "mapping_workflow.inputSampleFile": "./inputReads.txt",\r + "mapping_workflow.inputReferenceFile": "./references.txt",\r +\r + "## Parameters for samtools read filtering": "-F 4 does filters unmapped reads from resultant files",\r + "mapping_workflow.samtoolsParameters": "-F 4",\r + \r + "## Is read QC required": "yes or no",\r + "mapping_workflow.readQc": "yes",\r + "## What is the ploidy of given genome": "1 for haploid, 2 for diploid, etc.",\r + "mapping_workflow.ploidy": 2,\r + \r + "## Singularity parameters": "absolute paths to the container and the directory to bind visible inside singularity",\r + "mapping_workflow.singularityContainerPath": "/home/.../escalibur/escalibur.sif",\r + "mapping_workflow.singularityBindPath": "/home/.../escalibur/",\r +\r + "## trimmomatic adapters": "",\r + "mapping_workflow.pe_filtering_workflow.trimmomatic_pe_task.truseq_pe_adapter":"./adapters/TruSeq3-PE.fa",\r + "mapping_workflow.pe_filtering_workflow.trimmomatic_se_task.truseq_se_adapter":"./adapters/TruSeq3-SE.fa",\r + \r + "## Indexing sub workflow task parameters": "Samtools index run time parameters",\r + "mapping_workflow.index_sub_workflow.indexing_sam_task.IST_minutes": 300,\r + "mapping_workflow.index_sub_workflow.indexing_sam_task.IST_threads": 16,\r + "mapping_workflow.index_sub_workflow.indexing_sam_task.IST_mem": 30000,\r + .\r + .\r + .\r +}\r +```\r +\r +Run the mapping workflow.\r +```\r +> java -Dconfig.file=./workflow-runtime.local.config -jar ./cromwell-50.jar run workflow-mapping.wdl -i workflow-mapping.json -o workflow-mapping.outputs.json > out.mapping 2> err.mapping\r +```\r +The resultant BAM files will be copied to 
`outputMapping` directory.\r +\r +## Step 4 (optional): Cleaning\r +\r +If you suspect 'host' contamination in your data, you can remove that using the cleaning workflow.\r +Define the file representing the contamination. First column defines the sample identifier, second the resultant BAM file from mapping workflow and third the putative contaminant genome assembly.\r +```\r +> cat cleanup.conf\r +SM1 /home/.../escalibur/outputMapping/SM1.scf00001.MarkDup.bam /home/.../escalibur/Hosts/host1.fa\r +SM2 /home/.../escalibur/outputMapping/SM2.scf00001.MarkDup.bam /home/.../escalibur/Hosts/host1.fa\r +```\r +#### NOTE: you have to use absolute paths both to BAM files and the contaminant reference genome (here `host1.fa` and `host2.fa`).\r +\r +### workflow-cleaning.json config file\r +Add the path of your cleaning config file (here `cleanup.conf`) and adjust the absolute paths for singularity image.\r +```\r +{\r + "## CONFIG FILE": "WDL",\r + "cleaning_workflow.inputContaminantFile": "./cleanup.conf",\r + \r + "## Singularity parameters": "absolute paths to the container and the directory to bind visible inside singularity",\r + "cleaning_workflow.singularityContainerPath": "/home/.../escalibur/escalibur.sif",\r + "cleaning_workflow.singularityBindPath": "/home/.../escalibur/",\r +\r + "cleaning_workflow.indexing_bwa_task.IBT_minutes": 60,\r + "cleaning_workflow.indexing_bwa_task.IBT_threads": 1,\r + "cleaning_workflow.indexing_bwa_task.IBT_mem": 16000,\r +\r + "######################################":"########################################",\r + "CLEANING":"PARAMETERS",\r + "######################################":"########################################",\r + "cleaning_workflow.clean_bams_workflow.cleanBams_task.CLEAN_BAMS_minutes": 600,\r + "cleaning_workflow.clean_bams_workflow.cleanBams_task.CLEAN_BAMS_threads": 4,\r + "cleaning_workflow.clean_bams_workflow.cleanBams_task.CLEAN_BAMS_mem": 32000,\r +\r + 
"cleaning_workflow.create_cleaned_bams_workflow.createCleanedBams_task.CREATE_CLEAN_BAMS_minutes": 300,\r + "cleaning_workflow.create_cleaned_bams_workflow.createCleanedBams_task.CREATE_CLEAN_BAMS_threads": 4,\r + "cleaning_workflow.create_cleaned_bams_workflow.createCleanedBams_task.CREATE_CLEAN_BAMS_mem": 32000,\r +\r + "cleaning_workflow.refsBySample.RBS_minutes": 5,\r + "cleaning_workflow.refsBySample.RBS_threads": 1,\r + "cleaning_workflow.refsBySample.RBS_mem": 4000\r +}\r +```\r +\r +Run the cleaning workflow.\r +```\r +> java -Dconfig.file=./workflow-runtime.local.config -jar ./cromwell-50.jar run workflow-cleaning.wdl -i workflow-cleaning.json -o workflow-cleaning.outputs.json > out.cleaning 2> err.cleaning\r +```\r +The resultant cleaned BAM files will be copied to `outputCleaning` directory. You can repeat the workflow if you suspect that there may be more than one contaminant genomes per each sample. In that case you have to take care of the properly configured `cleanup.conf` file that should describe the BAM files from previous cleaning round but also define new output directory for each round in `workflow-cleaning.outputs.json` file.\r +\r +## Step 5: Variant calling\r +\r +Define the file listing the BAM files used for variant calling. First column defines the sample identifier, and second the resultant BAM file either from mapping of cleaning workflow.\r +```\r +> cat inputBams.txt\r +SM1 /home/.../escalibur/outputMapping/SM1.scf00001.MarkDup.bam\r +SM2 /home/.../escalibur/outputCleaned/SM2.scf00001.MarkDup.cleaned.bam\r +```\r +\r +### workflow-variants.json config file\r +Add the path of your file listing the locations of BAM files (here `inputBams.txt`), and add the location to selected reference genome (found in `outputMapping/best.ref`) and it's label, as defined in `references.txt` file. 
Adjust the absolute paths for singularity image and adjust other parameters, especially define if you want to recalibrate the BAM files by selecting value "independent" to "variants_workflow.call_type".\r +```\r +{\r + "## CONFIG FILE": "WDL",\r + "variants_workflow.inputSampleFile": "./inputBams.txt",\r + "variants_workflow.selectedRefFile": "TestReferences/scf00001.fa",\r + "variants_workflow.selectedRefLabel": "scf00001",\r + \r + "## Singularity parameters": "absolute paths to the container and the directory to bind visible inside singularity",\r + "variants_workflow.singularityContainerPath": "/home/.../escalibur/escalibur.sif",\r + "variants_workflow.singularityBindPath": "/home/.../escalibur/",\r +\r + "## Which variant call workflow to use": "fast or independent",\r + "variants_workflow.call_type": "fast",\r + \r + "## Variant filtering expressions": "For SNPs and INDELs",\r + "variants_workflow.SNP_filt_exp": "QD < 2.0 || FS > 60.0 || MQ < 40.0 || MQRankSum < -12.5 || ReadPosRankSum < -8.0",\r + "variants_workflow.INDEL_filt_exp": "QD < 2.0 || FS > 200.0 || ReadPosRankSum < -20.0",\r +\r + "## Variant Filter params": "Variant filter, indel, snps, report making: Safe to leave as default",\r + "variants_workflow.ploidy": 2,\r + "variants_workflow.maxIndelSize": 60,\r + "variants_workflow.scafNumLim": 95,\r + "variants_workflow.scafNumCo": 2,\r + "variants_workflow.scafLenCutOff": 0,\r + "variants_workflow.ldWinSize": 10,\r + "variants_workflow.ldWinStep": 5,\r + "variants_workflow.ldCutOff": 0.3,\r + "variants_workflow.snp_indel_var_filtering_workflow.indelFilterName": "Indel_filter",\r + "variants_workflow.snp_indel_var_filtering_workflow.indelFilterExpression": "QD < 2.0 || FS > 200.0 || ReadPosRankSum < -20.0",\r + "variants_workflow.snp_indel_var_filtering_workflow.snpFilterName": "Snp_filter",\r + "variants_workflow.snp_indel_var_filtering_workflow.snpFilterExpression": "QD < 2.0 || FS > 60.0 || MQ < 40.0 || MQRankSum < -12.5 || ReadPosRankSum < 
-8.0",\r + "variants_workflow.snp_indel_var_filtering_workflow.vfindel_tk.selectType": "",\r + "variants_workflow.snp_indel_var_filtering_workflow.vfsnp_tk.selectType": "",\r +\r + "## Build chromosome map":"map_def_scf_lim_task",\r + "variants_workflow.snp_indel_var_filtering_workflow.map_def_scf_lim_task.scafLenCutOff": 1000000,\r + "variants_workflow.snp_indel_var_filtering_workflow.map_def_scf_lim_task.scafNumCo": 3,\r +\r + "## Indexing sub workflow task parameters": "Samtools index run time parameters",\r + "variants_workflow.ref_index.IST_minutes": 300,\r + "variants_workflow.ref_index.IST_threads": 2,\r + "variants_workflow.ref_index.IST_mem": 8000,\r + .\r + .\r + .\r +}\r +```\r +\r +Run the variant calling workflow.\r +```\r +> java -Dconfig.file=./workflow-runtime.local.config -jar ./cromwell-50.jar run workflow-variants.wdl -i workflow-variants.json -o workflow-variants.outputs.json > out.variants 2> err.variants\r +```\r +The resultant files will be copied to `outputVariants` directory. That includes filtered variants calls (`full_genotype_output.vcf`) and recalibrated BAM files (if independent call_type is selected).\r +\r +## Other considerations\r +\r +### Resource allocation in HPC environment\r +Wall time, memory usage and thread count (`_minutes`, `_mem`, `_threads`) given in `.json` files for each workflow can vary substantially and may require adjusting in HPC environment and slurm. This may lead to frequent restarting of the workflow after each adjustment. We have automated this task by providing scripts that automatically check the failed resource allocations and double them for each round. 
These scripts are located in `Automation` directory and can be run as follows:\r +```\r +> cd Automation\r +> sh init.sh # Copies the content of ../tasks directory to tasksOrig directory\r +> sbatch runMapping.slurm # Runs runLoopMapping.sh in a worker node\r +> sbatch runCleaning.slurm # Runs runLoopCleaning.sh in a worker node\r +> sbatch runVariants.slurm # Runs runLoopVariants.sh in a worker node\r +```\r +Scripts `runLoop*.sh` copy resource allocations from collective `runtimes.json` file to the files in `../tasks` directory, run the workflow and double the failed resource allocations in `../tasks` files, and reruns the workflow until it succeeds or until ten rounds have passed. Copying of resource allocations directly to the files in `../tasks` directory is necessary to guarantee proper function of call-caching.\r +#### NOTE: automated resource allocation adjustment is experimental, should be monitored when running and may require modifications to scripts to function properly.\r +\r +### Disk usage\r +Cromwell will create duplicate copies of files while running the workflows. It is therefore recommended to remove `cromwell-executions` directory after each workflow is run, if disk space is getting sparse.\r +```\r +> rm -r cromwell-executions\r +```\r +Especially, if there are hundreds of samples that may sum up to terabytes of data, disk space might become an issue if unused files are not removed.\r +\r +### Troubleshooting\r +If the output text does not reveal the error, you can try to find an error message using command(s):\r +```\r +> find cromwell-executions/ -name stderr -exec cat {} \\; | grep -i fatal\r +> find cromwell-executions/ -name stderr -exec cat {} \\; | less\r +```\r +\r +Most commonly encountered error cases:\r +\r +* Singularity is not running correctly. 
Typically you require help from your administrator to get singularity properly installed.\r +* Singularity image `escalibur.sif` was not downloaded\r +* Check that you are using correct runtime configuration file `workflow-runtime.local.config` or `workflow-runtime.slurm.config` when calling `cromwell-50.jar`\r +* Absolute file paths for Singularity/Trimmomatic, input files or contaminant genomes are not updated or are wrong in `workflow-*.json`, `inputBams.txt` or `cleanup.conf` configuration files, respectively.\r +* Defined run-time and memory requirements for some tasks are not sufficient in `.json` configuration files to run the pipeline in HPC environment.\r +* If you are using slurm job scheduler and want to run the pipeline in HPC environment, you have to create the related configuration file yourselves.\r +* Pipeline has not been tested in environments other than Linux and we expect that users encounter challenges if trying to run the pipeline e.g. in Mac environment.\r +\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/BSD-3-Clause" ; + schema1:name "Escalibur" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/335?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# ABR\\_Threshold_Detection\r +\r +## What is this?\r +\r +This code can be used to automatically determine hearing thresholds from ABR hearing curves. 
\r +\r +One of the following methods can be used for this purpose:\r + \r ++ neural network (NN) training, \r ++ calibration of a self-supervised sound level regression (SLR) method \r +\r +on given data sets with manually determined hearing thresholds.\r +\r +## Installation:\r +\r +Run inside the [src](./src) directory:\r +\r +### Installation as python package\r +\r +```\r +pip install -e ./src (Installation as python package)\r +```\r +\r +### Installation as conda virtual environment\r +```\r +conda create -n abr_threshold_detection python=3.7\r +conda activate abr_threshold_detection\r +conda install pip\r +pip install -e ./src\r +```\r +\r +## Usage:\r +Data files can be downloaded here: [https://zenodo.org/deposit/5779876](https://zenodo.org/deposit/5779876).\r +\r +For the Jupyter Notebooks (see the [`notebooks`](./notebooks) directory) to run, the path to the data has to be defined. For this, see the corresponding documentation of the respective notebooks.\r +\r +### Using NNs (`./src/ABR_ThresholdFinder_NN`)\r +\r +The neural network models were trained in `./src/notebooks/GMCtrained_NN*_training.ipynb` with GMC data and in `./src/notebooks/INGtrained_NN*_training.ipynb` with ING data.\r +\r +```\r +import ABR_ThresholdFinder_NN.data_preparation as dataprep\r +from ABR_ThresholdFinder_NN.models import create_model_1, compile_model_1\r +```\r +For automatic threshold detection based on NNs, `GMCtrained_NN_threshold_detection.ipynb` and `INGtrained_NN_threshold_detection.ipynb` in `./src/notebooks` can be used.\r +\r +```\r +import ABR_ThresholdFinder_NN.data_preparation as dataprep\r +import ABR_ThresholdFinder_NN.thresholder as abrthr\r +```\r +\r +### Using the SLR method (`./src/ABR_ThresholdFinder_SLR`)\r +\r +In `./src/notebooks/GMCcalibrated_SLR_threshold_detection.ipynb` and `./src/notebooks/INGcalibrated_SLR_threshold_detection.ipynb` it is shown how to use the module to:\r +\r ++ train a threshold detector on a data set and estimate the 
thresholds\r ++ save a trained model\r ++ load a model\r ++ apply a trained threshold estimator to a data set\r ++ evaluate thresholds by comparing it to a ground truth\r ++ evaluate thresholds by analysing signal averages\r +\r +```\r +import pandas as pd\r +import numpy as np\r +\r +from ABR_ThresholdFinder_SLR import ABR_Threshold_Detector_multi_stimulus\r +from ABR_ThresholdFinder_SLR.evaluations import evaluate_classification_against_ground_truth, plot_evaluation_curve_for_specific_stimulus\r +```\r +\r +##### Evaluate thresholds by comparing it with a 'ground truth' (a human set threshold in this case)\r +\r +For example:\r +\r +```\r +# 5dB buffer\r +evaluation = evaluate_classification_against_ground_truth(GMC_data2, 5, \r + frequency = 'frequency',\r + mouse_id = 'mouse_id',\r + sound_level = 'sound_level',\r + threshold_estimated = 'slr_estimated_thr',\r + threshold_ground_truth = 'threshold')\r +``` \r +### Compute and plot evaluation curves that allow to judge the quality of a thresholding\r +\r +Four threshold types are evaluated and compared:\r +\r ++ the threshols predicted with neural networks ('threshold NN')\r ++ the thresholds estimated by a sound level regression method ('threshold SLR')\r ++ the human ground truth ('threshold manual')\r ++ a constant threshold ('50')\r +\r +For more details, please see `Evaluation_of_ML_detected_thresholds.ipynb` in `./src/notebooks`.\r +\r +## Folder structure:\r +\r +### [`data`](./data)\r +Contains the preprocessed ABR and mouse phenotyping datasets from GMC and Ingham et al. 
in csv format, as well as the mouse ID distributions stored as numpy arrays for neural networks training, validation and testing.\r +\r +### [`models`](./models)\r +Contains the trained models of the two neural networks and the SLR method, but also the predictions of the first neural network with which the second neural network was fed.\r +\r +### [`models_cross-validation`](./models_cross-validation)\r +Contains the models that resulted from the cross-validation of the neural networks.\r +\r +### [`notebooks`](./notebooks)\r +Contains the Jupyter notebooks used for training, testing and evaluation of the neural networks and the SLR method, as well as those used for the hearing curve analysis.\r +\r +### [`notebooks_reports`](./notebooks_reports)\r +Contains the contents of Jupyter notebooks in html format.\r +\r +### [`results`](./results)\r +Contains the predictions or estimates made by the neural networks or the SLR method for the two data sets from GMC and Ingham et al. but also all the plots made to analyse the results.\r +\r +### [`src`](./src)\r +Contains the Python scripts used in the Jupyter notebooks.""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/376?version=1" ; + schema1:isBasedOn "https://github.com/ExperimentalGenetics/ABR_thresholder.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ABR Threshold Detection" ; + schema1:sdDatePublished "2024-07-12 13:35:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/376/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4654 ; + schema1:dateCreated "2022-07-18T08:08:04Z" ; + schema1:dateModified "2023-01-16T14:02:01Z" ; + schema1:description """# ABR\\_Threshold_Detection\r +\r +## What is this?\r +\r +This code can be used to automatically determine hearing thresholds from ABR hearing curves. \r +\r +One of the following methods can be used for this purpose:\r + \r ++ neural network (NN) training, \r ++ calibration of a self-supervised sound level regression (SLR) method \r +\r +on given data sets with manually determined hearing thresholds.\r +\r +## Installation:\r +\r +Run inside the [src](./src) directory:\r +\r +### Installation as python package\r +\r +```\r +pip install -e ./src (Installation as python package)\r +```\r +\r +### Installation as conda virtual environment\r +```\r +conda create -n abr_threshold_detection python=3.7\r +conda activate abr_threshold_detection\r +conda install pip\r +pip install -e ./src\r +```\r +\r +## Usage:\r +Data files can be downloaded here: [https://zenodo.org/deposit/5779876](https://zenodo.org/deposit/5779876).\r +\r +For the Jupyter Notebooks (see the [`notebooks`](./notebooks) directory) to run, the path to the data has to be defined. 
For this, see the corresponding documentation of the respective notebooks.\r +\r +### Using NNs (`./src/ABR_ThresholdFinder_NN`)\r +\r +The neural network models were trained in `./src/notebooks/GMCtrained_NN*_training.ipynb` with GMC data and in `./src/notebooks/INGtrained_NN*_training.ipynb` with ING data.\r +\r +```\r +import ABR_ThresholdFinder_NN.data_preparation as dataprep\r +from ABR_ThresholdFinder_NN.models import create_model_1, compile_model_1\r +```\r +For automatic threshold detection based on NNs, `GMCtrained_NN_threshold_detection.ipynb` and `INGtrained_NN_threshold_detection.ipynb` in `./src/notebooks` can be used.\r +\r +```\r +import ABR_ThresholdFinder_NN.data_preparation as dataprep\r +import ABR_ThresholdFinder_NN.thresholder as abrthr\r +```\r +\r +### Using the SLR method (`./src/ABR_ThresholdFinder_SLR`)\r +\r +In `./src/notebooks/GMCcalibrated_SLR_threshold_detection.ipynb` and `./src/notebooks/INGcalibrated_SLR_threshold_detection.ipynb` it is shown how to use the module to:\r +\r ++ train a threshold detector on a data set and estimate the thresholds\r ++ save a trained model\r ++ load a model\r ++ apply a trained threshold estimator to a data set\r ++ evaluate thresholds by comparing it to a ground truth\r ++ evaluate thresholds by analysing signal averages\r +\r +```\r +import pandas as pd\r +import numpy as np\r +\r +from ABR_ThresholdFinder_SLR import ABR_Threshold_Detector_multi_stimulus\r +from ABR_ThresholdFinder_SLR.evaluations import evaluate_classification_against_ground_truth, plot_evaluation_curve_for_specific_stimulus\r +```\r +\r +##### Evaluate thresholds by comparing it with a 'ground truth' (a human set threshold in this case)\r +\r +For example:\r +\r +```\r +# 5dB buffer\r +evaluation = evaluate_classification_against_ground_truth(GMC_data2, 5, \r + frequency = 'frequency',\r + mouse_id = 'mouse_id',\r + sound_level = 'sound_level',\r + threshold_estimated = 'slr_estimated_thr',\r + threshold_ground_truth = 
'threshold')\r +``` \r +### Compute and plot evaluation curves that allow to judge the quality of a thresholding\r +\r +Four threshold types are evaluated and compared:\r +\r ++ the threshols predicted with neural networks ('threshold NN')\r ++ the thresholds estimated by a sound level regression method ('threshold SLR')\r ++ the human ground truth ('threshold manual')\r ++ a constant threshold ('50')\r +\r +For more details, please see `Evaluation_of_ML_detected_thresholds.ipynb` in `./src/notebooks`.\r +\r +## Folder structure:\r +\r +### [`data`](./data)\r +Contains the preprocessed ABR and mouse phenotyping datasets from GMC and Ingham et al. in csv format, as well as the mouse ID distributions stored as numpy arrays for neural networks training, validation and testing.\r +\r +### [`models`](./models)\r +Contains the trained models of the two neural networks and the SLR method, but also the predictions of the first neural network with which the second neural network was fed.\r +\r +### [`models_cross-validation`](./models_cross-validation)\r +Contains the models that resulted from the cross-validation of the neural networks.\r +\r +### [`notebooks`](./notebooks)\r +Contains the Jupyter notebooks used for training, testing and evaluation of the neural networks and the SLR method, as well as those used for the hearing curve analysis.\r +\r +### [`notebooks_reports`](./notebooks_reports)\r +Contains the contents of Jupyter notebooks in html format.\r +\r +### [`results`](./results)\r +Contains the predictions or estimates made by the neural networks or the SLR method for the two data sets from GMC and Ingham et al. 
but also all the plots made to analyse the results.\r +\r +### [`src`](./src)\r +Contains the Python scripts used in the Jupyter notebooks.""" ; + schema1:keywords "Machine Learning" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "ABR Threshold Detection" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/376?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Quantitative Mass Spectrometry nf-core workflow" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1013?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/quantms" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/quantms" ; + schema1:sdDatePublished "2024-07-12 13:19:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1013/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16977 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:08Z" ; + schema1:dateModified "2024-06-11T12:55:08Z" ; + schema1:description "Quantitative Mass Spectrometry nf-core workflow" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1013?version=3" ; + schema1:keywords "dia, lfq, mass-spec, mass-spectrometry, openms, proteogenomics, Proteomics, tmt" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/quantms" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1013?version=4" ; + schema1:version 4 . 
+ + a schema1:Dataset ; + schema1:datePublished "2021-07-26T10:22:23.303007" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:sdDatePublished "2021-10-27 15:45:11 +0100" ; + schema1:softwareVersion "v0.1.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.6" . + + a schema1:Dataset ; + schema1:datePublished "2024-03-26T20:18:42.365340" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 169594 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + schema1:datePublished "2023-02-23T16:26:06.211606" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/pox-virus-amplicon" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "pox-virus-amplicon/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "pox-virus-amplicon/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/pox-virus-amplicon" ; + schema1:version "0.1" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Differential abundance analysis" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/981?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/differentialabundance" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/differentialabundance" ; + schema1:sdDatePublished "2024-07-12 13:21:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/981/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11181 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "Differential abundance analysis" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/981?version=8" ; + schema1:keywords "ATAC-seq, ChIP-seq, deseq2, differential-abundance, differential-expression, gsea, limma, microarray, rna-seq, shiny" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/differentialabundance" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/981?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/989?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/hic" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hic" ; + schema1:sdDatePublished "2024-07-12 13:21:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/989/ro_crate?version=7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9650 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:54Z" ; + schema1:dateModified "2024-06-11T12:54:54Z" ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/989?version=7" ; + schema1:keywords "chromosome-conformation-capture, Hi-C" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hic" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/989?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# ERGA Protein-coding gene annotation workflow.\r +Adapted from the work of Sagane Joye:\r +\r +https://github.com/sdind/genome_annotation_workflow\r +\r +## Prerequisites\r +\r +The following programs are required to run the workflow and the listed version were tested. 
It should be noted that older versions of snakemake are not compatible with newer versions of singularity as is noted here: [https://github.com/nextflow-io/nextflow/issues/1659](https://github.com/nextflow-io/nextflow/issues/1659).\r +\r +`conda v 23.7.3`\r +\r +`singularity v 3.7.3`\r +\r +`snakemake v 7.32.3` \r +\r +You will also need to acquire a licence key for Genemark and place this in your home directory with name `~/.gm_key` The key file can be obtained from the following location, where the licence should be read and agreed to: http://topaz.gatech.edu/GeneMark/license_download.cgi\r +\r +## Workflow\r +\r +The pipeline is based on braker3 and was tested on the following dataset from Drosophila melanogaster: [https://doi.org/10.5281/zenodo.8013373](https://doi.org/10.5281/zenodo.8013373)\r +\r +### Input data\r +\r +- Reference genome in fasta format\r +\r +- RNAseq data in paired-end zipped fastq format\r +\r +- uniprot fasta sequences in zipped fasta format\r +\r +### Pipeline steps\r +\r +- **Repeat Model and Mask** Run RepeatModeler using the genome as input, filter any repeats also annotated as protein sequences in the uniprot database and use this filtered libray to mask the genome with RepeatMasker\r +\r +- **Map RNAseq data** Trim any remaining adapter sequences and map the trimmed reads to the input genome\r +\r +- **Run gene prediction software** Use the mapped RNAseq reads and the uniprot sequences to create hints for gene prediction using Braker3 on the masked genome\r +\r +- **Evaluate annotation** Run BUSCO to evaluate the completeness of the annotation produced\r +\r +### Output data\r +\r +- FastQC reports for input RNAseq data before and after adapter trimming\r +\r +- RepeatMasker report containing quantity of masked sequence and distribution among TE families\r +\r +- Protein-coding gene annotation file in gff3 format\r +\r +- BUSCO summary of annotated sequences\r +\r +## Setup\r +\r +Your data should be placed in the `data` folder, 
with the reference genome in the folder `data/ref` and the transcript data in the foler `data/rnaseq`.\r +\r +The config file requires the following to be given:\r +\r +```\r +asm: 'absolute path to reference fasta'\r +snakemake_dir_path: 'path to snakemake working directory'\r +name: 'name for project, e.g. mHomSap1'\r +RNA_dir: 'absolute path to rnaseq directory'\r +busco_phylum: 'busco database to use for evaluation e.g. mammalia_odb10'\r +```\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.569.1" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/annotation/snakemake" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ERGA Protein-coding gene annotation workflow" ; + schema1:sdDatePublished "2024-07-12 13:27:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/569/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 562 ; + schema1:creator ; + schema1:dateCreated "2023-09-12T19:29:55Z" ; + schema1:dateModified "2023-09-13T13:41:08Z" ; + schema1:description """# ERGA Protein-coding gene annotation workflow.\r +Adapted from the work of Sagane Joye:\r +\r +https://github.com/sdind/genome_annotation_workflow\r +\r +## Prerequisites\r +\r +The following programs are required to run the workflow and the listed version were tested. 
It should be noted that older versions of snakemake are not compatible with newer versions of singularity as is noted here: [https://github.com/nextflow-io/nextflow/issues/1659](https://github.com/nextflow-io/nextflow/issues/1659).\r +\r +`conda v 23.7.3`\r +\r +`singularity v 3.7.3`\r +\r +`snakemake v 7.32.3` \r +\r +You will also need to acquire a licence key for Genemark and place this in your home directory with name `~/.gm_key` The key file can be obtained from the following location, where the licence should be read and agreed to: http://topaz.gatech.edu/GeneMark/license_download.cgi\r +\r +## Workflow\r +\r +The pipeline is based on braker3 and was tested on the following dataset from Drosophila melanogaster: [https://doi.org/10.5281/zenodo.8013373](https://doi.org/10.5281/zenodo.8013373)\r +\r +### Input data\r +\r +- Reference genome in fasta format\r +\r +- RNAseq data in paired-end zipped fastq format\r +\r +- uniprot fasta sequences in zipped fasta format\r +\r +### Pipeline steps\r +\r +- **Repeat Model and Mask** Run RepeatModeler using the genome as input, filter any repeats also annotated as protein sequences in the uniprot database and use this filtered libray to mask the genome with RepeatMasker\r +\r +- **Map RNAseq data** Trim any remaining adapter sequences and map the trimmed reads to the input genome\r +\r +- **Run gene prediction software** Use the mapped RNAseq reads and the uniprot sequences to create hints for gene prediction using Braker3 on the masked genome\r +\r +- **Evaluate annotation** Run BUSCO to evaluate the completeness of the annotation produced\r +\r +### Output data\r +\r +- FastQC reports for input RNAseq data before and after adapter trimming\r +\r +- RepeatMasker report containing quantity of masked sequence and distribution among TE families\r +\r +- Protein-coding gene annotation file in gff3 format\r +\r +- BUSCO summary of annotated sequences\r +\r +## Setup\r +\r +Your data should be placed in the `data` folder, 
with the reference genome in the folder `data/ref` and the transcript data in the foler `data/rnaseq`.\r +\r +The config file requires the following to be given:\r +\r +```\r +asm: 'absolute path to reference fasta'\r +snakemake_dir_path: 'path to snakemake working directory'\r +name: 'name for project, e.g. mHomSap1'\r +RNA_dir: 'absolute path to rnaseq directory'\r +busco_phylum: 'busco database to use for evaluation e.g. mammalia_odb10'\r +```\r +""" ; + schema1:keywords "Annotation, Genomics, Transcriptomics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "ERGA Protein-coding gene annotation workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/tree/main/annotation/snakemake/Snakefile" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for the analysis of CRISPR data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/975?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/crisprseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/crisprseq" ; + schema1:sdDatePublished "2024-07-12 13:18:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/975/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11023 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:46Z" ; + schema1:dateModified "2024-06-11T12:54:46Z" ; + schema1:description "Pipeline for the analysis of CRISPR data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/975?version=4" ; + schema1:keywords "crispr, crispr-analysis, crispr-cas, NGS" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/crisprseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/975?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + schema1:datePublished "2023-08-31T07:14:38.838218" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-sr/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/984?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/epitopeprediction" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/epitopeprediction" ; + schema1:sdDatePublished "2024-07-12 13:21:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/984/ro_crate?version=7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11406 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:52Z" ; + schema1:dateModified "2024-06-11T12:54:52Z" ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/984?version=7" ; + schema1:keywords "epitope, epitope-prediction, mhc-binding-prediction" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/epitopeprediction" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/984?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + schema1:datePublished "2024-06-24T12:34:13.326994" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/bacterial_genome_annotation" ; + schema1:license "GPL-3.0-or-later" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "bacterial_genome_annotation/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + schema1:datePublished "2023-11-13T11:31:30.254075" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + , + ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.17" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-atac-cutandrun" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.2" . 
+ + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.17" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-pe" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.2" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.17" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-ligand-parameterization).\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.294.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_ligand_parameterization/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy GMX Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/294/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9948 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-22T09:50:36Z" ; + schema1:dateModified "2023-01-16T13:58:36Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. 
Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-ligand-parameterization).\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/294?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy GMX Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_ligand_parameterization/galaxy/biobb_wf_ligand_parameterization.ga" ; + schema1:version 2 ; + ns1:output , + , + , 
+ , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/984?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/epitopeprediction" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/epitopeprediction" ; + schema1:sdDatePublished "2024-07-12 13:21:30 +0100" ; + schema1:url "https://workflowhub.eu/workflows/984/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8593 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:51Z" ; + schema1:dateModified "2024-06-11T12:54:51Z" ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/984?version=7" ; + schema1:keywords "epitope, epitope-prediction, mhc-binding-prediction" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/epitopeprediction" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/984?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# EukRecover\r +Pipeline to recover eukaryotic MAGs using CONCOCT, metaBAT2 and EukCC's merging algorythm.\r +\r +Needs paired end shotgun metagenomic reads.\r +\r +## Environment\r +\r +Eukrecover requires an environment with snakemake and metaWRAP.\r +\r +## Quickstart\r +\r +Define your samples in the file `samples.csv`.\r +This file needs to have the columns project and run to identify each metagenome. \r +\r +This pipeline does not support co-binning, but feel free to change it. 
\r
+\r
+Clone this repo wherever you want to run the pipeline:\r
+```\r
+git clone https://github.com/openpaul/eukrecover/\r
+```\r
+\r
+\r
+You can then run the snakemake like so\r
+\r
+```\r
+snakemake --use-singularity\r
+```\r
+\r
+The pipeline uses dockerhub to fetch all tools, so make sure you have singularity installed.\r
+\r
+\r
+\r
+## Prepare databases\r
+The pipeline will setup databases for you, but if you already have a EukCC or a BUSCO 5 database you can use them \r
+by specifying the location in the file `config/config.yaml`\r
+\r
+\r
+## Output:\r
+In the folder results you will find a folder `MAGs` which will contain a folder\r
+`fa` containing the actual MAG fastas.\r
+In addition you will find stats for each MAG in the table `QC.csv`.\r
+\r
+This table contains the following columns:\r
+\r
+name,eukcc_compl,eukcc_cont,BUSCO_C,BUSCO_M,BUSCO_D,BUSCO_F,BUSCO_tax,N50,bp\r
+\r
+\r
+\r
+## Citation:\r
+\r
+If you use this pipeline please make sure to cite all used software. \r
+\r
+For this please refer to the used rules.\r
+""" ;
+ schema1:hasPart ;
+ schema1:identifier "https://workflowhub.eu/workflows/475?version=1" ;
+ schema1:isBasedOn "https://github.com/EBI-Metagenomics/eukrecover.git" ;
+ schema1:license "CC-BY-4.0" ;
+ schema1:mainEntity ;
+ schema1:name "Research Object Crate for EukRecover" ;
+ schema1:sdDatePublished "2024-07-12 13:33:27 +0100" ;
+ schema1:url "https://workflowhub.eu/workflows/475/ro_crate?version=1" . 
+
+ a schema1:MediaObject,
+ schema1:SoftwareSourceCode,
+ ;
+ dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ;
+ schema1:contentSize 6502 ;
+ schema1:dateCreated "2023-05-19T14:02:24Z" ;
+ schema1:dateModified "2023-05-19T14:02:24Z" ;
+ schema1:description """# EukRecover\r
+Pipeline to recover eukaryotic MAGs using CONCOCT, metaBAT2 and EukCC's merging algorithm.\r
+\r
+Needs paired end shotgun metagenomic reads.\r
+\r
+## Environment\r
+\r
+Eukrecover requires an environment with snakemake and metaWRAP.\r
+\r
+## Quickstart\r
+\r
+Define your samples in the file `samples.csv`.\r
+This file needs to have the columns project and run to identify each metagenome. \r
+\r
+This pipeline does not support co-binning, but feel free to change it. \r
+\r
+Clone this repo wherever you want to run the pipeline:\r
+```\r
+git clone https://github.com/openpaul/eukrecover/\r
+```\r
+\r
+\r
+You can then run the snakemake like so\r
+\r
+```\r
+snakemake --use-singularity\r
+```\r
+\r
+The pipeline uses dockerhub to fetch all tools, so make sure you have singularity installed.\r
+\r
+\r
+\r
+## Prepare databases\r
+The pipeline will setup databases for you, but if you already have a EukCC or a BUSCO 5 database you can use them \r
+by specifying the location in the file `config/config.yaml`\r
+\r
+\r
+## Output:\r
+In the folder results you will find a folder `MAGs` which will contain a folder\r
+`fa` containing the actual MAG fastas.\r
+In addition you will find stats for each MAG in the table `QC.csv`.\r
+\r
+This table contains the following columns:\r
+\r
+name,eukcc_compl,eukcc_cont,BUSCO_C,BUSCO_M,BUSCO_D,BUSCO_F,BUSCO_tax,N50,bp\r
+\r
+\r
+\r
+## Citation:\r
+\r
+If you use this pipeline please make sure to cite all used software. 
\r
+\r
+For this please refer to the used rules.\r
+""" ;
+ schema1:keywords "" ;
+ schema1:license "https://spdx.org/licenses/CC-BY-4.0" ;
+ schema1:name "EukRecover" ;
+ schema1:producer ;
+ schema1:programmingLanguage ;
+ schema1:sdPublisher ;
+ schema1:url "https://workflowhub.eu/workflows/475?version=1" ;
+ schema1:version 1 .
+
+ a schema1:Dataset ;
+ dct:conformsTo ,
+ ;
+ schema1:creativeWorkStatus "Stable" ;
+ schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ;
+ schema1:hasPart ;
+ schema1:identifier "https://workflowhub.eu/workflows/44?version=24" ;
+ schema1:isBasedOn "https://github.com/nf-core/rnaseq" ;
+ schema1:license "MIT" ;
+ schema1:mainEntity ;
+ schema1:name "Research Object Crate for nf-core/rnaseq" ;
+ schema1:sdDatePublished "2024-07-12 13:22:43 +0100" ;
+ schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=24" .
+
+ a schema1:MediaObject,
+ schema1:SoftwareSourceCode,
+ ;
+ dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ;
+ schema1:contentSize 10903 ;
+ schema1:creator ,
+ ,
+ ;
+ schema1:dateCreated "2024-06-11T12:55:13Z" ;
+ schema1:dateModified "2024-06-11T12:55:13Z" ;
+ schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ;
+ schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ;
+ schema1:keywords "rna, rna-seq" ;
+ schema1:license "https://spdx.org/licenses/MIT" ;
+ schema1:name "nf-core/rnaseq" ;
+ schema1:producer ;
+ schema1:programmingLanguage ;
+ schema1:sdPublisher ;
+ schema1:url "https://workflowhub.eu/workflows/44?version=24" ;
+ schema1:version 24 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "MARS-seq 1/2 preprocessing pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/996?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/marsseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/marsseq" ; + schema1:sdDatePublished "2024-07-12 13:20:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/996/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6424 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:59Z" ; + schema1:dateModified "2024-06-11T12:54:59Z" ; + schema1:description "MARS-seq 1/2 preprocessing pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/996?version=4" ; + schema1:keywords "facs-sorting, MARS-seq, single-cell, single-cell-rna-seq, star-solo, transcriptional-dynamics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/marsseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/996?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1025?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/taxprofiler" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/taxprofiler" ; + schema1:sdDatePublished "2024-07-12 13:18:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1025/ro_crate?version=9" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15939 ; + schema1:creator , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:20Z" ; + schema1:dateModified "2024-06-11T12:55:20Z" ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1025?version=10" ; + schema1:keywords "Classification, illumina, long-reads, Metagenomics, microbiome, nanopore, pathogen, Profiling, shotgun, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/taxprofiler" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1025?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. 
Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/abcix-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.299.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_abc_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy ABC MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/299/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 107797 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-23T09:03:51Z" ; + schema1:dateModified "2023-01-16T13:59:01Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/abcix-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/299?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy ABC MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + 
schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_abc_md_setup/galaxy/biobb_wf_amber_abc_md_setup.ga" ; + schema1:version 2 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Take a scRNAseq counts matrix from a single sample, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. Produces a processed AnnData \r +object.\r +\r +Depreciated: use individual workflows insead for multiple samples""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/514?version=3" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq Single Sample Processing Counts Matrix" ; + schema1:sdDatePublished "2024-07-12 13:22:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/514/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 105641 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-05-30T05:19:29Z" ; + schema1:dateModified "2024-05-30T05:55:19Z" ; + schema1:description """Take a scRNAseq counts matrix from a single sample, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. 
Produces a processed AnnData \r +object.\r +\r +Depreciated: use individual workflows insead for multiple samples""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/514?version=2" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "scRNAseq Single Sample Processing Counts Matrix" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/514?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-23T15:28:30+00:00" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:mainEntity . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:name "main" ; + schema1:programmingLanguage . + + a schema1:Dataset ; + schema1:datePublished "2023-11-27T11:45:55.271114" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + , + ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-atac-cutandrun" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.3" . 
+ + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-pe" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.3" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/986?version=12" ; + schema1:isBasedOn "https://github.com/nf-core/fetchngs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fetchngs" ; + schema1:sdDatePublished "2024-07-12 13:21:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/986/ro_crate?version=12" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9517 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:53Z" ; + schema1:dateModified "2024-06-11T12:54:53Z" ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/986?version=13" ; + schema1:keywords "ddbj, download, ena, FASTQ, geo, sra, synapse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fetchngs" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/986?version=12" ; + schema1:version 12 . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-11-08T17:33:58.391559" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.17" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=23" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-07-12 13:21:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=23" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13434 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:51Z" ; + schema1:dateModified "2024-06-11T12:54:51Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=23" ; + schema1:version 23 . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-02-05T13:35:16.224942" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.20" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# BAM-to-FASTQ-QC\r +\r +## General recommendations for using BAM-to-FASTQ-QC\r +Please see the [`Genome assembly with hifiasm on Galaxy Australia`](https://australianbiocommons.github.io/how-to-guides/genome_assembly/hifi_assembly) guide.\r +\r +## Acknowledgements\r +\r +The workflow & the [doc_guidelines template used](https://github.com/AustralianBioCommons/doc_guidelines) are supported by the Australian BioCommons via Bioplatforms Australia funding, the Australian Research Data Commons (https://doi.org/10.47486/PL105) and the Queensland Government RICF programme. Bioplatforms Australia and the Australian Research Data Commons are enabled by the National Collaborative Research Infrastructure Strategy (NCRIS).\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.220.2" ; + schema1:isBasedOn "https://github.com/AustralianBioCommons/BAM-to-FASTQ-QC" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for BAM to FASTQ + QC v1.0" ; + schema1:sdDatePublished "2024-07-12 13:35:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/220/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10441 ; + schema1:creator ; + schema1:dateCreated "2022-10-17T02:51:00Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """# BAM-to-FASTQ-QC\r +\r +## General recommendations for using BAM-to-FASTQ-QC\r +Please see the [`Genome assembly with hifiasm on Galaxy Australia`](https://australianbiocommons.github.io/how-to-guides/genome_assembly/hifi_assembly) guide.\r +\r +## Acknowledgements\r +\r +The workflow & the [doc_guidelines template used](https://github.com/AustralianBioCommons/doc_guidelines) are supported by the Australian BioCommons via Bioplatforms Australia funding, the Australian Research Data Commons (https://doi.org/10.47486/PL105) and the Queensland Government RICF programme. Bioplatforms Australia and the Australian Research Data Commons are enabled by the National Collaborative Research Infrastructure Strategy (NCRIS).\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/220?version=1" ; + schema1:isPartOf , + ; + schema1:keywords "BAM, FASTQ, Conversion, QC" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "BAM to FASTQ + QC v1.0" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/220?version=2" ; + schema1:version 2 ; + ns1:input ; + ns1:output , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2023-11-23T06:34:49+00:00" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/AustralianBioCommons/Genome-assessment-post-assembly.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Genome-assessment-post-assembly" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Genome-assessment-post-assembly" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/AustralianBioCommons/Genome-assessment-post-assembly.git" ; + schema1:version "main" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.276.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for 
Python Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:34:08 +0100" ; + schema1:url "https://workflowhub.eu/workflows/276/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6826 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T07:46:27Z" ; + schema1:dateModified "2023-04-14T07:49:00Z" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/276?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; 
+ schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_md_setup/python/workflow.py" ; + schema1:version 3 . + + a schema1:Dataset ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """This is an experimental KNIME workflow of using the BioExcel building blocks to implement the Protein MD Setup tutorial for molecular dynamics with GROMACS.\r +\r +Note that this workflow won't import in KNIME without the [experimental KNIME nodes](https://bioexcel.eu/research/projects/biobb_knime/) for BioBB - contact Adam Hospital for details.""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.201.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Protein MD Setup tutorial using BioExcel Building Blocks (biobb) in KNIME" ; + schema1:sdDatePublished "2024-07-12 13:36:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/201/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:creator ; + schema1:dateCreated "2021-09-29T14:25:42Z" ; + schema1:dateModified "2022-04-11T09:27:55Z" ; + schema1:description """This is an experimental KNIME workflow of using the BioExcel building blocks to implement the Protein MD Setup tutorial for molecular dynamics with GROMACS.\r +\r +Note that this workflow won't import in KNIME without the [experimental KNIME nodes](https://bioexcel.eu/research/projects/biobb_knime/) for BioBB - contact Adam Hospital for details.""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Protein MD Setup tutorial using BioExcel Building Blocks (biobb) in KNIME" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/201?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 329944 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/233?version=1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for 16S_biodiversity_for_nonoverlap_paired_end" ; + schema1:sdDatePublished "2024-07-12 13:36:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/233/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 59678 ; + schema1:dateCreated "2021-11-10T00:16:43Z" ; + schema1:dateModified "2024-04-17T04:17:52Z" ; + schema1:description "" ; + schema1:isPartOf ; + schema1:keywords "MetaDEGalaxy" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "16S_biodiversity_for_nonoverlap_paired_end" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/233?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r 
+![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.285.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_abc_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python ABC MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/285/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8001 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-17T10:52:38Z" ; + schema1:dateModified "2022-04-11T09:29:43Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 
2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/285?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python ABC MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_abc_md_setup/python/workflow.py" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-27T10:02:38.084617" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/gromacs-mmgbsa" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "gromacs-mmgbsa/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:Dataset ; + schema1:datePublished "2024-06-27T11:47:36.658528" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Plot-Nx-Size" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Plot-Nx-Size/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# ONTViSc (ONT-based Viral Screening for Biosecurity)\r +\r +## Introduction\r +eresearchqut/ontvisc is a Nextflow-based bioinformatics pipeline designed to help diagnostics of viruses and viroid pathogens for biosecurity. 
It takes fastq files generated from either amplicon or whole-genome sequencing using Oxford Nanopore Technologies as input.\r +\r +The pipeline can either: 1) perform a direct search on the sequenced reads, 2) generate clusters, 3) assemble the reads to generate longer contigs or 4) directly map reads to a known reference. \r +\r +The reads can optionally be filtered from a plant host before performing downstream analysis.\r +\r +## Pipeline overview\r +- Data quality check (QC) and preprocessing\r + - Merge fastq files (optional)\r + - Raw fastq file QC (Nanoplot)\r + - Trim adaptors (PoreChop ABI - optional)\r + - Filter reads based on length and/or quality (Chopper - optional)\r + - Reformat fastq files so read names are trimmed after the first whitespace (bbmap)\r + - Processed fastq file QC (if PoreChop and/or Chopper is run) (Nanoplot)\r +- Host read filtering\r + - Align reads to host reference provided (Minimap2)\r + - Extract reads that do not align for downstream analysis (seqtk)\r +- QC report\r + - Derive read counts recovered pre and post data processing and post host filtering\r +- Read classification analysis mode\r +- Clustering mode\r + - Read clustering (Rattle)\r + - Convert fastq to fasta format (seqtk)\r + - Cluster scaffolding (Cap3)\r + - Megablast homology search against ncbi or custom database (blast)\r + - Derive top candidate viral hits\r +- De novo assembly mode\r + - De novo assembly (Canu or Flye)\r + - Megablast homology search against ncbi or custom database or reference (blast)\r + - Derive top candidate viral hits\r +- Read classification mode\r + - Option 1 Nucleotide-based taxonomic classification of reads (Kraken2, Braken)\r + - Option 2 Protein-based taxonomic classification of reads (Kaiju, Krona)\r + - Option 3 Convert fastq to fasta format (seqtk) and perform direct homology search using megablast (blast)\r +- Map to reference mode\r + - Align reads to reference fasta file (Minimap2) and derive bam file and alignment 
statistics (Samtools)\r +\r +Detailed instructions can be found on [GitHub](https://github.com/eresearchqut/ontvisc/).\r +A step-by-step guide with instructions on how to set up and execute the ONTvisc pipeline on one of the HPC systems: Lyra (Queensland University of Technology), Setonix (Pawsey) and Gadi (National Computational Infrastructure) can be found [here](https://mantczakaus.github.io/ontvisc_guide/).\r +\r +### Authors\r +Marie-Emilie Gauthier \r +Craig Windell \r +Magdalena Antczak \r +Roberto Barrero """ ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.683.1" ; + schema1:isBasedOn "https://github.com/eresearchqut/ontvisc.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ONTViSc (ONT-based Viral Screening for Biosecurity)" ; + schema1:sdDatePublished "2024-07-12 13:24:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/683/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 1243142 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 35839 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2023-12-04T01:42:40Z" ; + schema1:dateModified "2024-02-19T05:24:24Z" ; + schema1:description """# ONTViSc (ONT-based Viral Screening for Biosecurity)\r +\r +## Introduction\r +eresearchqut/ontvisc is a Nextflow-based bioinformatics pipeline designed to help diagnostics of viruses and viroid pathogens for biosecurity. It takes fastq files generated from either amplicon or whole-genome sequencing using Oxford Nanopore Technologies as input.\r +\r +The pipeline can either: 1) perform a direct search on the sequenced reads, 2) generate clusters, 3) assemble the reads to generate longer contigs or 4) directly map reads to a known reference. 
\r +\r +The reads can optionally be filtered from a plant host before performing downstream analysis.\r +\r +## Pipeline overview\r +- Data quality check (QC) and preprocessing\r + - Merge fastq files (optional)\r + - Raw fastq file QC (Nanoplot)\r + - Trim adaptors (PoreChop ABI - optional)\r + - Filter reads based on length and/or quality (Chopper - optional)\r + - Reformat fastq files so read names are trimmed after the first whitespace (bbmap)\r + - Processed fastq file QC (if PoreChop and/or Chopper is run) (Nanoplot)\r +- Host read filtering\r + - Align reads to host reference provided (Minimap2)\r + - Extract reads that do not align for downstream analysis (seqtk)\r +- QC report\r + - Derive read counts recovered pre and post data processing and post host filtering\r +- Read classification analysis mode\r +- Clustering mode\r + - Read clustering (Rattle)\r + - Convert fastq to fasta format (seqtk)\r + - Cluster scaffolding (Cap3)\r + - Megablast homology search against ncbi or custom database (blast)\r + - Derive top candidate viral hits\r +- De novo assembly mode\r + - De novo assembly (Canu or Flye)\r + - Megablast homology search against ncbi or custom database or reference (blast)\r + - Derive top candidate viral hits\r +- Read classification mode\r + - Option 1 Nucleotide-based taxonomic classification of reads (Kraken2, Braken)\r + - Option 2 Protein-based taxonomic classification of reads (Kaiju, Krona)\r + - Option 3 Convert fastq to fasta format (seqtk) and perform direct homology search using megablast (blast)\r +- Map to reference mode\r + - Align reads to reference fasta file (Minimap2) and derive bam file and alignment statistics (Samtools)\r +\r +Detailed instructions can be found on [GitHub](https://github.com/eresearchqut/ontvisc/).\r +A step-by-step guide with instructions on how to set up and execute the ONTvisc pipeline on one of the HPC systems: Lyra (Queensland University of Technology), Setonix (Pawsey) and Gadi (National Computational 
Infrastructure) can be found [here](https://mantczakaus.github.io/ontvisc_guide/).\r +\r +### Authors\r +Marie-Emilie Gauthier \r +Craig Windell \r +Magdalena Antczak \r +Roberto Barrero """ ; + schema1:image ; + schema1:keywords "Assembly, Bioinformatics, Virology, blast, Nextflow, ONT, singularity, Virus" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "ONTViSc (ONT-based Viral Screening for Biosecurity)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/683?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.286.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_dna_helparms/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Structural DNA helical parameters tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/286/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 22900 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-17T11:10:00Z" ; + schema1:dateModified "2022-04-11T09:29:43Z" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/286?version=3" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Structural DNA helical parameters tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_dna_helparms/python/workflow.py" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """## Introduction\r +\r +**vibbits/rnaseq-editing** is a bioinformatics pipeline that can be used to analyse RNA sequencing data obtained from organisms with a reference genome and annotation followed by a prediction step of editing sites using RDDpred.\r +\r +The pipeline is largely based on the [nf-core RNAseq pipeline](https://nf-co.re/rnaseq/).\r +\r +The initial nf-core pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +## Pipeline summary\r +\r +1. Merge re-sequenced FastQ files ([`cat`](http://www.linfo.org/cat.html))\r +2. Read QC ([`FastQC`](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/))\r +3. Adapter and quality trimming ([`Trimmomatics`](https://www.bioinformatics.babraham.ac.uk/projects/trim_galore/))\r +4. Use of STAR for multiple alignment and quantification: [`STAR`](https://github.com/alexdobin/STAR)\r +5. Sort and index alignments ([`SAMtools`](https://sourceforge.net/projects/samtools/files/samtools/))\r +6. Prediction of editing sites using RDDpred ([`RDDpred`](https://github.com/vibbits/RDDpred))\r +7. Extensive quality control:\r + 1. [`RSeQC`](http://rseqc.sourceforge.net/)\r + 2. [`Qualimap`](http://qualimap.bioinfo.cipf.es/)\r + 3. [`dupRadar`](https://bioconductor.org/packages/release/bioc/html/dupRadar.html)\r +8. 
Present QC for raw read, alignment, gene biotype, sample similarity, and strand-specificity checks ([`MultiQC`](http://multiqc.info/), [`R`](https://www.r-project.org/))\r +\r +## Quick Start\r +\r +1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.04.0`)\r +\r +2. Install [`Docker`](https://docs.docker.com/engine/installation/) on a Linux operating system.\r + Note: This pipeline does not currently support running with macOS.\r +\r +3. Download the pipeline via git clone, download the associated training data files for RDDpred into the assets folder, download the reference data to \r +\r + ```console\r + git clone https://github.com/vibbits/rnaseq-editing.git\r + cd $(pwd)/rnaseq-editing/assets\r + # download training data file for RDDpred\r + wget -c \r + # download reference data for your genome, we provide genome and indexed genome for STAR 2.7.3a\r + \r + ```\r +\r + > * Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment.\r +\r +4. 
Start running your own analysis using Docker locally!\r +\r + ```console\r + nextflow run vibbits/rnaseq-editing \\\r + --input samplesheet.csv \\\r + --genome hg19 \\\r + -profile docker\r + ```\r +\r + * An executable Python script called [`fastq_dir_to_samplesheet.py`](https://github.com/nf-core/rnaseq/blob/master/bin/fastq_dir_to_samplesheet.py) has been provided if you would like to auto-create an input samplesheet based on a directory containing FastQ files **before** you run the pipeline (requires Python 3 installed locally) e.g.\r +\r + ```console\r + wget -L https://raw.githubusercontent.com/nf-core/rnaseq/master/bin/fastq_dir_to_samplesheet.py\r + ./fastq_dir_to_samplesheet.py samplesheet.csv --strandedness reverse\r + ```\r +\r + * The final analysis has been executed on the Azure platform using Azure Kubernetes Services (AKS). AKS has to be set up on the Azure platform by defining a standard node pool called sys next to the scalable node pool cpumem using Standard_E8ds_v4 as node size for calculation.\r + Furthermore, persistent volume claims (PVCs) have been setup for input and work folders of the nextflow runs. In the PVC `input` the reference data as well as the fastqc files have been stored where the PVC `work`, the temporary nextflow files for the individual runs as well as the output files have been stored.\r + * The config file for the final execution run for [RNAseq editing for the human samples and reference genome hg19](https://github.com/vibbits/rnaseq-editing/blob/master/nextflow.config.as-executed). \r +\r +## Documentation\r +\r +The nf-core/rnaseq pipeline comes with documentation about the pipeline [usage](https://nf-co.re/rnaseq/usage), [parameters](https://nf-co.re/rnaseq/parameters) and [output](https://nf-co.re/rnaseq/output).\r +\r +## Credits\r +These scripts were written to provide a reproducible data analysis pipeline until the downstream processing using dedicated R scripts for exploratory analysis and plotting. 
The general structure of pipeline is based on the data analysis steps of the our recent paper [ADAR1 interaction with Z-RNA promotes editing of endogenous double-stranded RNA and prevents MDA5-dependent immune activation](https://pubmed.ncbi.nlm.nih.gov/34380029/).\r +\r +Note: The nf-core scripts this pipeline is based on were originally written for use at the [National Genomics Infrastructure](https://ngisweden.scilifelab.se), part of [SciLifeLab](http://www.scilifelab.se/) in Stockholm, Sweden, by Phil Ewels ([@ewels](https://github.com/ewels)) and Rickard Hammarén ([@Hammarn](https://github.com/Hammarn)).\r +\r +The RNAseq pipeline was re-written in Nextflow DSL2 by Harshil Patel ([@drpatelh](https://github.com/drpatelh)) from [The Bioinformatics & Biostatistics Group](https://www.crick.ac.uk/research/science-technology-platforms/bioinformatics-and-biostatistics/) at [The Francis Crick Institute](https://www.crick.ac.uk/), London.\r +\r +## Citations\r +\r +The `nf-core` publication is cited here as follows:\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/264?version=1" ; + schema1:isBasedOn "https://github.com/vibbits/rnaseq-editing.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for RNA sequencing data obtained from organisms with a reference genome and annotation followed by a prediction step of editing sites using RDDpred" ; + schema1:sdDatePublished "2024-07-12 13:34:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/264/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2455 ; + schema1:dateCreated "2022-01-27T10:44:25Z" ; + schema1:dateModified "2023-01-16T13:57:29Z" ; + schema1:description """## Introduction\r +\r +**vibbits/rnaseq-editing** is a bioinformatics pipeline that can be used to analyse RNA sequencing data obtained from organisms with a reference genome and annotation followed by a prediction step of editing sites using RDDpred.\r +\r +The pipeline is largely based on the [nf-core RNAseq pipeline](https://nf-co.re/rnaseq/).\r +\r +The initial nf-core pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +## Pipeline summary\r +\r +1. Merge re-sequenced FastQ files ([`cat`](http://www.linfo.org/cat.html))\r +2. Read QC ([`FastQC`](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/))\r +3. Adapter and quality trimming ([`Trimmomatics`](https://www.bioinformatics.babraham.ac.uk/projects/trim_galore/))\r +4. Use of STAR for multiple alignment and quantification: [`STAR`](https://github.com/alexdobin/STAR)\r +5. Sort and index alignments ([`SAMtools`](https://sourceforge.net/projects/samtools/files/samtools/))\r +6. Prediction of editing sites using RDDpred ([`RDDpred`](https://github.com/vibbits/RDDpred))\r +7. 
Extensive quality control:\r + 1. [`RSeQC`](http://rseqc.sourceforge.net/)\r + 2. [`Qualimap`](http://qualimap.bioinfo.cipf.es/)\r + 3. [`dupRadar`](https://bioconductor.org/packages/release/bioc/html/dupRadar.html)\r +8. Present QC for raw read, alignment, gene biotype, sample similarity, and strand-specificity checks ([`MultiQC`](http://multiqc.info/), [`R`](https://www.r-project.org/))\r +\r +## Quick Start\r +\r +1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.04.0`)\r +\r +2. Install [`Docker`](https://docs.docker.com/engine/installation/) on a Linux operating system.\r + Note: This pipeline does not currently support running with macOS.\r +\r +3. Download the pipeline via git clone, download the associated training data files for RDDpred into the assets folder, download the reference data to \r +\r + ```console\r + git clone https://github.com/vibbits/rnaseq-editing.git\r + cd $(pwd)/rnaseq-editing/assets\r + # download training data file for RDDpred\r + wget -c \r + # download reference data for your genome, we provide genome and indexed genome for STAR 2.7.3a\r + \r + ```\r +\r + > * Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment.\r +\r +4. 
Start running your own analysis using Docker locally!\r +\r + ```console\r + nextflow run vibbits/rnaseq-editing \\\r + --input samplesheet.csv \\\r + --genome hg19 \\\r + -profile docker\r + ```\r +\r + * An executable Python script called [`fastq_dir_to_samplesheet.py`](https://github.com/nf-core/rnaseq/blob/master/bin/fastq_dir_to_samplesheet.py) has been provided if you would like to auto-create an input samplesheet based on a directory containing FastQ files **before** you run the pipeline (requires Python 3 installed locally) e.g.\r +\r + ```console\r + wget -L https://raw.githubusercontent.com/nf-core/rnaseq/master/bin/fastq_dir_to_samplesheet.py\r + ./fastq_dir_to_samplesheet.py samplesheet.csv --strandedness reverse\r + ```\r +\r + * The final analysis has been executed on the Azure platform using Azure Kubernetes Services (AKS). AKS has to be set up on the Azure platform by defining a standard node pool called sys next to the scalable node pool cpumem using Standard_E8ds_v4 as node size for calculation.\r + Furthermore, persistent volume claims (PVCs) have been setup for input and work folders of the nextflow runs. In the PVC `input` the reference data as well as the fastqc files have been stored where the PVC `work`, the temporary nextflow files for the individual runs as well as the output files have been stored.\r + * The config file for the final execution run for [RNAseq editing for the human samples and reference genome hg19](https://github.com/vibbits/rnaseq-editing/blob/master/nextflow.config.as-executed). \r +\r +## Documentation\r +\r +The nf-core/rnaseq pipeline comes with documentation about the pipeline [usage](https://nf-co.re/rnaseq/usage), [parameters](https://nf-co.re/rnaseq/parameters) and [output](https://nf-co.re/rnaseq/output).\r +\r +## Credits\r +These scripts were written to provide a reproducible data analysis pipeline until the downstream processing using dedicated R scripts for exploratory analysis and plotting. 
The general structure of pipeline is based on the data analysis steps of the our recent paper [ADAR1 interaction with Z-RNA promotes editing of endogenous double-stranded RNA and prevents MDA5-dependent immune activation](https://pubmed.ncbi.nlm.nih.gov/34380029/).\r +\r +Note: The nf-core scripts this pipeline is based on were originally written for use at the [National Genomics Infrastructure](https://ngisweden.scilifelab.se), part of [SciLifeLab](http://www.scilifelab.se/) in Stockholm, Sweden, by Phil Ewels ([@ewels](https://github.com/ewels)) and Rickard Hammarén ([@Hammarn](https://github.com/Hammarn)).\r +\r +The RNAseq pipeline was re-written in Nextflow DSL2 by Harshil Patel ([@drpatelh](https://github.com/drpatelh)) from [The Bioinformatics & Biostatistics Group](https://www.crick.ac.uk/research/science-technology-platforms/bioinformatics-and-biostatistics/) at [The Francis Crick Institute](https://www.crick.ac.uk/), London.\r +\r +## Citations\r +\r +The `nf-core` publication is cited here as follows:\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "RNA sequencing data obtained from organisms with a reference genome and annotation followed by a prediction step of editing sites using RDDpred" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/264?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# HiC scaffolding pipeline\r +\r +Snakemake pipeline for scaffolding of a genome using HiC reads using yahs.\r +\r +## Prerequisites\r +\r +This pipeine has been tested using `Snakemake v7.32.4` and requires conda for installation of required tools. To run the pipline use the command:\r +\r +`snakemake --use-conda --cores N`\r +\r +where N is number of cores to use. There are provided a set of configuration and running scripts for exectution on a slurm queueing system. After configuring the `cluster.json` file run:\r +\r +`./run_cluster`\r +\r +## Before starting\r +\r +You need to create a temporary folder and specify the path in the `config.yaml` file. This should be able to hold the temporary files created when sorting the `.pairsam` file (100s of GB or even many TBs)\r +\r +The path to the genome assemly must be given in the `config.yaml`.\r +\r +The HiC reads should be paired and named as follows: `Library_1.fastq.gz Library_2.fastq.gz`. The pipeline can accept any number of paired HiC read files, but the naming must be consistent. The folder containing these files must be provided in the `config.yaml`.""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.796.2" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/snakemake/4.Scaffolding/yahs" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for HiC scaffolding pipeline" ; + schema1:sdDatePublished "2024-07-12 13:23:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/796/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4471 ; + schema1:creator ; + schema1:dateCreated "2024-06-21T09:42:46Z" ; + schema1:dateModified "2024-06-21T09:43:16Z" ; + schema1:description """# HiC scaffolding pipeline\r +\r +Snakemake pipeline for scaffolding of a genome using HiC reads using yahs.\r +\r +## Prerequisites\r +\r +This pipeine has been tested using `Snakemake v7.32.4` and requires conda for installation of required tools. To run the pipline use the command:\r +\r +`snakemake --use-conda --cores N`\r +\r +where N is number of cores to use. There are provided a set of configuration and running scripts for exectution on a slurm queueing system. After configuring the `cluster.json` file run:\r +\r +`./run_cluster`\r +\r +## Before starting\r +\r +You need to create a temporary folder and specify the path in the `config.yaml` file. This should be able to hold the temporary files created when sorting the `.pairsam` file (100s of GB or even many TBs)\r +\r +The path to the genome assemly must be given in the `config.yaml`.\r +\r +The HiC reads should be paired and named as follows: `Library_1.fastq.gz Library_2.fastq.gz`. The pipeline can accept any number of paired HiC read files, but the naming must be consistent. The folder containing these files must be provided in the `config.yaml`.""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/796?version=1" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "HiC scaffolding pipeline" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/796?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/278?version=8" ; + schema1:isBasedOn "https://gitlab.bsc.es/lrodrig1/structuralvariants_poc/-/tree/1.1.3/structuralvariants/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNV_pipeline" ; + schema1:sdDatePublished "2024-07-12 13:35:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/278/ro_crate?version=8" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9694 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9443 ; + schema1:creator , + ; + schema1:dateCreated "2022-06-30T12:00:53Z" ; + schema1:dateModified "2022-06-30T12:00:53Z" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/278?version=10" ; + schema1:keywords "cancer, CODEX2, ExomeDepth, manta, TransBioNet, variant calling, GRIDSS, structural variants" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CNV_pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/278?version=8" ; + schema1:version 8 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 72963 . + + a schema1:Dataset ; + schema1:datePublished "2023-09-25T12:10:50.925708" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "atacseq/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.12" . + + a schema1:Dataset ; + schema1:datePublished "2023-11-20T09:17:15.968072" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/991?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/hlatyping" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hlatyping" ; + schema1:sdDatePublished "2024-07-12 13:18:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/991/ro_crate?version=7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3910 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:56Z" ; + schema1:dateModified "2024-06-11T12:54:56Z" ; + schema1:description "Precision HLA typing from next-generation sequencing data." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/991?version=9" ; + schema1:keywords "DNA, hla, hla-typing, immunology, optitype, personalized-medicine, rna" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hlatyping" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/991?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + schema1:datePublished "2022-06-13T14:36:29.891474" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/VGP-meryldb-creation" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "VGP-meryldb-creation/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.11" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# workflow-partial-gstacks-populations\r +\r +These workflows are part of a set designed to work for RAD-seq data on the Galaxy platform, using the tools from the Stacks program. \r +\r +Galaxy Australia: https://usegalaxy.org.au/\r +\r +Stacks: http://catchenlab.life.illinois.edu/stacks/\r +\r +This workflow is part of the reference-guided stacks workflow, https://workflowhub.eu/workflows/347\r +\r + This workflow takes in bam files and a population map. \r +\r +To generate bam files see: https://workflowhub.eu/workflows/351\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/352?version=1" ; + schema1:isBasedOn "https://github.com/AnnaSyme/workflow-partial-gstacks-populations.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Partial ref-guided workflow - gstacks and pops" ; + schema1:sdDatePublished "2024-07-12 13:35:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/352/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14430 ; + schema1:creator ; + schema1:dateCreated "2022-05-31T08:12:30Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """# workflow-partial-gstacks-populations\r +\r +These workflows are part of a set designed to work for RAD-seq data on the Galaxy platform, using the tools from the Stacks program. \r +\r +Galaxy Australia: https://usegalaxy.org.au/\r +\r +Stacks: http://catchenlab.life.illinois.edu/stacks/\r +\r +This workflow is part of the reference-guided stacks workflow, https://workflowhub.eu/workflows/347\r +\r + This workflow takes in bam files and a population map. \r +\r +To generate bam files see: https://workflowhub.eu/workflows/351\r +""" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Partial ref-guided workflow - gstacks and pops" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/352?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 28082 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . 
+ + a schema1:Dataset ; + schema1:datePublished "2021-11-04T15:32:26.200967" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-ivar-analysis" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-pe-illumina-artic-ivar-analysis/SARS-COV-2-ILLUMINA-AMPLICON-IVAR-PANGOLIN-NEXTCLADE" ; + schema1:sdDatePublished "2021-11-05 03:00:46 +0000" ; + schema1:softwareVersion "v0.2.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=21" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-07-12 13:19:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=21" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 18834 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:16Z" ; + schema1:dateModified "2024-06-11T12:55:16Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=21" ; + schema1:version 21 . + + a schema1:Dataset ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-ont-artic-variant-calling" ; + schema1:mainEntity ; + schema1:name "COVID-19-ARTIC-ONT (v0.2)" ; + schema1:sdDatePublished "2021-04-09 03:00:41 +0100" ; + schema1:softwareVersion "v0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 45245 ; + schema1:name "COVID-19-ARTIC-ONT" ; + schema1:programmingLanguage . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1021?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/scrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/scrnaseq" ; + schema1:sdDatePublished "2024-07-12 13:18:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1021/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8849 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:17Z" ; + schema1:dateModified "2024-06-11T12:55:17Z" ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:keywords "10x-genomics, 10xgenomics, alevin, bustools, Cellranger, kallisto, rna-seq, single-cell, star-solo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/scrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1021?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. 
Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/abcix-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.299.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_abc_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy ABC MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/299/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 102801 ; + schema1:creator , + ; + schema1:dateCreated "2023-05-03T13:54:07Z" ; + schema1:dateModified "2023-05-03T13:55:38Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/abcix-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/299?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy ABC MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + 
schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_abc_md_setup/galaxy/biobb_wf_amber_abc_md_setup.ga" ; + schema1:version 3 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/989?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/hic" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hic" ; + schema1:sdDatePublished "2024-07-12 13:21:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/989/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3921 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:54Z" ; + schema1:dateModified "2024-06-11T12:54:54Z" ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/989?version=7" ; + schema1:keywords "chromosome-conformation-capture, Hi-C" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hic" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/989?version=2" ; + schema1:version 2 . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 2771028 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. 
The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. 
Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. \r +- At the **biological level**, it is important to link observed **conformational ensembles**, to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDKe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.486.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_flexdyn" ; + schema1:license "other-open" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein conformational ensembles generation" ; + schema1:sdDatePublished "2024-07-12 13:33:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/486/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 108347 ; + schema1:creator , + ; + schema1:dateCreated "2023-07-26T09:35:19Z" ; + schema1:dateModified "2023-07-26T09:36:15Z" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. 
Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. 
\r +- At the **biological level**, it is important to link observed **conformational ensembles**, to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDKe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/486?version=2" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "" ; + schema1:name "Jupyter Notebook Protein conformational ensembles generation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_flexdyn/blob/main/biobb_wf_flexdyn/notebooks/biobb_wf_flexdyn.ipynb" ; + schema1:version 2 . + + a schema1:Dataset ; + schema1:datePublished "2023-09-14T22:03:45.620920" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/rnaseq-sr" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "rnaseq-sr/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.11" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1021?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/scrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/scrnaseq" ; + schema1:sdDatePublished "2024-07-12 13:19:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1021/ro_crate?version=8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9948 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:17Z" ; + schema1:dateModified "2024-06-11T12:55:17Z" ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:keywords "10x-genomics, 10xgenomics, alevin, bustools, Cellranger, kallisto, rna-seq, single-cell, star-solo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/scrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1021?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.825.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_ligand_parameterization/docker" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Docker GMX Notebook Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-07-12 13:23:30 +0100" ; + schema1:url "https://workflowhub.eu/workflows/825/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 794 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-22T10:54:09Z" ; + schema1:dateModified "2024-05-22T13:39:14Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Docker GMX Notebook Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + 
schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_ligand_parameterization/docker/Dockerfile" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Deprecated" ; + schema1:description "A pipeline for mapping, calling, and annotation of SARS-CoV2 variants." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/105?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ENA SARS-CoV2 Variant Calling" ; + schema1:sdDatePublished "2024-07-12 13:26:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/105/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8162 ; + schema1:creator ; + schema1:dateCreated "2021-02-15T09:19:44Z" ; + schema1:dateModified "2023-11-24T10:02:53Z" ; + schema1:description "A pipeline for mapping, calling, and annotation of SARS-CoV2 variants." ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "ENA SARS-CoV2 Variant Calling" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/105?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "SLAMseq analysis using Slamdunk with various T>C conversion quantifications and QC" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1022?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/slamseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/slamseq" ; + schema1:sdDatePublished "2024-07-12 13:18:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1022/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4642 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:18Z" ; + schema1:dateModified "2024-06-11T12:55:18Z" ; + schema1:description "SLAMseq analysis using Slamdunk with various T>C conversion quantifications and QC" ; + schema1:keywords "differential-expression, quantseq, slamseq, Transcriptomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/slamseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1022?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """A CWL-based pipeline for processing RNA-Seq data (FASTQ format) and performing differential gene/transcript expression analysis. \r +\r +On the respective GitHub folder are available:\r +\r +- The CWL wrappers for the workflow\r +- A pre-configured YAML template, based on validation analysis of publicly available HTS data\r +- A table of metadata (``mrna_cll_subsets_phenotypes.csv``), based on the same validation analysis, to serve as an input example for the design of comparisons during differential expression analysis\r +\r +Briefly, the workflow performs the following steps:\r +\r +1. Quality control of Illumina reads (FastQC)\r +2. Trimming of the reads (e.g., removal of adapter and/or low quality sequences) (Trim galore)\r +3. (Optional) custom processing of the reads using FASTA/Q Trimmer (part of the FASTX-toolkit) \r +4. Mapping to reference genome (HISAT2)\r +5. Convertion of mapped reads from SAM (Sequence Alignment Map) to BAM (Binary Alignment Map) format (samtools)\r +6. 
Sorting mapped reads based on chromosomal coordinates (samtools)\r +\r +Subsequently, two independent workflows are implemented for differential expression analysis at the transcript and gene level. \r +\r +**First**, following the [reference protocol](https://doi.org/10.1038/nprot.2016.095) for HISAT, StringTie and Ballgown transcript expression analysis, StringTie along with a reference transcript annotation GTF (Gene Transfer Format) file (if one is available) is used to:\r +\r +- Assemble transcripts for each RNA-Seq sample using the previous read alignments (BAM files)\r +- Generate a global, non-redundant set of transcripts observed in any of the RNA-Seq samples\r +- Estimate transcript abundances and generate read coverage tables for each RNA-Seq sample, based on the global, merged set of transcripts (rather than the reference) which is observed across all samples\r +\r +Ballgown program is then used to load the coverage tables generated in the previous step and perform statistical analyses for differential expression at the transcript level. Notably, the StringTie - Ballgown protocol applied here was selected to include potentially novel transcripts in the analysis. \r +\r +**Second**, featureCounts is used to count reads that are mapped to selected genomic features, in this case genes by default, and generate a table of read counts per gene and sample. This table is passed as input to DESeq2 to perform differential expression analysis at the gene level. 
Both Ballgown and DESeq2 R scripts, along with their respective CWL wrappers, were designed to receive as input various parameters, such as experimental design, contrasts of interest, numeric thresholds, and hidden batch effects.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.524.1" ; + schema1:isBasedOn "https://github.com/BiodataAnalysisGroup/CWL_HTS_pipelines/tree/main/RNA_Seq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL-based RNA-Seq workflow" ; + schema1:sdDatePublished "2024-07-12 13:32:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/524/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 24199 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-07-05T08:44:44Z" ; + schema1:dateModified "2023-07-05T09:16:36Z" ; + schema1:description """A CWL-based pipeline for processing RNA-Seq data (FASTQ format) and performing differential gene/transcript expression analysis. \r +\r +On the respective GitHub folder are available:\r +\r +- The CWL wrappers for the workflow\r +- A pre-configured YAML template, based on validation analysis of publicly available HTS data\r +- A table of metadata (``mrna_cll_subsets_phenotypes.csv``), based on the same validation analysis, to serve as an input example for the design of comparisons during differential expression analysis\r +\r +Briefly, the workflow performs the following steps:\r +\r +1. Quality control of Illumina reads (FastQC)\r +2. Trimming of the reads (e.g., removal of adapter and/or low quality sequences) (Trim galore)\r +3. (Optional) custom processing of the reads using FASTA/Q Trimmer (part of the FASTX-toolkit) \r +4. Mapping to reference genome (HISAT2)\r +5. 
Convertion of mapped reads from SAM (Sequence Alignment Map) to BAM (Binary Alignment Map) format (samtools)\r +6. Sorting mapped reads based on chromosomal coordinates (samtools)\r +\r +Subsequently, two independent workflows are implemented for differential expression analysis at the transcript and gene level. \r +\r +**First**, following the [reference protocol](https://doi.org/10.1038/nprot.2016.095) for HISAT, StringTie and Ballgown transcript expression analysis, StringTie along with a reference transcript annotation GTF (Gene Transfer Format) file (if one is available) is used to:\r +\r +- Assemble transcripts for each RNA-Seq sample using the previous read alignments (BAM files)\r +- Generate a global, non-redundant set of transcripts observed in any of the RNA-Seq samples\r +- Estimate transcript abundances and generate read coverage tables for each RNA-Seq sample, based on the global, merged set of transcripts (rather than the reference) which is observed across all samples\r +\r +Ballgown program is then used to load the coverage tables generated in the previous step and perform statistical analyses for differential expression at the transcript level. Notably, the StringTie - Ballgown protocol applied here was selected to include potentially novel transcripts in the analysis. \r +\r +**Second**, featureCounts is used to count reads that are mapped to selected genomic features, in this case genes by default, and generate a table of read counts per gene and sample. This table is passed as input to DESeq2 to perform differential expression analysis at the gene level. 
Both Ballgown and DESeq2 R scripts, along with their respective CWL wrappers, were designed to receive as input various parameters, such as experimental design, contrasts of interest, numeric thresholds, and hidden batch effects.\r +""" ; + schema1:image ; + schema1:keywords "RNASEQ, Transcriptomics, CWL, workflow" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "CWL-based RNA-Seq workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/524?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 168876 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/991?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/hlatyping" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hlatyping" ; + schema1:sdDatePublished "2024-07-12 13:18:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/991/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3272 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:55Z" ; + schema1:dateModified "2024-06-11T12:54:55Z" ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/991?version=9" ; + schema1:keywords "DNA, hla, hla-typing, immunology, optitype, personalized-medicine, rna" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hlatyping" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/991?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1025?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/taxprofiler" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/taxprofiler" ; + schema1:sdDatePublished "2024-07-12 13:18:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1025/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15361 ; + schema1:creator , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:19Z" ; + schema1:dateModified "2024-06-11T12:55:19Z" ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1025?version=10" ; + schema1:keywords "Classification, illumina, long-reads, Metagenomics, microbiome, nanopore, pathogen, Profiling, shotgun, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/taxprofiler" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1025?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.195.5" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_dna_helparms" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Structural DNA helical parameters tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/195/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 73714 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-04T15:36:51Z" ; + schema1:dateModified "2024-05-14T10:18:03Z" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/195?version=4" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Structural DNA helical parameters tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_dna_helparms/blob/main/biobb_wf_dna_helparms/notebooks/biobb_dna_helparms_tutorial.ipynb" ; + schema1:version 5 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/978?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/demultiplex" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/demultiplex" ; + schema1:sdDatePublished "2024-07-12 13:21:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/978/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7786 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:47Z" ; + schema1:dateModified "2024-06-11T12:54:47Z" ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/978?version=7" ; + schema1:keywords "bases2fastq, bcl2fastq, demultiplexing, elementbiosciences, illumina" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/demultiplex" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/978?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=11" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-07-12 13:21:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=11" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11079 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:50Z" ; + schema1:dateModified "2024-06-11T12:54:50Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=11" ; + schema1:version 11 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 83280 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 6224 ; + schema1:dateModified "2020-09-21T10:34:27" ; + schema1:name "compss.txt" ; + schema1:sdDatePublished "2023-12-15T14:53:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2728 ; + schema1:dateModified "2023-12-15T14:53:16" ; + schema1:name "result.txt" ; + schema1:sdDatePublished "2023-12-15T14:53:21+00:00" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Workflow for Metagenomics from raw reads to bins.\r +\r +**Steps:**\r +\r +* workflow_quality.cwl:\r +\r + * FastQC (control)\r +\r + * fastp (quality trimming)\r +\r + * bbmap contamination filter\r +\r +* SPAdes (Assembly)\r +\r +* QUAST (Assembly quality report)\r +\r +* BBmap (Read mapping to assembly)\r +\r +* MetaBat2 (binning)\r +\r +* CheckM (bin completeness and contamination)\r +\r +* GTDB-Tk (bin taxonomic classification)\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/64?version=10" ; + schema1:isBasedOn "https://gitlab.com/m-unlock/cwl/-/blob/master/cwl/workflows/workflow_metagenomics_binning.cwl" ; + schema1:license "CC0-1.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Metagenomics Binning Workflow" ; + schema1:sdDatePublished "2024-07-12 13:34:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/64/ro_crate?version=10" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 60154 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13774 ; + schema1:creator , + ; + schema1:dateCreated "2021-06-07T17:34:04Z" ; + schema1:dateModified "2021-06-07T17:35:51Z" ; + schema1:description """Workflow for Metagenomics from raw reads to bins.\r +\r +**Steps:**\r +\r +* workflow_quality.cwl:\r +\r + * FastQC (control)\r +\r + * fastp (quality trimming)\r +\r + * bbmap contamination filter\r +\r +* SPAdes (Assembly)\r +\r +* QUAST (Assembly quality report)\r +\r +* BBmap (Read mapping to assembly)\r +\r +* MetaBat2 (binning)\r +\r +* CheckM (bin completeness and contamination)\r +\r +* GTDB-Tk (bin taxonomic classification)\r +\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/64?version=10" ; + schema1:keywords "Metagenomics, microbial, metagenome, binning" ; + schema1:license "https://spdx.org/licenses/CC0-1.0" ; + schema1:name "Metagenomics Binning Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/64?version=10" ; + schema1:version 10 ; + ns1:input , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-07-12 13:18:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7971 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:39Z" ; + schema1:dateModified "2024-06-11T12:54:39Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# SLURM HPC Cromwell implementation of GATK4 germline variant calling pipeline\r +See the [GATK](https://gatk.broadinstitute.org/hc/en-us) website for more information on this toolset \r +## Assumptions\r +- Using hg38 human reference genome build\r +- Running using HPC/SLURM scheduling. This repo was specifically tested on Pawsey Zeus machine, primarily running in the `/scratch` partition. \r +- Starting from short-read Illumina paired-end fastq files as input\r +\r +### Dependencies\r +The following versions have been tested and work, but GATK and Cromwell are regularly updated and so one must consider whether they would like to use newer versions of these tools. \r +- BWA/0.7.15\r +- GATK v4.0.6.0\r +- SAMtools/1.5\r +- picard/2.9\r +- Python/2.7\r +- Cromwell v61\r +\r +## Quick start guide\r +### Installing and preparing environment for GATK4 with Cromwell\r +\r +1. 
Clone repository\r +```\r +git clone https://github.com/SarahBeecroft/slurmCromwellGATK4.git\r +cd slurmCromwellGATK4\r +chmod +x *.sh\r +```\r +\r +2. Install [Miniconda](https://docs.conda.io/en/latest/miniconda.html) if you haven’t already. This is best placed in your `/group` directory to avoid filling your small `/home` directory, or being purged is placed in the `/scratch` directory.\r +\r +3. Create Conda environment using the supplied conda environment file\r +\r +```\r +conda env create --file gatk4_pipeline.yml\r +```\r +\r +3. Download the necessary .jar files\r + - The Cromwell workfow orchestration engine can be downloaded from https://github.com/broadinstitute/cromwell/releases/ \r + - GATK can be downloaded from https://github.com/broadinstitute/gatk/releases. Unzip the file with `unzip` \r + - Picard can be downloaded from https://github.com/broadinstitute/picard/releases/\r +\r +\r +4. If you do not have the resource bundle files already, these need to be downloaded. In future they will be cached on Pawsey systems. The bundle data should be download from the [Google Cloud bucket](https://console.cloud.google.com/storage/browser/genomics-public-data/references/hg38/v0;tab=objects?_ga=2.98248159.1769807612.1582055494-233304531.1578854612&pli=1&prefix=&forceOnObjectsSortingFiltering=false) and not from the FTP site, which is missing various files. Refer to this handy [blog post](https://davetang.org/muse/2020/02/21/using-google-cloud-sdk-to-download-gatk-resource-bundle-files/) on how to download the resource files using Google Cloud SDK. There is a Slurm script (download_bundle.slurm) that can be used to download all hg38 files from the Google Cloud bucket. The files were downloaded in /scratch/pawsey0001/sbeecroft/hg38/v0, which needs to be moved before the data becomes purged after 30 days. 
Note that Homo_sapiens_assembly38.dbsnp138.vcf.gz was from the FTP bundle as this file could not be downloaded using the Conda version of Google Cloud SDK.\r +\r +Note that the `hg38_wgs_scattered_calling_intervals.txt` will need to be to generated using the following:\r +\r +```\r +cd \r +find `pwd` -name "scattered.interval_list" -print | sort > hg38_wgs_scattered_calling_intervals.txt\r +```\r +\r +These files are required for Multisample_Fastq_to_Gvcf_GATK4.\r +\r +```\r +Homo_sapiens_assembly38.dict\r +Homo_sapiens_assembly38.fasta\r +Homo_sapiens_assembly38.fasta.fai\r +Homo_sapiens_assembly38.fasta.64.alt\r +Homo_sapiens_assembly38.fasta.64.amb\r +Homo_sapiens_assembly38.fasta.64.ann\r +Homo_sapiens_assembly38.fasta.64.bwt\r +Homo_sapiens_assembly38.fasta.64.pac\r +Homo_sapiens_assembly38.fasta.64.sa\r +Homo_sapiens_assembly38.fasta.amb\r +Homo_sapiens_assembly38.fasta.ann\r +Homo_sapiens_assembly38.fasta.bwt\r +Homo_sapiens_assembly38.fasta.pac\r +Homo_sapiens_assembly38.fasta.sa\r +Homo_sapiens_assembly38.dbsnp138.vcf.gz (needs to be gunzipped)\r +Homo_sapiens_assembly38.dbsnp138.vcf.idx\r +Mills_and_1000G_gold_standard.indels.hg38.vcf.gz\r +Mills_and_1000G_gold_standard.indels.hg38.vcf.gz.tbi\r +Homo_sapiens_assembly38.dbsnp138.vcf\r +Homo_sapiens_assembly38.dbsnp138.vcf.idx\r +Homo_sapiens_assembly38.known_indels.vcf.gz\r +Homo_sapiens_assembly38.known_indels.vcf.gz.tbi\r +```\r +\r +These files are required for Multisample_jointgt_GATK4.\r +\r +```\r +wgs_evaluation_regions.hg38.interval_list\r +hg38.custom_100Mb.intervals\r +Homo_sapiens_assembly38.dbsnp138.vcf\r +Homo_sapiens_assembly38.dbsnp138.vcf.idx\r +1000G_phase1.snps.high_confidence.hg38.vcf.gz\r +1000G_phase1.snps.high_confidence.hg38.vcf.gz.tbi\r +1000G_omni2.5.hg38.vcf.gz\r +1000G_omni2.5.hg38.vcf.gz.tbi\r +Axiom_Exome_Plus.genotypes.all_populations.poly.hg38.vcf.gz\r +Axiom_Exome_Plus.genotypes.all_populations.poly.hg38.vcf.gz.tbi\r +hapmap_3.3.hg38.vcf.gz\r +hapmap_3.3.hg38.vcf.gz.tbi\r 
+```\r +\r +\r +5. Set up the config files. Files that you need to edit with the correct paths to your data/jar files or other specific configurations are:\r + - `Multisample_Fastq_to_Gvcf_GATK4_inputs_hg38.json`\r + - `Multisample_jointgt_GATK4_inputs_hg38.json`\r + - both json files will need the correct paths to your reference file locations, and the file specifying your inputs i.e. `samples.txt` or `gvcfs.txt`\r + - `samples.txt`\r + - `gvcfs.txt`\r + - These are the sample input files (tab separated)\r + - The format for samples.txt is sampleID, sampleID_readgroup, path_to_fastq_R1_file, path_to_fastq_R2_file,\r + - The format for gvcfs.txt is sample ID, gvcf, gvcf .tbi index file\r + - Examples are included in this repo\r + - NOTE: Having tabs, not spaces, is vital for parsing the file. Visual studio code tends to introduce spaces, so if you are having issues, check the file with another text editor such as sublime. \r + - `launch_cromwell.sh`\r + - `launch_jointgt.sh`\r + - These are the scripts which launch the pipeline. \r + - `launch_cromwell.sh` launches the fastq to gvcf stage\r + - `launch_jointgt.sh` launches the gvcf joint genotyping to cohort vcf step. This is performed when you have run all samples through the fastq to gvcf stage.\r + - Check the paths and parameters make sense for your machine\r + - `slurm.conf`\r + - the main options here relate to the job scheduler. If you are running on Zeus at Pawsey, you should not need to alter these parameters.\r + - `cromwell.options`\r + - `cromwell.options` requires editing to provide the directory where you would like the final workflow outputs to be written\r + - `Multisample_Fastq_to_Gvcf_GATK4.wdl`\r + - `ruddle_fastq_to_gvcf_single_sample_gatk4.wdl`\r + - The paths to your jar files will need to be updated\r + - The path to your conda `activate` binary will need to be updated (e.g. `/group/projectID/userID/miniconda/bin/activate`)\r +\r +6. Launch the job using `sbatch launch_cromwell.sh`. 
When that has completed successfully, you can launch the second stage of the pipeline (joint calling) with `sbatch launch_jointgt.sh`.\r +\r +### Overview of the steps in `Multisample_Fastq_to_Gvcf_GATK4.wdl`\r +This part of the pipeline takes short-read, Illumina paired-end fastq files as the input. The outputs generated are sorted, duplicate marked bam files and their indices, duplicate metric information, and a GVCF file for each sample. The GVCF files are used as input for the second part of the pipeline (joint genotyping).\r +\r +```\r +FastqToUbam\r +GetBwaVersion\r +SamToFastqAndBwaMem\r +MergeBamAlignment\r +SortAndFixTags\r +MarkDuplicates\r +CreateSequenceGroupingTSV\r +BaseRecalibrator\r +GatherBqsrReports\r +ApplyBQSR\r +GatherBamFiles\r +HaplotypeCaller\r +MergeGVCFs\r +```\r +\r +### Overview of the steps in `Multisample_jointgt_GATK4.wdl`\r +This part of the pipeline takes GVCF files (one per sample), and performs joint genotyping across all of the provided samples. This means that old previously generated GVCFs can be joint-called with new GVCFs whenever you need to add new samples. The key output from this is a joint-genotyped, cohort-wide VCF file.\r +\r +```\r +GetNumberOfSamples\r +ImportGVCFs\r +GenotypeGVCFs\r +HardFilterAndMakeSitesOnlyVcf\r +IndelsVariantRecalibrator\r +SNPsVariantRecalibratorCreateModel\r +SNPsVariantRecalibrator\r +GatherTranches\r +ApplyRecalibration\r +GatherVcfs\r +CollectVariantCallingMetrics\r +GatherMetrics\r +DynamicallyCombineIntervals\r +```\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/144?version=1" ; + schema1:isBasedOn "https://github.com/SarahBeecroft/slurmCromwellGATK4" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for GATK4 Fastq to joint-called cohort VCF with Cromwell on SLURM" ; + schema1:sdDatePublished "2024-07-12 13:36:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/144/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 29099 ; + schema1:dateCreated "2021-08-17T04:42:40Z" ; + schema1:dateModified "2023-01-16T13:51:44Z" ; + schema1:description """# SLURM HPC Cromwell implementation of GATK4 germline variant calling pipeline\r +See the [GATK](https://gatk.broadinstitute.org/hc/en-us) website for more information on this toolset \r +## Assumptions\r +- Using hg38 human reference genome build\r +- Running using HPC/SLURM scheduling. This repo was specifically tested on Pawsey Zeus machine, primarily running in the `/scratch` partition. \r +- Starting from short-read Illumina paired-end fastq files as input\r +\r +### Dependencies\r +The following versions have been tested and work, but GATK and Cromwell are regularly updated and so one must consider whether they would like to use newer versions of these tools. \r +- BWA/0.7.15\r +- GATK v4.0.6.0\r +- SAMtools/1.5\r +- picard/2.9\r +- Python/2.7\r +- Cromwell v61\r +\r +## Quick start guide\r +### Installing and preparing environment for GATK4 with Cromwell\r +\r +1. Clone repository\r +```\r +git clone https://github.com/SarahBeecroft/slurmCromwellGATK4.git\r +cd slurmCromwellGATK4\r +chmod +x *.sh\r +```\r +\r +2. Install [Miniconda](https://docs.conda.io/en/latest/miniconda.html) if you haven’t already. This is best placed in your `/group` directory to avoid filling your small `/home` directory, or being purged is placed in the `/scratch` directory.\r +\r +3. Create Conda environment using the supplied conda environment file\r +\r +```\r +conda env create --file gatk4_pipeline.yml\r +```\r +\r +3. Download the necessary .jar files\r + - The Cromwell workfow orchestration engine can be downloaded from https://github.com/broadinstitute/cromwell/releases/ \r + - GATK can be downloaded from https://github.com/broadinstitute/gatk/releases. 
Unzip the file with `unzip` \r + - Picard can be downloaded from https://github.com/broadinstitute/picard/releases/\r +\r +\r +4. If you do not have the resource bundle files already, these need to be downloaded. In future they will be cached on Pawsey systems. The bundle data should be downloaded from the [Google Cloud bucket](https://console.cloud.google.com/storage/browser/genomics-public-data/references/hg38/v0;tab=objects?_ga=2.98248159.1769807612.1582055494-233304531.1578854612&pli=1&prefix=&forceOnObjectsSortingFiltering=false) and not from the FTP site, which is missing various files. Refer to this handy [blog post](https://davetang.org/muse/2020/02/21/using-google-cloud-sdk-to-download-gatk-resource-bundle-files/) on how to download the resource files using Google Cloud SDK. There is a Slurm script (download_bundle.slurm) that can be used to download all hg38 files from the Google Cloud bucket. The files were downloaded in /scratch/pawsey0001/sbeecroft/hg38/v0, which needs to be moved before the data becomes purged after 30 days. 
Note that Homo_sapiens_assembly38.dbsnp138.vcf.gz was from the FTP bundle as this file could not be downloaded using the Conda version of Google Cloud SDK.\r +\r +Note that the `hg38_wgs_scattered_calling_intervals.txt` will need to be generated using the following:\r +\r +```\r +cd \r +find `pwd` -name "scattered.interval_list" -print | sort > hg38_wgs_scattered_calling_intervals.txt\r +```\r +\r +These files are required for Multisample_Fastq_to_Gvcf_GATK4.\r +\r +```\r +Homo_sapiens_assembly38.dict\r +Homo_sapiens_assembly38.fasta\r +Homo_sapiens_assembly38.fasta.fai\r +Homo_sapiens_assembly38.fasta.64.alt\r +Homo_sapiens_assembly38.fasta.64.amb\r +Homo_sapiens_assembly38.fasta.64.ann\r +Homo_sapiens_assembly38.fasta.64.bwt\r +Homo_sapiens_assembly38.fasta.64.pac\r +Homo_sapiens_assembly38.fasta.64.sa\r +Homo_sapiens_assembly38.fasta.amb\r +Homo_sapiens_assembly38.fasta.ann\r +Homo_sapiens_assembly38.fasta.bwt\r +Homo_sapiens_assembly38.fasta.pac\r +Homo_sapiens_assembly38.fasta.sa\r +Homo_sapiens_assembly38.dbsnp138.vcf.gz (needs to be gunzipped)\r +Homo_sapiens_assembly38.dbsnp138.vcf.idx\r +Mills_and_1000G_gold_standard.indels.hg38.vcf.gz\r +Mills_and_1000G_gold_standard.indels.hg38.vcf.gz.tbi\r +Homo_sapiens_assembly38.dbsnp138.vcf\r +Homo_sapiens_assembly38.dbsnp138.vcf.idx\r +Homo_sapiens_assembly38.known_indels.vcf.gz\r +Homo_sapiens_assembly38.known_indels.vcf.gz.tbi\r +```\r +\r +These files are required for Multisample_jointgt_GATK4.\r +\r +```\r +wgs_evaluation_regions.hg38.interval_list\r +hg38.custom_100Mb.intervals\r +Homo_sapiens_assembly38.dbsnp138.vcf\r +Homo_sapiens_assembly38.dbsnp138.vcf.idx\r +1000G_phase1.snps.high_confidence.hg38.vcf.gz\r +1000G_phase1.snps.high_confidence.hg38.vcf.gz.tbi\r +1000G_omni2.5.hg38.vcf.gz\r +1000G_omni2.5.hg38.vcf.gz.tbi\r +Axiom_Exome_Plus.genotypes.all_populations.poly.hg38.vcf.gz\r +Axiom_Exome_Plus.genotypes.all_populations.poly.hg38.vcf.gz.tbi\r +hapmap_3.3.hg38.vcf.gz\r +hapmap_3.3.hg38.vcf.gz.tbi\r 
+```\r +\r +\r +5. Set up the config files. Files that you need to edit with the correct paths to your data/jar files or other specific configurations are:\r + - `Multisample_Fastq_to_Gvcf_GATK4_inputs_hg38.json`\r + - `Multisample_jointgt_GATK4_inputs_hg38.json`\r + - both json files will need the correct paths to your reference file locations, and the file specifying your inputs i.e. `samples.txt` or `gvcfs.txt`\r + - `samples.txt`\r + - `gvcfs.txt`\r + - These are the sample input files (tab separated)\r + - The format for samples.txt is sampleID, sampleID_readgroup, path_to_fastq_R1_file, path_to_fastq_R2_file,\r + - The format for gvcfs.txt is sample ID, gvcf, gvcf .tbi index file\r + - Examples are included in this repo\r + - NOTE: Having tabs, not spaces, is vital for parsing the file. Visual studio code tends to introduce spaces, so if you are having issues, check the file with another text editor such as sublime. \r + - `launch_cromwell.sh`\r + - `launch_jointgt.sh`\r + - These are the scripts which launch the pipeline. \r + - `launch_cromwell.sh` launches the fastq to gvcf stage\r + - `launch_jointgt.sh` launches the gvcf joint genotyping to cohort vcf step. This is performed when you have run all samples through the fastq to gvcf stage.\r + - Check the paths and parameters make sense for your machine\r + - `slurm.conf`\r + - the main options here relate to the job scheduler. If you are running on Zeus at Pawsey, you should not need to alter these parameters.\r + - `cromwell.options`\r + - `cromwell.options` requires editing to provide the directory where you would like the final workflow outputs to be written\r + - `Multisample_Fastq_to_Gvcf_GATK4.wdl`\r + - `ruddle_fastq_to_gvcf_single_sample_gatk4.wdl`\r + - The paths to your jar files will need to be updated\r + - The path to your conda `activate` binary will need to be updated (e.g. `/group/projectID/userID/miniconda/bin/activate`)\r +\r +6. Launch the job using `sbatch launch_cromwell.sh`. 
When that has completed successfully, you can launch the second stage of the pipeline (joint calling) with `sbatch launch_jointgt.sh`.\r +\r +### Overview of the steps in `Multisample_Fastq_to_Gvcf_GATK4.wdl`\r +This part of the pipeline takes short-read, Illumina paired-end fastq files as the input. The outputs generated are sorted, duplicate marked bam files and their indices, duplicate metric information, and a GVCF file for each sample. The GVCF files are used as input for the second part of the pipeline (joint genotyping).\r +\r +```\r +FastqToUbam\r +GetBwaVersion\r +SamToFastqAndBwaMem\r +MergeBamAlignment\r +SortAndFixTags\r +MarkDuplicates\r +CreateSequenceGroupingTSV\r +BaseRecalibrator\r +GatherBqsrReports\r +ApplyBQSR\r +GatherBamFiles\r +HaplotypeCaller\r +MergeGVCFs\r +```\r +\r +### Overview of the steps in `Multisample_jointgt_GATK4.wdl`\r +This part of the pipeline takes GVCF files (one per sample), and performs joint genotyping across all of the provided samples. This means that old previously generated GVCFs can be joint-called with new GVCFs whenever you need to add new samples. The key output from this is a joint-genotyped, cohort-wide VCF file.\r +\r +```\r +GetNumberOfSamples\r +ImportGVCFs\r +GenotypeGVCFs\r +HardFilterAndMakeSitesOnlyVcf\r +IndelsVariantRecalibrator\r +SNPsVariantRecalibratorCreateModel\r +SNPsVariantRecalibrator\r +GatherTranches\r +ApplyRecalibration\r +GatherVcfs\r +CollectVariantCallingMetrics\r +GatherMetrics\r +DynamicallyCombineIntervals\r +```\r +""" ; + schema1:isPartOf ; + schema1:keywords "GATK4, Genomics, Alignment, variant_calling, SNPs, INDELs" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "GATK4 Fastq to joint-called cohort VCF with Cromwell on SLURM" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/144?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for processing of 10xGenomics single cell rnaseq data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1021?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/scrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/scrnaseq" ; + schema1:sdDatePublished "2024-07-12 13:18:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1021/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4707 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:17Z" ; + schema1:dateModified "2024-06-11T12:55:17Z" ; + schema1:description "Pipeline for processing of 10xGenomics single cell rnaseq data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:keywords "10x-genomics, 10xgenomics, alevin, bustools, Cellranger, kallisto, rna-seq, single-cell, star-solo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/scrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1021?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=16" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-07-12 13:22:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=16" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9441 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:12Z" ; + schema1:dateModified "2024-06-11T12:55:12Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=16" ; + schema1:version 16 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Purge Phased assembly of duplications and overlaps. Include purge steps for Primary and Alternate assemblies." ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/321?version=1" ; + schema1:isBasedOn "https://github.com/Delphine-L/iwc/tree/VGP/workflows/VGP-assembly-v2" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for VGP purge assembly with purge_dups pipeline" ; + schema1:sdDatePublished "2024-07-12 13:35:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/321/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 11049 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 60777 ; + schema1:creator ; + schema1:dateCreated "2022-04-05T22:16:05Z" ; + schema1:dateModified "2023-01-16T13:59:31Z" ; + schema1:description "Purge Phased assembly of duplications and overlaps. 
Include purge steps for Primary and Alternate assemblies." ; + schema1:image ; + schema1:keywords "vgp, Assembly, Galaxy" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "VGP purge assembly with purge_dups pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/321?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 29874 . + + a schema1:Dataset ; + schema1:datePublished "2022-10-14T09:09:18.098730" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-variation-reporting" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-variation-reporting/COVID-19-VARIATION-REPORTING" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.11" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "call and score variants from WGS/WES of rare disease patients" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1014?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/raredisease" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/raredisease" ; + schema1:sdDatePublished "2024-07-12 13:19:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1014/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12171 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:08Z" ; + schema1:dateModified "2024-06-11T12:55:08Z" ; + schema1:description "call and score variants from WGS/WES of rare disease patients" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1014?version=5" ; + schema1:keywords "diagnostics, rare-disease, snv, structural-variants, variant-annotation, variant-calling, wes, WGS" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/raredisease" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1014?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 
2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.132.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Amber Constant pH MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:33:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/132/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 53577 ; + schema1:creator , + ; + schema1:dateCreated "2021-06-30T12:23:58Z" ; + schema1:dateModified "2022-09-15T12:33:02Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache 
License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/132?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Amber Constant pH MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_amber_md_setup/8bcda75405183a84476acd7ba733e4cb666ce397/biobb_wf_amber_md_setup/notebooks/mdsetup_ph/biobb_amber_CpHMD_notebook.ipynb" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-06-24T10:11:39.189641" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/quality-and-contamination-control" ; + schema1:license "GPL-3.0-or-later" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "quality-and-contamination-control/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """Workflow for Creating a large disease network from various datasets and databases for IBM, and applying the active subnetwork identification method MOGAMUN.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/681?version=4" ; + schema1:isBasedOn "https://github.com/jdwijnbergen/IBM_ASI_workflow.git" ; + schema1:license "notspecified" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Inclusion Body Myositis Active Subnetwork Identification Workflow" ; + schema1:sdDatePublished "2024-07-12 13:24:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/681/ro_crate?version=4" . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 27177 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6953 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-27T16:00:47Z" ; + schema1:dateModified "2023-11-27T16:00:47Z" ; + schema1:description """Workflow for Creating a large disease network from various datasets and databases for IBM, and applying the active subnetwork identification method MOGAMUN.\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/681?version=6" ; + schema1:keywords "Bioinformatics, CWL, Genomics, Transcriptomics, Protein-Protein Interaction" ; + schema1:license "https://choosealicense.com/no-permission/" ; + schema1:name "Inclusion Body Myositis Active Subnetwork Identification Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/681?version=4" ; + schema1:version 4 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Given a set of pathways generated by RetroPath2.0, this workflow informs the user as to the theoretically best performing ones based on four criteria: FBA, thermodynamic feasibility, length of the pathway, and reaction rule score." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/22?version=1" ; + schema1:isBasedOn "https://galaxy-synbiocad.org/u/mdulac/w/rpanalysis-3" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Pathway Analysis" ; + schema1:sdDatePublished "2024-07-12 13:37:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/22/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5365 ; + schema1:creator ; + schema1:dateCreated "2020-05-29T10:01:25Z" ; + schema1:dateModified "2023-01-16T13:41:28Z" ; + schema1:description "Given a set of pathways generated by RetroPath2.0, this workflow informs the user as to the theoretically best performing ones based on four criteria: FBA, thermodynamic feasibility, length of the pathway, and reaction rule score." ; + schema1:keywords "Retrosynthesis" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Pathway Analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/22?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Mutations Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.290.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_md_setup_mutations/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein MD Setup tutorial with mutations" ; + schema1:sdDatePublished "2024-07-12 13:33:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/290/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7412 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-23T08:34:35Z" ; + schema1:dateModified "2023-01-16T13:58:35Z" ; + schema1:description """# Mutations Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/290?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein MD Setup tutorial with mutations" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_md_setup_mutations/python/workflow.py" ; + schema1:version 2 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-13T14:30:43.588577" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-decontamination-VGP9" ; + schema1:license "BSD-3-Clause" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-decontamination-VGP9/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.17" . + + a schema1:Dataset ; + schema1:datePublished "2023-01-20T14:55:06.084507" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "cutandrun/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.3" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# skim2rrna\r +\r +**skim2rrna** is a snakemake pipeline for the batch assembly, annotation, and phylogenetic analysis of ribosomal genes from low coverage genome skims. The pipeline was designed to work with sequence data from museum collections. However, it should also work with genome skims from recently collected samples.\r +\r +## Contents\r + - [Setup](#setup)\r + - [Example data](#example-data)\r + - [Input](#input)\r + - [Output](#output)\r + - [Filtering contaminants](#filtering-contaminants)\r + - [Assembly and annotation only](#assembly-and-annotation-only)\r + - [Running your own data](#running-your-own-data)\r + - [Getting help](#getting-help)\r + - [Citations](#citations)\r +\r +## Setup\r +\r +The pipeline is written in Snakemake and uses conda and singularity to install the necessary tools.\r +\r +It is *strongly recommended* to install conda using Mambaforge. 
See details here https://snakemake.readthedocs.io/en/stable/getting_started/installation.html\r +\r +Once conda is installed, you can pull the github repo and set up the base conda environment.\r +\r +```\r +# get github repo\r +git clone https://github.com/o-william-white/skim2rrna\r +\r +# change dir\r +cd skim2rrna\r +\r +# setup conda env\r +conda env create -n snakemake -f workflow/envs/conda_env.yaml\r +```\r +\r +
\r +\r +
\r +\r +## Example data\r +\r +Before you run your own data, it is recommended to run the example datasets provided . This will confirm there are no user-specific issues with the setup and it also installs all the dependencies. The example data includes simulated ribosomal data from 25 different butterfly species. \r +\r +To run the example data, use the code below. **Note that you need to change the user email to your own address**. The email is required by the Bio Entrez package to fetch reference sequences. The first time you run the pipeline, it will take some time to install each of the conda environments, so it is a good time to take a tea break :).\r +```\r +conda activate snakemake\r +\r +snakemake \\\r + --cores 4 \\\r + --use-conda \\\r + --use-singularity \\ \r + --config user_email=user@example_email.com\r +```\r +\r +
\r +\r +
\r +\r +## Input\r +\r +Snakemake requires a `config.yaml` and `samples.csv` to define input parameters and sequence data for each sample. \r +\r +For the example data provided, the config file is located here `config/config.yaml` and it looks like this:\r +```\r +# path to sample sheet csv with columns for ID,forward,reverse,taxid,seed,gene\r +samples: config/samples.csv\r +\r +# user email\r +user_email: user@example_email.com\r +\r +# getorganelle reference (go_fetch, custom)\r +go_reference: go_fetch\r +\r +# forward adapter\r +forward_adapter: AGATCGGAAGAGCACACGTCTGAACTCCAGTCA\r +\r +# reverse adapter\r +reverse_adapter: AGATCGGAAGAGCGTCGTGTAGGGAAAGAGTGT\r +\r +# fastp deduplication (True/False)\r +fastp_dedup: True\r +\r +# barrnap kindgom (Bacteria:bac, Archaea:arc, Eukaryota:euk, None:NA)\r +barrnap_kingdom: euk\r +\r +# alignment trimming method to use (gblocks or clipkit)\r +alignment_trim: gblocks\r +\r +# alignment missing data threshold for alignment (0.0 - 1.0)\r +missing_threshold: 0.5\r +\r +# name of outgroup sample (optional)\r +# use "NA" if there is no obvious outgroup\r +# if more than one outgroup use a comma separated list i.e. 
"sampleA,sampleB"\r +outgroup: Eurema_blanda\r +\r +# plot dimensions (cm)\r +plot_height: 20\r +plot_width: 20\r +```\r +\r +The example samples.csv file is located here `config/samples.csv` and it looks like this (note that the seed and gene columns are only required if the custom getorganelle database option is specified in the config file):\r +\r +\r + ID | forward | reverse | taxid | seed | gene \r +----|---------|---------|-------|------|------\r +Adelpha_iphiclus | .test/reads/Adelpha_iphiclus_1.fq.gz | .test/reads/Adelpha_iphiclus_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Anartia_jatrophae_saturata | .test/reads/Anartia_jatrophae_saturata_1.fq.gz | .test/reads/Anartia_jatrophae_saturata_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Araschnia_levana | .test/reads/Araschnia_levana_1.fq.gz | .test/reads/Araschnia_levana_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Auzakia_danava | .test/reads/Auzakia_danava_1.fq.gz | .test/reads/Auzakia_danava_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Baeotus_beotus | .test/reads/Baeotus_beotus_1.fq.gz | .test/reads/Baeotus_beotus_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Catacroptera_cloanthe | .test/reads/Catacroptera_cloanthe_1.fq.gz | .test/reads/Catacroptera_cloanthe_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Chalinga_pratti | .test/reads/Chalinga_pratti_1.fq.gz | .test/reads/Chalinga_pratti_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Diaethria_gabaza_eupepla | .test/reads/Diaethria_gabaza_eupepla_1.fq.gz | .test/reads/Diaethria_gabaza_eupepla_2.fq.gz | 127268 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Doleschallia_melana | .test/reads/Doleschallia_melana_1.fq.gz | .test/reads/Doleschallia_melana_2.fq.gz | 
40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Eurema_blanda | .test/reads/Eurema_blanda_1.fq.gz | .test/reads/Eurema_blanda_2.fq.gz | 42450 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Hypolimnas_usambara | .test/reads/Hypolimnas_usambara_1.fq.gz | .test/reads/Hypolimnas_usambara_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Junonia_villida | .test/reads/Junonia_villida_1.fq.gz | .test/reads/Junonia_villida_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Kallima_paralekta | .test/reads/Kallima_paralekta_1.fq.gz | .test/reads/Kallima_paralekta_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Kallimoides_rumia | .test/reads/Kallimoides_rumia_1.fq.gz | .test/reads/Kallimoides_rumia_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Litinga_cottini | .test/reads/Litinga_cottini_1.fq.gz | .test/reads/Litinga_cottini_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Mallika_jacksoni | .test/reads/Mallika_jacksoni_1.fq.gz | .test/reads/Mallika_jacksoni_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Moduza_procris | .test/reads/Moduza_procris_1.fq.gz | .test/reads/Moduza_procris_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Parasarpa_zayla | .test/reads/Parasarpa_zayla_1.fq.gz | .test/reads/Parasarpa_zayla_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Phaedyma_columella | .test/reads/Phaedyma_columella_1.fq.gz | .test/reads/Phaedyma_columella_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Precis_pelarga | .test/reads/Precis_pelarga_1.fq.gz | .test/reads/Precis_pelarga_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Protogoniomorpha_temora | 
.test/reads/Protogoniomorpha_temora_1.fq.gz | .test/reads/Protogoniomorpha_temora_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Salamis_cacta | .test/reads/Salamis_cacta_1.fq.gz | .test/reads/Salamis_cacta_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Smyrna_blomfildia | .test/reads/Smyrna_blomfildia_1.fq.gz | .test/reads/Smyrna_blomfildia_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Tacola_larymna | .test/reads/Tacola_larymna_1.fq.gz | .test/reads/Tacola_larymna_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Yoma_algina | .test/reads/Yoma_algina_1.fq.gz | .test/reads/Yoma_algina_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +\r +\r +
\r +\r +
\r +\r +## Output\r +\r +All output files are saved to the `results` direcotry. Below is a table summarising all of the output files generated by the pipeline.\r +\r +| Directory | Description |\r +|-----------------------|---------------------------|\r +| fastqc_raw | Fastqc reports for raw input reads |\r +| fastp | Fastp reports from quality control of raw reads |\r +| fastqc_qc | Fastqc reports for quality controlled reads |\r +| go_fetch | Optional output containing reference databases used by GetOrganelle |\r +| getorganelle | GetOrganelle output with a directory for each sample |\r +| assembled_sequence | Assembled sequences selected from GetOrganelle output and renamed |\r +| seqkit | Seqkit summary of each assembly |\r +| blastn | Blastn output of each assembly |\r +| minimap | Mapping output of quality filtered reads against each assembly |\r +| blobtools | Blobtools assembly summary collating blastn and mapping output |\r +| annotations | Annotation outputs of mitos |\r +| summary | Summary per sample (seqkit stats), contig (GC content, length, coverage, taxonomy and annotations) and annotated gene counts |\r +| annotated_genes | Unaligned fasta files of annotated genes identified across all samples |\r +| mafft | Mafft aligned fasta files of annotated genes identified across all samples |\r +| mafft_filtered | Mafft aligned fasta files after the removal of sequences based on a missing data threshold |\r +| alignment_trim | Ambiguous parts of alignment removed using either gblocks or clipkit |\r +| iqtree | Iqtree phylogenetic analysis of annotated genes |\r +| plot_tree | Plots of phylogenetic trees |\r +\r +
\r +\r +
\r +\r +## Filtering contaminants\r +\r +If you are working with museum collections, it is possible that you may assemble and annotate sequences from contaminant/non-target species. *Contaminant sequences can be identified based on the blast search output or unusual placement in the phylogenetic trees* (see blobtools and plot_tree outputs). \r +\r +A supplementary python script `format_alignments.py `is provided to remove putative contaminants from alignments, and format the alignments for downstream phylogenetic analysis.\r +\r +For example, let's say we wanted to remove all sequences from the sample "Kallima_paralekta" and 5.8S ribosomal sequence, you could run the script as shown below. The script works by identifying and removing sequences that have names with `Kallima_paralekta` or `5_8S` in the sequence names. The filtered alignments are written to a new output directory `filter_alignments_output`.\r +\r +```\r +python workflow/scripts/format_alignments.py \\\r + --input results/mafft_filtered/ \\\r + --cont Kallima_paralekta 5_8S \\\r + --output filter_alignments_output\r +```\r +\r +*Note that the output fasta files have been reformatted so each alignment file is named after the gene and each sequence is named after the sample.* This is useful if you would like to run our related pipeline **gene2phylo** for further phylogenetic analyses.\r +\r +
\r +\r +
\r +\r +## Assembly and annotation only\r +\r +If you are only interested in the assembly of ribosomal sequences and annotation of genes without the phylogenetic analysis, you can stop the pipeline from running the gene alignment and phylogenetic analyses using the `--omit-from` parameter.\r +```\r +snakemake \\\r + --cores 4 \\\r + --use-conda \\\r + --use-singularity \\\r + --config user_email=user@example_email.com \\\r + --omit-from mafft \r +```\r +\r +
\r +\r +
\r +\r +## Running your own data\r +\r +The first thing you need to do is generate your own config.yaml and samples.csv files, using the files provided as a template.\r +\r +GetOrganelle requires reference data in the format of seed and gene reference fasta files. By default the pipeline uses a basic python script called go_fetch.py https://github.com/o-william-white/go_fetch to download and format reference data formatted for GetOrganelle. \r +\r +go_fetch.py works by searching NCBI based on the NCBI taxonomy specified by the taxid column in the samples.csv file. Note that the seed and gene columns in the samples.csv file are only required if you want to provide your own custom GetOrganelle seed and gene reference databases. \r +\r +You can use the default reference data for GetOrganelle, but I would recommend using custom reference databases where possible. See here for details of how to set up your own databases https://github.com/Kinggerm/GetOrganelle/wiki/FAQ#how-to-assemble-a-target-organelle-genome-using-my-own-reference\r +\r +## Getting help\r +\r +If you have any questions, please do get in touch in the issues or by email o.william.white@gmail.com\r +\r +
\r +\r +
\r +\r +## Citations\r +\r +If you use the pipeline, please cite our bioarxiv preprint: https://doi.org/10.1101/2023.08.11.552985\r +\r +Since the pipeline is a wrapper for several other bioinformatic tools we also ask that you cite the tools used by the pipeline:\r + - Fastqc https://github.com/s-andrews/FastQC\r + - Fastp https://doi.org/10.1093/bioinformatics/bty560\r + - GetOrganelle https://doi.org/10.1186/s13059-020-02154-5\r + - Blastn https://doi.org/10.1186/1471-2105-10-421\r + - Minimap2 https://doi.org/10.1093/bioinformatics/bty191\r + - Blobtools https://doi.org/10.12688/f1000research.12232.1\r + - Seqkit https://doi.org/10.1371/journal.pone.0163962\r + - MITOS2 https://doi.org/10.1016/j.ympev.2012.08.023\r + - Gblocks (default) https://doi.org/10.1093/oxfordjournals.molbev.a026334\r + - Clipkit (optional) https://doi.org/10.1371/journal.pbio.3001007\r + - Mafft https://doi.org/10.1093/molbev/mst010\r + - Iqtree https://doi.org/10.1093/molbev/msu300\r + - ete3 https://doi.org/10.1093/molbev/msw046\r + - ggtree https://doi.org/10.1111/2041-210X.12628\r +\r +
\r +\r +
\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/792?version=1" ; + schema1:isBasedOn "https://github.com/o-william-white/skim2rrna.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for skim2rrna" ; + schema1:sdDatePublished "2024-07-12 13:23:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/792/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2816 ; + schema1:dateCreated "2024-03-12T15:06:39Z" ; + schema1:dateModified "2024-03-12T15:09:08Z" ; + schema1:description """# skim2rrna\r +\r +**skim2rrna** is a snakemake pipeline for the batch assembly, annotation, and phylogenetic analysis of ribosomal genes from low coverage genome skims. The pipeline was designed to work with sequence data from museum collections. However, it should also work with genome skims from recently collected samples.\r +\r +## Contents\r + - [Setup](#setup)\r + - [Example data](#example-data)\r + - [Input](#input)\r + - [Output](#output)\r + - [Filtering contaminants](#filtering-contaminants)\r + - [Assembly and annotation only](#assembly-and-annotation-only)\r + - [Running your own data](#running-your-own-data)\r + - [Getting help](#getting-help)\r + - [Citations](#citations)\r +\r +## Setup\r +\r +The pipeline is written in Snakemake and uses conda and singularity to install the necessary tools.\r +\r +It is *strongly recommended* to install conda using Mambaforge. 
See details here https://snakemake.readthedocs.io/en/stable/getting_started/installation.html\r +\r +Once conda is installed, you can pull the github repo and set up the base conda environment.\r +\r +```\r +# get github repo\r +git clone https://github.com/o-william-white/skim2rrna\r +\r +# change dir\r +cd skim2rrna\r +\r +# setup conda env\r +conda env create -n snakemake -f workflow/envs/conda_env.yaml\r +```\r +\r +
\r +\r +
\r +\r +## Example data\r +\r +Before you run your own data, it is recommended to run the example datasets provided . This will confirm there are no user-specific issues with the setup and it also installs all the dependencies. The example data includes simulated ribosomal data from 25 different butterfly species. \r +\r +To run the example data, use the code below. **Note that you need to change the user email to your own address**. The email is required by the Bio Entrez package to fetch reference sequences. The first time you run the pipeline, it will take some time to install each of the conda environments, so it is a good time to take a tea break :).\r +```\r +conda activate snakemake\r +\r +snakemake \\\r + --cores 4 \\\r + --use-conda \\\r + --use-singularity \\ \r + --config user_email=user@example_email.com\r +```\r +\r +
\r +\r +
\r +\r +## Input\r +\r +Snakemake requires a `config.yaml` and `samples.csv` to define input parameters and sequence data for each sample. \r +\r +For the example data provided, the config file is located here `config/config.yaml` and it looks like this:\r +```\r +# path to sample sheet csv with columns for ID,forward,reverse,taxid,seed,gene\r +samples: config/samples.csv\r +\r +# user email\r +user_email: user@example_email.com\r +\r +# getorganelle reference (go_fetch, custom)\r +go_reference: go_fetch\r +\r +# forward adapter\r +forward_adapter: AGATCGGAAGAGCACACGTCTGAACTCCAGTCA\r +\r +# reverse adapter\r +reverse_adapter: AGATCGGAAGAGCGTCGTGTAGGGAAAGAGTGT\r +\r +# fastp deduplication (True/False)\r +fastp_dedup: True\r +\r +# barrnap kindgom (Bacteria:bac, Archaea:arc, Eukaryota:euk, None:NA)\r +barrnap_kingdom: euk\r +\r +# alignment trimming method to use (gblocks or clipkit)\r +alignment_trim: gblocks\r +\r +# alignment missing data threshold for alignment (0.0 - 1.0)\r +missing_threshold: 0.5\r +\r +# name of outgroup sample (optional)\r +# use "NA" if there is no obvious outgroup\r +# if more than one outgroup use a comma separated list i.e. 
"sampleA,sampleB"\r +outgroup: Eurema_blanda\r +\r +# plot dimensions (cm)\r +plot_height: 20\r +plot_width: 20\r +```\r +\r +The example samples.csv file is located here `config/samples.csv` and it looks like this (note that the seed and gene columns are only required if the custom getorganelle database option is specified in the config file):\r +\r +\r + ID | forward | reverse | taxid | seed | gene \r +----|---------|---------|-------|------|------\r +Adelpha_iphiclus | .test/reads/Adelpha_iphiclus_1.fq.gz | .test/reads/Adelpha_iphiclus_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Anartia_jatrophae_saturata | .test/reads/Anartia_jatrophae_saturata_1.fq.gz | .test/reads/Anartia_jatrophae_saturata_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Araschnia_levana | .test/reads/Araschnia_levana_1.fq.gz | .test/reads/Araschnia_levana_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Auzakia_danava | .test/reads/Auzakia_danava_1.fq.gz | .test/reads/Auzakia_danava_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Baeotus_beotus | .test/reads/Baeotus_beotus_1.fq.gz | .test/reads/Baeotus_beotus_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Catacroptera_cloanthe | .test/reads/Catacroptera_cloanthe_1.fq.gz | .test/reads/Catacroptera_cloanthe_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Chalinga_pratti | .test/reads/Chalinga_pratti_1.fq.gz | .test/reads/Chalinga_pratti_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Diaethria_gabaza_eupepla | .test/reads/Diaethria_gabaza_eupepla_1.fq.gz | .test/reads/Diaethria_gabaza_eupepla_2.fq.gz | 127268 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Doleschallia_melana | .test/reads/Doleschallia_melana_1.fq.gz | .test/reads/Doleschallia_melana_2.fq.gz | 
40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Eurema_blanda | .test/reads/Eurema_blanda_1.fq.gz | .test/reads/Eurema_blanda_2.fq.gz | 42450 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Hypolimnas_usambara | .test/reads/Hypolimnas_usambara_1.fq.gz | .test/reads/Hypolimnas_usambara_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Junonia_villida | .test/reads/Junonia_villida_1.fq.gz | .test/reads/Junonia_villida_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Kallima_paralekta | .test/reads/Kallima_paralekta_1.fq.gz | .test/reads/Kallima_paralekta_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Kallimoides_rumia | .test/reads/Kallimoides_rumia_1.fq.gz | .test/reads/Kallimoides_rumia_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Litinga_cottini | .test/reads/Litinga_cottini_1.fq.gz | .test/reads/Litinga_cottini_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Mallika_jacksoni | .test/reads/Mallika_jacksoni_1.fq.gz | .test/reads/Mallika_jacksoni_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Moduza_procris | .test/reads/Moduza_procris_1.fq.gz | .test/reads/Moduza_procris_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Parasarpa_zayla | .test/reads/Parasarpa_zayla_1.fq.gz | .test/reads/Parasarpa_zayla_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Phaedyma_columella | .test/reads/Phaedyma_columella_1.fq.gz | .test/reads/Phaedyma_columella_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Precis_pelarga | .test/reads/Precis_pelarga_1.fq.gz | .test/reads/Precis_pelarga_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Protogoniomorpha_temora | 
.test/reads/Protogoniomorpha_temora_1.fq.gz | .test/reads/Protogoniomorpha_temora_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Salamis_cacta | .test/reads/Salamis_cacta_1.fq.gz | .test/reads/Salamis_cacta_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Smyrna_blomfildia | .test/reads/Smyrna_blomfildia_1.fq.gz | .test/reads/Smyrna_blomfildia_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Tacola_larymna | .test/reads/Tacola_larymna_1.fq.gz | .test/reads/Tacola_larymna_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Yoma_algina | .test/reads/Yoma_algina_1.fq.gz | .test/reads/Yoma_algina_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +\r +\r +
\r +\r +
\r +\r +## Output\r +\r +All output files are saved to the `results` direcotry. Below is a table summarising all of the output files generated by the pipeline.\r +\r +| Directory | Description |\r +|-----------------------|---------------------------|\r +| fastqc_raw | Fastqc reports for raw input reads |\r +| fastp | Fastp reports from quality control of raw reads |\r +| fastqc_qc | Fastqc reports for quality controlled reads |\r +| go_fetch | Optional output containing reference databases used by GetOrganelle |\r +| getorganelle | GetOrganelle output with a directory for each sample |\r +| assembled_sequence | Assembled sequences selected from GetOrganelle output and renamed |\r +| seqkit | Seqkit summary of each assembly |\r +| blastn | Blastn output of each assembly |\r +| minimap | Mapping output of quality filtered reads against each assembly |\r +| blobtools | Blobtools assembly summary collating blastn and mapping output |\r +| annotations | Annotation outputs of mitos |\r +| summary | Summary per sample (seqkit stats), contig (GC content, length, coverage, taxonomy and annotations) and annotated gene counts |\r +| annotated_genes | Unaligned fasta files of annotated genes identified across all samples |\r +| mafft | Mafft aligned fasta files of annotated genes identified across all samples |\r +| mafft_filtered | Mafft aligned fasta files after the removal of sequences based on a missing data threshold |\r +| alignment_trim | Ambiguous parts of alignment removed using either gblocks or clipkit |\r +| iqtree | Iqtree phylogenetic analysis of annotated genes |\r +| plot_tree | Plots of phylogenetic trees |\r +\r +
\r +\r +
\r +\r +## Filtering contaminants\r +\r +If you are working with museum collections, it is possible that you may assemble and annotate sequences from contaminant/non-target species. *Contaminant sequences can be identified based on the blast search output or unusual placement in the phylogenetic trees* (see blobtools and plot_tree outputs). \r +\r +A supplementary python script `format_alignments.py `is provided to remove putative contaminants from alignments, and format the alignments for downstream phylogenetic analysis.\r +\r +For example, let's say we wanted to remove all sequences from the sample "Kallima_paralekta" and 5.8S ribosomal sequence, you could run the script as shown below. The script works by identifying and removing sequences that have names with `Kallima_paralekta` or `5_8S` in the sequence names. The filtered alignments are written to a new output directory `filter_alignments_output`.\r +\r +```\r +python workflow/scripts/format_alignments.py \\\r + --input results/mafft_filtered/ \\\r + --cont Kallima_paralekta 5_8S \\\r + --output filter_alignments_output\r +```\r +\r +*Note that the output fasta files have been reformatted so each alignment file is named after the gene and each sequence is named after the sample.* This is useful if you would like to run our related pipeline **gene2phylo** for further phylogenetic analyses.\r +\r +
\r +\r +
\r +\r +## Assembly and annotation only\r +\r +If you are only interested in the assembly of ribosomal sequences and annotation of genes without the phylogenetic analysis, you can stop the pipeline from running the gene alignment and phylogenetic analyses using the `--omit-from` parameter.\r +```\r +snakemake \\\r + --cores 4 \\\r + --use-conda \\\r + --use-singularity \\\r + --config user_email=user@example_email.com \\\r + --omit-from mafft \r +```\r +\r +
\r +\r +
\r +\r +## Running your own data\r +\r +The first thing you need to do is generate your own config.yaml and samples.csv files, using the files provided as a template.\r +\r +GetOrganelle requires reference data in the format of seed and gene reference fasta files. By default the pipeline uses a basic python script called go_fetch.py https://github.com/o-william-white/go_fetch to download and format reference data formatted for GetOrganelle. \r +\r +go_fetch.py works by searching NCBI based on the NCBI taxonomy specified by the taxid column in the samples.csv file. Note that the seed and gene columns in the samples.csv file are only required if you want to provide your own custom GetOrganelle seed and gene reference databases. \r +\r +You can use the default reference data for GetOrganelle, but I would recommend using custom reference databases where possible. See here for details of how to set up your own databases https://github.com/Kinggerm/GetOrganelle/wiki/FAQ#how-to-assemble-a-target-organelle-genome-using-my-own-reference\r +\r +## Getting help\r +\r +If you have any questions, please do get in touch in the issues or by email o.william.white@gmail.com\r +\r +
\r +\r +
\r +\r +## Citations\r +\r +If you use the pipeline, please cite our bioarxiv preprint: https://doi.org/10.1101/2023.08.11.552985\r +\r +Since the pipeline is a wrapper for several other bioinformatic tools we also ask that you cite the tools used by the pipeline:\r + - Fastqc https://github.com/s-andrews/FastQC\r + - Fastp https://doi.org/10.1093/bioinformatics/bty560\r + - GetOrganelle https://doi.org/10.1186/s13059-020-02154-5\r + - Blastn https://doi.org/10.1186/1471-2105-10-421\r + - Minimap2 https://doi.org/10.1093/bioinformatics/bty191\r + - Blobtools https://doi.org/10.12688/f1000research.12232.1\r + - Seqkit https://doi.org/10.1371/journal.pone.0163962\r + - MITOS2 https://doi.org/10.1016/j.ympev.2012.08.023\r + - Gblocks (default) https://doi.org/10.1093/oxfordjournals.molbev.a026334\r + - Clipkit (optional) https://doi.org/10.1371/journal.pbio.3001007\r + - Mafft https://doi.org/10.1093/molbev/mst010\r + - Iqtree https://doi.org/10.1093/molbev/msu300\r + - ete3 https://doi.org/10.1093/molbev/msw046\r + - ggtree https://doi.org/10.1111/2041-210X.12628\r +\r +
\r +\r +
\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "skim2rrna" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/792?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "16S rRNA amplicon sequencing analysis workflow using QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-07-12 13:18:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3680 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:37Z" ; + schema1:dateModified "2024-06-11T12:54:37Z" ; + schema1:description "16S rRNA amplicon sequencing analysis workflow using QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-03-26T19:01:13.539900" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Purge-duplicate-contigs-VGP6/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicate-contigs-VGP6/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:version "0.3.6" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.279.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_md_setup/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:36:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/279/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 144627 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 28526 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-07T10:04:37Z" ; + schema1:dateModified "2023-06-07T10:16:23Z" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/279?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_md_setup/cwl/workflow.cwl" ; + 
schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-07-12 13:20:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=9" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13140 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:59Z" ; + schema1:dateModified "2024-06-11T12:54:59Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=9" ; + schema1:version 9 . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 80077 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=19" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-07-12 13:19:16 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=19" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 18582 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:16Z" ; + schema1:dateModified "2024-06-11T12:55:16Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=19" ; + schema1:version 19 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ChIP-seq peak-calling and differential analysis pipeline." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/971?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/chipseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/chipseq" ; + schema1:sdDatePublished "2024-07-12 13:22:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/971/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5685 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:44Z" ; + schema1:dateModified "2024-06-11T12:54:44Z" ; + schema1:description "ChIP-seq peak-calling and differential analysis pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/971?version=5" ; + schema1:keywords "ChIP, ChIP-seq, chromatin-immunoprecipitation, macs2, peak-calling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/chipseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/971?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-07-12 13:20:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=8" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4227 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:03Z" ; + schema1:dateModified "2024-06-11T12:55:03Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Flashlite-Trinity contains two workflows that run Trinity on the [University of Queensland's HPC, Flashlite](https://rcc.uq.edu.au/flashlite). Trinity performs de novo transcriptome assembly of RNA-seq data by combining three independent software modules Inchworm, Chrysalis and Butterfly to process RNA-seq reads. The algorithm can detect isoforms, handle paired-end reads, multiple insert sizes and strandedness. Users can run Flashlite-Trinity on single samples, or smaller samples requiring <500Gb of memory or staged Trinity which is recommended for global assemblies with multiple sample inputs. Both implementations make use of Singularity containers to install software. 
\r +\r +Infrastructure\\_deployment\\_metadata: FlashLite (QRISCloud)""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.149.1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/Flashlite-Trinity" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Flashlite-Trinity" ; + schema1:sdDatePublished "2024-07-12 13:36:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/149/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3652 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2021-08-18T23:17:42Z" ; + schema1:dateModified "2023-01-16T13:51:45Z" ; + schema1:description """Flashlite-Trinity contains two workflows that run Trinity on the [University of Queensland's HPC, Flashlite](https://rcc.uq.edu.au/flashlite). Trinity performs de novo transcriptome assembly of RNA-seq data by combining three independent software modules Inchworm, Chrysalis and Butterfly to process RNA-seq reads. The algorithm can detect isoforms, handle paired-end reads, multiple insert sizes and strandedness. Users can run Flashlite-Trinity on single samples, or smaller samples requiring <500Gb of memory or staged Trinity which is recommended for global assemblies with multiple sample inputs. Both implementations make use of Singularity containers to install software. 
\r +\r +Infrastructure\\_deployment\\_metadata: FlashLite (QRISCloud)""" ; + schema1:isPartOf ; + schema1:keywords "trinity, Transcriptomics, Assembly, illumina, salmon, scalable, global assemblies, rna-seq, de novo, transcriptome, strandedness, rna, singularity, container, PBS" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Flashlite-Trinity" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/149?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible ancient and modern DNA pipeline in Nextflow and with cloud support." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-07-12 13:21:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4247 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:49Z" ; + schema1:dateModified "2024-06-11T12:54:49Z" ; + schema1:description "A fully reproducible ancient and modern DNA pipeline in Nextflow and with cloud support." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=8" ; + schema1:version 8 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# ont-assembly-snake\r +\r +A Snakemake wrapper for easily creating *de novo* bacterial genome assemblies from Oxford Nanopore (ONT) sequencing data, and optionally Illumina data,\r +using any combination of read filtering, assembly, long and short read polishing, and reference-based polishing.\r +\r +## Included programs\r +\r +| read filtering | assembly | long read polishing | short read polishing | reference-based polishing |\r +| --- | --- | --- | --- | --- |\r +| [Filtlong](https://github.com/rrwick/Filtlong)
[Rasusa](https://github.com/mbhall88/rasusa) | [Flye](https://github.com/fenderglass/Flye)
[raven](https://github.com/lbcb-sci/raven)
[miniasm](https://github.com/lh3/miniasm)
[Unicycler](https://github.com/rrwick/Unicycler)
[Canu](https://github.com/marbl/canu) | [racon](https://github.com/lbcb-sci/racon)
[medaka](https://github.com/nanoporetech/medaka) | [pilon](https://github.com/broadinstitute/pilon/wiki)
[Polypolish](https://github.com/rrwick/Polypolish)
[POLCA](https://github.com/alekseyzimin/masurca#polca) | [Homopolish](https://github.com/ythuang0522/homopolish)
[proovframe](https://github.com/thackl/proovframe) | \r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.787.1" ; + schema1:isBasedOn "https://github.com/pmenzel/ont-assembly-snake" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ont-assembly-snake" ; + schema1:sdDatePublished "2024-07-12 13:23:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/787/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 23914 ; + schema1:creator ; + schema1:dateCreated "2024-03-06T10:25:52Z" ; + schema1:dateModified "2024-03-06T10:54:16Z" ; + schema1:description """# ont-assembly-snake\r +\r +A Snakemake wrapper for easily creating *de novo* bacterial genome assemblies from Oxford Nanopore (ONT) sequencing data, and optionally Illumina data,\r +using any combination of read filtering, assembly, long and short read polishing, and reference-based polishing.\r +\r +## Included programs\r +\r +| read filtering | assembly | long read polishing | short read polishing | reference-based polishing |\r +| --- | --- | --- | --- | --- |\r +| [Filtlong](https://github.com/rrwick/Filtlong)
[Rasusa](https://github.com/mbhall88/rasusa) | [Flye](https://github.com/fenderglass/Flye)
[raven](https://github.com/lbcb-sci/raven)
[miniasm](https://github.com/lh3/miniasm)
[Unicycler](https://github.com/rrwick/Unicycler)
[Canu](https://github.com/marbl/canu) | [racon](https://github.com/lbcb-sci/racon)
[medaka](https://github.com/nanoporetech/medaka) | [pilon](https://github.com/broadinstitute/pilon/wiki)
[Polypolish](https://github.com/rrwick/Polypolish)
[POLCA](https://github.com/alekseyzimin/masurca#polca) | [Homopolish](https://github.com/ythuang0522/homopolish)
[proovframe](https://github.com/thackl/proovframe) | \r +\r +""" ; + schema1:keywords "name:ONT, name:ILLUMINA, genome_assembly" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ont-assembly-snake" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/787?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Generic variation analysis reporting\r +\r +This workflow generates reports from a list of variants generated by [Variant Calling Workflow](https://workflowhub.eu/workflows/353). \r +\r +The workflow accepts a single input:\r +\r +- A collection of VCF files\r +\r +The workflow produces two outputs (format description below):\r +\r +1. A list of variants grouped by Sample\r +2. A list of variants grouped by Variant\r +\r +Here is example of output **by sample**. In this table all varinats in all samples are epxlicitrly listed:\r +\r +| Sample | POS | FILTER | REF | ALT | DP | AF | AFcaller | SB | DP4 | IMPACT | FUNCLASS | EFFECT | GENE | CODON | AA | TRID | min(AF) | max(AF) | countunique(change) | countunique(FUNCLASS) | change |\r +|----------|------|----------|---------|-----|-----|------|-----------|-----|-------|----------|---------------|-------------|--------|-------------| ---|--------|----------|-----------|-------------------------|------------------------------|------------|\r +| ERR3485786 | 11644 | PASS | A | G | 97 | 0.979381 | 0.907216 | 0 | 1,1,49,46 | LOW | SILENT | SYNONYMOUS_CODING | D7L | tgT/tgC | C512 | AKG51361.1 | 0.979381 | 1 | 1 | 1 | A>G |\r +| ERR3485786 | 11904 | PASS | T | C | 102 | 0.990196 | 0.95098 | 0 | 0,0,51,50 | MODERATE | MISSENSE | NON_SYNONYMOUS_CODING | D7L | Act/Gct | T426A | AKG51361.1 | 0.990196 | 1 | 1 | 1 | T>C |\r +\r +> **Note** the two alernative allele frequency fields: "AFcaller" ans "AF". 
LoFreq reports AF values listed in "AFcaller". They incorrect due to the known LoFreq [bug](https://github.com/CSB5/lofreq/issues/80). To correct for this we are recomputing AF values from DP4 and DP fields as follows: `AF == (DP4[2] + DP4[3]) / DP.`\r +\r +Here is an example of output **by variant**. In this table data is aggregated by variant across all samples in which this variant is present:\r +\r +| POS | REF | ALT | IMPACT | FUNCLASS | EFFECT | GENE | CODON | AA | TRID | countunique(Sample) | min(AF) | max(AF) | SAMPLES(above-thresholds) | SAMPLES(all) | AFs(all) | change |\r +|-----|-------|-----|-----------|----------------|------------|----------|-----------|------|--------|------------------------|----------|-----------|------------------------------------|------------------|----------|---------|\r +| 11644 | A | G | LOW | SILENT | SYNONYMOUS_CODING | D7L | tgT/tgC | C512 | AKG51361.1 | 11 | 0.979381 | 1 | ERR3485786,ERR3485787... | ERR3485786,ERR3485787,ERR3485789 ... | 0.979381,1.0... | A>G |\r +| 11904 | T | C | MODERATE | MISSENSE | NON_SYNONYMOUS_CODING | D7L | Act/Gct | T426A | AKG51361.1 | 12 | 0.990196 | 1 | ERR3485786,ERR3485787... | ERR3485786,ERR3485787,ERR3485789... | 0.990196,1.0,1.0... | T>C | \r +\r +The workflow can be accessed at [usegalaxy.org](https://usegalaxy.org/u/aun1/w/genetic-variation-analysis-reporting)\r +\r +The general idea of the workflow is:\r +\r +![](https://i.imgur.com/k2cIZK5.png)\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/354?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Generic variation analysis reporting" ; + schema1:sdDatePublished "2024-07-12 13:35:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/354/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 78824 ; + schema1:creator ; + schema1:dateCreated "2022-06-01T15:36:06Z" ; + schema1:dateModified "2023-01-16T14:00:24Z" ; + schema1:description """# Generic variation analysis reporting\r +\r +This workflow generates reports from a list of variants generated by [Variant Calling Workflow](https://workflowhub.eu/workflows/353). \r +\r +The workflow accepts a single input:\r +\r +- A collection of VCF files\r +\r +The workflow produces two outputs (format description below):\r +\r +1. A list of variants grouped by Sample\r +2. A list of variants grouped by Variant\r +\r +Here is example of output **by sample**. In this table all varinats in all samples are epxlicitrly listed:\r +\r +| Sample | POS | FILTER | REF | ALT | DP | AF | AFcaller | SB | DP4 | IMPACT | FUNCLASS | EFFECT | GENE | CODON | AA | TRID | min(AF) | max(AF) | countunique(change) | countunique(FUNCLASS) | change |\r +|----------|------|----------|---------|-----|-----|------|-----------|-----|-------|----------|---------------|-------------|--------|-------------| ---|--------|----------|-----------|-------------------------|------------------------------|------------|\r +| ERR3485786 | 11644 | PASS | A | G | 97 | 0.979381 | 0.907216 | 0 | 1,1,49,46 | LOW | SILENT | SYNONYMOUS_CODING | D7L | tgT/tgC | C512 | AKG51361.1 | 0.979381 | 1 | 1 | 1 | A>G |\r +| ERR3485786 | 11904 | PASS | T | C | 102 | 0.990196 | 0.95098 | 0 | 0,0,51,50 | MODERATE | MISSENSE | NON_SYNONYMOUS_CODING | D7L | Act/Gct | T426A | AKG51361.1 | 0.990196 | 1 | 1 | 1 | T>C |\r +\r +> **Note** the two alernative allele frequency fields: "AFcaller" ans "AF". LoFreq reports AF values listed in "AFcaller". They incorrect due to the known LoFreq [bug](https://github.com/CSB5/lofreq/issues/80). 
To correct for this we are recomputing AF values from DP4 and DP fields as follows: `AF == (DP4[2] + DP4[3]) / DP.`\r +\r +Here is an example of output **by variant**. In this table data is aggregated by variant across all samples in which this variant is present:\r +\r +| POS | REF | ALT | IMPACT | FUNCLASS | EFFECT | GENE | CODON | AA | TRID | countunique(Sample) | min(AF) | max(AF) | SAMPLES(above-thresholds) | SAMPLES(all) | AFs(all) | change |\r +|-----|-------|-----|-----------|----------------|------------|----------|-----------|------|--------|------------------------|----------|-----------|------------------------------------|------------------|----------|---------|\r +| 11644 | A | G | LOW | SILENT | SYNONYMOUS_CODING | D7L | tgT/tgC | C512 | AKG51361.1 | 11 | 0.979381 | 1 | ERR3485786,ERR3485787... | ERR3485786,ERR3485787,ERR3485789 ... | 0.979381,1.0... | A>G |\r +| 11904 | T | C | MODERATE | MISSENSE | NON_SYNONYMOUS_CODING | D7L | Act/Gct | T426A | AKG51361.1 | 12 | 0.990196 | 1 | ERR3485786,ERR3485787... | ERR3485786,ERR3485787,ERR3485789... | 0.990196,1.0,1.0... | T>C | \r +\r +The workflow can be accessed at [usegalaxy.org](https://usegalaxy.org/u/aun1/w/genetic-variation-analysis-reporting)\r +\r +The general idea of the workflow is:\r +\r +![](https://i.imgur.com/k2cIZK5.png)\r +\r +""" ; + schema1:keywords "mpvx, generic" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Generic variation analysis reporting" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/354?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Create Meryl Database used for the estimation of assembly parameters and quality control with Merqury. Part of the VGP pipeline." 
; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/309?version=1" ; + schema1:isBasedOn "https://github.com/Delphine-L/iwc/tree/VGP/workflows/VGP-assembly-v2" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for VGP genome profile analysis" ; + schema1:sdDatePublished "2024-07-12 13:35:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/309/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 2887 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17406 ; + schema1:creator ; + schema1:dateCreated "2022-04-05T11:22:34Z" ; + schema1:dateModified "2023-01-16T13:59:21Z" ; + schema1:description "Create Meryl Database used for the estimation of assembly parameters and quality control with Merqury. Part of the VGP pipeline." ; + schema1:keywords "vgp, Galaxy, Assembly" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "VGP genome profile analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/309?version=1" ; + schema1:version 1 ; + ns1:input . + + a schema1:Dataset ; + schema1:datePublished "2023-08-31T07:11:57.476431" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.9" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Genes and transcripts annotation with Isoseq using uLTRA and TAMA" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/993?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/isoseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/isoseq" ; + schema1:sdDatePublished "2024-07-12 13:21:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/993/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7742 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:57Z" ; + schema1:dateModified "2024-06-11T12:54:57Z" ; + schema1:description "Genes and transcripts annotation with Isoseq using uLTRA and TAMA" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/993?version=6" ; + schema1:keywords "isoseq, isoseq-3, rna, tama, ultra" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/isoseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/993?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:description """# Scaffolding using HiC data with YAHS\r +\r +This workflow has been created from a Vertebrate Genomes Project (VGP) scaffolding workflow. \r +\r +* For more information about the VGP project see https://galaxyproject.org/projects/vgp/. \r +* The scaffolding workflow is at https://dockstore.org/workflows/github.com/iwc-workflows/Scaffolding-HiC-VGP8/main:main?tab=info\r +* Please see that link for the workflow diagram. 
\r +\r +Some minor changes have been made to better fit with TSI project data: \r +\r +* optional inputs of SAK info and sequence graph have been removed\r +* the required input format for the genome is changed from gfa to fasta\r +* the estimated genome size now requires user input rather than being extracted from output of a previous workflow. \r +\r +Inputs: \r +\r +* assembly.fasta [note - scaffolding is done only one haplotype at a time. eg hap1 or primary]\r +* Concatenated HiC forward reads in fastqsanger.gz\r +* Concatenated HiC reverse reads in fastqsanger.gz\r +* Restriction enzyme sequence\r +* Estimated genome size (enter as integer)\r +* Lineage for busco \r +\r +Outputs: the main outputs are: \r +\r +* scaffolded_assmbly.fasta\r +* comparison of pre- post- scaffolding contact maps\r +\r +\r +\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.1054.1" ; + schema1:license "GPL-3.0+" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for TSI-Scaffolding-with-HiC (based on VGP-HiC-scaffolding)" ; + schema1:sdDatePublished "2024-07-12 13:17:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1054/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 83089 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-21T00:48:07Z" ; + schema1:dateModified "2024-06-21T01:10:22Z" ; + schema1:description """# Scaffolding using HiC data with YAHS\r +\r +This workflow has been created from a Vertebrate Genomes Project (VGP) scaffolding workflow. \r +\r +* For more information about the VGP project see https://galaxyproject.org/projects/vgp/. \r +* The scaffolding workflow is at https://dockstore.org/workflows/github.com/iwc-workflows/Scaffolding-HiC-VGP8/main:main?tab=info\r +* Please see that link for the workflow diagram. 
\r +\r +Some minor changes have been made to better fit with TSI project data: \r +\r +* optional inputs of SAK info and sequence graph have been removed\r +* the required input format for the genome is changed from gfa to fasta\r +* the estimated genome size now requires user input rather than being extracted from output of a previous workflow. \r +\r +Inputs: \r +\r +* assembly.fasta [note - scaffolding is done only one haplotype at a time. eg hap1 or primary]\r +* Concatenated HiC forward reads in fastqsanger.gz\r +* Concatenated HiC reverse reads in fastqsanger.gz\r +* Restriction enzyme sequence\r +* Estimated genome size (enter as integer)\r +* Lineage for busco \r +\r +Outputs: the main outputs are: \r +\r +* scaffolded_assmbly.fasta\r +* comparison of pre- post- scaffolding contact maps\r +\r +\r +\r +\r +""" ; + schema1:keywords "TSI" ; + schema1:license "https://spdx.org/licenses/GPL-3.0+" ; + schema1:name "TSI-Scaffolding-with-HiC (based on VGP-HiC-scaffolding)" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1054?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description "Performs Long Read assembly using PacBio data and Hifiasm. Part of VGP assembly pipeline. This workflow generate a phased assembly." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/325?version=1" ; + schema1:isBasedOn "https://github.com/Delphine-L/iwc/tree/VGP/workflows/VGP-assembly-v2" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for VGP HiFi phased assembly with hifiasm and HiC data" ; + schema1:sdDatePublished "2024-07-12 13:35:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/325/ro_crate?version=1" . 
+ + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 11731 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 50590 ; + schema1:creator ; + schema1:dateCreated "2022-04-06T01:23:56Z" ; + schema1:dateModified "2023-01-16T13:59:39Z" ; + schema1:description "Performs Long Read assembly using PacBio data and Hifiasm. Part of VGP assembly pipeline. This workflow generate a phased assembly." ; + schema1:keywords "vgp, Assembly, Galaxy" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "VGP HiFi phased assembly with hifiasm and HiC data" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/325?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.279.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_md_setup/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-07-12 13:36:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/279/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 154478 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 24688 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-17T09:18:45Z" ; + schema1:dateModified "2023-01-16T13:58:29Z" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/279?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_md_setup/cwl/workflow.cwl" ; + 
schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2021-07-26T10:22:23.305126" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-consensus-from-variation" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:sdDatePublished "2021-10-27 15:45:11 +0100" ; + schema1:softwareVersion "v0.2.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.6" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + ; + schema1:description "This workflow combines SDF files from all fragments into a single dataset and filters to include only the lowest (best) scoring pose for each compound. This file of optimal poses for all ligands is used to compare to a database of Enamine and Chemspace compounds to select the best scoring 500 matches. More info can be found at https://covid19.galaxyproject.org/cheminformatics/" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/17?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Cheminformatics - Filter results" ; + schema1:sdDatePublished "2024-07-12 13:37:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/17/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 5540 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 21903 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T14:56:44Z" ; + schema1:dateModified "2023-01-16T13:41:12Z" ; + schema1:description "This workflow combines SDF files from all fragments into a single dataset and filters to include only the lowest (best) scoring pose for each compound. This file of optimal poses for all ligands is used to compare to a database of Enamine and Chemspace compounds to select the best scoring 500 matches. More info can be found at https://covid19.galaxyproject.org/cheminformatics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Cheminformatics - Filter results" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/17?version=1" ; + schema1:version 1 ; + ns1:input , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 12188 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# pod5_by_pore\r +\r +A Snakemake workflow to take the POD5 files produced by an Oxford Nanopore sequencing run and\r +re-batch them by pore (ie. 
by channel).\r +\r +This is useful if you want to run duplex basecalling because you can meaningfully run\r +"dorado duplex" on a single (or a subset of) the POD5 files.\r +\r +## Know issues\r +\r +It is assumed all POD5 input files are from the same sequencing run, but this is not checked.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/889?version=1" ; + schema1:isBasedOn "https://github.com/tbooth/pod5_by_pore.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for POD5 by pore" ; + schema1:sdDatePublished "2024-07-12 13:22:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/889/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4613 ; + schema1:creator ; + schema1:dateCreated "2024-05-24T14:28:49Z" ; + schema1:dateModified "2024-05-24T14:32:41Z" ; + schema1:description """# pod5_by_pore\r +\r +A Snakemake workflow to take the POD5 files produced by an Oxford Nanopore sequencing run and\r +re-batch them by pore (ie. by channel).\r +\r +This is useful if you want to run duplex basecalling because you can meaningfully run\r +"dorado duplex" on a single (or a subset of) the POD5 files.\r +\r +## Know issues\r +\r +It is assumed all POD5 input files are from the same sequencing run, but this is not checked.\r +""" ; + schema1:image ; + schema1:keywords "nanopore, pod5" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "POD5 by pore" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/889?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 1573 . 
+ + a schema1:Dataset ; + schema1:datePublished "2022-09-15T00:06:28+00:00" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/wombat-p/WOMBAT-Pipelines/tree/dev/" ; + schema1:mainEntity . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:codeRepository "https://github.com/wombat-p/WOMBAT-Pipelines/tree/dev/" ; + schema1:name "main" ; + schema1:programmingLanguage ; + schema1:url "https://raw.githubusercontent.com/wombat-p/WOMBAT-Pipelines/44442336ce5110fb9b68d305517e0795ec61d434/main.nf" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:description """\r +\r +Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, related to the Galaxy training tutorial "Antarctic sea ecoregionalization" .\r +\r +This workflow allows to analyze marine benthic biodiversity data to compute ecoregions regarding environmental data.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/658?version=1" ; + schema1:isBasedOn "https://ecology.usegalaxy.eu/u/ylebras/w/workflow-constructed-from-history-test-ecoregionalization" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Ecoregionalization on Antarctic sea" ; + schema1:sdDatePublished "2024-07-12 13:26:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/658/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12973 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-11-09T20:55:03Z" ; + schema1:dateModified "2023-11-09T21:01:01Z" ; + schema1:description """\r +\r +Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, related to the Galaxy training tutorial "Antarctic sea ecoregionalization" .\r +\r +This workflow allows to analyze marine benthic biodiversity data to compute ecoregions regarding environmental data.\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Ecoregionalization on Antarctic sea" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/658?version=1" ; + schema1:version 1 ; + ns1:input , + . + + a schema1:MediaObject ; + schema1:contentSize 3954107 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "0.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954344 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "1.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954590 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "10.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953773 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "11.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953870 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "12.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3954234 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "13.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954008 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "14.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954432 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "15.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954068 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "16.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954256 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "17.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953419 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "18.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954109 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "19.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953610 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "2.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954118 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "20.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954451 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "21.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3954298 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "22.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953962 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "23.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954459 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "24.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953457 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "25.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953859 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "26.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953543 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "27.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953815 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "28.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954113 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "29.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954237 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "3.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953766 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "30.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3954029 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "31.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954023 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "32.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953996 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "33.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953653 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "34.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953861 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "35.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954184 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "36.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953467 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "37.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953928 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "38.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953695 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "39.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954368 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "4.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3953917 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "40.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953894 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "41.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954012 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "42.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953858 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "43.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953932 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "44.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953856 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "45.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953828 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "46.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954216 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "47.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954061 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "48.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954117 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "49.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3954654 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "5.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953977 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "50.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954034 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "51.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953859 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "52.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954004 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "53.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953829 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "54.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953981 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "55.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953660 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "56.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953761 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "57.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953900 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "58.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3954381 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "59.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953701 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "6.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953827 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "60.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954330 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "61.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953584 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "62.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954005 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "63.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954094 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "64.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953866 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "65.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953953 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "66.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953762 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "67.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3953640 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "68.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953847 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "69.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953880 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "7.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953717 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "70.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953606 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "71.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953953 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "72.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953858 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "73.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954285 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "74.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954304 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "75.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953619 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "76.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3953761 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "77.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953469 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "78.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953864 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "79.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953434 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "8.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954254 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "80.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953675 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "81.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953777 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "82.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953973 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "83.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954439 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "84.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954185 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "85.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3954354 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "86.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953921 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "87.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954006 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "88.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954238 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "89.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954268 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "9.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953599 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "90.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954093 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "91.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953864 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "92.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954424 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "93.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953990 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "94.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3953414 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "95.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953431 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "96.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953921 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "97.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953795 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "98.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953786 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "99.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 24 ; + schema1:dateModified "2024-06-06T10:12:43" ; + schema1:name "Result.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "A.0.0" ; + schema1:sdDatePublished "2024-04-30T13:04:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "A.0.1" ; + schema1:sdDatePublished "2024-04-30T13:04:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "A.1.0" ; + schema1:sdDatePublished "2024-04-30T13:04:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "A.1.1" ; + schema1:sdDatePublished "2024-04-30T13:04:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "B.0.0" ; + schema1:sdDatePublished "2024-04-30T13:04:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "B.0.1" ; + schema1:sdDatePublished "2024-04-30T13:04:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "B.1.0" ; + schema1:sdDatePublished "2024-04-30T13:04:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "B.1.1" ; + schema1:sdDatePublished "2024-04-30T13:04:29+00:00" . + + a schema1:Dataset ; + schema1:dateModified "2024-03-29T14:48:57+00:00" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:name "Files1" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:14+00:00" ; + schema1:name "results_0_1.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:08+00:00" ; + schema1:name "results_0_10.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:09+00:00" ; + schema1:name "results_0_11.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:09+00:00" ; + schema1:name "results_0_12.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:10+00:00" ; + schema1:name "results_0_13.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:11+00:00" ; + schema1:name "results_0_14.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:12+00:00" ; + schema1:name "results_0_15.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:12+00:00" ; + schema1:name "results_0_16.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:12+00:00" ; + schema1:name "results_0_17.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:13+00:00" ; + schema1:name "results_0_18.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:13+00:00" ; + schema1:name "results_0_19.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:18+00:00" ; + schema1:name "results_0_2.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:14+00:00" ; + schema1:name "results_0_20.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:15+00:00" ; + schema1:name "results_0_21.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:15+00:00" ; + schema1:name "results_0_22.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:16+00:00" ; + schema1:name "results_0_23.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:16+00:00" ; + schema1:name "results_0_24.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:17+00:00" ; + schema1:name "results_0_25.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:17+00:00" ; + schema1:name "results_0_26.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:19+00:00" ; + schema1:name "results_0_3.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:19+00:00" ; + schema1:name "results_0_4.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:20+00:00" ; + schema1:name "results_0_5.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:20+00:00" ; + schema1:name "results_0_6.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:21+00:00" ; + schema1:name "results_0_7.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:21+00:00" ; + schema1:name "results_0_8.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:22+00:00" ; + schema1:name "results_0_9.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:28+00:00" ; + schema1:name "results_1_1.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:23+00:00" ; + schema1:name "results_1_10.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:24+00:00" ; + schema1:name "results_1_11.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:24+00:00" ; + schema1:name "results_1_12.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:25+00:00" ; + schema1:name "results_1_13.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:25+00:00" ; + schema1:name "results_1_14.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:26+00:00" ; + schema1:name "results_1_15.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:26+00:00" ; + schema1:name "results_1_16.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:27+00:00" ; + schema1:name "results_1_17.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:27+00:00" ; + schema1:name "results_1_18.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:28+00:00" ; + schema1:name "results_1_19.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:33+00:00" ; + schema1:name "results_1_2.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:29+00:00" ; + schema1:name "results_1_20.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:29+00:00" ; + schema1:name "results_1_21.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:30+00:00" ; + schema1:name "results_1_22.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:31+00:00" ; + schema1:name "results_1_23.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:31+00:00" ; + schema1:name "results_1_24.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:32+00:00" ; + schema1:name "results_1_25.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:33+00:00" ; + schema1:name "results_1_26.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:34+00:00" ; + schema1:name "results_1_3.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:34+00:00" ; + schema1:name "results_1_4.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:35+00:00" ; + schema1:name "results_1_5.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:36+00:00" ; + schema1:name "results_1_6.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:36+00:00" ; + schema1:name "results_1_7.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:37+00:00" ; + schema1:name "results_1_8.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:37+00:00" ; + schema1:name "results_1_9.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:44+00:00" ; + schema1:name "results_2_1.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:38+00:00" ; + schema1:name "results_2_10.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:39+00:00" ; + schema1:name "results_2_11.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:39+00:00" ; + schema1:name "results_2_12.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:40+00:00" ; + schema1:name "results_2_13.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:40+00:00" ; + schema1:name "results_2_14.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:41+00:00" ; + schema1:name "results_2_15.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:41+00:00" ; + schema1:name "results_2_16.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:42+00:00" ; + schema1:name "results_2_17.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:43+00:00" ; + schema1:name "results_2_18.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:44+00:00" ; + schema1:name "results_2_19.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:49+00:00" ; + schema1:name "results_2_2.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:45+00:00" ; + schema1:name "results_2_20.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:45+00:00" ; + schema1:name "results_2_21.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:46+00:00" ; + schema1:name "results_2_22.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:47+00:00" ; + schema1:name "results_2_23.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:48+00:00" ; + schema1:name "results_2_24.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:48+00:00" ; + schema1:name "results_2_25.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:49+00:00" ; + schema1:name "results_2_26.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:50+00:00" ; + schema1:name "results_2_3.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:51+00:00" ; + schema1:name "results_2_4.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:51+00:00" ; + schema1:name "results_2_5.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:52+00:00" ; + schema1:name "results_2_6.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:53+00:00" ; + schema1:name "results_2_7.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:53+00:00" ; + schema1:name "results_2_8.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:54+00:00" ; + schema1:name "results_2_9.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:00+00:00" ; + schema1:name "results_3_1.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:55+00:00" ; + schema1:name "results_3_10.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:55+00:00" ; + schema1:name "results_3_11.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:56+00:00" ; + schema1:name "results_3_12.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:56+00:00" ; + schema1:name "results_3_13.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:57+00:00" ; + schema1:name "results_3_14.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:57+00:00" ; + schema1:name "results_3_15.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:58+00:00" ; + schema1:name "results_3_16.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:58+00:00" ; + schema1:name "results_3_17.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:59+00:00" ; + schema1:name "results_3_18.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:00+00:00" ; + schema1:name "results_3_19.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:05+00:00" ; + schema1:name "results_3_2.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:01+00:00" ; + schema1:name "results_3_20.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:01+00:00" ; + schema1:name "results_3_21.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:02+00:00" ; + schema1:name "results_3_22.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:03+00:00" ; + schema1:name "results_3_23.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:03+00:00" ; + schema1:name "results_3_24.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:04+00:00" ; + schema1:name "results_3_25.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:04+00:00" ; + schema1:name "results_3_26.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:05+00:00" ; + schema1:name "results_3_3.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:06+00:00" ; + schema1:name "results_3_4.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:07+00:00" ; + schema1:name "results_3_5.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:07+00:00" ; + schema1:name "results_3_6.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:08+00:00" ; + schema1:name "results_3_7.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:08+00:00" ; + schema1:name "results_3_8.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:09+00:00" ; + schema1:name "results_3_9.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:15+00:00" ; + schema1:name "results_4_1.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:10+00:00" ; + schema1:name "results_4_10.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:10+00:00" ; + schema1:name "results_4_11.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:11+00:00" ; + schema1:name "results_4_12.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:11+00:00" ; + schema1:name "results_4_13.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:12+00:00" ; + schema1:name "results_4_14.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:12+00:00" ; + schema1:name "results_4_15.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:13+00:00" ; + schema1:name "results_4_16.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:13+00:00" ; + schema1:name "results_4_17.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:14+00:00" ; + schema1:name "results_4_18.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:14+00:00" ; + schema1:name "results_4_19.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:19+00:00" ; + schema1:name "results_4_2.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:15+00:00" ; + schema1:name "results_4_20.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:16+00:00" ; + schema1:name "results_4_21.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:16+00:00" ; + schema1:name "results_4_22.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:17+00:00" ; + schema1:name "results_4_23.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:17+00:00" ; + schema1:name "results_4_24.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:18+00:00" ; + schema1:name "results_4_25.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:18+00:00" ; + schema1:name "results_4_26.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:19+00:00" ; + schema1:name "results_4_3.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:20+00:00" ; + schema1:name "results_4_4.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:20+00:00" ; + schema1:name "results_4_5.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:21+00:00" ; + schema1:name "results_4_6.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:22+00:00" ; + schema1:name "results_4_7.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:22+00:00" ; + schema1:name "results_4_8.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:23+00:00" ; + schema1:name "results_4_9.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:30+00:00" ; + schema1:name "results_5_1.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:24+00:00" ; + schema1:name "results_5_10.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:25+00:00" ; + schema1:name "results_5_11.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:25+00:00" ; + schema1:name "results_5_12.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:26+00:00" ; + schema1:name "results_5_13.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:26+00:00" ; + schema1:name "results_5_14.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:27+00:00" ; + schema1:name "results_5_15.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:27+00:00" ; + schema1:name "results_5_16.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:28+00:00" ; + schema1:name "results_5_17.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:29+00:00" ; + schema1:name "results_5_18.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:29+00:00" ; + schema1:name "results_5_19.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:34+00:00" ; + schema1:name "results_5_2.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:30+00:00" ; + schema1:name "results_5_20.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:31+00:00" ; + schema1:name "results_5_21.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:31+00:00" ; + schema1:name "results_5_22.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:32+00:00" ; + schema1:name "results_5_23.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:32+00:00" ; + schema1:name "results_5_24.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:33+00:00" ; + schema1:name "results_5_25.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:33+00:00" ; + schema1:name "results_5_26.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:34+00:00" ; + schema1:name "results_5_3.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:34+00:00" ; + schema1:name "results_5_4.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:35+00:00" ; + schema1:name "results_5_5.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:35+00:00" ; + schema1:name "results_5_6.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:36+00:00" ; + schema1:name "results_5_7.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:37+00:00" ; + schema1:name "results_5_8.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:37+00:00" ; + schema1:name "results_5_9.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:44+00:00" ; + schema1:name "results_6_1.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:38+00:00" ; + schema1:name "results_6_10.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:39+00:00" ; + schema1:name "results_6_11.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:39+00:00" ; + schema1:name "results_6_12.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:40+00:00" ; + schema1:name "results_6_13.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:41+00:00" ; + schema1:name "results_6_14.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:41+00:00" ; + schema1:name "results_6_15.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:42+00:00" ; + schema1:name "results_6_16.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:42+00:00" ; + schema1:name "results_6_17.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:43+00:00" ; + schema1:name "results_6_18.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:43+00:00" ; + schema1:name "results_6_19.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:49+00:00" ; + schema1:name "results_6_2.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:45+00:00" ; + schema1:name "results_6_20.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:45+00:00" ; + schema1:name "results_6_21.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:46+00:00" ; + schema1:name "results_6_22.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:46+00:00" ; + schema1:name "results_6_23.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:47+00:00" ; + schema1:name "results_6_24.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:47+00:00" ; + schema1:name "results_6_25.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:48+00:00" ; + schema1:name "results_6_26.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:49+00:00" ; + schema1:name "results_6_3.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:50+00:00" ; + schema1:name "results_6_4.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:50+00:00" ; + schema1:name "results_6_5.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:51+00:00" ; + schema1:name "results_6_6.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:51+00:00" ; + schema1:name "results_6_7.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:52+00:00" ; + schema1:name "results_6_8.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:52+00:00" ; + schema1:name "results_6_9.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:59+00:00" ; + schema1:name "results_7_1.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:53+00:00" ; + schema1:name "results_7_10.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:54+00:00" ; + schema1:name "results_7_11.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:55+00:00" ; + schema1:name "results_7_12.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:55+00:00" ; + schema1:name "results_7_13.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:56+00:00" ; + schema1:name "results_7_14.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:56+00:00" ; + schema1:name "results_7_15.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:57+00:00" ; + schema1:name "results_7_16.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:57+00:00" ; + schema1:name "results_7_17.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:58+00:00" ; + schema1:name "results_7_18.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:58+00:00" ; + schema1:name "results_7_19.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:04+00:00" ; + schema1:name "results_7_2.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:00+00:00" ; + schema1:name "results_7_20.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:01+00:00" ; + schema1:name "results_7_21.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:01+00:00" ; + schema1:name "results_7_22.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:02+00:00" ; + schema1:name "results_7_23.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:02+00:00" ; + schema1:name "results_7_24.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:03+00:00" ; + schema1:name "results_7_25.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:04+00:00" ; + schema1:name "results_7_26.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:05+00:00" ; + schema1:name "results_7_3.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:05+00:00" ; + schema1:name "results_7_4.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:06+00:00" ; + schema1:name "results_7_5.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:07+00:00" ; + schema1:name "results_7_6.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:07+00:00" ; + schema1:name "results_7_7.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:08+00:00" ; + schema1:name "results_7_8.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:08+00:00" ; + schema1:name "results_7_9.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:14+00:00" ; + schema1:name "results_8_1.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:09+00:00" ; + schema1:name "results_8_10.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:10+00:00" ; + schema1:name "results_8_11.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:11+00:00" ; + schema1:name "results_8_12.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:11+00:00" ; + schema1:name "results_8_13.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:11+00:00" ; + schema1:name "results_8_14.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:12+00:00" ; + schema1:name "results_8_15.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:12+00:00" ; + schema1:name "results_8_16.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:13+00:00" ; + schema1:name "results_8_17.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:13+00:00" ; + schema1:name "results_8_18.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:14+00:00" ; + schema1:name "results_8_19.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:18+00:00" ; + schema1:name "results_8_2.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:15+00:00" ; + schema1:name "results_8_20.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:15+00:00" ; + schema1:name "results_8_21.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:16+00:00" ; + schema1:name "results_8_22.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:16+00:00" ; + schema1:name "results_8_23.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:17+00:00" ; + schema1:name "results_8_24.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:17+00:00" ; + schema1:name "results_8_25.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:18+00:00" ; + schema1:name "results_8_26.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:19+00:00" ; + schema1:name "results_8_3.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:20+00:00" ; + schema1:name "results_8_4.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:20+00:00" ; + schema1:name "results_8_5.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:21+00:00" ; + schema1:name "results_8_6.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:21+00:00" ; + schema1:name "results_8_7.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:22+00:00" ; + schema1:name "results_8_8.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:22+00:00" ; + schema1:name "results_8_9.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "energy.selection" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "genion.group" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1044 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "ions.mdp" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 971 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "minim.mdp" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116154 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "133l.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 114615 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "134l.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116316 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1aki.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 117369 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1bhz.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115425 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1bvx.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115668 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1bwh.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 115506 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1bwi.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116802 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1bwj.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 132678 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1c46.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 126279 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1ckh.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 121338 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1f0w.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119961 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1f10.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110322 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1fly.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 212058 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1gxv.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 213030 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1gxx.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123687 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1hel.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 121662 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1hem.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 121986 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1hen.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123282 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1heo.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 124254 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1hep.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123201 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1heq.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 124902 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1her.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119880 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1hsw.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120042 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1hsx.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 118422 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1i20.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115506 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1ioq.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 114210 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1ior.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113400 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1ios.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111051 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1iot.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111942 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1ir7.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 114858 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1ir8.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112428 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1ir9.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 178848 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1iy3.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 178605 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1iy4.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 95661 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1ja2.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 95499 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1ja4.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 95013 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1ja6.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120690 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1jis.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 121419 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1jit.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119880 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1jiy.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 117369 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1jj1.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110241 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1jpo.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 133974 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1jwr.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111537 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1kxw.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113238 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1kxx.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115101 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1kxy.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 119070 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1laa.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 118989 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lhh.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120285 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lhi.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123444 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lhj.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119718 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lhk.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119394 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lhl.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115992 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lhm.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 109755 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1loz.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 165483 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lsa.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 162162 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lsb.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 157707 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lsc.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 158355 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lsd.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 157059 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lse.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 162567 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lsf.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123120 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lsm.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 127332 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lsn.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 122472 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lsy.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111618 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lyo.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 117612 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lyy.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 145881 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lyz.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 118665 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lz1.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116478 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lza.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115668 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lzd.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 124254 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lzt.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 114129 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1rex.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 114372 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1rfp.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113724 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1tay.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115587 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1tby.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 114291 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1tcy.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 114615 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1tdy.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 110889 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1uia.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 109998 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1uic.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113562 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1uid.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111294 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1uie.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111051 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1uif.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111699 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1uig.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 141507 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1vds.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 145314 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1vdt.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113967 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1ved.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 118908 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1xei.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 116964 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1xej.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120042 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1xek.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 94284 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2a6u.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116397 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2aub.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 125793 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2bqg.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 125550 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2bqh.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 124740 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2bqi.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 129114 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2bqk.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 124821 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2bqm.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 173745 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2c8o.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 173826 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2c8p.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 118989 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2epe.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101412 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2hs7.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103113 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2hs9.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99711 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2hso.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 121824 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2lhm.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123525 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2lym.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 133488 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2lyz.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 122472 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2yvb.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116640 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2zq4.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 117288 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "3a3r.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 218214 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "3exd.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115182 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "3iju.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115992 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "3ijv.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 109755 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "3j4g.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106029 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "3j6k.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 125469 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "3lym.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119394 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "3lyz.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116397 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "3qy4.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 211977 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "3wmk.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 119475 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "3wpj.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 122553 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "3wvx.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 206955 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4axt.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 209790 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4b0d.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115425 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4hv1.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112914 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4hv2.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 117693 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4i8s.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113967 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4ias.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 128385 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4lym.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110727 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4lyo.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 133812 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4lyz.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 210762 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4nhi.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 208089 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4qeq.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 30294 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4r0p.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113724 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4rlm.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112347 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4rln.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 190107 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4wmg.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 186624 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5a3e.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 125064 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5amy.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112995 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5e4p.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 116235 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5hnc.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105138 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5hnl.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 125388 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5i4w.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123282 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5k2k.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 218052 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5k2n.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 222183 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5k2p.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 217890 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5k2q.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 121743 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5k2r.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123201 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5k2s.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 141345 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5lyt.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 133893 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5lyz.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 277182 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5njm.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 122391 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5yin.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112023 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "6gf0.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111537 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "6h0k.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112428 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "6h0l.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 127170 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "6lyt.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 134379 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "6lyz.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111456 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "6s2n.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116883 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "7byo.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 118098 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "7byp.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 203391 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "7c09.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115992 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "7d01.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116964 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "7d02.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 114372 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "7d04.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 117855 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "7d05.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103275 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "7lyz.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120771 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "8lyz.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100402 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "133l.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 593157 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "133l.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1244624 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "133l_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "133l_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1243836 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "133l_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100402 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "133l_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4196 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "133l_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2148 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "133l_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1435957 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "133l_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1435336 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "133l_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100177 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "134l.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592390 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "134l.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1377720 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "134l_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "134l_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1376884 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "134l_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100177 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "134l_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4430 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "134l_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2154 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "134l_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1686427 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "134l_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1685896 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "134l_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98776 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1aki.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577611 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1aki.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1388044 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1aki_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1aki_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1387304 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1aki_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98776 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1aki_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4711 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1aki_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2121 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1aki_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1727686 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1aki_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1726975 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1aki_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88471 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1bhz.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578224 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1bhz.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1331016 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1bhz_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1bhz_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1330276 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1bhz_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88471 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1bhz_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4384 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1bhz_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2154 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1bhz_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1617481 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1bhz_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1616680 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1bhz_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101081 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bvx.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577618 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bvx.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1315836 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bvx_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bvx_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1315096 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bvx_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101081 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bvx_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4192 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bvx_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2154 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bvx_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1592291 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bvx_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1591580 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bvx_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100676 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1bwh.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577615 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1bwh.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1447164 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1bwh_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1bwh_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1446424 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1bwh_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100676 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1bwh_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4706 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1bwh_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2160 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1bwh_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1838531 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1bwh_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1837820 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1bwh_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100676 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1bwi.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577615 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1bwi.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1300500 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1bwi_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1bwi_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1299760 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1bwi_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100676 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1bwi_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4038 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1bwi_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2132 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1bwi_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1563536 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1bwi_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1562825 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1bwi_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102296 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bwj.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577618 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bwj.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1325988 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bwj_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bwj_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1325248 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bwj_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102296 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bwj_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4094 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bwj_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2121 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bwj_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1611326 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bwj_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1610615 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bwj_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123706 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1c46.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 596803 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1c46.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1619344 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1c46_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1c46_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1618604 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1c46_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123706 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1c46_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4161 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1c46_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1c46_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2133316 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1c46_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2132605 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1c46_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123221 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ckh.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 595515 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ckh.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1463304 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ckh_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ckh_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1462564 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ckh_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123221 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ckh_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4710 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ckh_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ckh_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1842311 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ckh_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1841600 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ckh_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106741 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1f0w.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577605 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1f0w.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1386532 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1f0w_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1f0w_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1385792 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1f0w_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106741 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1f0w_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4573 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1f0w_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2124 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1f0w_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1724851 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1f0w_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1724140 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1f0w_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105301 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1f10.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577982 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1f10.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1330940 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1f10_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1f10_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1330224 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1f10_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105301 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1f10_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4227 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1f10_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2169 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1f10_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1619326 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1f10_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1618525 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1f10_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97291 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1fly.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578505 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1fly.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1289252 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1fly_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1fly_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1288512 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1fly_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97291 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1fly_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3991 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1fly_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2157 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1fly_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1540981 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1fly_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1540270 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1fly_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88968 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1gxv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577990 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1gxv.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1243100 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1gxv_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1gxv_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1242384 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1gxv_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88968 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1gxv_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4095 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1gxv_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2177 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1gxv_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1454628 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1gxv_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1453827 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1gxv_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88968 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1gxx.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577986 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1gxx.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1234244 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1gxx_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1gxx_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1233528 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1gxx_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88968 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1gxx_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4439 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1gxx_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2139 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1gxx_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1438023 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1gxx_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1437222 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1gxx_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113235 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1hel.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577619 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1hel.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1615216 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1hel_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1hel_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1614476 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1hel_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113235 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1hel_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4144 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1hel_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2152 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1hel_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2153625 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1hel_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2152914 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1hel_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113370 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1hem.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578521 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1hem.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1613872 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1hem_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1hem_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1613132 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1hem_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113370 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1hem_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4283 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1hem_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2160 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1hem_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2149710 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1hem_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2148999 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1hem_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113235 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1hen.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577615 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1hen.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1620400 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1hen_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1hen_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1619660 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1hen_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113235 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1hen_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4062 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1hen_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2148 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1hen_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2163345 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1hen_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2162634 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1hen_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113100 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1heo.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576719 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1heo.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1614040 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1heo_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1heo_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1613300 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1heo_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113100 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1heo_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4251 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1heo_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2160 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1heo_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2152815 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1heo_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2152104 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1heo_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113100 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1hep.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576722 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1hep.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1636144 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1hep_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1hep_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1635404 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1hep_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113100 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1hep_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4291 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1hep_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1hep_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2194260 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1hep_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2193549 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1hep_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113235 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1heq.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577625 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1heq.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1649488 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1heq_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1heq_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1648748 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1heq_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113235 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1heq_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4369 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1heq_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2157 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1heq_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2217885 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1heq_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2217174 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1heq_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113100 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1her.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576720 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1her.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1620304 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1her_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1her_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1619564 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1her_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113100 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1her_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4086 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1her_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2175 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1her_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2164560 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1her_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2163849 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1her_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106643 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1hsw.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577642 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1hsw.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1299040 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1hsw_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1hsw_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1298300 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1hsw_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106643 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1hsw_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4033 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1hsw_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1hsw_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1560863 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1hsw_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1560106 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1hsw_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108919 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1hsx.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577623 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1hsx.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1371328 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1hsx_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1hsx_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1370588 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1hsx_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108919 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1hsx_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4492 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1hsx_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1hsx_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1696384 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1hsx_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1695646 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1hsx_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102153 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1i20.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 595550 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1i20.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1302028 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1i20_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1i20_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1301240 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1i20_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102153 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1i20_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3445 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1i20_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2162 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1i20_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1540038 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1i20_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1539417 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1i20_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101928 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1ioq.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 579472 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1ioq.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1355956 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1ioq_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1ioq_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1355216 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1ioq_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101928 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1ioq_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4208 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1ioq_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1ioq_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1665498 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1ioq_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1664787 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1ioq_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100308 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1ior.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 579375 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1ior.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1307236 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1ior_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1ior_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1306520 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1ior_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100308 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1ior_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4250 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1ior_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2124 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1ior_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1573158 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1ior_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1572357 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1ior_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99048 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ios.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578998 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ios.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1296156 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ios_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ios_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1295416 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ios_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99048 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ios_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4052 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ios_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2153 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ios_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1553673 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ios_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1552962 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ios_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98103 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1iot.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578904 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1iot.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1349028 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1iot_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1iot_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1348312 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1iot_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98103 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1iot_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4245 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1iot_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2154 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1iot_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1651818 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1iot_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1651017 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1iot_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96661 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1ir7.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576647 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1ir7.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1293636 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1ir7_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1ir7_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1292896 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1ir7_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96661 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1ir7_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4312 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1ir7_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2147 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1ir7_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1552096 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1ir7_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1551385 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1ir7_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101521 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1ir8.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576641 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1ir8.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1305300 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1ir8_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1ir8_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1304560 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1ir8_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101521 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1ir8_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4230 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1ir8_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2175 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1ir8_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1573966 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1ir8_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1573255 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1ir8_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97516 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1ir9.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577021 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1ir9.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1295356 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1ir9_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1ir9_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1294640 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1ir9_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97516 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1ir9_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4040 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1ir9_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2153 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1ir9_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1554031 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1ir9_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1553230 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1ir9_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 90991 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1iy3.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594848 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1iy3.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1236936 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1iy3_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1iy3_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1236196 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1iy3_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 90991 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1iy3_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4188 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1iy3_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2175 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1iy3_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1418851 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1iy3_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1418140 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1iy3_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 90991 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1iy4.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594849 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1iy4.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1309944 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1iy4_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1iy4_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1309204 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1iy4_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 90991 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1iy4_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4066 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1iy4_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2167 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1iy4_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1555741 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1iy4_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1555030 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1iy4_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88258 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ja2.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577595 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ja2.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1287376 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ja2_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ja2_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1286636 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ja2_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88258 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ja2_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4042 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ja2_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ja2_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1538968 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ja2_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1538236 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ja2_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88258 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1ja4.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577601 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1ja4.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1294144 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1ja4_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1ja4_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1293404 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1ja4_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88258 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1ja4_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4055 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1ja4_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2161 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1ja4_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1551658 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1ja4_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1550926 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1ja4_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88258 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ja6.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577598 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ja6.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1270384 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ja6_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ja6_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1269644 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ja6_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88258 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ja6_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4055 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ja6_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2162 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ja6_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1507108 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ja6_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1506376 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ja6_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107461 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1jis.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577988 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1jis.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1390844 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1jis_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1jis_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1390128 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1jis_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107461 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1jis_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4653 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1jis_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2147 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1jis_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1731646 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1jis_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1730845 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1jis_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108541 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1jit.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577982 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1jit.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1332740 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1jit_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1jit_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1332024 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1jit_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108541 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1jit_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4022 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1jit_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2167 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1jit_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1622701 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1jit_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1621900 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1jit_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108631 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1jiy.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577608 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1jiy.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1413028 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1jiy_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1jiy_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1412288 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1jiy_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108631 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1jiy_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4615 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1jiy_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2133 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1jiy_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1774531 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1jiy_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1773820 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1jiy_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106336 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jj1.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577605 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jj1.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1448524 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jj1_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jj1_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1447784 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jj1_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106336 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jj1_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4555 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jj1_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jj1_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1841086 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jj1_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1840375 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jj1_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97021 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1jpo.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577608 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1jpo.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1278316 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1jpo_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1jpo_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1277576 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1jpo_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97021 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1jpo_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4046 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1jpo_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2169 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1jpo_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1521946 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1jpo_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1521235 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1jpo_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 132436 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jwr.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594871 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jwr.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1414416 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jwr_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jwr_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1413676 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jwr_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 132436 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jwr_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4552 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jwr_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2130 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jwr_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1751626 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jwr_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1750915 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jwr_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96121 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1kxw.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577058 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1kxw.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1355304 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1kxw_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1kxw_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1354516 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1kxw_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96121 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1kxw_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4325 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1kxw_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2158 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1kxw_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1666981 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1kxw_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1666360 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1kxw_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96346 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1kxx.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577608 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1kxx.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1269820 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1kxx_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1kxx_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1269080 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1kxx_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96346 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1kxx_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4022 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1kxx_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1kxx_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1506016 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1kxx_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1505305 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1kxx_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99406 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1kxy.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578153 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1kxy.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1286792 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1kxy_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1kxy_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1286100 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1kxy_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99406 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1kxy_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4019 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1kxy_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2144 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1kxy_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1537156 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1kxy_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1536355 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1kxy_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106252 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1laa.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 595780 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1laa.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1330060 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1laa_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1laa_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1329320 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1laa_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106252 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1laa_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4224 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1laa_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1laa_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1592062 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1laa_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1591351 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1laa_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104542 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lhh.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594816 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lhh.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1291572 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lhh_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lhh_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1290832 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lhh_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104542 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lhh_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4111 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lhh_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2144 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lhh_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1521367 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lhh_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1520656 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lhh_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107287 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lhi.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592144 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lhi.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1386532 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lhi_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lhi_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1385792 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lhi_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107287 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lhi_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4481 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lhi_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2160 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lhi_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1703527 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lhi_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1702816 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lhi_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107962 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lhj.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592147 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lhj.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1258732 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lhj_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lhj_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1257992 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lhj_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107962 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lhj_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4173 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lhj_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2155 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lhj_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1463902 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lhj_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1463191 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lhj_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106612 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lhk.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 596095 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lhk.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1300688 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lhk_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lhk_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1299996 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lhk_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106612 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lhk_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4058 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lhk_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2156 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lhk_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1536532 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lhk_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1535731 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lhk_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106702 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1lhl.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 596710 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1lhl.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1296228 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1lhl_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1lhl_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1295488 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1lhl_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106702 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1lhl_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4202 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1lhl_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2175 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1lhl_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1527307 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1lhl_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1526596 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1lhl_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104632 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lhm.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594361 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lhm.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1270792 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lhm_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lhm_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1270052 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lhm_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104632 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lhm_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4106 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lhm_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2167 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lhm_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1482982 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lhm_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1482271 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lhm_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96841 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1loz.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 593733 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1loz.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1316392 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1loz_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1loz_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1315628 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1loz_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96841 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1loz_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4287 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1loz_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2157 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1loz_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1568071 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1loz_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1567360 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1loz_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105405 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lsa.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577622 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lsa.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1447456 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lsa_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lsa_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1446716 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lsa_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105405 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lsa_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4640 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lsa_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lsa_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1839075 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lsa_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1838364 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lsa_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104730 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1lsb.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577625 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1lsb.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1392808 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1lsb_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1lsb_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1392068 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1lsb_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104730 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1lsb_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4637 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1lsb_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2150 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1lsb_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1736610 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1lsb_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1735899 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1lsb_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102435 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsc.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577619 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsc.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1353784 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsc_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsc_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1353044 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsc_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102435 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsc_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4111 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsc_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2152 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsc_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1663440 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsc_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1662729 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsc_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101220 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lsd.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577619 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lsd.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1919056 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lsd_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lsd_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1918316 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lsd_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101220 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lsd_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4385 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lsd_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2157 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lsd_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2723325 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lsd_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2722614 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lsd_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101625 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1lse.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577622 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1lse.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1584688 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1lse_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1lse_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1583948 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1lse_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101625 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1lse_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4146 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1lse_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2149 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1lse_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2096385 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1lse_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2095674 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1lse_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104325 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lsf.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577625 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lsf.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1400224 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "1lsf_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "1lsf_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1399484 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "1lsf_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104325 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lsf_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4614 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "1lsf_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2148 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "1lsf_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1750515 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "1lsf_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1749804 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "1lsf_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111300 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsm.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578165 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsm.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1660268 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsm_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsm_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1659576 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsm_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111300 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsm_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4340 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsm_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsm_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2237145 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsm_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2236344 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsm_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112200 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lsn.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577806 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lsn.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1581756 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lsn_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lsn_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1581064 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lsn_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112200 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lsn_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4132 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lsn_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2155 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lsn_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2090625 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lsn_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2089824 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lsn_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107025 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lsy.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577747 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lsy.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1369716 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lsy_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lsy_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1369048 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lsy_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107025 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lsy_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4130 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lsy_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2158 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lsy_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1692465 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lsy_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1691574 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lsy_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98101 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lyo.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577607 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lyo.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1227412 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lyo_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lyo_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1226672 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lyo_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98101 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lyo_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4319 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lyo_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2152 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lyo_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1426501 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lyo_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1425790 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lyo_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105946 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lyy.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 597112 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lyy.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1415876 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lyy_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lyy_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1415184 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lyy_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105886 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lyy_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4568 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lyy_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2145 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lyy_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1751401 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lyy_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1750600 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lyy_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102165 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lyz.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578012 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lyz.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1288896 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lyz_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lyz_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1288132 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lyz_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102165 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lyz_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3867 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lyz_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2145 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lyz_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1539645 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lyz_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1538934 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lyz_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 90997 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lz1.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594854 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lz1.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1240180 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lz1_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lz1_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1239440 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lz1_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 90997 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lz1_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4202 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lz1_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lz1_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1424932 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lz1_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1424221 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lz1_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102300 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lza.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577625 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lza.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1292224 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lza_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lza_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1291484 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lza_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102300 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lza_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4028 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lza_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2147 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lza_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1548015 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lza_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1547304 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lza_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98790 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1lzd.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576403 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lzd.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1262000 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lzd_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lzd_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1261260 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lzd_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98790 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lzd_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4093 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lzd_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2149 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lzd_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1493205 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lzd_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1492494 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lzd_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 118020 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lzt.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577622 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lzt.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1383664 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lzt_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 940 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lzt_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1382924 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lzt_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 117960 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lzt_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2905 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lzt_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 964 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lzt_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1719465 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lzt_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1718754 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lzt_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105301 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1rex.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594877 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1rex.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1268112 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1rex_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1rex_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1267372 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1rex_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105301 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1rex_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4367 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1rex_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2163 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1rex_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1477306 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1rex_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1476595 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1rex_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101476 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1rfp.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577608 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1rfp.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1304092 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1rfp_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1rfp_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1303352 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1rfp_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101476 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1rfp_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4259 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1rfp_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2150 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1rfp_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1570276 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1rfp_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1569565 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1rfp_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100492 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tay.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 591433 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tay.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1333676 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tay_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tay_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1332936 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tay_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100492 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tay_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4242 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tay_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2126 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tay_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1605067 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tay_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1604356 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tay_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101302 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1tby.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594177 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1tby.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1328348 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1tby_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1tby_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1327608 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1tby_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101302 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1tby_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4185 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1tby_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2124 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1tby_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1590892 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1tby_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1590181 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1tby_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101077 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1tcy.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594654 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1tcy.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1299300 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1tcy_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1tcy_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1298560 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1tcy_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101077 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1tcy_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4058 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1tcy_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2155 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1tcy_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1536127 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1tcy_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1535416 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1tcy_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101122 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tdy.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 596102 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tdy.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1329372 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tdy_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tdy_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1328632 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tdy_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101122 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tdy_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4124 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tdy_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2160 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tdy_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1590307 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tdy_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1589596 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tdy_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96391 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1uia.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 565547 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1uia.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1336384 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1uia_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1uia_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1335596 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1uia_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96391 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1uia_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4252 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1uia_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2157 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1uia_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1649296 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1uia_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1648675 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1uia_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96706 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1uic.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 575266 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1uic.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1285608 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1uic_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1uic_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1284868 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1uic_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96706 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1uic_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3999 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1uic_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2169 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1uic_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1540261 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1uic_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1539550 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1uic_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97831 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1uid.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578487 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1uid.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1309984 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1uid_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1uid_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1309244 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1uid_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97831 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1uid_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4269 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1uid_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1uid_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1581481 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1uid_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1580770 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1uid_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96841 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1uie.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 574369 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1uie.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1293608 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1uie_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1uie_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1292868 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1uie_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96841 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1uie_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4011 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1uie_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2162 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1uie_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1556731 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1uie_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1556020 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1uie_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96706 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1uif.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577157 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1uif.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1294008 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1uif_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1uif_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1293268 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1uif_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96706 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1uif_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4020 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1uif_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1uif_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1553221 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1uif_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1552510 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1uif_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97291 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1uig.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577605 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1uig.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1298476 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1uig_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1uig_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1297736 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1uig_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97291 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1uig_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4364 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1uig_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2147 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1uig_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1559746 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1uig_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1559035 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1uig_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104055 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1vds.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577625 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1vds.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1316828 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1vds_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1vds_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1316088 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1vds_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104055 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1vds_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4161 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1vds_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2177 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1vds_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1594185 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1vds_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1593453 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1vds_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103785 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1vdt.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577619 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1vdt.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1330076 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1vdt_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1vdt_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1329336 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1vdt_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103785 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1vdt_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4258 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1vdt_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2115 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1vdt_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1619025 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1vdt_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1618293 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1vdt_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98790 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1ved.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577625 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1ved.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1335260 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1ved_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1ved_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1334520 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1ved_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98790 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1ved_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4077 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1ved_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1ved_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1628745 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1ved_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1628013 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1ved_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102508 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1xei.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577632 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1xei.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1448080 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1xei_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1xei_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1447340 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1xei_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102448 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1xei_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4569 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1xei_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2144 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1xei_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1840303 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1xei_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1839556 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1xei_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98863 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1xej.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577635 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1xej.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1393504 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1xej_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1xej_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1392764 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1xej_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98803 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1xej_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3513 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1xej_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1xej_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1737973 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1xej_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1737226 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1xej_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 94543 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1xek.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577638 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1xek.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1390264 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1xek_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1xek_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1389524 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1xek_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 94483 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1xek_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3433 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1xek_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2148 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1xek_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1731898 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1xek_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1731151 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1xek_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88248 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2a6u.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577591 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2a6u.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1307908 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2a6u_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2a6u_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1307168 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2a6u_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88248 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2a6u_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4285 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2a6u_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2132 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2a6u_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1577433 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2a6u_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1576722 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2a6u_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 95820 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2aub.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577625 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2aub.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1351460 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2aub_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2aub_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1350720 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2aub_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 95820 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2aub_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4145 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2aub_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2131 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2aub_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1659120 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2aub_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1658388 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2aub_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115291 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2bqg.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592458 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2bqg.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1458228 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2bqg_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2bqg_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1457488 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2bqg_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115291 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2bqg_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4582 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2bqg_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2bqg_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1837216 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2bqg_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1836505 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2bqg_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112456 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2bqh.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592461 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2bqh.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2198604 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2bqh_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2bqh_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2197864 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2bqh_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112456 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2bqh_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3754 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2bqh_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2190 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2bqh_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3225421 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2bqh_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3224710 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2bqh_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110701 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "2bqi.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592464 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "2bqi.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1369092 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "2bqi_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "2bqi_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1368352 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "2bqi_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110701 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "2bqi_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4137 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "2bqi_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2162 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "2bqi_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1670086 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "2bqi_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1669375 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "2bqi_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120151 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2bqk.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592454 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2bqk.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2200836 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2bqk_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2bqk_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2200096 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2bqk_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120151 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2bqk_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4395 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2bqk_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2196 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2bqk_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3229606 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2bqk_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3228895 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2bqk_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113536 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2bqm.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592458 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2bqm.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1459236 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2bqm_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2bqm_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1458496 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2bqm_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113536 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2bqm_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4696 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2bqm_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2130 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2bqm_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1839106 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2bqm_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1838395 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2bqm_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106216 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2c8o.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577623 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2c8o.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1316324 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2c8o_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2c8o_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1315584 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2c8o_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106216 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2c8o_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4245 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2c8o_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2133 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2c8o_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1593241 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2c8o_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1592508 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2c8o_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106351 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2c8p.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577626 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2c8p.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1294076 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2c8p_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2c8p_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1293336 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2c8p_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106351 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2c8p_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4073 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2c8p_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2161 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2c8p_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1551526 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2c8p_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1550793 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2c8p_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 94773 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2epe.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577981 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2epe.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1292780 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2epe_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2epe_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1292064 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2epe_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 94773 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2epe_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4029 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2epe_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2130 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2epe_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1547778 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2epe_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1546977 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2epe_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101355 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "2hs7.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577619 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "2hs7.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1382996 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "2hs7_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "2hs7_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1382256 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "2hs7_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101355 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "2hs7_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4148 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "2hs7_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2124 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "2hs7_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1718250 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "2hs7_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1717518 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "2hs7_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100815 ; + schema1:dateModified "2024-03-05T11:04:23" ; + schema1:name "2hs9.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577616 ; + schema1:dateModified "2024-03-05T11:04:23" ; + schema1:name "2hs9.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1399916 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2hs9_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2hs9_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1399176 ; + schema1:dateModified "2024-03-05T11:04:23" ; + schema1:name "2hs9_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100815 ; + schema1:dateModified "2024-03-05T11:04:23" ; + schema1:name "2hs9_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4771 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2hs9_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2148 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2hs9_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1749975 ; + schema1:dateModified "2024-03-05T11:04:23" ; + schema1:name "2hs9_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1749243 ; + schema1:dateModified "2024-03-05T11:04:23" ; + schema1:name "2hs9_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100950 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2hso.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577619 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2hso.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1383572 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2hso_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2hso_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1382832 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2hso_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100950 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2hso_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4452 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2hso_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2hso_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1719330 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2hso_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1718598 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2hso_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110032 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2lhm.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594097 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2lhm.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1241404 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2lhm_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2lhm_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1240568 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2lhm_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110032 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2lhm_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4290 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2lhm_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2lhm_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1428442 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2lhm_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1427911 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2lhm_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108645 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2lym.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577622 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2lym.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1448896 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2lym_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2lym_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1448156 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2lym_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108645 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2lym_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4584 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2lym_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2lym_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1841775 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2lym_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1841064 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2lym_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2lyz.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577622 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2lyz.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1342048 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2lyz_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2lyz_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1341308 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2lyz_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2lyz_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3645 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2lyz_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2131 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2lyz_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1641435 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2lyz_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1640724 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2lyz_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99183 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "2yvb.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "2yvb.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1296676 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2yvb_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2yvb_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1295936 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2yvb_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99183 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "2yvb_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4086 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2yvb_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2162 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2yvb_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1556373 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "2yvb_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1555662 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2yvb_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98913 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "2zq4.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "2zq4.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1380268 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "2zq4_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "2zq4_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1379528 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "2zq4_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98913 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "2zq4_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4191 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "2zq4_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2160 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "2zq4_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1713108 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "2zq4_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1712397 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "2zq4_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100983 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3a3r.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577063 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3a3r.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1370784 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3a3r_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3a3r_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1369996 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3a3r_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100983 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3a3r_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4381 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3a3r_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2155 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3a3r_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1696008 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3a3r_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1695387 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3a3r_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119703 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3exd.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3exd.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1351252 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3exd_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3exd_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1350512 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3exd_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119703 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3exd_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4186 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3exd_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2131 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3exd_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1658703 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3exd_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1657992 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3exd_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99183 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3iju.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577607 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3iju.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1341172 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3iju_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3iju_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1340432 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3iju_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99183 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3iju_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4267 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3iju_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2154 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3iju_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1639803 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3iju_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1639092 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3iju_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99183 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3ijv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3ijv.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1292428 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3ijv_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3ijv_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1291688 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3ijv_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99183 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3ijv_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4031 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3ijv_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2161 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3ijv_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1548408 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3ijv_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1547697 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3ijv_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 93198 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "3j4g.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578129 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "3j4g.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1307368 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "3j4g_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "3j4g_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1306628 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "3j4g_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 93198 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "3j4g_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4126 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "3j4g_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2163 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "3j4g_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1575768 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "3j4g_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1575057 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "3j4g_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 91353 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "3j6k.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577613 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "3j6k.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1329292 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "3j6k_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "3j6k_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1328552 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "3j6k_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 91353 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "3j6k_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4240 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "3j6k_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2162 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "3j6k_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1617528 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "3j6k_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1616817 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "3j6k_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110265 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3lym.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577619 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3lym.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1445368 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3lym_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3lym_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1444628 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3lym_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110265 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3lym_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4558 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3lym_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2145 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3lym_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1835160 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3lym_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1834449 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3lym_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3lyz.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577619 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3lyz.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1324336 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3lyz_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3lyz_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1323596 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3lyz_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3lyz_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3648 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3lyz_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2152 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3lyz_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1608225 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3lyz_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1607514 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3lyz_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99813 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "3qy4.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 571160 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "3qy4.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1300228 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "3qy4_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "3qy4_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1299440 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "3qy4_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99813 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "3qy4_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4240 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "3qy4_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2119 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "3qy4_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1572663 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "3qy4_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1572042 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "3qy4_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103098 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3wmk.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577607 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3wmk.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1342756 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3wmk_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3wmk_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1342016 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3wmk_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103098 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3wmk_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4299 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3wmk_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3wmk_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1642773 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3wmk_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1642062 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3wmk_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97428 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3wpj.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3wpj.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1357732 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3wpj_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3wpj_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1356992 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3wpj_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97428 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3wpj_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4128 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3wpj_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3wpj_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1670853 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3wpj_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1670142 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3wpj_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104223 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "3wvx.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576940 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "3wvx.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1287888 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "3wvx_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "3wvx_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1287196 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "3wvx_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104223 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "3wvx_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4064 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "3wvx_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2147 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "3wvx_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1540758 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "3wvx_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1539957 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "3wvx_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 109308 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4axt.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577607 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4axt.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1334044 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4axt_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4axt_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1333304 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4axt_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 109308 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4axt_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4087 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4axt_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2145 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4axt_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1626438 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4axt_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1625727 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4axt_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110793 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4b0d.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577607 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4b0d.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1355428 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4b0d_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4b0d_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1354688 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4b0d_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110793 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4b0d_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4150 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4b0d_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4b0d_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1666533 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4b0d_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1665822 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4b0d_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98778 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4hv1.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4hv1.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1339660 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4hv1_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4hv1_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1338920 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4hv1_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98778 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4hv1_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4334 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4hv1_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2171 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4hv1_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1636968 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4hv1_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1636257 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4hv1_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 94593 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4hv2.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577613 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4hv2.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1346284 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4hv2_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4hv2_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1345544 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4hv2_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 94593 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4hv2_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4272 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4hv2_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2162 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4hv2_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1649388 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4hv2_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1648677 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4hv2_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103233 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4i8s.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577613 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4i8s.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1343908 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4i8s_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4i8s_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1343168 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4i8s_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103233 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4i8s_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4241 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4i8s_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2150 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4i8s_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1644933 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4i8s_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1644222 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4i8s_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 95943 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4ias.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577607 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4ias.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1345492 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4ias_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4ias_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1344752 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4ias_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 95943 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4ias_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4232 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4ias_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2148 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4ias_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1647903 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4ias_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1647192 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4ias_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 109455 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4lym.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577625 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4lym.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1375384 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4lym_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4lym_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1374644 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4lym_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 109455 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4lym_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4178 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4lym_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2161 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4lym_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1703940 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4lym_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1703229 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4lym_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96256 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "4lyo.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577985 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "4lyo.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1286444 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "4lyo_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "4lyo_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1285728 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "4lyo_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96256 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "4lyo_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4065 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "4lyo_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2165 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "4lyo_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1535896 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "4lyo_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1535095 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "4lyo_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "4lyz.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577622 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "4lyz.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1287256 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "4lyz_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "4lyz_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1286516 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "4lyz_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "4lyz_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4058 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "4lyz_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2145 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "4lyz_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1538700 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "4lyz_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1537989 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "4lyz_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105258 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4nhi.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577607 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4nhi.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1384156 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4nhi_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4nhi_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1383416 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4nhi_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105258 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4nhi_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4554 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4nhi_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2155 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4nhi_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1720398 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4nhi_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1719687 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4nhi_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106968 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4qeq.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577742 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4qeq.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1360228 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4qeq_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4qeq_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1359464 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4qeq_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106968 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4qeq_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4419 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4qeq_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2146 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4qeq_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1673778 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4qeq_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1673067 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4qeq_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4953 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4r0p.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 32024 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4r0p.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 229932 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4r0p_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4r0p_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 229932 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4r0p_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4953 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4r0p_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4039 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4r0p_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2153 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4r0p_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 342858 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4r0p_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 342867 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4r0p_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 93378 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4rlm.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4rlm.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1301644 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4rlm_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4rlm_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1300904 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4rlm_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 93378 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4rlm_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4063 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4rlm_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2150 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4rlm_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1565688 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4rlm_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1564977 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4rlm_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 93648 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4rln.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577613 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4rln.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1306180 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4rln_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4rln_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1305440 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4rln_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 93648 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4rln_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4280 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4rln_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2148 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4rln_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1574193 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4rln_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1573482 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4rln_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88653 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "4wmg.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "4wmg.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1353772 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "4wmg_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "4wmg_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1353032 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "4wmg_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88653 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "4wmg_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4142 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "4wmg_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2146 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "4wmg_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1663428 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "4wmg_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1662717 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "4wmg_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88743 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5a3e.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 572027 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5a3e.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1295536 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5a3e_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5a3e_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1294796 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5a3e_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88743 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5a3e_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4161 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5a3e_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5a3e_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1562403 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5a3e_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1561692 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5a3e_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103638 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "5amy.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "5amy.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1296892 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "5amy_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "5amy_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1296152 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "5amy_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103638 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "5amy_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4257 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "5amy_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2146 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "5amy_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1556778 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "5amy_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1556067 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "5amy_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99453 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5e4p.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577607 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5e4p.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1355428 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5e4p_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5e4p_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1354688 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5e4p_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99453 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5e4p_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4218 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5e4p_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2153 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5e4p_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1666533 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5e4p_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1665822 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5e4p_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102288 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5hnc.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5hnc.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1295164 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5hnc_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5hnc_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1294424 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5hnc_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102288 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5hnc_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4093 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5hnc_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2163 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5hnc_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1553538 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5hnc_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1552827 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5hnc_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 91398 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5hnl.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577984 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5hnl.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1328996 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5hnl_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5hnl_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1328280 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5hnl_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 91398 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5hnl_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4168 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5hnl_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2162 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5hnl_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1615683 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5hnl_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1614882 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5hnl_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115113 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5i4w.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5i4w.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1356436 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5i4w_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5i4w_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1355696 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5i4w_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115113 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5i4w_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4175 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5i4w_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5i4w_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1668423 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5i4w_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1667712 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5i4w_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104043 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "5k2k.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577613 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "5k2k.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1272700 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "5k2k_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "5k2k_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1271960 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "5k2k_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104043 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "5k2k_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4078 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "5k2k_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2148 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "5k2k_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1511418 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "5k2k_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1510707 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "5k2k_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108723 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "5k2n.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577742 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "5k2n.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1339564 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "5k2n_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "5k2n_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1338800 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "5k2n_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108723 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "5k2n_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4143 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "5k2n_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2102 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "5k2n_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1635033 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "5k2n_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1634322 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "5k2n_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111063 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5k2p.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577607 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5k2p.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1329580 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5k2p_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5k2p_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1328840 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5k2p_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111063 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5k2p_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4347 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5k2p_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2144 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5k2p_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1618068 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5k2p_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1617357 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5k2p_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107283 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2q.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2q.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1325188 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2q_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2q_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1324448 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2q_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107283 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2q_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4333 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2q_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2q_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1609833 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2q_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1609122 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2q_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104718 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "5k2r.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577604 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "5k2r.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1299052 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "5k2r_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "5k2r_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1298312 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "5k2r_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104718 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "5k2r_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4048 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "5k2r_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "5k2r_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1560828 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "5k2r_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1560117 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "5k2r_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106203 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2s.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2s.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1290916 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2s_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2s_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1290176 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2s_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106203 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2s_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4084 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2s_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2164 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2s_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1545573 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2s_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1544862 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2s_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120525 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5lyt.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578006 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5lyt.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1424328 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5lyt_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5lyt_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1423564 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5lyt_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120525 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5lyt_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4317 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5lyt_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2165 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5lyt_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1793580 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5lyt_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1792869 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5lyt_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5lyz.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577619 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5lyz.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1287256 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5lyz_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5lyz_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1286516 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5lyz_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5lyz_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4051 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5lyz_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2149 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5lyz_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1538700 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5lyz_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1537989 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5lyz_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97563 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "5njm.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577607 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "5njm.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1314172 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "5njm_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "5njm_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1313432 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "5njm_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97563 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "5njm_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4147 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "5njm_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2130 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "5njm_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1589178 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "5njm_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1588467 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "5njm_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107283 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "5yin.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577609 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "5yin.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1226764 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "5yin_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "5yin_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1226024 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "5yin_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107283 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "5yin_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4345 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "5yin_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2155 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "5yin_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1425288 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "5yin_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1424577 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "5yin_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 93198 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6gf0.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 572030 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6gf0.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1350040 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6gf0_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6gf0_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1349300 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6gf0_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 93198 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6gf0_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4386 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6gf0_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2148 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6gf0_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1664598 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6gf0_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1663887 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6gf0_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97248 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "6h0k.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 572027 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "6h0k.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1350616 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "6h0k_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "6h0k_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1349876 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "6h0k_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97248 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "6h0k_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4394 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "6h0k_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2130 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "6h0k_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1665678 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "6h0k_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1664967 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "6h0k_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98193 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "6h0l.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 572030 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "6h0l.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1391728 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "6h0l_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "6h0l_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1390988 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "6h0l_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98193 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "6h0l_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4631 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "6h0l_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2163 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "6h0l_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1742763 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "6h0l_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1742052 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "6h0l_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101760 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "6lyt.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577625 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "6lyt.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1333336 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "6lyt_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "6lyt_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1332596 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "6lyt_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101760 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "6lyt_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4257 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "6lyt_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2134 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "6lyt_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1625100 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "6lyt_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1624389 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "6lyt_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6lyz.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577624 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6lyz.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1214896 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6lyz_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6lyz_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1214156 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6lyz_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6lyz_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3758 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6lyz_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2146 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6lyz_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1403025 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6lyz_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1402314 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6lyz_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96348 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "6s2n.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577607 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "6s2n.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1348012 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "6s2n_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "6s2n_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1347272 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "6s2n_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96348 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "6s2n_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4224 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "6s2n_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2100 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "6s2n_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1652628 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "6s2n_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1651917 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "6s2n_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102018 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7byo.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577607 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7byo.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1318996 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7byo_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7byo_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1318256 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7byo_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102018 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7byo_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4186 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7byo_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2167 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7byo_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1598223 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7byo_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1597512 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7byo_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101613 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "7byp.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577607 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "7byp.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1328716 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "7byp_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "7byp_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1327976 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "7byp_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101613 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "7byp_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4173 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "7byp_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2157 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "7byp_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1616448 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "7byp_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1615737 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "7byp_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96888 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "7c09.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "7c09.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1351612 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "7c09_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "7c09_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1350872 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "7c09_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96888 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "7c09_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4181 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "7c09_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2132 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "7c09_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1659378 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "7c09_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1658667 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "7c09_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100803 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "7d01.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "7d01.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1325332 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "7d01_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "7d01_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1324592 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "7d01_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100803 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "7d01_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4087 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "7d01_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2117 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "7d01_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1610103 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "7d01_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1609392 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "7d01_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99723 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7d02.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577604 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7d02.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1331452 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7d02_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7d02_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1330712 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7d02_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99723 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7d02_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4244 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7d02_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2126 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7d02_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1621578 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7d02_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1620867 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7d02_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97968 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "7d04.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "7d04.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1326196 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "7d04_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "7d04_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1325456 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "7d04_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97968 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "7d04_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4199 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "7d04_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2119 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "7d04_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1611723 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "7d04_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1611012 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "7d04_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98913 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "7d05.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577613 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "7d05.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1333684 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "7d05_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "7d05_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1332944 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "7d05_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98913 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "7d05_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4289 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "7d05_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2126 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "7d05_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1625763 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "7d05_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1625052 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "7d05_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88320 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "7lyz.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577596 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "7lyz.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1187464 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "7lyz_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "7lyz_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1186724 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "7lyz_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88260 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "7lyz_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4065 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "7lyz_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2167 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "7lyz_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1351590 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "7lyz_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1350879 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "7lyz_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88260 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "8lyz.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577597 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "8lyz.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1343272 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "8lyz_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "8lyz_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1342532 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "8lyz_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88260 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "8lyz_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3467 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "8lyz_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2162 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "8lyz_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1643730 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "8lyz_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1643019 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "8lyz_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 231510 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "POTENTIAL_RESULTS.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "energy.selection" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "genion.group" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1044 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "ions.mdp" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 971 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "minim.mdp" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 109917 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1aki.pdb" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2998215 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1u3m.pdb" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2918025 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1xyw.pdb" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88246 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1aki.gro" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577586 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1aki.top" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1280044 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1aki_em.tpr" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1aki_em_energy.edr" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1279304 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1aki_ions.tpr" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88246 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1aki_newbox.gro" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2142 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1aki_potential.xvg" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1525186 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1aki_solv.gro" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1524475 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1aki_solv_ions.gro" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 82046 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1u3m.gro" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 537920 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1u3m.top" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1416272 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1u3m_em.tpr" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1u3m_em_energy.edr" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1415196 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1u3m_ions.tpr" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 82046 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1u3m_newbox.gro" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2158 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1u3m_potential.xvg" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1837046 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1u3m_solv.gro" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1836965 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1u3m_solv_ions.gro" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 80112 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1xyw.gro" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 523996 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1xyw.top" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1408872 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1xyw_em.tpr" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1xyw_em_energy.edr" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1407844 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1xyw_ions.tpr" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 80112 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1xyw_newbox.gro" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2099 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1xyw_potential.xvg" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1845237 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1xyw_solv.gro" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1845066 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1xyw_solv_ions.gro" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3 ; + schema1:name "energy.selection" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3 ; + schema1:name "genion.group" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1044 ; + schema1:name "ions.mdp" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 971 ; + schema1:name "minim.mdp" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116154 ; + schema1:name "133l.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 114615 ; + schema1:name "134l.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116316 ; + schema1:name "1aki.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 117369 ; + schema1:name "1bhz.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115425 ; + schema1:name "1bvx.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 115668 ; + schema1:name "1bwh.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115506 ; + schema1:name "1bwi.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116802 ; + schema1:name "1bwj.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 132678 ; + schema1:name "1c46.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 126279 ; + schema1:name "1ckh.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 121338 ; + schema1:name "1f0w.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119961 ; + schema1:name "1f10.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110322 ; + schema1:name "1fly.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 212058 ; + schema1:name "1gxv.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 213030 ; + schema1:name "1gxx.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123687 ; + schema1:name "1hel.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 121662 ; + schema1:name "1hem.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 121986 ; + schema1:name "1hen.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 123282 ; + schema1:name "1heo.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 124254 ; + schema1:name "1hep.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123201 ; + schema1:name "1heq.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 124902 ; + schema1:name "1her.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119880 ; + schema1:name "1hsw.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120042 ; + schema1:name "1hsx.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 118422 ; + schema1:name "1i20.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115506 ; + schema1:name "1ioq.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 114210 ; + schema1:name "1ior.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113400 ; + schema1:name "1ios.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111051 ; + schema1:name "1iot.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111942 ; + schema1:name "1ir7.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 114858 ; + schema1:name "1ir8.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 112428 ; + schema1:name "1ir9.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 178848 ; + schema1:name "1iy3.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 178605 ; + schema1:name "1iy4.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 95661 ; + schema1:name "1ja2.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 95499 ; + schema1:name "1ja4.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 95013 ; + schema1:name "1ja6.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120690 ; + schema1:name "1jis.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 121419 ; + schema1:name "1jit.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119880 ; + schema1:name "1jiy.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 117369 ; + schema1:name "1jj1.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110241 ; + schema1:name "1jpo.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 133974 ; + schema1:name "1jwr.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111537 ; + schema1:name "1kxw.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113238 ; + schema1:name "1kxx.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 115101 ; + schema1:name "1kxy.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119070 ; + schema1:name "1laa.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 118989 ; + schema1:name "1lhh.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120285 ; + schema1:name "1lhi.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123444 ; + schema1:name "1lhj.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119718 ; + schema1:name "1lhk.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119394 ; + schema1:name "1lhl.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115992 ; + schema1:name "1lhm.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 109755 ; + schema1:name "1loz.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 165483 ; + schema1:name "1lsa.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 162162 ; + schema1:name "1lsb.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 157707 ; + schema1:name "1lsc.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 158355 ; + schema1:name "1lsd.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 157059 ; + schema1:name "1lse.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 162567 ; + schema1:name "1lsf.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123120 ; + schema1:name "1lsm.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 127332 ; + schema1:name "1lsn.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 122472 ; + schema1:name "1lsy.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111618 ; + schema1:name "1lyo.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 117612 ; + schema1:name "1lyy.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 145881 ; + schema1:name "1lyz.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 118665 ; + schema1:name "1lz1.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116478 ; + schema1:name "1lza.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115668 ; + schema1:name "1lzd.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 124254 ; + schema1:name "1lzt.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 114129 ; + schema1:name "1rex.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 114372 ; + schema1:name "1rfp.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113724 ; + schema1:name "1tay.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115587 ; + schema1:name "1tby.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 114291 ; + schema1:name "1tcy.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 114615 ; + schema1:name "1tdy.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110889 ; + schema1:name "1uia.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 109998 ; + schema1:name "1uic.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113562 ; + schema1:name "1uid.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111294 ; + schema1:name "1uie.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111051 ; + schema1:name "1uif.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111699 ; + schema1:name "1uig.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115425 ; + schema1:name "1vdq.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 141507 ; + schema1:name "1vds.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 145314 ; + schema1:name "1vdt.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113967 ; + schema1:name "1ved.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 118908 ; + schema1:name "1xei.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116964 ; + schema1:name "1xej.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120042 ; + schema1:name "1xek.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 94284 ; + schema1:name "2a6u.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116397 ; + schema1:name "2aub.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 125793 ; + schema1:name "2bqg.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 125550 ; + schema1:name "2bqh.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 124740 ; + schema1:name "2bqi.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 129114 ; + schema1:name "2bqk.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 124821 ; + schema1:name "2bqm.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 173745 ; + schema1:name "2c8o.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 173826 ; + schema1:name "2c8p.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112833 ; + schema1:name "2cds.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 118989 ; + schema1:name "2epe.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101412 ; + schema1:name "2hs7.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103113 ; + schema1:name "2hs9.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99711 ; + schema1:name "2hso.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 121824 ; + schema1:name "2lhm.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123525 ; + schema1:name "2lym.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 133488 ; + schema1:name "2lyz.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 122472 ; + schema1:name "2yvb.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116640 ; + schema1:name "2zq4.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 117288 ; + schema1:name "3a3r.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 218214 ; + schema1:name "3exd.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 115182 ; + schema1:name "3iju.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115992 ; + schema1:name "3ijv.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 109755 ; + schema1:name "3j4g.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106029 ; + schema1:name "3j6k.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 125469 ; + schema1:name "3lym.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119394 ; + schema1:name "3lyz.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116397 ; + schema1:name "3qy4.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 211977 ; + schema1:name "3wmk.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119475 ; + schema1:name "3wpj.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 122553 ; + schema1:name "3wvx.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 206955 ; + schema1:name "4axt.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 209790 ; + schema1:name "4b0d.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115425 ; + schema1:name "4hv1.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 112914 ; + schema1:name "4hv2.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 117693 ; + schema1:name "4i8s.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113967 ; + schema1:name "4ias.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123849 ; + schema1:name "4iat.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 128385 ; + schema1:name "4lym.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110727 ; + schema1:name "4lyo.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 133812 ; + schema1:name "4lyz.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 210762 ; + schema1:name "4nhi.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 208089 ; + schema1:name "4qeq.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 30294 ; + schema1:name "4r0p.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113724 ; + schema1:name "4rlm.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112347 ; + schema1:name "4rln.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 190107 ; + schema1:name "4wmg.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 186624 ; + schema1:name "5a3e.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 125064 ; + schema1:name "5amy.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112995 ; + schema1:name "5e4p.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116235 ; + schema1:name "5hnc.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105138 ; + schema1:name "5hnl.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 125388 ; + schema1:name "5i4w.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123282 ; + schema1:name "5k2k.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 218052 ; + schema1:name "5k2n.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 222183 ; + schema1:name "5k2p.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 217890 ; + schema1:name "5k2q.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 121743 ; + schema1:name "5k2r.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123201 ; + schema1:name "5k2s.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 141345 ; + schema1:name "5lyt.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 133893 ; + schema1:name "5lyz.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 277182 ; + schema1:name "5njm.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 122391 ; + schema1:name "5yin.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112023 ; + schema1:name "6gf0.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111537 ; + schema1:name "6h0k.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112428 ; + schema1:name "6h0l.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 127170 ; + schema1:name "6lyt.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 134379 ; + schema1:name "6lyz.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111456 ; + schema1:name "6s2n.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116883 ; + schema1:name "7byo.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 118098 ; + schema1:name "7byp.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 203391 ; + schema1:name "7c09.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115992 ; + schema1:name "7d01.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 116964 ; + schema1:name "7d02.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 114372 ; + schema1:name "7d04.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 117855 ; + schema1:name "7d05.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103275 ; + schema1:name "7lyz.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120771 ; + schema1:name "8lyz.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100402 ; + schema1:name "133l.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 593089 ; + schema1:name "133l.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1244624 ; + schema1:name "133l_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "133l_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1243836 ; + schema1:name "133l_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100402 ; + schema1:name "133l_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:name "133l_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1435957 ; + schema1:name "133l_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1435336 ; + schema1:name "133l_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100177 ; + schema1:name "134l.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592325 ; + schema1:name "134l.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1377720 ; + schema1:name "134l_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "134l_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1376884 ; + schema1:name "134l_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100177 ; + schema1:name "134l_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2115 ; + schema1:name "134l_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1686427 ; + schema1:name "134l_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1685896 ; + schema1:name "134l_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98776 ; + schema1:name "1aki.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577534 ; + schema1:name "1aki.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1388044 ; + schema1:name "1aki_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "1aki_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1387304 ; + schema1:name "1aki_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98776 ; + schema1:name "1aki_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2098 ; + schema1:name "1aki_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1727686 ; + schema1:name "1aki_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1726975 ; + schema1:name "1aki_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88471 ; + schema1:name "1bhz.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578150 ; + schema1:name "1bhz.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1331016 ; + schema1:name "1bhz_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1bhz_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1330276 ; + schema1:name "1bhz_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88471 ; + schema1:name "1bhz_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2143 ; + schema1:name "1bhz_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1617481 ; + schema1:name "1bhz_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1616680 ; + schema1:name "1bhz_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101081 ; + schema1:name "1bvx.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577547 ; + schema1:name "1bvx.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1315836 ; + schema1:name "1bvx_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1bvx_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1315096 ; + schema1:name "1bvx_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101081 ; + schema1:name "1bvx_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2127 ; + schema1:name "1bvx_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1592291 ; + schema1:name "1bvx_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1591580 ; + schema1:name "1bvx_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100676 ; + schema1:name "1bwh.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577547 ; + schema1:name "1bwh.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1447164 ; + schema1:name "1bwh_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1bwh_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1446424 ; + schema1:name "1bwh_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100676 ; + schema1:name "1bwh_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2115 ; + schema1:name "1bwh_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1838531 ; + schema1:name "1bwh_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1837820 ; + schema1:name "1bwh_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100676 ; + schema1:name "1bwi.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577550 ; + schema1:name "1bwi.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1300500 ; + schema1:name "1bwi_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "1bwi_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1299760 ; + schema1:name "1bwi_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100676 ; + schema1:name "1bwi_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2097 ; + schema1:name "1bwi_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1563536 ; + schema1:name "1bwi_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1562825 ; + schema1:name "1bwi_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102296 ; + schema1:name "1bwj.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577550 ; + schema1:name "1bwj.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1325988 ; + schema1:name "1bwj_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "1bwj_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1325248 ; + schema1:name "1bwj_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102296 ; + schema1:name "1bwj_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2092 ; + schema1:name "1bwj_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1611326 ; + schema1:name "1bwj_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1610615 ; + schema1:name "1bwj_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123706 ; + schema1:name "1c46.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 596738 ; + schema1:name "1c46.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1619344 ; + schema1:name "1c46_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1c46_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1618604 ; + schema1:name "1c46_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123706 ; + schema1:name "1c46_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2112 ; + schema1:name "1c46_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2133316 ; + schema1:name "1c46_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2132605 ; + schema1:name "1c46_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123221 ; + schema1:name "1ckh.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 595447 ; + schema1:name "1ckh.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1463304 ; + schema1:name "1ckh_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1ckh_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1462564 ; + schema1:name "1ckh_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 123221 ; + schema1:name "1ckh_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2098 ; + schema1:name "1ckh_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1842311 ; + schema1:name "1ckh_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1841600 ; + schema1:name "1ckh_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106741 ; + schema1:name "1f0w.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577537 ; + schema1:name "1f0w.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1386532 ; + schema1:name "1f0w_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "1f0w_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1385792 ; + schema1:name "1f0w_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106741 ; + schema1:name "1f0w_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2093 ; + schema1:name "1f0w_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1724851 ; + schema1:name "1f0w_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1724140 ; + schema1:name "1f0w_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 105301 ; + schema1:name "1f10.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577917 ; + schema1:name "1f10.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1330940 ; + schema1:name "1f10_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1f10_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1330224 ; + schema1:name "1f10_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105301 ; + schema1:name "1f10_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2133 ; + schema1:name "1f10_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1619326 ; + schema1:name "1f10_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1618525 ; + schema1:name "1f10_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97291 ; + schema1:name "1fly.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578440 ; + schema1:name "1fly.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1289252 ; + schema1:name "1fly_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1fly_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1288512 ; + schema1:name "1fly_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97291 ; + schema1:name "1fly_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2120 ; + schema1:name "1fly_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1540981 ; + schema1:name "1fly_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1540270 ; + schema1:name "1fly_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88968 ; + schema1:name "1gxv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577913 ; + schema1:name "1gxv.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1243100 ; + schema1:name "1gxv_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1gxv_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1242384 ; + schema1:name "1gxv_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88968 ; + schema1:name "1gxv_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2133 ; + schema1:name "1gxv_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1454628 ; + schema1:name "1gxv_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1453827 ; + schema1:name "1gxv_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88968 ; + schema1:name "1gxx.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577918 ; + schema1:name "1gxx.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1234244 ; + schema1:name "1gxx_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1gxx_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1233528 ; + schema1:name "1gxx_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88968 ; + schema1:name "1gxx_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2116 ; + schema1:name "1gxx_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1438023 ; + schema1:name "1gxx_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1437222 ; + schema1:name "1gxx_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113235 ; + schema1:name "1hel.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577554 ; + schema1:name "1hel.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1615216 ; + schema1:name "1hel_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1hel_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1614476 ; + schema1:name "1hel_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113235 ; + schema1:name "1hel_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2122 ; + schema1:name "1hel_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2153625 ; + schema1:name "1hel_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2152914 ; + schema1:name "1hel_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113370 ; + schema1:name "1hem.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578453 ; + schema1:name "1hem.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1613872 ; + schema1:name "1hem_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1hem_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1613132 ; + schema1:name "1hem_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113370 ; + schema1:name "1hem_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2133 ; + schema1:name "1hem_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2149710 ; + schema1:name "1hem_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2148999 ; + schema1:name "1hem_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113235 ; + schema1:name "1hen.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577550 ; + schema1:name "1hen.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1620400 ; + schema1:name "1hen_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1hen_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1619660 ; + schema1:name "1hen_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113235 ; + schema1:name "1hen_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2128 ; + schema1:name "1hen_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2163345 ; + schema1:name "1hen_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2162634 ; + schema1:name "1hen_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113100 ; + schema1:name "1heo.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576651 ; + schema1:name "1heo.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1614040 ; + schema1:name "1heo_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1heo_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1613300 ; + schema1:name "1heo_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113100 ; + schema1:name "1heo_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2143 ; + schema1:name "1heo_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2152815 ; + schema1:name "1heo_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2152104 ; + schema1:name "1heo_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113100 ; + schema1:name "1hep.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576654 ; + schema1:name "1hep.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1636144 ; + schema1:name "1hep_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1hep_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1635404 ; + schema1:name "1hep_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113100 ; + schema1:name "1hep_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2133 ; + schema1:name "1hep_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2194260 ; + schema1:name "1hep_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2193549 ; + schema1:name "1hep_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113235 ; + schema1:name "1heq.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "1heq.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1649488 ; + schema1:name "1heq_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1heq_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1648748 ; + schema1:name "1heq_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113235 ; + schema1:name "1heq_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2116 ; + schema1:name "1heq_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2217885 ; + schema1:name "1heq_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2217174 ; + schema1:name "1heq_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113100 ; + schema1:name "1her.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 576652 ; + schema1:name "1her.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1620304 ; + schema1:name "1her_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1her_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1619564 ; + schema1:name "1her_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113100 ; + schema1:name "1her_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2114 ; + schema1:name "1her_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2164560 ; + schema1:name "1her_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2163849 ; + schema1:name "1her_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106643 ; + schema1:name "1hsw.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577574 ; + schema1:name "1hsw.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1299040 ; + schema1:name "1hsw_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1hsw_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1298300 ; + schema1:name "1hsw_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 106643 ; + schema1:name "1hsw_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2126 ; + schema1:name "1hsw_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1560863 ; + schema1:name "1hsw_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1560106 ; + schema1:name "1hsw_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108919 ; + schema1:name "1hsx.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577558 ; + schema1:name "1hsx.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1371328 ; + schema1:name "1hsx_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1hsx_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1370588 ; + schema1:name "1hsx_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108919 ; + schema1:name "1hsx_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2128 ; + schema1:name "1hsx_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1696384 ; + schema1:name "1hsx_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1695646 ; + schema1:name "1hsx_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 102153 ; + schema1:name "1i20.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 595476 ; + schema1:name "1i20.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1302028 ; + schema1:name "1i20_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1i20_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1301240 ; + schema1:name "1i20_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102153 ; + schema1:name "1i20_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2099 ; + schema1:name "1i20_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1540038 ; + schema1:name "1i20_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1539417 ; + schema1:name "1i20_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101928 ; + schema1:name "1ioq.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 579407 ; + schema1:name "1ioq.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1355956 ; + schema1:name "1ioq_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1ioq_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1355216 ; + schema1:name "1ioq_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101928 ; + schema1:name "1ioq_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2128 ; + schema1:name "1ioq_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1665498 ; + schema1:name "1ioq_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1664787 ; + schema1:name "1ioq_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100308 ; + schema1:name "1ior.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 579307 ; + schema1:name "1ior.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1307236 ; + schema1:name "1ior_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "1ior_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1306520 ; + schema1:name "1ior_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100308 ; + schema1:name "1ior_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2085 ; + schema1:name "1ior_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1573158 ; + schema1:name "1ior_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1572357 ; + schema1:name "1ior_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99048 ; + schema1:name "1ios.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578933 ; + schema1:name "1ios.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1296156 ; + schema1:name "1ios_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1ios_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1295416 ; + schema1:name "1ios_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99048 ; + schema1:name "1ios_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2121 ; + schema1:name "1ios_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1553673 ; + schema1:name "1ios_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1552962 ; + schema1:name "1ios_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98103 ; + schema1:name "1iot.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578836 ; + schema1:name "1iot.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1349028 ; + schema1:name "1iot_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1iot_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1348312 ; + schema1:name "1iot_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98103 ; + schema1:name "1iot_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2112 ; + schema1:name "1iot_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1651818 ; + schema1:name "1iot_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1651017 ; + schema1:name "1iot_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96661 ; + schema1:name "1ir7.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576573 ; + schema1:name "1ir7.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1293636 ; + schema1:name "1ir7_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1ir7_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1292896 ; + schema1:name "1ir7_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96661 ; + schema1:name "1ir7_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2113 ; + schema1:name "1ir7_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1552096 ; + schema1:name "1ir7_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1551385 ; + schema1:name "1ir7_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101521 ; + schema1:name "1ir8.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576573 ; + schema1:name "1ir8.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1305300 ; + schema1:name "1ir8_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1ir8_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1304560 ; + schema1:name "1ir8_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101521 ; + schema1:name "1ir8_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2121 ; + schema1:name "1ir8_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1573966 ; + schema1:name "1ir8_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1573255 ; + schema1:name "1ir8_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97516 ; + schema1:name "1ir9.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576953 ; + schema1:name "1ir9.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1295356 ; + schema1:name "1ir9_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1ir9_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1294640 ; + schema1:name "1ir9_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97516 ; + schema1:name "1ir9_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:name "1ir9_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1554031 ; + schema1:name "1ir9_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1553230 ; + schema1:name "1ir9_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 90991 ; + schema1:name "1iy3.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594780 ; + schema1:name "1iy3.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1236936 ; + schema1:name "1iy3_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1iy3_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1236196 ; + schema1:name "1iy3_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 90991 ; + schema1:name "1iy3_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2121 ; + schema1:name "1iy3_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1418851 ; + schema1:name "1iy3_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1418140 ; + schema1:name "1iy3_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 90991 ; + schema1:name "1iy4.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594784 ; + schema1:name "1iy4.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1309944 ; + schema1:name "1iy4_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1iy4_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1309204 ; + schema1:name "1iy4_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 90991 ; + schema1:name "1iy4_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2122 ; + schema1:name "1iy4_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1555741 ; + schema1:name "1iy4_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1555030 ; + schema1:name "1iy4_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88258 ; + schema1:name "1ja2.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 577530 ; + schema1:name "1ja2.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1287376 ; + schema1:name "1ja2_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1ja2_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1286636 ; + schema1:name "1ja2_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88258 ; + schema1:name "1ja2_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2121 ; + schema1:name "1ja2_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1538968 ; + schema1:name "1ja2_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1538236 ; + schema1:name "1ja2_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88258 ; + schema1:name "1ja4.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577524 ; + schema1:name "1ja4.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1294144 ; + schema1:name "1ja4_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1ja4_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1293404 ; + schema1:name "1ja4_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 88258 ; + schema1:name "1ja4_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2116 ; + schema1:name "1ja4_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1551658 ; + schema1:name "1ja4_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1550926 ; + schema1:name "1ja4_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88258 ; + schema1:name "1ja6.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577530 ; + schema1:name "1ja6.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1270384 ; + schema1:name "1ja6_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1ja6_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1269644 ; + schema1:name "1ja6_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88258 ; + schema1:name "1ja6_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2123 ; + schema1:name "1ja6_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1507108 ; + schema1:name "1ja6_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1506376 ; + schema1:name "1ja6_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 107461 ; + schema1:name "1jis.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577914 ; + schema1:name "1jis.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1390844 ; + schema1:name "1jis_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1jis_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1390128 ; + schema1:name "1jis_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107461 ; + schema1:name "1jis_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2127 ; + schema1:name "1jis_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1731646 ; + schema1:name "1jis_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1730845 ; + schema1:name "1jis_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108541 ; + schema1:name "1jit.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577914 ; + schema1:name "1jit.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1332740 ; + schema1:name "1jit_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1jit_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1332024 ; + schema1:name "1jit_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108541 ; + schema1:name "1jit_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2133 ; + schema1:name "1jit_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1622701 ; + schema1:name "1jit_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1621900 ; + schema1:name "1jit_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108631 ; + schema1:name "1jiy.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577540 ; + schema1:name "1jiy.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1413028 ; + schema1:name "1jiy_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "1jiy_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1412288 ; + schema1:name "1jiy_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108631 ; + schema1:name "1jiy_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2104 ; + schema1:name "1jiy_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1774531 ; + schema1:name "1jiy_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1773820 ; + schema1:name "1jiy_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106336 ; + schema1:name "1jj1.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577537 ; + schema1:name "1jj1.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1448524 ; + schema1:name "1jj1_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1jj1_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1447784 ; + schema1:name "1jj1_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106336 ; + schema1:name "1jj1_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2121 ; + schema1:name "1jj1_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1841086 ; + schema1:name "1jj1_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1840375 ; + schema1:name "1jj1_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97021 ; + schema1:name "1jpo.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577537 ; + schema1:name "1jpo.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1278316 ; + schema1:name "1jpo_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1jpo_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1277576 ; + schema1:name "1jpo_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97021 ; + schema1:name "1jpo_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2115 ; + schema1:name "1jpo_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1521946 ; + schema1:name "1jpo_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1521235 ; + schema1:name "1jpo_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 132436 ; + schema1:name "1jwr.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594803 ; + schema1:name "1jwr.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1414416 ; + schema1:name "1jwr_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1jwr_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1413676 ; + schema1:name "1jwr_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 132436 ; + schema1:name "1jwr_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2097 ; + schema1:name "1jwr_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1751626 ; + schema1:name "1jwr_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1750915 ; + schema1:name "1jwr_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96121 ; + schema1:name "1kxw.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576993 ; + schema1:name "1kxw.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1355304 ; + schema1:name "1kxw_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1kxw_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1354516 ; + schema1:name "1kxw_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96121 ; + schema1:name "1kxw_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2114 ; + schema1:name "1kxw_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1666981 ; + schema1:name "1kxw_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1666360 ; + schema1:name "1kxw_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96346 ; + schema1:name "1kxx.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577537 ; + schema1:name "1kxx.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1269820 ; + schema1:name "1kxx_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1kxx_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1269080 ; + schema1:name "1kxx_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96346 ; + schema1:name "1kxx_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2126 ; + schema1:name "1kxx_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1506016 ; + schema1:name "1kxx_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1505305 ; + schema1:name "1kxx_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99406 ; + schema1:name "1kxy.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578088 ; + schema1:name "1kxy.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1286792 ; + schema1:name "1kxy_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1kxy_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1286100 ; + schema1:name "1kxy_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99406 ; + schema1:name "1kxy_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2116 ; + schema1:name "1kxy_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1537156 ; + schema1:name "1kxy_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1536355 ; + schema1:name "1kxy_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106252 ; + schema1:name "1laa.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 595712 ; + schema1:name "1laa.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1330060 ; + schema1:name "1laa_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1laa_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1329320 ; + schema1:name "1laa_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106252 ; + schema1:name "1laa_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2128 ; + schema1:name "1laa_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1592062 ; + schema1:name "1laa_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1591351 ; + schema1:name "1laa_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104542 ; + schema1:name "1lhh.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 594748 ; + schema1:name "1lhh.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1291572 ; + schema1:name "1lhh_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lhh_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1290832 ; + schema1:name "1lhh_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104542 ; + schema1:name "1lhh_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2133 ; + schema1:name "1lhh_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1521367 ; + schema1:name "1lhh_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1520656 ; + schema1:name "1lhh_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107287 ; + schema1:name "1lhi.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592076 ; + schema1:name "1lhi.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1386532 ; + schema1:name "1lhi_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lhi_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1385792 ; + schema1:name "1lhi_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 107287 ; + schema1:name "1lhi_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:name "1lhi_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1703527 ; + schema1:name "1lhi_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1702816 ; + schema1:name "1lhi_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107962 ; + schema1:name "1lhj.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592079 ; + schema1:name "1lhj.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1258732 ; + schema1:name "1lhj_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lhj_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1257992 ; + schema1:name "1lhj_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107962 ; + schema1:name "1lhj_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2126 ; + schema1:name "1lhj_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1463902 ; + schema1:name "1lhj_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1463191 ; + schema1:name "1lhj_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 106612 ; + schema1:name "1lhk.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 596021 ; + schema1:name "1lhk.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1300688 ; + schema1:name "1lhk_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lhk_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1299996 ; + schema1:name "1lhk_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106612 ; + schema1:name "1lhk_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2126 ; + schema1:name "1lhk_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1536532 ; + schema1:name "1lhk_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1535731 ; + schema1:name "1lhk_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106702 ; + schema1:name "1lhl.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 596642 ; + schema1:name "1lhl.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1296228 ; + schema1:name "1lhl_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lhl_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1295488 ; + schema1:name "1lhl_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106702 ; + schema1:name "1lhl_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2135 ; + schema1:name "1lhl_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1527307 ; + schema1:name "1lhl_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1526596 ; + schema1:name "1lhl_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104632 ; + schema1:name "1lhm.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594293 ; + schema1:name "1lhm.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1270792 ; + schema1:name "1lhm_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lhm_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1270052 ; + schema1:name "1lhm_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104632 ; + schema1:name "1lhm_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2120 ; + schema1:name "1lhm_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1482982 ; + schema1:name "1lhm_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1482271 ; + schema1:name "1lhm_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96841 ; + schema1:name "1loz.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 593659 ; + schema1:name "1loz.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1316392 ; + schema1:name "1loz_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1loz_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1315628 ; + schema1:name "1loz_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96841 ; + schema1:name "1loz_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2097 ; + schema1:name "1loz_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1568071 ; + schema1:name "1loz_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1567360 ; + schema1:name "1loz_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105405 ; + schema1:name "1lsa.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577554 ; + schema1:name "1lsa.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1447456 ; + schema1:name "1lsa_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lsa_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1446716 ; + schema1:name "1lsa_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105405 ; + schema1:name "1lsa_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2113 ; + schema1:name "1lsa_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1839075 ; + schema1:name "1lsa_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1838364 ; + schema1:name "1lsa_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104730 ; + schema1:name "1lsb.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "1lsb.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1392808 ; + schema1:name "1lsb_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lsb_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1392068 ; + schema1:name "1lsb_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104730 ; + schema1:name "1lsb_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2121 ; + schema1:name "1lsb_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1736610 ; + schema1:name "1lsb_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1735899 ; + schema1:name "1lsb_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102435 ; + schema1:name "1lsc.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "1lsc.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1353784 ; + schema1:name "1lsc_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lsc_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1353044 ; + schema1:name "1lsc_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102435 ; + schema1:name "1lsc_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2120 ; + schema1:name "1lsc_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1663440 ; + schema1:name "1lsc_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1662729 ; + schema1:name "1lsc_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101220 ; + schema1:name "1lsd.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577554 ; + schema1:name "1lsd.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1919056 ; + schema1:name "1lsd_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lsd_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1918316 ; + schema1:name "1lsd_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101220 ; + schema1:name "1lsd_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2126 ; + schema1:name "1lsd_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2723325 ; + schema1:name "1lsd_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2722614 ; + schema1:name "1lsd_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101625 ; + schema1:name "1lse.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577554 ; + schema1:name "1lse.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1584688 ; + schema1:name "1lse_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lse_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1583948 ; + schema1:name "1lse_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101625 ; + schema1:name "1lse_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2127 ; + schema1:name "1lse_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2096385 ; + schema1:name "1lse_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2095674 ; + schema1:name "1lse_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104325 ; + schema1:name "1lsf.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577548 ; + schema1:name "1lsf.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1400224 ; + schema1:name "1lsf_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lsf_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1399484 ; + schema1:name "1lsf_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104325 ; + schema1:name "1lsf_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2120 ; + schema1:name "1lsf_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1750515 ; + schema1:name "1lsf_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1749804 ; + schema1:name "1lsf_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111300 ; + schema1:name "1lsm.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 578100 ; + schema1:name "1lsm.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1660268 ; + schema1:name "1lsm_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lsm_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1659576 ; + schema1:name "1lsm_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111300 ; + schema1:name "1lsm_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2114 ; + schema1:name "1lsm_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2237145 ; + schema1:name "1lsm_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2236344 ; + schema1:name "1lsm_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112200 ; + schema1:name "1lsn.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577738 ; + schema1:name "1lsn.top" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1581756 ; + schema1:name "1lsn_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lsn_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1581064 ; + schema1:name "1lsn_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 112200 ; + schema1:name "1lsn_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2121 ; + schema1:name "1lsn_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2090625 ; + schema1:name "1lsn_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2089824 ; + schema1:name "1lsn_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107025 ; + schema1:name "1lsy.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577679 ; + schema1:name "1lsy.top" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1369716 ; + schema1:name "1lsy_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lsy_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1369048 ; + schema1:name "1lsy_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107025 ; + schema1:name "1lsy_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2113 ; + schema1:name "1lsy_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1692465 ; + schema1:name "1lsy_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1691574 ; + schema1:name "1lsy_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 98101 ; + schema1:name "1lyo.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "1lyo.top" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1227412 ; + schema1:name "1lyo_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lyo_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1226672 ; + schema1:name "1lyo_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98101 ; + schema1:name "1lyo_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2135 ; + schema1:name "1lyo_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1426501 ; + schema1:name "1lyo_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1425790 ; + schema1:name "1lyo_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105946 ; + schema1:name "1lyy.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 597044 ; + schema1:name "1lyy.top" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1415876 ; + schema1:name "1lyy_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lyy_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1415184 ; + schema1:name "1lyy_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105886 ; + schema1:name "1lyy_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2125 ; + schema1:name "1lyy_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1751401 ; + schema1:name "1lyy_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1750600 ; + schema1:name "1lyy_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102165 ; + schema1:name "1lyz.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577938 ; + schema1:name "1lyz.top" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1288896 ; + schema1:name "1lyz_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lyz_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1288132 ; + schema1:name "1lyz_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102165 ; + schema1:name "1lyz_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2103 ; + schema1:name "1lyz_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1539645 ; + schema1:name "1lyz_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1538934 ; + schema1:name "1lyz_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 90997 ; + schema1:name "1lz1.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594789 ; + schema1:name "1lz1.top" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1240180 ; + schema1:name "1lz1_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lz1_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1239440 ; + schema1:name "1lz1_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 90997 ; + schema1:name "1lz1_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2126 ; + schema1:name "1lz1_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1424932 ; + schema1:name "1lz1_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1424221 ; + schema1:name "1lz1_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102300 ; + schema1:name "1lza.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "1lza.top" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1292224 ; + schema1:name "1lza_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lza_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1291484 ; + schema1:name "1lza_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102300 ; + schema1:name "1lza_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2128 ; + schema1:name "1lza_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1548015 ; + schema1:name "1lza_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1547304 ; + schema1:name "1lza_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98790 ; + schema1:name "1lzd.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576329 ; + schema1:name "1lzd.top" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1262000 ; + schema1:name "1lzd_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lzd_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1261260 ; + schema1:name "1lzd_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98790 ; + schema1:name "1lzd_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2133 ; + schema1:name "1lzd_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1493205 ; + schema1:name "1lzd_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1492494 ; + schema1:name "1lzd_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 118020 ; + schema1:name "1lzt.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577554 ; + schema1:name "1lzt.top" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1383664 ; + schema1:name "1lzt_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 940 ; + schema1:name "1lzt_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1382924 ; + schema1:name "1lzt_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 117960 ; + schema1:name "1lzt_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 947 ; + schema1:name "1lzt_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1719465 ; + schema1:name "1lzt_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1718754 ; + schema1:name "1lzt_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105301 ; + schema1:name "1rex.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594803 ; + schema1:name "1rex.top" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1268112 ; + schema1:name "1rex_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1rex_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1267372 ; + schema1:name "1rex_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105301 ; + schema1:name "1rex_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2122 ; + schema1:name "1rex_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1477306 ; + schema1:name "1rex_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1476595 ; + schema1:name "1rex_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101476 ; + schema1:name "1rfp.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577540 ; + schema1:name "1rfp.top" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1304092 ; + schema1:name "1rfp_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1rfp_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1303352 ; + schema1:name "1rfp_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101476 ; + schema1:name "1rfp_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2122 ; + schema1:name "1rfp_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1570276 ; + schema1:name "1rfp_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1569565 ; + schema1:name "1rfp_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100492 ; + schema1:name "1tay.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 591365 ; + schema1:name "1tay.top" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1333676 ; + schema1:name "1tay_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "1tay_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1332936 ; + schema1:name "1tay_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100492 ; + schema1:name "1tay_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2106 ; + schema1:name "1tay_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1605067 ; + schema1:name "1tay_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1604356 ; + schema1:name "1tay_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101302 ; + schema1:name "1tby.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 594109 ; + schema1:name "1tby.top" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1328348 ; + schema1:name "1tby_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "1tby_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1327608 ; + schema1:name "1tby_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101302 ; + schema1:name "1tby_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2099 ; + schema1:name "1tby_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1590892 ; + schema1:name "1tby_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1590181 ; + schema1:name "1tby_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101077 ; + schema1:name "1tcy.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594586 ; + schema1:name "1tcy.top" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1299300 ; + schema1:name "1tcy_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1tcy_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1298560 ; + schema1:name "1tcy_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 101077 ; + schema1:name "1tcy_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2128 ; + schema1:name "1tcy_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1536127 ; + schema1:name "1tcy_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1535416 ; + schema1:name "1tcy_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101122 ; + schema1:name "1tdy.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 596034 ; + schema1:name "1tdy.top" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1329372 ; + schema1:name "1tdy_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1tdy_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1328632 ; + schema1:name "1tdy_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101122 ; + schema1:name "1tdy_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2143 ; + schema1:name "1tdy_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1590307 ; + schema1:name "1tdy_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1589596 ; + schema1:name "1tdy_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 96391 ; + schema1:name "1uia.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 565479 ; + schema1:name "1uia.top" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1336384 ; + schema1:name "1uia_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1uia_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1335596 ; + schema1:name "1uia_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96391 ; + schema1:name "1uia_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2121 ; + schema1:name "1uia_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1649296 ; + schema1:name "1uia_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1648675 ; + schema1:name "1uia_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96706 ; + schema1:name "1uic.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 575198 ; + schema1:name "1uic.top" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1285608 ; + schema1:name "1uic_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1uic_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1284868 ; + schema1:name "1uic_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96706 ; + schema1:name "1uic_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2128 ; + schema1:name "1uic_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1540261 ; + schema1:name "1uic_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1539550 ; + schema1:name "1uic_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97831 ; + schema1:name "1uid.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578422 ; + schema1:name "1uid.top" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1309984 ; + schema1:name "1uid_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1uid_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1309244 ; + schema1:name "1uid_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97831 ; + schema1:name "1uid_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2137 ; + schema1:name "1uid_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1581481 ; + schema1:name "1uid_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1580770 ; + schema1:name "1uid_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96841 ; + schema1:name "1uie.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 574301 ; + schema1:name "1uie.top" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1293608 ; + schema1:name "1uie_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1uie_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1292868 ; + schema1:name "1uie_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96841 ; + schema1:name "1uie_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2127 ; + schema1:name "1uie_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1556731 ; + schema1:name "1uie_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1556020 ; + schema1:name "1uie_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96706 ; + schema1:name "1uif.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577092 ; + schema1:name "1uif.top" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1294008 ; + schema1:name "1uif_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1uif_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1293268 ; + schema1:name "1uif_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96706 ; + schema1:name "1uif_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2121 ; + schema1:name "1uif_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1553221 ; + schema1:name "1uif_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1552510 ; + schema1:name "1uif_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97291 ; + schema1:name "1uig.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577537 ; + schema1:name "1uig.top" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1298476 ; + schema1:name "1uig_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1uig_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1297736 ; + schema1:name "1uig_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97291 ; + schema1:name "1uig_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2116 ; + schema1:name "1uig_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1559746 ; + schema1:name "1uig_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1559035 ; + schema1:name "1uig_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98250 ; + schema1:name "1vdq.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "1vdq.top" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1358804 ; + schema1:name "1vdq_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1vdq_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1358064 ; + schema1:name "1vdq_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98250 ; + schema1:name "1vdq_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2125 ; + schema1:name "1vdq_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1672890 ; + schema1:name "1vdq_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1672158 ; + schema1:name "1vdq_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104055 ; + schema1:name "1vds.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "1vds.top" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1316828 ; + schema1:name "1vds_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1vds_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1316088 ; + schema1:name "1vds_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104055 ; + schema1:name "1vds_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2098 ; + schema1:name "1vds_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1594185 ; + schema1:name "1vds_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1593453 ; + schema1:name "1vds_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103785 ; + schema1:name "1vdt.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "1vdt.top" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1330076 ; + schema1:name "1vdt_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "1vdt_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1329336 ; + schema1:name "1vdt_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103785 ; + schema1:name "1vdt_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2093 ; + schema1:name "1vdt_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1619025 ; + schema1:name "1vdt_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1618293 ; + schema1:name "1vdt_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98790 ; + schema1:name "1ved.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "1ved.top" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1335260 ; + schema1:name "1ved_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1ved_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1334520 ; + schema1:name "1ved_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98790 ; + schema1:name "1ved_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2123 ; + schema1:name "1ved_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1628745 ; + schema1:name "1ved_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1628013 ; + schema1:name "1ved_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102508 ; + schema1:name "1xei.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 577564 ; + schema1:name "1xei.top" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1448080 ; + schema1:name "1xei_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1xei_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1447340 ; + schema1:name "1xei_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102448 ; + schema1:name "1xei_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2130 ; + schema1:name "1xei_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1840303 ; + schema1:name "1xei_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1839556 ; + schema1:name "1xei_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98863 ; + schema1:name "1xej.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577567 ; + schema1:name "1xej.top" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1393504 ; + schema1:name "1xej_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1xej_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1392764 ; + schema1:name "1xej_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 98803 ; + schema1:name "1xej_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2128 ; + schema1:name "1xej_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1737973 ; + schema1:name "1xej_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1737226 ; + schema1:name "1xej_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 94543 ; + schema1:name "1xek.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577564 ; + schema1:name "1xek.top" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1390264 ; + schema1:name "1xek_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1xek_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1389524 ; + schema1:name "1xek_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 94483 ; + schema1:name "1xek_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2122 ; + schema1:name "1xek_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1731898 ; + schema1:name "1xek_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1731151 ; + schema1:name "1xek_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 88248 ; + schema1:name "2a6u.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577517 ; + schema1:name "2a6u.top" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1307908 ; + schema1:name "2a6u_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "2a6u_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1307168 ; + schema1:name "2a6u_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88248 ; + schema1:name "2a6u_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2087 ; + schema1:name "2a6u_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1577433 ; + schema1:name "2a6u_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1576722 ; + schema1:name "2a6u_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 95820 ; + schema1:name "2aub.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "2aub.top" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1351460 ; + schema1:name "2aub_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2aub_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1350720 ; + schema1:name "2aub_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 95820 ; + schema1:name "2aub_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2122 ; + schema1:name "2aub_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1659120 ; + schema1:name "2aub_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1658388 ; + schema1:name "2aub_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115291 ; + schema1:name "2bqg.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592390 ; + schema1:name "2bqg.top" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1458228 ; + schema1:name "2bqg_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2bqg_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1457488 ; + schema1:name "2bqg_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115291 ; + schema1:name "2bqg_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2116 ; + schema1:name "2bqg_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1837216 ; + schema1:name "2bqg_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1836505 ; + schema1:name "2bqg_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112456 ; + schema1:name "2bqh.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592393 ; + schema1:name "2bqh.top" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2198604 ; + schema1:name "2bqh_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2bqh_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2197864 ; + schema1:name "2bqh_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112456 ; + schema1:name "2bqh_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2150 ; + schema1:name "2bqh_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3225421 ; + schema1:name "2bqh_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3224710 ; + schema1:name "2bqh_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110701 ; + schema1:name "2bqi.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592390 ; + schema1:name "2bqi.top" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1369092 ; + schema1:name "2bqi_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2bqi_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1368352 ; + schema1:name "2bqi_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110701 ; + schema1:name "2bqi_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2127 ; + schema1:name "2bqi_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1670086 ; + schema1:name "2bqi_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1669375 ; + schema1:name "2bqi_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120151 ; + schema1:name "2bqk.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592389 ; + schema1:name "2bqk.top" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2200836 ; + schema1:name "2bqk_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2bqk_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2200096 ; + schema1:name "2bqk_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120151 ; + schema1:name "2bqk_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2170 ; + schema1:name "2bqk_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3229606 ; + schema1:name "2bqk_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3228895 ; + schema1:name "2bqk_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113536 ; + schema1:name "2bqm.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592390 ; + schema1:name "2bqm.top" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1459236 ; + schema1:name "2bqm_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2bqm_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1458496 ; + schema1:name "2bqm_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113536 ; + schema1:name "2bqm_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2098 ; + schema1:name "2bqm_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1839106 ; + schema1:name "2bqm_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1838395 ; + schema1:name "2bqm_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106216 ; + schema1:name "2c8o.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577555 ; + schema1:name "2c8o.top" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1316324 ; + schema1:name "2c8o_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "2c8o_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1315584 ; + schema1:name "2c8o_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106216 ; + schema1:name "2c8o_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2100 ; + schema1:name "2c8o_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1593241 ; + schema1:name "2c8o_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1592508 ; + schema1:name "2c8o_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106351 ; + schema1:name "2c8p.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577552 ; + schema1:name "2c8p.top" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1294076 ; + schema1:name "2c8p_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2c8p_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1293336 ; + schema1:name "2c8p_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106351 ; + schema1:name "2c8p_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2125 ; + schema1:name "2c8p_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1551526 ; + schema1:name "2c8p_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1550793 ; + schema1:name "2c8p_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97586 ; + schema1:name "2cds.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577562 ; + schema1:name "2cds.top" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1313332 ; + schema1:name "2cds_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2cds_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1312592 ; + schema1:name "2cds_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97586 ; + schema1:name "2cds_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2127 ; + schema1:name "2cds_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1587581 ; + schema1:name "2cds_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1586870 ; + schema1:name "2cds_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 94773 ; + schema1:name "2epe.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 577916 ; + schema1:name "2epe.top" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1292780 ; + schema1:name "2epe_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "2epe_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1292064 ; + schema1:name "2epe_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 94773 ; + schema1:name "2epe_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2106 ; + schema1:name "2epe_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1547778 ; + schema1:name "2epe_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1546977 ; + schema1:name "2epe_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101355 ; + schema1:name "2hs7.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577554 ; + schema1:name "2hs7.top" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1382996 ; + schema1:name "2hs7_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "2hs7_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1382256 ; + schema1:name "2hs7_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 101355 ; + schema1:name "2hs7_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2069 ; + schema1:name "2hs7_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1718250 ; + schema1:name "2hs7_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1717518 ; + schema1:name "2hs7_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100815 ; + schema1:name "2hs9.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577548 ; + schema1:name "2hs9.top" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1399916 ; + schema1:name "2hs9_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2hs9_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1399176 ; + schema1:name "2hs9_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100815 ; + schema1:name "2hs9_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2128 ; + schema1:name "2hs9_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1749975 ; + schema1:name "2hs9_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1749243 ; + schema1:name "2hs9_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 100950 ; + schema1:name "2hso.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "2hso.top" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1383572 ; + schema1:name "2hso_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2hso_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1382832 ; + schema1:name "2hso_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100950 ; + schema1:name "2hso_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2107 ; + schema1:name "2hso_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1719330 ; + schema1:name "2hso_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1718598 ; + schema1:name "2hso_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110032 ; + schema1:name "2lhm.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594032 ; + schema1:name "2lhm.top" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1241404 ; + schema1:name "2lhm_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2lhm_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1240568 ; + schema1:name "2lhm_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110032 ; + schema1:name "2lhm_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2115 ; + schema1:name "2lhm_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1428442 ; + schema1:name "2lhm_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1427911 ; + schema1:name "2lhm_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108645 ; + schema1:name "2lym.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577554 ; + schema1:name "2lym.top" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1448896 ; + schema1:name "2lym_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2lym_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1448156 ; + schema1:name "2lym_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108645 ; + schema1:name "2lym_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2133 ; + schema1:name "2lym_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1841775 ; + schema1:name "2lym_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1841064 ; + schema1:name "2lym_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:name "2lyz.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "2lyz.top" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1342048 ; + schema1:name "2lyz_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2lyz_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1341308 ; + schema1:name "2lyz_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:name "2lyz_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2124 ; + schema1:name "2lyz_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1641435 ; + schema1:name "2lyz_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1640724 ; + schema1:name "2lyz_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99183 ; + schema1:name "2yvb.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "2yvb.top" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1296676 ; + schema1:name "2yvb_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2yvb_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1295936 ; + schema1:name "2yvb_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99183 ; + schema1:name "2yvb_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2122 ; + schema1:name "2yvb_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1556373 ; + schema1:name "2yvb_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1555662 ; + schema1:name "2yvb_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98913 ; + schema1:name "2zq4.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "2zq4.top" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1380268 ; + schema1:name "2zq4_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2zq4_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1379528 ; + schema1:name "2zq4_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98913 ; + schema1:name "2zq4_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2098 ; + schema1:name "2zq4_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1713108 ; + schema1:name "2zq4_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1712397 ; + schema1:name "2zq4_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100983 ; + schema1:name "3a3r.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576995 ; + schema1:name "3a3r.top" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1370784 ; + schema1:name "3a3r_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "3a3r_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1369996 ; + schema1:name "3a3r_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100983 ; + schema1:name "3a3r_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2128 ; + schema1:name "3a3r_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1696008 ; + schema1:name "3a3r_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1695387 ; + schema1:name "3a3r_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119703 ; + schema1:name "3exd.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "3exd.top" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1351252 ; + schema1:name "3exd_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "3exd_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1350512 ; + schema1:name "3exd_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119703 ; + schema1:name "3exd_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2107 ; + schema1:name "3exd_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1658703 ; + schema1:name "3exd_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1657992 ; + schema1:name "3exd_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99183 ; + schema1:name "3iju.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "3iju.top" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1341172 ; + schema1:name "3iju_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "3iju_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1340432 ; + schema1:name "3iju_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99183 ; + schema1:name "3iju_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2114 ; + schema1:name "3iju_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1639803 ; + schema1:name "3iju_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1639092 ; + schema1:name "3iju_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99183 ; + schema1:name "3ijv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "3ijv.top" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1292428 ; + schema1:name "3ijv_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "3ijv_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1291688 ; + schema1:name "3ijv_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99183 ; + schema1:name "3ijv_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2133 ; + schema1:name "3ijv_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1548408 ; + schema1:name "3ijv_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1547697 ; + schema1:name "3ijv_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 93198 ; + schema1:name "3j4g.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 578055 ; + schema1:name "3j4g.top" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1307368 ; + schema1:name "3j4g_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "3j4g_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1306628 ; + schema1:name "3j4g_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 93198 ; + schema1:name "3j4g_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2130 ; + schema1:name "3j4g_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1575768 ; + schema1:name "3j4g_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1575057 ; + schema1:name "3j4g_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 91353 ; + schema1:name "3j6k.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577536 ; + schema1:name "3j6k.top" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1329292 ; + schema1:name "3j6k_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "3j6k_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1328552 ; + schema1:name "3j6k_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 91353 ; + schema1:name "3j6k_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2113 ; + schema1:name "3j6k_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1617528 ; + schema1:name "3j6k_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1616817 ; + schema1:name "3j6k_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110265 ; + schema1:name "3lym.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577554 ; + schema1:name "3lym.top" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1445368 ; + schema1:name "3lym_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "3lym_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1444628 ; + schema1:name "3lym_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110265 ; + schema1:name "3lym_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2127 ; + schema1:name "3lym_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1835160 ; + schema1:name "3lym_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1834449 ; + schema1:name "3lym_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:name "3lyz.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "3lyz.top" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1324336 ; + schema1:name "3lyz_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "3lyz_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1323596 ; + schema1:name "3lyz_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:name "3lyz_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2119 ; + schema1:name "3lyz_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1608225 ; + schema1:name "3lyz_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1607514 ; + schema1:name "3lyz_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99813 ; + schema1:name "3qy4.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 571095 ; + schema1:name "3qy4.top" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1300228 ; + schema1:name "3qy4_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "3qy4_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1299440 ; + schema1:name "3qy4_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99813 ; + schema1:name "3qy4_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2097 ; + schema1:name "3qy4_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1572663 ; + schema1:name "3qy4_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1572042 ; + schema1:name "3qy4_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103098 ; + schema1:name "3wmk.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "3wmk.top" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1342756 ; + schema1:name "3wmk_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "3wmk_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1342016 ; + schema1:name "3wmk_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103098 ; + schema1:name "3wmk_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2098 ; + schema1:name "3wmk_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1642773 ; + schema1:name "3wmk_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1642062 ; + schema1:name "3wmk_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97428 ; + schema1:name "3wpj.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "3wpj.top" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1357732 ; + schema1:name "3wpj_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "3wpj_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1356992 ; + schema1:name "3wpj_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97428 ; + schema1:name "3wpj_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2116 ; + schema1:name "3wpj_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1670853 ; + schema1:name "3wpj_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1670142 ; + schema1:name "3wpj_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104223 ; + schema1:name "3wvx.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576872 ; + schema1:name "3wvx.top" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1287888 ; + schema1:name "3wvx_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "3wvx_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1287196 ; + schema1:name "3wvx_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104223 ; + schema1:name "3wvx_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2125 ; + schema1:name "3wvx_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1540758 ; + schema1:name "3wvx_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1539957 ; + schema1:name "3wvx_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 109308 ; + schema1:name "4axt.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "4axt.top" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1334044 ; + schema1:name "4axt_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4axt_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1333304 ; + schema1:name "4axt_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 109308 ; + schema1:name "4axt_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:name "4axt_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1626438 ; + schema1:name "4axt_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1625727 ; + schema1:name "4axt_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110793 ; + schema1:name "4b0d.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "4b0d.top" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1355428 ; + schema1:name "4b0d_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4b0d_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1354688 ; + schema1:name "4b0d_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110793 ; + schema1:name "4b0d_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2122 ; + schema1:name "4b0d_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1666533 ; + schema1:name "4b0d_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1665822 ; + schema1:name "4b0d_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98778 ; + schema1:name "4hv1.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "4hv1.top" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1339660 ; + schema1:name "4hv1_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4hv1_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1338920 ; + schema1:name "4hv1_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98778 ; + schema1:name "4hv1_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2137 ; + schema1:name "4hv1_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1636968 ; + schema1:name "4hv1_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1636257 ; + schema1:name "4hv1_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 94593 ; + schema1:name "4hv2.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "4hv2.top" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1346284 ; + schema1:name "4hv2_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4hv2_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1345544 ; + schema1:name "4hv2_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 94593 ; + schema1:name "4hv2_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2122 ; + schema1:name "4hv2_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1649388 ; + schema1:name "4hv2_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1648677 ; + schema1:name "4hv2_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103233 ; + schema1:name "4i8s.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "4i8s.top" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1343908 ; + schema1:name "4i8s_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4i8s_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1343168 ; + schema1:name "4i8s_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103233 ; + schema1:name "4i8s_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2127 ; + schema1:name "4i8s_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1644933 ; + schema1:name "4i8s_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1644222 ; + schema1:name "4i8s_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 95943 ; + schema1:name "4ias.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "4ias.top" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1345492 ; + schema1:name "4ias_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4ias_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1344752 ; + schema1:name "4ias_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 95943 ; + schema1:name "4ias_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2121 ; + schema1:name "4ias_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1647903 ; + schema1:name "4ias_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1647192 ; + schema1:name "4ias_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105798 ; + schema1:name "4iat.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "4iat.top" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1335412 ; + schema1:name "4iat_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4iat_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1334672 ; + schema1:name "4iat_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 105798 ; + schema1:name "4iat_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:name "4iat_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1629003 ; + schema1:name "4iat_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1628292 ; + schema1:name "4iat_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 109455 ; + schema1:name "4lym.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "4lym.top" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1375384 ; + schema1:name "4lym_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4lym_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1374644 ; + schema1:name "4lym_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 109455 ; + schema1:name "4lym_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:name "4lym_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1703940 ; + schema1:name "4lym_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1703229 ; + schema1:name "4lym_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 96256 ; + schema1:name "4lyo.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577917 ; + schema1:name "4lyo.top" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1286444 ; + schema1:name "4lyo_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4lyo_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1285728 ; + schema1:name "4lyo_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96256 ; + schema1:name "4lyo_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2143 ; + schema1:name "4lyo_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1535896 ; + schema1:name "4lyo_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1535095 ; + schema1:name "4lyo_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:name "4lyz.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "4lyz.top" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1287256 ; + schema1:name "4lyz_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4lyz_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1286516 ; + schema1:name "4lyz_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:name "4lyz_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2104 ; + schema1:name "4lyz_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1538700 ; + schema1:name "4lyz_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1537989 ; + schema1:name "4lyz_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105258 ; + schema1:name "4nhi.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "4nhi.top" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1384156 ; + schema1:name "4nhi_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4nhi_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1383416 ; + schema1:name "4nhi_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105258 ; + schema1:name "4nhi_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2116 ; + schema1:name "4nhi_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1720398 ; + schema1:name "4nhi_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1719687 ; + schema1:name "4nhi_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106968 ; + schema1:name "4qeq.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577668 ; + schema1:name "4qeq.top" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1360228 ; + schema1:name "4qeq_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4qeq_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1359464 ; + schema1:name "4qeq_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106968 ; + schema1:name "4qeq_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2107 ; + schema1:name "4qeq_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1673778 ; + schema1:name "4qeq_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1673067 ; + schema1:name "4qeq_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4953 ; + schema1:name "4r0p.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 31947 ; + schema1:name "4r0p.top" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 229932 ; + schema1:name "4r0p_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4r0p_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 229932 ; + schema1:name "4r0p_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4953 ; + schema1:name "4r0p_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2126 ; + schema1:name "4r0p_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 342858 ; + schema1:name "4r0p_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 342867 ; + schema1:name "4r0p_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 93378 ; + schema1:name "4rlm.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "4rlm.top" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1301644 ; + schema1:name "4rlm_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4rlm_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1300904 ; + schema1:name "4rlm_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 93378 ; + schema1:name "4rlm_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:name "4rlm_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1565688 ; + schema1:name "4rlm_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1564977 ; + schema1:name "4rlm_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 93648 ; + schema1:name "4rln.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "4rln.top" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1306180 ; + schema1:name "4rln_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4rln_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1305440 ; + schema1:name "4rln_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 93648 ; + schema1:name "4rln_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2125 ; + schema1:name "4rln_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1574193 ; + schema1:name "4rln_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1573482 ; + schema1:name "4rln_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88653 ; + schema1:name "4wmg.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "4wmg.top" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1353772 ; + schema1:name "4wmg_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4wmg_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1353032 ; + schema1:name "4wmg_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88653 ; + schema1:name "4wmg_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2122 ; + schema1:name "4wmg_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1663428 ; + schema1:name "4wmg_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1662717 ; + schema1:name "4wmg_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88743 ; + schema1:name "5a3e.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 571959 ; + schema1:name "5a3e.top" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1295536 ; + schema1:name "5a3e_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "5a3e_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1294796 ; + schema1:name "5a3e_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88743 ; + schema1:name "5a3e_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2116 ; + schema1:name "5a3e_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1562403 ; + schema1:name "5a3e_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1561692 ; + schema1:name "5a3e_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103638 ; + schema1:name "5amy.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "5amy.top" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1296892 ; + schema1:name "5amy_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "5amy_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1296152 ; + schema1:name "5amy_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103638 ; + schema1:name "5amy_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2128 ; + schema1:name "5amy_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1556778 ; + schema1:name "5amy_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1556067 ; + schema1:name "5amy_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99453 ; + schema1:name "5e4p.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "5e4p.top" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1355428 ; + schema1:name "5e4p_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "5e4p_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1354688 ; + schema1:name "5e4p_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99453 ; + schema1:name "5e4p_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2123 ; + schema1:name "5e4p_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1666533 ; + schema1:name "5e4p_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1665822 ; + schema1:name "5e4p_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102288 ; + schema1:name "5hnc.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "5hnc.top" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1295164 ; + schema1:name "5hnc_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "5hnc_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1294424 ; + schema1:name "5hnc_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 102288 ; + schema1:name "5hnc_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2098 ; + schema1:name "5hnc_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1553538 ; + schema1:name "5hnc_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1552827 ; + schema1:name "5hnc_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 91398 ; + schema1:name "5hnl.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577919 ; + schema1:name "5hnl.top" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1328996 ; + schema1:name "5hnl_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "5hnl_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1328280 ; + schema1:name "5hnl_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 91398 ; + schema1:name "5hnl_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2122 ; + schema1:name "5hnl_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1615683 ; + schema1:name "5hnl_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1614882 ; + schema1:name "5hnl_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 115113 ; + schema1:name "5i4w.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "5i4w.top" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1356436 ; + schema1:name "5i4w_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "5i4w_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1355696 ; + schema1:name "5i4w_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115113 ; + schema1:name "5i4w_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2078 ; + schema1:name "5i4w_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1668423 ; + schema1:name "5i4w_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1667712 ; + schema1:name "5i4w_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104043 ; + schema1:name "5k2k.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577536 ; + schema1:name "5k2k.top" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1272700 ; + schema1:name "5k2k_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "5k2k_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1271960 ; + schema1:name "5k2k_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104043 ; + schema1:name "5k2k_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2114 ; + schema1:name "5k2k_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1511418 ; + schema1:name "5k2k_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1510707 ; + schema1:name "5k2k_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108723 ; + schema1:name "5k2n.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577668 ; + schema1:name "5k2n.top" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1339564 ; + schema1:name "5k2n_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "5k2n_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1338800 ; + schema1:name "5k2n_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108723 ; + schema1:name "5k2n_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2087 ; + schema1:name "5k2n_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1635033 ; + schema1:name "5k2n_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1634322 ; + schema1:name "5k2n_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111063 ; + schema1:name "5k2p.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "5k2p.top" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1329580 ; + schema1:name "5k2p_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "5k2p_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1328840 ; + schema1:name "5k2p_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111063 ; + schema1:name "5k2p_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2115 ; + schema1:name "5k2p_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1618068 ; + schema1:name "5k2p_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1617357 ; + schema1:name "5k2p_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107283 ; + schema1:name "5k2q.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "5k2q.top" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1325188 ; + schema1:name "5k2q_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "5k2q_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1324448 ; + schema1:name "5k2q_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107283 ; + schema1:name "5k2q_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2125 ; + schema1:name "5k2q_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1609833 ; + schema1:name "5k2q_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1609122 ; + schema1:name "5k2q_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104718 ; + schema1:name "5k2r.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "5k2r.top" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1299052 ; + schema1:name "5k2r_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "5k2r_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1298312 ; + schema1:name "5k2r_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104718 ; + schema1:name "5k2r_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2127 ; + schema1:name "5k2r_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1560828 ; + schema1:name "5k2r_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1560117 ; + schema1:name "5k2r_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106203 ; + schema1:name "5k2s.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "5k2s.top" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1290916 ; + schema1:name "5k2s_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "5k2s_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1290176 ; + schema1:name "5k2s_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106203 ; + schema1:name "5k2s_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2143 ; + schema1:name "5k2s_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1545573 ; + schema1:name "5k2s_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1544862 ; + schema1:name "5k2s_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120525 ; + schema1:name "5lyt.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577941 ; + schema1:name "5lyt.top" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1424328 ; + schema1:name "5lyt_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "5lyt_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1423564 ; + schema1:name "5lyt_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120525 ; + schema1:name "5lyt_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2135 ; + schema1:name "5lyt_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1793580 ; + schema1:name "5lyt_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1792869 ; + schema1:name "5lyt_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:name "5lyz.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "5lyz.top" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1287256 ; + schema1:name "5lyz_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "5lyz_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1286516 ; + schema1:name "5lyz_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:name "5lyz_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2125 ; + schema1:name "5lyz_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1538700 ; + schema1:name "5lyz_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1537989 ; + schema1:name "5lyz_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97563 ; + schema1:name "5njm.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "5njm.top" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1314172 ; + schema1:name "5njm_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "5njm_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1313432 ; + schema1:name "5njm_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97563 ; + schema1:name "5njm_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2097 ; + schema1:name "5njm_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1589178 ; + schema1:name "5njm_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1588467 ; + schema1:name "5njm_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107283 ; + schema1:name "5yin.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 577541 ; + schema1:name "5yin.top" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1226764 ; + schema1:name "5yin_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "5yin_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1226024 ; + schema1:name "5yin_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107283 ; + schema1:name "5yin_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2143 ; + schema1:name "5yin_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1425288 ; + schema1:name "5yin_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1424577 ; + schema1:name "5yin_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 93198 ; + schema1:name "6gf0.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 571956 ; + schema1:name "6gf0.top" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1350040 ; + schema1:name "6gf0_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "6gf0_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1349300 ; + schema1:name "6gf0_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 93198 ; + schema1:name "6gf0_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2091 ; + schema1:name "6gf0_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1664598 ; + schema1:name "6gf0_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1663887 ; + schema1:name "6gf0_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97248 ; + schema1:name "6h0k.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 571959 ; + schema1:name "6h0k.top" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1350616 ; + schema1:name "6h0k_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "6h0k_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1349876 ; + schema1:name "6h0k_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97248 ; + schema1:name "6h0k_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2115 ; + schema1:name "6h0k_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1665678 ; + schema1:name "6h0k_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1664967 ; + schema1:name "6h0k_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 98193 ; + schema1:name "6h0l.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 571956 ; + schema1:name "6h0l.top" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1391728 ; + schema1:name "6h0l_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "6h0l_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1390988 ; + schema1:name "6h0l_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98193 ; + schema1:name "6h0l_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2116 ; + schema1:name "6h0l_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1742763 ; + schema1:name "6h0l_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1742052 ; + schema1:name "6h0l_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101760 ; + schema1:name "6lyt.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577548 ; + schema1:name "6lyt.top" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1333336 ; + schema1:name "6lyt_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "6lyt_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1332596 ; + schema1:name "6lyt_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101760 ; + schema1:name "6lyt_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2068 ; + schema1:name "6lyt_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1625100 ; + schema1:name "6lyt_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1624389 ; + schema1:name "6lyt_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:name "6lyz.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577550 ; + schema1:name "6lyz.top" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1214896 ; + schema1:name "6lyz_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "6lyz_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1214156 ; + schema1:name "6lyz_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:name "6lyz_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2110 ; + schema1:name "6lyz_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1403025 ; + schema1:name "6lyz_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1402314 ; + schema1:name "6lyz_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96348 ; + schema1:name "6s2n.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "6s2n.top" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1348012 ; + schema1:name "6s2n_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "6s2n_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1347272 ; + schema1:name "6s2n_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96348 ; + schema1:name "6s2n_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2114 ; + schema1:name "6s2n_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1652628 ; + schema1:name "6s2n_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1651917 ; + schema1:name "6s2n_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102018 ; + schema1:name "7byo.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "7byo.top" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1318996 ; + schema1:name "7byo_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "7byo_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1318256 ; + schema1:name "7byo_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102018 ; + schema1:name "7byo_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2116 ; + schema1:name "7byo_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1598223 ; + schema1:name "7byo_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1597512 ; + schema1:name "7byo_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101613 ; + schema1:name "7byp.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "7byp.top" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1328716 ; + schema1:name "7byp_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "7byp_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1327976 ; + schema1:name "7byp_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101613 ; + schema1:name "7byp_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2123 ; + schema1:name "7byp_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1616448 ; + schema1:name "7byp_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1615737 ; + schema1:name "7byp_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96888 ; + schema1:name "7c09.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "7c09.top" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1351612 ; + schema1:name "7c09_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "7c09_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1350872 ; + schema1:name "7c09_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96888 ; + schema1:name "7c09_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2123 ; + schema1:name "7c09_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1659378 ; + schema1:name "7c09_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1658667 ; + schema1:name "7c09_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100803 ; + schema1:name "7d01.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "7d01.top" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1325332 ; + schema1:name "7d01_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "7d01_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1324592 ; + schema1:name "7d01_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100803 ; + schema1:name "7d01_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2100 ; + schema1:name "7d01_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1610103 ; + schema1:name "7d01_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1609392 ; + schema1:name "7d01_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99723 ; + schema1:name "7d02.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "7d02.top" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1331452 ; + schema1:name "7d02_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "7d02_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1330712 ; + schema1:name "7d02_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99723 ; + schema1:name "7d02_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2098 ; + schema1:name "7d02_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1621578 ; + schema1:name "7d02_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1620867 ; + schema1:name "7d02_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97968 ; + schema1:name "7d04.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "7d04.top" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1326196 ; + schema1:name "7d04_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "7d04_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1325456 ; + schema1:name "7d04_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97968 ; + schema1:name "7d04_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2108 ; + schema1:name "7d04_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1611723 ; + schema1:name "7d04_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1611012 ; + schema1:name "7d04_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98913 ; + schema1:name "7d05.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "7d05.top" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1333684 ; + schema1:name "7d05_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "7d05_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1332944 ; + schema1:name "7d05_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98913 ; + schema1:name "7d05_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2096 ; + schema1:name "7d05_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1625763 ; + schema1:name "7d05_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1625052 ; + schema1:name "7d05_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88320 ; + schema1:name "7lyz.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577528 ; + schema1:name "7lyz.top" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1187464 ; + schema1:name "7lyz_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "7lyz_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1186724 ; + schema1:name "7lyz_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 88260 ; + schema1:name "7lyz_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:name "7lyz_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1351590 ; + schema1:name "7lyz_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1350879 ; + schema1:name "7lyz_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88260 ; + schema1:name "8lyz.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577529 ; + schema1:name "8lyz.top" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1343272 ; + schema1:name "8lyz_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "8lyz_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1342532 ; + schema1:name "8lyz_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88260 ; + schema1:name "8lyz_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2126 ; + schema1:name "8lyz_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1643730 ; + schema1:name "8lyz_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1643019 ; + schema1:name "8lyz_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 127 ; + schema1:dateModified "2023-07-20T15:48:27" ; + schema1:name "analytic.out" ; + schema1:sdDatePublished "2023-07-20T15:48:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1 ; + schema1:dateModified "2023-07-20T15:48:27" ; + schema1:name "file_1" ; + schema1:sdDatePublished "2023-07-20T15:48:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 39 ; + schema1:dateModified "2023-07-20T15:48:27" ; + schema1:name "file_1.out" ; + schema1:sdDatePublished "2023-07-20T15:48:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1 ; + schema1:dateModified "2023-07-20T15:48:27" ; + schema1:name "file_2" ; + schema1:sdDatePublished "2023-07-20T15:48:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 39 ; + schema1:dateModified "2023-07-20T15:48:27" ; + schema1:name "file_2.out" ; + schema1:sdDatePublished "2023-07-20T15:48:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1 ; + schema1:dateModified "2023-07-20T15:48:27" ; + schema1:name "file_3" ; + schema1:sdDatePublished "2023-07-20T15:48:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 39 ; + schema1:dateModified "2023-07-20T15:48:27" ; + schema1:name "file_3.out" ; + schema1:sdDatePublished "2023-07-20T15:48:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777334 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file0.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777273 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file1.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777631 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file10.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16777365 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file11.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777823 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file12.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777530 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file13.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777823 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file14.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777615 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file15.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777819 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file16.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777771 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file17.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777684 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file18.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777600 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file19.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777443 ; + schema1:dateModified "2024-01-22T13:48:21" ; + schema1:name "file2.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16777653 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file20.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777454 ; + schema1:dateModified "2024-01-22T13:48:21" ; + schema1:name "file21.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777851 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file22.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777584 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file23.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777580 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file24.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777605 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file25.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777806 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file26.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777360 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file27.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777505 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file28.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777513 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file29.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16777723 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file3.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777327 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file30.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777312 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file31.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16778157 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file32.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777453 ; + schema1:dateModified "2024-01-22T13:48:21" ; + schema1:name "file33.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777676 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file34.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777547 ; + schema1:dateModified "2024-01-22T13:48:21" ; + schema1:name "file35.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777985 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file36.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777330 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file37.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777368 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file38.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16777520 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file39.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777456 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file4.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777740 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file40.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777473 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file41.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777533 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file42.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777612 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file43.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777761 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file44.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777252 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file45.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777684 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file46.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777316 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file47.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16777747 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file48.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777238 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file49.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777494 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file5.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777282 ; + schema1:dateModified "2024-01-22T13:48:21" ; + schema1:name "file50.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777662 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file51.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777583 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file52.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777519 ; + schema1:dateModified "2024-01-22T13:48:21" ; + schema1:name "file53.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777329 ; + schema1:dateModified "2024-01-22T13:48:21" ; + schema1:name "file54.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777557 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file55.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777223 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file56.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16777622 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file57.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777407 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file58.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777409 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file59.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777574 ; + schema1:dateModified "2024-01-22T13:48:21" ; + schema1:name "file6.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777376 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file60.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777222 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file61.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777495 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file62.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777656 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file63.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777516 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file7.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777769 ; + schema1:dateModified "2024-01-22T13:48:21" ; + schema1:name "file8.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16777430 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file9.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:Dataset ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:name "dataset_64f_16mb" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777334 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file0.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777273 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file1.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777631 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file10.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777365 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file11.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777823 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file12.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777530 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file13.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777823 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file14.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16777615 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file15.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777819 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file16.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777771 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file17.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777684 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file18.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777600 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file19.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777443 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file2.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777653 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file20.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777454 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file21.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777851 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file22.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777584 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file23.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16777580 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file24.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777605 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file25.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777806 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file26.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777360 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file27.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777505 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file28.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777513 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file29.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777723 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file3.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777327 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file30.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777312 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file31.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16778157 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file32.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16777453 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file33.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777676 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file34.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777547 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file35.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777985 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file36.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777330 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file37.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777368 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file38.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777520 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file39.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777456 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file4.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777740 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file40.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777473 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file41.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16777533 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file42.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777612 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file43.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777761 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file44.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777252 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file45.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777684 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file46.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777316 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file47.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777747 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file48.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777238 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file49.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777494 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file5.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777282 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file50.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16777662 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file51.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777583 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file52.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777519 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file53.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777329 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file54.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777557 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file55.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777223 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file56.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777622 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file57.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777407 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file58.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777409 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file59.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777574 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file6.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16777376 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file60.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777222 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file61.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777495 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file62.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777656 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file63.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777516 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file7.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777769 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file8.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777430 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file9.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 687939 ; + schema1:name "21123123_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:name "21123123_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256196 ; + schema1:name "22010100_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 11277 ; + schema1:name "22010100_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 255094 ; + schema1:name "22010101_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 11272 ; + schema1:name "22010101_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 692232 ; + schema1:name "22010102_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:name "22010102_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256476 ; + schema1:name "22010103_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 11278 ; + schema1:name "22010103_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 255353 ; + schema1:name "22010104_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 11280 ; + schema1:name "22010104_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 693555 ; + schema1:name "22010105_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:name "22010105_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 260775 ; + schema1:name "22010106_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 26313 ; + schema1:name "22010106_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 684795 ; + schema1:name "22010107_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:name "22010107_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256807 ; + schema1:name "22010108_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 11267 ; + schema1:name "22010108_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 275764 ; + schema1:name "22010109_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 52616 ; + schema1:name "22010109_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 690921 ; + schema1:name "22010110_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:name "22010110_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 691522 ; + schema1:name "22010111_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:name "22010111_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 696702 ; + schema1:name "22010112_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:name "22010112_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256656 ; + schema1:name "22010113_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 11267 ; + schema1:name "22010113_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 253888 ; + schema1:name "22010114_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 11271 ; + schema1:name "22010114_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 691096 ; + schema1:name "22010115_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:name "22010115_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 691623 ; + schema1:name "22010116_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:name "22010116_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 698358 ; + schema1:name "22010117_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:name "22010117_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 696109 ; + schema1:name "22010118_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:name "22010118_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256064 ; + schema1:name "22010119_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 11277 ; + schema1:name "22010119_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 260455 ; + schema1:name "22010120_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 22551 ; + schema1:name "22010120_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 693734 ; + schema1:name "22010121_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:name "22010121_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 255243 ; + schema1:name "22010122_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 11268 ; + schema1:name "22010122_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:SoftwareApplication ; + schema1:citation ; + schema1:downloadUrl "https://manual.gromacs.org/documentation/2019.1/download.html" ; + schema1:installUrl "https://anaconda.org/bioconda/gromacs" ; + schema1:license ; + schema1:name "GROMACS" ; + schema1:softwareHelp "https://doi.org/10.5281/zenodo.2564761" ; + schema1:url "https://manual.gromacs.org/documentation/2019.1/" ; + schema1:version "2019.1" . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . 
+ + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:SoftwareApplication ; + schema1:name "dislib" ; + schema1:url "https://github.com/bsc-wdc/dislib" ; + schema1:version "0.9.0" . 
+ + a schema1:DataDownload ; + schema1:contentSize "10978259" ; + schema1:contentUrl "https://github.com/by-covid/BY-COVID_WP5_T5.2_baseline-use-case/archive/refs/heads/main.zip" ; + schema1:encodingFormat "application/zip" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-5691-7000" ; + schema1:name "Elliott J. Price" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Jacopo Selva" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-6409-8009" ; + schema1:name "Jessica Gomez-Garrido" . + + a schema1:Person ; + schema1:name "Simon Rolph" . + + a schema1:Person ; + schema1:name "Viktoria Isabel Schwarz" . + + a schema1:Person ; + schema1:name "Christoph Steinbeck" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-7522-4007" ; + schema1:name "Julian Uszkoreit" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Roberto Tonini" . + + a schema1:Person ; + schema1:name "Job van Riet" . + + a schema1:Person ; + schema1:name "Maria Sorokina" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-0324-8728" ; + schema1:name "Nandan Deshpande" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Alessandro D'Anca" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-0399-8713" ; + schema1:name "Marie-Dominique Devignes" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Fabrizio Bernardi" . + + a schema1:Person ; + schema1:name "Fabrice Touzain" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-1235-9462" ; + schema1:name "Valentine Murigneux" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Stefano Lorito" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-1480-3563" ; + schema1:name "Mike Thang" . 
+ + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-2021-6883" ; + schema1:name "Cristiane Taniguti" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-2151-7418" ; + schema1:name "Kary Ocaña" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Jorge Macías" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-3503-6383" ; + schema1:name "Varsha Kale" . + + a schema1:Person ; + schema1:name "Adam Tofilski" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-4032-5331" ; + schema1:name "Ziad Al-Bkhetan" . + + a schema1:Person ; + schema1:name "Daniel Seeliger" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-4131-4458" ; + schema1:name "Diego Garrido-Martín" . + + a schema1:Person ; + schema1:name "Kristian Peters" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-5192-126X" ; + schema1:name "Pavankumar Videm" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Carlos Sánchez Linares" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-5738-4477" ; + schema1:name "Roderic Guigó" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-7025-2241" ; + schema1:name "Hrishikesh Dhondge" . + + a schema1:Person ; + schema1:name "Isaure Chauvot de Beauchêne" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-7139-4073" ; + schema1:name "Cyril Noel" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-7429-1846" ; + schema1:name "Marco De La Pierre" . + + a schema1:Person ; + schema1:name "Daan Hazelaar", + "Daan Hazelaar" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Steven J. Gibbons" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-8122-9522" ; + schema1:name "Luiz Gadelha" . 
+ + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-8330-4071" ; + schema1:name "Mahnoor Zulfiqar" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Finn Løvholt" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Louise Cordrie" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Sonia Scardigno" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-2556-2125" ; + schema1:name "Jasper Ouwerkerk" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Fabrizio Romano" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-2949-6719" ; + schema1:name "Damon-Lee Pointon" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Marc de la Asunción" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Manuel J. Castro" . + + a schema1:Person ; + schema1:name "Bert L. de Groot" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-3683-6208" ; + schema1:name "Michael Hall" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Manuela Volpe" . + + a schema1:Person ; + schema1:name "Ying Sims" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0009-0006-9956-0404" ; + schema1:name "Will Eagles", + "William Eagles" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Archit Dabral" . + + a schema1:SoftwareApplication ; + schema1:license ; + schema1:name "numpy" ; + schema1:version "1.21.1" . + + a schema1:MediaObject ; + schema1:name "housing.csv" . + + a schema1:Organization ; + schema1:name "Indian Institute of Technology, BHU" . 
+ + a schema1:CollegeOrUniversity, + schema1:Organization ; + schema1:alternateName "UNIMAN" ; + schema1:name "The University of Manchester" ; + schema1:url "https://www.manchester.ac.uk/" . + + a schema1:Thing ; + schema1:alternateName "GÖG" ; + schema1:name "Gesundheit Österreich" ; + schema1:url "https://goeg.at/" . + + a schema1:Organization ; + schema1:name "Virginia Tech" . + + a schema1:Organization ; + schema1:name "International Center for Numerical Methods in Engineering" . + + a schema1:Organization ; + schema1:name "Sciensano" ; + schema1:url "https://www.sciensano.be/" . + + a schema1:CreativeWork ; + schema1:identifier "BSD-3-Clause" ; + schema1:name "BSD 3-Clause \"New\" or \"Revised\" License" . + + a schema1:CreativeWork ; + schema1:name "Process Run Crate" ; + schema1:version "0.5" . + + a schema1:CreativeWork ; + schema1:name "Workflow Run Crate" ; + schema1:version "0.5" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Collection ; + schema1:name "TronFlow" . + + a schema1:Collection ; + schema1:name "Vertebrate Genomes Pipelines (VGP) workflows" . + + a schema1:Person ; + schema1:name "Marco De La Pierre" . + + a schema1:Person ; + schema1:name "Julian Uszkoreit" . + + a schema1:Person ; + schema1:name "Valentine Murigneux" . + + a schema1:Person ; + schema1:name "Alexandre Cormier" . + + a schema1:Person ; + schema1:name "Cyril Noel" . + + a schema1:Person ; + schema1:name "Pierre Cuzin" . + + a schema1:Person ; + schema1:name "Mike Thang" . + + a schema1:Person ; + schema1:name "Hrishikesh Dhondge" . + + a schema1:Person ; + schema1:name "Jasper Ouwerkerk" . + + a schema1:Person ; + schema1:name "Kary Ocaña" . + + a schema1:Person ; + schema1:name "Marie-Dominique Devignes" . + + a schema1:Person ; + schema1:name "Varsha Kale" . + + a schema1:Person ; + schema1:name "Tatiana Gurbich" . + + a schema1:Person ; + schema1:name "Cristiane Taniguti" . 
+ + a schema1:Person ; + schema1:name "Peter Menzel" . + + a schema1:Person ; + schema1:name "Diego Garrido-Martín" . + + a schema1:Person ; + schema1:name "Roderic Guigó" . + + a schema1:Person ; + schema1:name "Mahnoor Zulfiqar" . + + a schema1:Person ; + schema1:name "Luiz Gadelha" . + + a schema1:Person ; + schema1:name "Anne Fouilloux" . + + a schema1:Person ; + schema1:name "Jessica Gomez-Garrido" . + + a schema1:Person ; + schema1:name "Michael Hall" . + + a schema1:Person ; + schema1:name "Ziad Al-Bkhetan" . + + a schema1:Person ; + schema1:name "Elliott J. Price" . + + a schema1:Person ; + schema1:name "Pavankumar Videm" . + + a schema1:Person ; + schema1:name "Paul Brack" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Air Quality Prediction" . + + a schema1:Organization, + schema1:Project ; + schema1:name "CAPSID" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Read2Map" . + + a schema1:Organization, + schema1:Project ; + schema1:name "ParslRNA-Seq: an efficient and scalable RNAseq analysis workflow for studies of differentiated gene expression" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Apis-wings" . + + a schema1:Organization, + schema1:Project ; + schema1:name "BioDT Use Case 4.1.1.2 Ecosystem services" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Metabolomics-Reproducibility" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Workflows for geographic science" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Katdetectr" . + + a schema1:Organization, + schema1:Project ; + schema1:name "ANSES-Ploufragan" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Tree of Life Genome Assembly" . + + a schema1:Organization, + schema1:Project ; + schema1:name "CholGen" . + + a schema1:Organization, + schema1:Project ; + schema1:name "BY-COVID (general)" . 
+ + a schema1:Organization, + schema1:Project ; + schema1:name "Pangenome database project" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Peter Menzel's Team" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Genome Data Compression Team" . + + a schema1:Organization, + schema1:Project ; + schema1:name "UX trial team" . + + a schema1:Organization, + schema1:Project ; + schema1:name "EOSC-Life-WP6-Demos" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Pawsey Supercomputing Research Centre" . + + a schema1:Organization, + schema1:Project ; + schema1:name "SeBiMER" . + + a schema1:Organization, + schema1:Project ; + schema1:name "nf-core viralrecon" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Applied Computational Biology at IEG/HMGU" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Single Cell Unit" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Workflows Australia" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Medizinisches Proteom-Center, Medical Bioinformatics" . + + a schema1:Organization, + schema1:Project ; + schema1:name "usegalaxy.be workflows" . + + a schema1:WebSite ; + schema1:name "Acrobat PDF 1.7 - Portable Document Format" . + + a schema1:MediaObject ; + schema1:name "randomsvd_matrix_cfg.tar" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "ashishbhawvl@gmail.com" ; + schema1:identifier "ashishbhawvl@gmail.com" ; + schema1:url "https://orcid.org/0000-0003-0606-2512", + "https://orcid.org/0009-0007-1637-4487" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3.pep.xml" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/params.yml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/QuantifiedPeptides.tsv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/lfq_param_file.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2_peptides.txt" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/Normalyzer_design.tsv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/mzRange.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/modificationSpecificPeptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1.raw" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/all_pep_quant_merged.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3_peptides.txt" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/evidence.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/msms.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1.raw" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3.mgf" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3_filtered.txt" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1.mzML" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/import_file_list.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/searchgui.par" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1.mgf" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/changed_params.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2.pep.xml" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/versions.yml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1.raw" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/quant_exp_design.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/allPeptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/sdrf_merge.version.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/stand_pep_quant_merged.csv" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/prepare_files.version.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2.mzML" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/yeast_UPS_concatenated_target_decoy.fasta" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3.mgf" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1.mzML" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/params_out.yml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3.pep.interact.pep.prot.xml" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/proline_results_2686ac07-85e9-4b13-a22e-186821fd3e84.xlsx" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/all_prot_quant_merged.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3.raw" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/mqpar.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2.zip" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3_filtered.txt" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3.zip" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/exp_design.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/parameters.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1.pep.interact.pep.prot.xml" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/Oxidation (M)Sites.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1.pep.interact.pep.xml" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/Normalyzer_comparisons.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/QuantifiedProteins.tsv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3.mzDB" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/summary.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1.pep.interact.pep.prot.xml" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/stand_prot_quant_merged.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1.mzDB" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/msmsScans.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/comet.params" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/sdrf_temp.tsv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2_filtered.txt" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/msScans.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/ms3Scans.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/QuantifiedPeaks.tsv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/sdrf_local.tsv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2_proteins.txt" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/proteinGroups.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1.pep.xml" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2_proteins.txt" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-Trio-phasing-VGP5/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5" ; + schema1:version "0.1.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "fragment-based-docking-scoring/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/fragment-based-docking-scoring" ; + schema1:version "0.1.2" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-12-04T14:19:45+00:00" ; + schema1:description """**Name:** Java Wordcount +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs + +# Description +Wordcount application. There are two versions of Wordcount, depending on how the input data is given. 
+ +## Version 1 +''Single input file'', where all the text is given in the same file and the chunks are calculated with a BLOCK_SIZE parameter. + +## Version 2 +''Multiple input files'', where the text fragments are already in different files under the same directory + +# Execution instructions +Usage: +``` +runcompss --classpath=application_sources/jar/wordcount.jar wordcount.multipleFiles.Wordcount DATA_FOLDER +runcompss --classpath=application_sources/jar/wordcount.jar wordcount.uniqueFile.Wordcount DATA_FILE BLOCK_SIZE +``` + +where: +* DATA_FOLDER: Absolute path to the base folder of the dataset files +* DATA_FILE: Absolute path to the dabase file +* BLOCK_SIZE: Number of bytes of each block + +# Execution Examples +``` +runcompss --classpath=application_sources/jar/wordcount.jar wordcount.multipleFiles.Wordcount dataset/data-set/ +runcompss --classpath=application_sources/jar/wordcount.jar wordcount.uniqueFile.Wordcount dataset/data-set/file_small.txt 650 +runcompss --classpath=application_sources/jar/wordcount.jar wordcount.uniqueFile.Wordcount dataset/data-set/file_long.txt 250000 + +``` + +# Build + +## Option 1: Native java +``` +cd application_sources/; javac src/main/java/wordcount/*.java +cd src/main/java/; jar cf wordcount.jar wordcount/ +cd ../../../; mv src/main/java/wordcount.jar jar/ +``` + +## Option 2: Maven +``` +cd application_sources/ +mvn clean package +``` +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Java COMPSs wordcount example (files used as inputs)" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:version "0.5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Scaffolding-HiC-VGP8/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:version "0.2.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "rnaseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:version "0.5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:hasPart ; + schema1:name "conf/metadata/experiment_data" ; + schema1:programmingLanguage ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-only-VGP3/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:version "0.1.6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "VGP-meryldb-creation-trio/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/VGP-meryldb-creation-trio" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "dada2/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/dada2" ; + schema1:version "0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "allele-based-pathogen-identification/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/allele-based-pathogen-identification" ; + schema1:version "0.1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "baredsc/baredSC-1d-logNorm" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/baredsc" ; + schema1:version "0.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "Assembly-decontamination-VGP9/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-decontamination-VGP9" ; + schema1:version "0.1.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "protein-ligand-complex-parameterization/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/protein-ligand-complex-parameterization" ; + schema1:version "0.1.1" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.6" . 
+ + a schema1:Dataset ; + schema1:creator , + ; + schema1:datePublished "2022-07-20T08:59:58+00:00" ; + schema1:description "Lysozyme in Water simplest version, from COMPSs Tutorial" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Lysozyme in Water COMPSs workflow" ; + schema1:publisher , + . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 7912 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "lysozyme_in_water.py" ; + schema1:programmingLanguage ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-consensus-from-variation/COVID-19-CONSENSUS-CONSTRUCTION" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-consensus-from-variation" ; + schema1:version "0.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "rnaseq-sr/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/rnaseq-sr" ; + schema1:version "0.4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "qiime2-II-denoising/IIa-denoising-se" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/qiime2-II-denoising" ; + schema1:version "0.2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-sr/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:version "0.9" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + , + ; + schema1:name "fastq-to-matrix-10x/scrna-seq-fastq-to-matrix-10x-cellplex" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/fastq-to-matrix-10x" ; + schema1:version "0.4" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-05-30T08:48:00+00:00" ; + schema1:description """**Name:** Matrix multiplication with Files +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs + +# Description +Matrix multiplication is a binary operation that takes a pair of matrices and produces another matrix. + +If A is an n×m matrix and B is an m×p matrix, the result AB of their multiplication is an n×p matrix defined only if the number of columns m in A is equal to the number of rows m in B. When multiplying A and B, the elements of the rows in A are multiplied with corresponding columns in B. + +In this implementation, A and B are square matrices (same number of rows and columns), and so it is the result matrix C. Each matrix is divided in N blocks of M doubles. The multiplication of two blocks is done by a multiply task method with a simple three-nested-loop implementation. When executed with COMPSs, the main program generates N^3^ tasks arranged as N^2^ chains of N tasks in the dependency graph. 
+ +# Execution instructions +Usage: +``` +runcompss --lang=python src/matmul_files.py numberOfBlocks blockSize +``` + +where: +* numberOfBlocks: Number of blocks inside each matrix +* blockSize: Size of each block + + +# Execution Examples +``` +runcompss --lang=python src/matmul_files.py 4 4 +runcompss src/matmul_files.py 4 4 +python -m pycompss src/matmul_files.py 4 4 +``` + +# Build +No build is required +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "PyCOMPSs Matrix Multiplication, out-of-core using files" ; + schema1:publisher . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.0.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.0.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.0.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.0.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.0.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.0.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.0.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.0.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.1.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.1.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.1.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.1.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.1.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.1.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.1.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.1.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.2.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.2.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.2.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.2.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.2.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.2.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.2.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.2.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.3.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.3.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.3.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.3.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.3.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.3.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.3.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.3.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.4.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.4.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.4.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.4.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.4.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.4.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.4.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.4.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.5.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.5.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.5.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.5.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.5.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.5.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.5.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.5.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.6.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.6.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.6.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.6.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.6.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.6.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.6.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.6.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.7.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.7.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.7.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.7.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.7.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.7.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.7.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.7.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "assembly-with-flye/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/assembly-with-flye" ; + schema1:version "0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:version "0.9" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-03-22T11:45:33+00:00" ; + schema1:description """**Name:** Matmul GPU +**Contact Person**: cristian.tatu@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs +**Machine**: Minotauro-MN4 + +Matmul running on the GPU leveraging COMPSs GPU Cache for deserialization speedup. +Launched using 32 GPUs (16 nodes). +C = A @ B +Where A: shape (320, 56_900_000) block_size (10, 11_380_000) + B: shape (56_900_000, 10) block_size (11_380_000, 10) + C: shape (320, 10) block_size (10, 10) +Total dataset size 291 GB. +Version dislib-0.9 (https://github.com/bsc-wdc/dislib/tree/release-0.9) +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "COMPSs GPU Cache Matrix Multiplication" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "repeatmasking/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/repeatmasking" ; + schema1:version "0.1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "Assembly-decontamination-VGP9/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-decontamination-VGP9" ; + schema1:version "0.1.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-only-VGP3/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:version "0.1.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "dada2/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/dada2" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + , + ; + schema1:name "fastq-to-matrix-10x/scrna-seq-fastq-to-matrix-10x-cellplex" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/fastq-to-matrix-10x" ; + schema1:version "0.2" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-07-05T11:38:51+00:00" ; + schema1:description """**Name:** KMeans +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs +**Machine**: MareNostrum5 + +KMEans for clustering the housing.csv dataset (https://github.com/sonarsushant/California-House-Price-Prediction/blob/master/housing.csv). +This application used [dislib-0.9.0](https://github.com/bsc-wdc/dislib/tree/release-0.9) +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "KMeans housing" ; + schema1:publisher . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "lcms-preprocessing/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/lcms-preprocessing" ; + schema1:version "1.0" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-sr/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:version "0.1" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator , + , + , + ; + schema1:datePublished "2023-10-20T11:05:06+00:00" ; + schema1:description "A demonstration workflow for Reduced Order Modeling (ROM) within the eFlows4HPC project, implemented using Kratos Multiphysics, EZyRB, COMPSs, and Dislib." ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "https://spdx.org/licenses/BSD-4-Clause.html" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "eFlows4HPC Demo ROM Workflow" ; + schema1:publisher , + , + . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Scaffolding-HiC-VGP8/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:version "0.2.4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "atacseq/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/atacseq" ; + schema1:version "0.2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "pathogen-detection-pathogfair-samples-aggregation-and-visualisation/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/pathogen-detection-pathogfair-samples-aggregation-and-visualisation" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-pe-illumina-artic-variant-calling/COVID-19-PE-ARTIC-ILLUMINA" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling" ; + schema1:version "0.4.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "gcms-metams/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/gcms-metams" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:version "0.10" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "amr_gene_detection/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/amr_gene_detection" ; + schema1:version "1.1.1" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-05-30T07:15:34+00:00" ; + schema1:description """**Name:** Matrix Multiplication +**Contact Person:** support-compss@bsc.es +**Access Level:** public +**License Agreement:** Apache2 +**Platform:** COMPSs + +# Description +Matrix multiplication is a binary operation that takes a pair of matrices and produces another matrix. 
+ +If A is an n×m matrix and B is an m×p matrix, the result AB of their multiplication is an n×p matrix defined only if the number of columns m in A is equal to the number of rows m in B. When multiplying A and B, the elements of the rows in A are multiplied with corresponding columns in B. + +In this implementation, A and B are square matrices (same number of rows and columns), and so it is the result matrix C. Each matrix is divided in N blocks of M doubles. The multiplication of two blocks is done by a multiply task method with a simple three-nested-loop implementation. When executed with COMPSs, the main program generates N^3^ tasks arranged as N^2^ chains of N tasks in the dependency graph. + +# Versions +There are three versions of Matrix Multiplication, depending on the data types used to store the blocks. +## Version 1 +''files'', where the matrix blocks are stored in files. +## Version 2 +''objects'', where the matrix blocks are represented by objects. +## Version 3 +''arrays'', where the matrix blocks are stored in arrays. 
+ +# Execution instructions +Usage: +``` +runcompss matmul.files.Matmul numberOfBlocks blockSize +runcompss matmul.objects.Matmul numberOfBlocks blockSize +runcompss matmul.arrays.Matmul numberOfBlocks blockSize +``` + +where: + * numberOfBlocks: Number of blocks inside each matrix + * blockSize: Size of each block + + +# Execution Example +``` +runcompss matmul.objects.Matmul 16 4 +runcompss matmul.files.Matmul 16 4 +runcompss matmul.arrays.Matmul 16 4 +``` + +# Build +## Option 1: Native java +``` +cd ~/tutorial_apps/java/matmul/; javac src/main/java/matmul/*/*.java +cd src/main/java/; jar cf matmul.jar matmul/ +cd ../../../; mv src/main/java/matmul.jar jar/ +``` + +## Option 2: Maven +``` +cd ~/tutorial_apps/java/matmul/ +mvn clean package +``` +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "COMPSs Matrix Multiplication, out-of-core using files" ; + schema1:publisher . + + a schema1:MediaObject ; + schema1:contentSize 295 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.0.0" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.0.1" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 301 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.0.2" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 301 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.0.3" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 296 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.1.0" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 296 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.1.1" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.1.2" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 296 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.1.3" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.2.0" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 298 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.2.1" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 299 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.2.2" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 294 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.2.3" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 299 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.3.0" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 297 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.3.1" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 299 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.3.2" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 297 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.3.3" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-pe-illumina-artic-ivar-analysis/SARS-COV-2-ILLUMINA-AMPLICON-IVAR-PANGOLIN-NEXTCLADE" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-ivar-analysis" ; + schema1:version "0.2.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Scaffolding-HiC-VGP8/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:version "0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "qiime2-I-import/Ia-import-multiplexed-se" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/qiime2-I-import" ; + schema1:version "0.2" . 
+ + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-03-05T11:04:51+00:00" ; + schema1:description "Lysozyme in water full COMPSs application run at MareNostrum IV, using dataset_small with two workers" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Lysozyme in water full version, using dataset_small, two workers, data_persistence True" ; + schema1:publisher . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "gene-based-pathogen-identification/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/gene-based-pathogen-identification" ; + schema1:version "0.1" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-11-02T10:54:22+00:00" ; + schema1:description """**Name:** TruncatedSVD (Randomized SVD) +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs +**Machine**: MareNostrum4 + +TruncatedSVD (Randomized SVD) for computing just 456 singular values out of a (3.6M x 1200) size matrix. +The input matrix represents a CFD transient simulation of aire moving past a cylinder. +This application used dislib-0.9.0 (https://github.com/bsc-wdc/dislib/tree/release-0.9) +""" ; + schema1:hasPart , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Randomized SVD" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicates-one-haplotype-VGP6b/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b" ; + schema1:version "0.6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "parallel-accession-download/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:version "0.1.13" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-variation-reporting/COVID-19-VARIATION-REPORTING" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-variation-reporting" ; + schema1:version "0.1.3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:version "0.1.10" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Mitogenome-assembly-VGP0/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Mitogenome-assembly-VGP0" ; + schema1:version "0.1" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "1.0" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "COVID-19: variation analysis on WGS PE data" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-wgs-variant-calling" ; + schema1:version "0.2.1" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-12-19T11:21:53+00:00" ; + schema1:description """**Name:** Lanczos SVD +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs +**Machine**: MareNostrum4 + +Lanczos SVD for computing singular values needed to reach an epsilon of 1e-3 on a matrix of (150000, 150). +The input matrix is generated synthetically. +This application used [dislib-0.9.0](https://github.com/bsc-wdc/dislib/tree/release-0.9) +""" ; + schema1:hasPart , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Lanczos SVD" ; + schema1:publisher . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicates-one-haplotype-VGP6b/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b" ; + schema1:version "0.5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "parallel-accession-download/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:version "0.1.6" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:version "0.6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "fragment-based-docking-scoring/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/fragment-based-docking-scoring" ; + schema1:version "0.1.4" . 
+ + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-11-10T13:57:34+00:00" ; + schema1:description """**Name:** K-means +**Contact Person**: support-compss@bsc.es +**Access Level**: Public +**License Agreement**: Apache2 +**Platform**: COMPSs + +# Description +K-means clustering is a method of cluster analysis that aims to partition ''n'' points into ''k'' clusters in which each point belongs to the cluster with the nearest mean. It follows an iterative refinement strategy to find the centers of natural clusters in the data. + +When executed with COMPSs, K-means first generates the input points by means of initialization tasks. For parallelism purposes, the points are split in a number of fragments received as parameter, each fragment being created by an initialization task and filled with random points. + +After the initialization, the algorithm goes through a set of iterations. In every iteration, a computation task is created for each fragment; then, there is a reduction phase where the results of each computation are accumulated two at a time by merge tasks; finally, at the end of the iteration the main program post-processes the merged result, generating the current clusters that will be used in the next iteration. Consequently, if ''F'' is the total number of fragments, K-means generates ''F'' computation tasks and ''F-1'' merge tasks per iteration. 
+ +# Execution instructions +Usage: +``` +runcompss --classpath=application_sources/jar/kmeans.jar kmeans.KMeans <...> +``` + +where ''<...>'': +* -c Number of clusters +* -i Number of iterations +* -n Number of points +* -d Number of dimensions +* -f Number of fragments + +# Execution Examples +``` +runcompss --classpath=application_sources/jar/kmeans.jar kmeans.KMeans +runcompss --classpath=application_sources/jar/kmeans.jar kmeans.KMeans -c 4 -i 10 -n 2000 -d 2 -f 2 +``` + +# Build +## Option 1: Native java +``` +cd application_sources/; javac src/main/java/kmeans/*.java +cd src/main/java/; jar cf kmeans.jar kmeans/ +cd ../../../; mv src/main/java/kmeans.jar jar/ +``` + +## Option 2: Maven +``` +cd application_sources/ +mvn clean package +``` +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Java COMPSs K-means clustering example (executed at Marenostrum IV supercomputer, inputs generated by the code)" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "velocyto/Velocyto-on10X-from-bundled" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/velocyto" ; + schema1:version "0.2" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-04-30T10:49:33+00:00" ; + schema1:description """**Name:** Matrix multiplication with Files, reproducibility example +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs + +# Description +Matrix multiplication is a binary operation that takes a pair of matrices and produces another matrix. + +If A is an n×m matrix and B is an m×p matrix, the result AB of their multiplication is an n×p matrix defined only if the number of columns m in A is equal to the number of rows m in B. 
When multiplying A and B, the elements of the rows in A are multiplied with corresponding columns in B. + +In this implementation, A and B are square matrices (same number of rows and columns), and so it is the result matrix C. Each matrix is divided in N blocks of M doubles (N hardcoded to 2, and M hardcoded to 8). The multiplication of two blocks is done by a multiply task method with a simple three-nested-loop implementation. When executed with COMPSs, the main program generates N^3^ tasks arranged as N^2^ chains of N tasks in the dependency graph. + +# Reproducibility +To reproduce the exact results of this example, follow the instructions at the [Workflow Provenance section at COMPSs User Manual](https://compss-doc.readthedocs.io/en/stable/Sections/05_Tools/04_Workflow_Provenance.html) + +# Execution instructions +Usage: +``` +runcompss --lang=python src/matmul_files.py inputs_folder/ outputs_folder/ +``` + +where: +* inputs_folder/: Folder where A and B matrices are located +* outputs_folder/: Folder with the resulting C matrix + + +# Execution Examples +``` +runcompss --lang=python src/matmul_files.py inputs/ outputs/ +runcompss src/matmul_files.py inputs/ outputs/ +python -m pycompss src/matmul_files.py inputs/ outputs/ +``` + +# Build +No build is required +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "PyCOMPSs Matrix Multiplication, out-of-core using files, reproducibility example" ; + schema1:publisher . + + a schema1:MediaObject ; + schema1:contentSize 320 ; + schema1:dateModified "2024-04-30T10:49:29+00:00" ; + schema1:name "C.0.0" ; + schema1:sdDatePublished "2024-04-30T10:49:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 320 ; + schema1:dateModified "2024-04-30T10:49:29+00:00" ; + schema1:name "C.0.1" ; + schema1:sdDatePublished "2024-04-30T10:49:33+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 320 ; + schema1:dateModified "2024-04-30T10:49:29+00:00" ; + schema1:name "C.1.0" ; + schema1:sdDatePublished "2024-04-30T10:49:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 320 ; + schema1:dateModified "2024-04-30T10:49:29+00:00" ; + schema1:name "C.1.1" ; + schema1:sdDatePublished "2024-04-30T10:49:33+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-11-02T10:55:02+00:00" ; + schema1:description """**Name:** Word Count +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs + +# Description +Wordcount is an application that counts the number of words for a given set of files. + +To allow parallelism every file is treated separately and merged afterwards. + +# Execution instructions +Usage: +``` +runcompss --lang=python src/wordcount.py datasetPath +``` + +where: +* datasetPath: Absolute path of the file to parse (e.g. /home/compss/tutorial_apps/python/wordcount/data/) + +# Execution Examples +``` +runcompss --lang=python src/wordcount.py $(pwd)/data/ +runcompss src/wordcount.py $(pwd)/data/ +python -m pycompss src/wordcount.py $(pwd)/data/ +``` + +# Build +No build is required +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "PyCOMPSs Wordcount test, using files" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "cutandrun/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/cutandrun" ; + schema1:version "0.4" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 21805 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + ; + schema1:name "bacterial_genome_annotation/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/bacterial_genome_annotation" ; + schema1:version "1.0" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "COVID-19: variation analysis reporting" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-variation-reporting" ; + schema1:version "0.1.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:version "0.1.6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "gromacs-mmgbsa/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/gromacs-mmgbsa" ; + schema1:version "0.1.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "protein-ligand-complex-parameterization/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/protein-ligand-complex-parameterization" ; + schema1:version "0.1.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + ; + schema1:name "bacterial-genome-assembly/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/bacterial-genome-assembly" ; + schema1:version "1.1.1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "ChIPseq_PE/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/ChIPseq_PE" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-pe-illumina-artic-variant-calling/COVID-19-PE-ARTIC-ILLUMINA" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling" ; + schema1:version "0.5.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-Trio-phasing-VGP5/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5" ; + schema1:version "0.1.4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "generic-variant-calling-wgs-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/generic-variant-calling-wgs-pe" ; + schema1:version "0.1" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-06-23T13:59:37+00:00" ; + schema1:description """**Name:** SparseLU +**Contact Person:** support-compss@bsc.es +**Access Level:** public +**License Agreement:** Apache2 +**Platform:** COMPSs + +# Description +The Sparse LU application computes an LU matrix factorization on a sparse blocked matrix. The matrix size (number of blocks) and the block size are parameters of the application. + +As the algorithm progresses, the area of the matrix that is accessed is smaller; concretely, at each iteration, the 0th row and column of the current matrix are discarded. On the other hand, due to the sparseness of the matrix, some of its blocks might not be allocated and, therefore, no work is generated for them. 
+ +When executed with COMPSs, Sparse LU produces several types of task with different granularity and numerous dependencies between them. + +# Versions +There are three versions of Sparse LU, depending on the data types used to store the blocks. +## Version 1 +''files'', where the matrix blocks are stored in files. +## Version 2 +''objects'', where the matrix blocks are represented by objects. +## Version 3 +''arrays'', where the matrix blocks are stored in arrays. + + +# Execution instructions +Usage: +``` +runcompss sparseLU.files.SparseLU numberOfBlocks blockSize +runcompss sparseLU.objects.SparseLU numberOfBlocks blockSize +runcompss sparseLU.arrays.SparseLU numberOfBlocks blockSize +``` + +where: + * numberOfBlocks: Number of blocks inside each matrix + * blockSize: Size of each block + + +# Execution Example +``` +runcompss sparseLU.objects.SparseLU 16 4 +runcompss sparseLU.files.SparseLU 16 4 +runcompss sparseLU.arrays.SparseLU 16 4 +``` + + +# Build +## Option 1: Native java +``` +cd application_sources/; javac src/main/java/sparseLU/*/*.java +cd src/main/java/; jar cf sparseLU.jar sparseLU/ +cd ../../../; mv src/main/java/sparseLU.jar jar/ +``` + +## Option 2: Maven +``` +cd application_sources/ +mvn clean package +``` +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Java COMPSs LU Factorization for Sparse Matrices" ; + schema1:publisher . + + a schema1:MediaObject ; + schema1:contentSize 1234 ; + schema1:dateModified "2023-06-23T13:59:35" ; + schema1:name "A.0.0" ; + schema1:sdDatePublished "2023-06-23T13:59:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1182 ; + schema1:dateModified "2023-06-23T13:59:35" ; + schema1:name "A.0.1" ; + schema1:sdDatePublished "2023-06-23T13:59:37+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1217 ; + schema1:dateModified "2023-06-23T13:59:35" ; + schema1:name "A.0.2" ; + schema1:sdDatePublished "2023-06-23T13:59:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1244 ; + schema1:dateModified "2023-06-23T13:59:35" ; + schema1:name "A.1.0" ; + schema1:sdDatePublished "2023-06-23T13:59:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1238 ; + schema1:dateModified "2023-06-23T13:59:35" ; + schema1:name "A.1.1" ; + schema1:sdDatePublished "2023-06-23T13:59:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1233 ; + schema1:dateModified "2023-06-23T13:59:35" ; + schema1:name "A.1.2" ; + schema1:sdDatePublished "2023-06-23T13:59:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1214 ; + schema1:dateModified "2023-06-23T13:59:35" ; + schema1:name "A.2.0" ; + schema1:sdDatePublished "2023-06-23T13:59:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1255 ; + schema1:dateModified "2023-06-23T13:59:35" ; + schema1:name "A.2.1" ; + schema1:sdDatePublished "2023-06-23T13:59:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1251 ; + schema1:dateModified "2023-06-23T13:59:35" ; + schema1:name "A.2.2" ; + schema1:sdDatePublished "2023-06-23T13:59:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1219 ; + schema1:dateModified "2023-06-23T13:59:35" ; + schema1:name "A.2.3" ; + schema1:sdDatePublished "2023-06-23T13:59:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1290 ; + schema1:dateModified "2023-06-23T13:59:35" ; + schema1:name "A.3.2" ; + schema1:sdDatePublished "2023-06-23T13:59:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1250 ; + schema1:dateModified "2023-06-23T13:59:35" ; + schema1:name "A.3.3" ; + schema1:sdDatePublished "2023-06-23T13:59:37+00:00" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-Trio-phasing-VGP5/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-sr/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:version "0.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + ; + schema1:name "quality-and-contamination-control/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/quality-and-contamination-control" ; + schema1:version "1.0" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "atacseq/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/atacseq" ; + schema1:version "0.8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-pe-illumina-artic-ivar-analysis/SARS-COV-2-ILLUMINA-AMPLICON-IVAR-PANGOLIN-NEXTCLADE" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-ivar-analysis" ; + schema1:version "0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-ont-artic-variant-calling/COVID-19-ARTIC-ONT" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-ont-artic-variant-calling" ; + schema1:version "0.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "assembly-with-flye/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/assembly-with-flye" ; + schema1:version "0.1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "gromacs-dctmd/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/gromacs-dctmd" ; + schema1:version "0.1.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "cutandrun/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/cutandrun" ; + schema1:version "0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-pe-illumina-wgs-variant-calling/COVID-19-PE-WGS-ILLUMINA" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-wgs-variant-calling" ; + schema1:version "0.2.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "atacseq/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/atacseq" ; + schema1:version "0.15" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "atacseq/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/atacseq" ; + schema1:version "0.6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-variation-reporting/COVID-19-VARIATION-REPORTING" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-variation-reporting" ; + schema1:version "0.1.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Scaffolding-HiC-VGP8/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:version "0.1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:version "0.3" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-06-18T13:22:31+00:00" ; + schema1:description """**Name:** SparseLU +**Contact Person:** support-compss@bsc.es +**Access Level:** public +**License Agreement:** Apache2 +**Platform:** COMPSs + +# Description +The Sparse LU application computes an LU matrix factorization on a sparse blocked matrix. The matrix size (number of blocks) and the block size are parameters of the application. + +As the algorithm progresses, the area of the matrix that is accessed is smaller; concretely, at each iteration, the 0th row and column of the current matrix are discarded. On the other hand, due to the sparseness of the matrix, some of its blocks might not be allocated and, therefore, no work is generated for them. + +When executed with COMPSs, Sparse LU produces several types of task with different granularity and numerous dependencies between them. + +# Versions +There are three versions of Sparse LU, depending on the data types used to store the blocks. +## Version 1 +''files'', where the matrix blocks are stored in files. +## Version 2 +''objects'', where the matrix blocks are represented by objects. +## Version 3 +''arrays'', where the matrix blocks are stored in arrays. 
+ + +# Execution instructions +Usage: +``` +runcompss sparseLU.files.SparseLU numberOfBlocks blockSize +runcompss sparseLU.objects.SparseLU numberOfBlocks blockSize +runcompss sparseLU.arrays.SparseLU numberOfBlocks blockSize +``` + +where: + * numberOfBlocks: Number of blocks inside each matrix + * blockSize: Size of each block + + +# Execution Example +``` +runcompss sparseLU.objects.SparseLU 16 4 +runcompss sparseLU.files.SparseLU 16 4 +runcompss sparseLU.arrays.SparseLU 16 4 +``` + + +# Build +## Option 1: Native java +``` +cd application_sources/; javac src/main/java/sparseLU/*/*.java +cd src/main/java/; jar cf sparseLU.jar sparseLU/ +cd ../../../; mv src/main/java/sparseLU.jar jar/ +``` + +## Option 2: Maven +``` +cd application_sources/ +mvn clean package +``` +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Java COMPSs LU Factorization for Sparse Matrices, MareNostrum V, 3 nodes, no data persistence" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + , + ; + schema1:name "fastq-to-matrix-10x/scrna-seq-fastq-to-matrix-10x-cellplex" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/fastq-to-matrix-10x" ; + schema1:version "0.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "cutandrun/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/cutandrun" ; + schema1:version "0.1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + , + ; + schema1:name "fastq-to-matrix-10x/scrna-seq-fastq-to-matrix-10x-cellplex" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/fastq-to-matrix-10x" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "fragment-based-docking-scoring/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/fragment-based-docking-scoring" ; + schema1:version "0.1.1" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-01-24T14:28:04+00:00" ; + schema1:description "Lysozyme in water full COMPSs application, using dataset_small" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Lysozyme in water full version, using dataset_small" ; + schema1:publisher . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator , + ; + schema1:datePublished "2024-01-22T16:06:45+00:00" ; + schema1:description "Cluster Comparison COMPSs application" ; + schema1:hasPart , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Cluster Comparison" ; + schema1:publisher , + . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-06-18T13:37:29+00:00" ; + schema1:description "COMPSs Matrix Multiplication, out-of-core using files. 
Hypermatrix size used 2x2 blocks (MSIZE=2), block size used 2x2 elements (BSIZE=2)" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "PyCOMPSs Matrix Multiplication, out-of-core using files. Example using DIRECTORY parameters executed at laptop, data persistence True." ; + schema1:publisher . + + a schema1:Dataset ; + schema1:dateModified "2024-06-18T13:37:24+00:00" ; + schema1:hasPart , + , + , + ; + schema1:name "C" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . + + a schema1:Dataset ; + schema1:creator , + , + ; + schema1:datePublished "2022-07-14T17:40:14+00:00" ; + schema1:description "Multi-band array detection and location of seismic sources. BackTrackBB is a program for detection and space-time location of seismic sources based on multi-scale, frequency-selective statistical coherence of the wave field recorded by dense large-scale seismic networks and local antennas. The method is designed to enhance coherence of the signal statistical features across the array of sensors and consists of three steps. They are signal processing, space-time imaging and detection and location." 
; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "CECILL-2.1" ; + schema1:mainEntity ; + schema1:name "BackTrackBB" ; + schema1:publisher , + . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 20476 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "btbb_continuous.py" ; + schema1:programmingLanguage ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , 
+ , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , 
+ , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , 
+ , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , 
+ , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , 
+ , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-Trio-phasing-VGP5/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5" ; + schema1:version "0.1.2" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "kmer-profiling-hifi-trio-VGP2/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/kmer-profiling-hifi-trio-VGP2" ; + schema1:version "0.1.2" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "1.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "rnaseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:version "0.2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:version "0.7" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-03-22T17:53:30+00:00" ; + schema1:description """**Name:** K-Means GPU +**Contact Person**: cristian.tatu@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs +**Machine**: Minotauro-MN4 + +K-Means running on the GPU leveraging COMPSs GPU Cache for deserialization speedup. +Launched using 32 GPUs (16 nodes). Parameters used: K=40 and 32 blocks of size (1_000_000, 1200). +It creates a block for each GPU. Total dataset shape is (32_000_000, 1200). +Version dislib-0.9.0 (https://github.com/bsc-wdc/dislib/tree/release-0.9) +""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "COMPSs GPU Cache K-Means" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "atacseq/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/atacseq" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "parallel-accession-download/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:version "0.1.4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "rnaseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:version "0.4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:version "0.4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-se-illumina-wgs-variant-calling/COVID-19-SE-WGS-ILLUMINA" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-se-illumina-wgs-variant-calling" ; + schema1:version "0.1.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "kmer-profiling-hifi-trio-VGP2/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/kmer-profiling-hifi-trio-VGP2" ; + schema1:version "0.1.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-Trio-phasing-VGP5/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5" ; + schema1:version "0.1.6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Scaffolding-HiC-VGP8/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:version "0.1.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "nanopore-pre-processing/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/nanopore-pre-processing" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "Assembly-decontamination-VGP9/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-decontamination-VGP9" ; + schema1:version "0.1.4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "kmer-profiling-hifi-trio-VGP2/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/kmer-profiling-hifi-trio-VGP2" ; + schema1:version "0.1.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "velocyto/Velocyto-on10X-from-bundled" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/velocyto" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "cutandrun/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/cutandrun" ; + schema1:version "0.6.1" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-03-22T11:39:26+00:00" ; + schema1:description """**Name:** Matmul GPU +**Contact Person**: cristian.tatu@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs +**Machine**: Minotauro-MN4 + +Matmul running on the GPU leveraging COMPSs GPU Cache for deserialization speedup. +Launched using 32 GPUs (16 nodes). +C = A @ B +Where A: shape (320, 56_900_000) block_size (10, 11_380_000) + B: shape (56_900_000, 10) block_size (11_380_000, 10) + C: shape (320, 10) block_size (10, 10) +Total dataset size 291 GB. +Version dislib-0.9 (https://github.com/bsc-wdc/dislib/tree/release-0.9) +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "COMPSs GPU Cache Matrix Multiplication" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-only-VGP3/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:version "0.1.1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + ; + schema1:name "bacterial-genome-assembly/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/bacterial-genome-assembly" ; + schema1:version "1.1" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-03-25T11:02:59+00:00" ; + schema1:description """**Name:** PyTorch CNN - Imagenet +**Contact Person**: cristian.tatu@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs +**Machine**: Minotauro-MN4 + +PyTorch distributed training of CNN on GPU and leveraging COMPSs GPU Cache for deserialization speedup. +Launched using 32 GPUs (16 nodes). +Version dislib-0.9 +Version PyTorch 1.7.1+cu101 +""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "COMPSs GPU Cache PyTorch CNN Distributed Training" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "allele-based-pathogen-identification/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/allele-based-pathogen-identification" ; + schema1:version "0.1.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "parallel-accession-download/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:version "0.1.5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "brew3r/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/brew3r" ; + schema1:version "0.1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Scaffolding-Bionano-VGP7/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Scaffolding-Bionano-VGP7" ; + schema1:version "0.1.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + ; + schema1:name "bacterial-genome-assembly/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/bacterial-genome-assembly" ; + schema1:version "1.0" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "atacseq/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/atacseq" ; + schema1:version "0.7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:version "0.2.2" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-03-25T10:49:09+00:00" ; + schema1:description """**Name:** PyTorch CNN - Imagenet +**Contact Person**: cristian.tatu@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs +**Machine**: Minotauro-MN4 + +PyTorch distributed training of CNN on GPU and leveraging COMPSs GPU Cache for deserialization speedup. +Launched using 32 GPUs (16 nodes). 
+Version dislib-0.9 +Version PyTorch 1.7.1+cu101 +""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "COMPSs GPU Cache PyTorch CNN Distributed Training" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "Assembly-decontamination-VGP9/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-decontamination-VGP9" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "taxonomy-profiling-and-visualization-with-krona/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/taxonomy-profiling-and-visualization-with-krona" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "cutandrun/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/cutandrun" ; + schema1:version "0.6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-pe-illumina-artic-variant-calling/COVID-19-PE-ARTIC-ILLUMINA" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling" ; + schema1:version "0.5.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "parallel-accession-download/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:version "0.1.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "rnaseq-sr/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/rnaseq-sr" ; + schema1:version "0.7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Scaffolding-HiC-VGP8/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:version "0.1.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + ; + schema1:name "quality-and-contamination-control/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/quality-and-contamination-control" ; + schema1:version "1.1.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "gromacs-dctmd/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/gromacs-dctmd" ; + schema1:version "0.1.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:version "0.1.7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-sr/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:version "0.10" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Scaffolding-Bionano-VGP7/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Scaffolding-Bionano-VGP7" ; + schema1:version "0.1.1" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator , + ; + schema1:datePublished "2023-07-20T15:48:28+00:00" ; + schema1:description "Sample workflow that combines simulations with data analytics. It is not a real workflow, but mimics this type of workflows." 
; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "My Workflow Multiple" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "rnaseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:version "0.7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:version "0.1.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "atacseq/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/atacseq" ; + schema1:version "0.5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "atacseq/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/atacseq" ; + schema1:version "0.9" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-sr/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:version "0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-variation-reporting/COVID-19-VARIATION-REPORTING" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-variation-reporting" ; + schema1:version "0.2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-consensus-from-variation/COVID-19-CONSENSUS-CONSTRUCTION" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-consensus-from-variation" ; + schema1:version "0.2.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "cutandrun/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/cutandrun" ; + schema1:version "0.11" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "atacseq/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/atacseq" ; + schema1:version "0.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "hic-hicup-cooler/hic-fastq-to-cool-hicup-cooler" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:version "0.1.11" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Scaffolding-Bionano-VGP7/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Scaffolding-Bionano-VGP7" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "gromacs-mmgbsa/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/gromacs-mmgbsa" ; + schema1:version "0.1.2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "average-bigwig-between-replicates/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/average-bigwig-between-replicates" ; + schema1:version "0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-only-VGP3/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:version "0.1.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "hic-hicup-cooler/hic-fastq-to-cool-hicup-cooler" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:version "0.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "atacseq/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/atacseq" ; + schema1:version "0.4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "rnaseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:version "0.4.1" . + + a schema1:Dataset ; + dct:conformsTo ; + schema1:assesses "Research Question: How effective have the SARS-CoV-2 vaccination programmes been in preventing SARS-CoV-2 infections?" 
; + schema1:author , + , + , + , + ; + schema1:codeRepository "https://github.com/by-covid/BY-COVID_WP5_T5.2_baseline-use-case" ; + schema1:dateModified "2023-10-05" ; + schema1:datePublished "2023-04-19" ; + schema1:description "This publication corresponds to the Research Objects (RO) of the Baseline Use Case proposed in T.5.2 (WP5) in the BY-COVID project on “COVID-19 Vaccine(s) effectiveness in preventing SARS-CoV-2 infection”." ; + schema1:distribution ; + schema1:funder ; + schema1:funding ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.6913045" ; + schema1:isBasedOn , + , + , + ; + schema1:keywords "COVID-19, vaccines, comparative effectiveness, causal inference, international comparison, SARS-CoV-2, common data model, directed acyclic graph, synthetic data" ; + schema1:license ; + schema1:material "Cohort definition: All individuals (from 5 to 115 years old, included) vaccinated with at least one dose of the SARS-CoV-2 vaccine (any of the available brands) and all individuals eligible to be vaccinated with a documented positive diagnosis (irrespective of the type of test) for a SARS-CoV-2 infection during the data extraction period." ; + schema1:materialExtent """Inclusion criteria: + All people vaccinated with at least one dose of the COVID-19 vaccine (any of the available brands) in an area of residence. Any person eligible to be vaccinated (from 5 to 115 years old, included) with a positive diagnosis (irrespective of the type of test) for SARS-CoV-2 infection (COVID-19) during the period of data extraction. 
+ + Exclusion criteria: + People not eligible for the vaccine (from 0 to 4 years old, included)""" ; + schema1:mentions , + , + , + , + , + ; + schema1:name "BY-COVID WP5 T5.2 Baseline Use Case" ; + schema1:publisher ; + schema1:publishingPrinciples """Study Design: An observational retrospective longitudinal study to assess the effectiveness of the SARS-CoV-2 vaccines in preventing SARS-CoV-2 infections using routinely collected social, health and care data from several countries. + +A causal model was established using Directed Acyclic Graphs (DAGs) to map domain knowledge, theories and assumptions about the causal relationship between exposure and outcome. """ ; + schema1:releaseNotes """- Updated Causal model to eliminate the consideration of 'vaccination_schedule_cd' as a mediator +- Adjusted the study period to be consistent with the Study Protocol +- Updated 'sex_cd' as a required variable +- Added 'chronic_liver_disease_bl' as a comorbidity at the individual level +- Updated 'socecon_lvl_cd' at the area level as a recommended variable + -Added crosswalks for the definition of 'chronic_liver_disease_bl' in a separate sheet + -Updated the 'vaccination_schedule_cd' reference to the 'Vaccine' node in the updated DAG + -Updated the description of the 'confirmed_case_dt' and 'previous_infection_dt' variables to clarify the definition and the need for a single registry per person""" ; + schema1:temporalCoverage "Study Period: From the date of the first documented SARS-CoV-2 infection in each country to the most recent date in which data is available at the time of analysis. Roughly from 01-03-2020 to 30-06-2022, depending on the country." ; + schema1:url "https://by-covid.github.io/BY-COVID_WP5_T5.2_baseline-use-case/" ; + schema1:usageInfo "The scripts (software) included in the publication are offered \"as-is\", without warranty, and disclaiming liability for damages resulting from using it. 
The software is released under the CC-BY-4.0 licence, which gives you permission to use the content for almost any purpose (but does not grant you any trademark permissions), so long as you note the license and give credit." ; + schema1:version "1.2.0" ; + rel:cite-as . + + a schema1:Dataset ; + schema1:datePublished "2023-01-26" ; + schema1:description "following the causal model" ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.6913045" ; + schema1:name "Common data model specification" ; + schema1:url "https://zenodo.org/record/7572373" ; + schema1:version "1.1.0" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "atacseq/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/atacseq" ; + schema1:version "0.14" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Scaffolding-HiC-VGP8/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:version "0.2.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-ont-artic-variant-calling/COVID-19-ARTIC-ONT" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-ont-artic-variant-calling" ; + schema1:version "0.3.1" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-04-30T13:04:29+00:00" ; + schema1:description """**Name:** Matrix multiplication with Files, reproducibility example, without data persistence +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs + +# Description +Matrix multiplication is a binary operation that takes a pair of matrices and produces another matrix. 
+ +If A is an n×m matrix and B is an m×p matrix, the result AB of their multiplication is an n×p matrix defined only if the number of columns m in A is equal to the number of rows m in B. When multiplying A and B, the elements of the rows in A are multiplied with corresponding columns in B. + +In this implementation, A and B are square matrices (same number of rows and columns), and so it is the result matrix C. Each matrix is divided in N blocks of M doubles (N hardcoded to 2, and M hardcoded to 8). The multiplication of two blocks is done by a multiply task method with a simple three-nested-loop implementation. When executed with COMPSs, the main program generates N^3^ tasks arranged as N^2^ chains of N tasks in the dependency graph. + +# Reproducibility +To reproduce the exact results of this example, follow the instructions at the [Workflow Provenance section at COMPSs User Manual](https://compss-doc.readthedocs.io/en/stable/Sections/05_Tools/04_Workflow_Provenance.html), WITHOUT data persistence, PyCOMPSs application. + +# Execution instructions +Usage: +``` +runcompss --lang=python src/matmul_files.py inputs_folder/ outputs_folder/ +``` + +where: +* inputs_folder/: Folder where A and B matrices are located +* outputs_folder/: Folder with the resulting C matrix + + +# Execution Examples +``` +runcompss --lang=python src/matmul_files.py inputs/ outputs/ +runcompss src/matmul_files.py inputs/ outputs/ +python -m pycompss src/matmul_files.py inputs/ outputs/ +``` + +# Build +No build is required +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "PyCOMPSs Matrix Multiplication, out-of-core using files, MareNostrum V, reproducibility example, without data persistence" ; + schema1:publisher . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "cutandrun/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/cutandrun" ; + schema1:version "0.10" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-07-04T13:38:20+00:00" ; + schema1:description """**Name:** TruncatedSVD (Randomized SVD) +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs +**Machine**: MareNostrum5 + +TruncatedSVD (Randomized SVD) for computing just 456 singular values out of a (4.5M x 850) size matrix. +The input matrix represents a CFD transient simulation of air moving past a cylinder. +This application used [dislib-0.9.0](https://github.com/bsc-wdc/dislib/tree/release-0.9) +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Randomized SVD" ; + schema1:publisher . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 13899 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:name "random_svd_compss.py" ; + schema1:programmingLanguage ; + schema1:softwareRequirements . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "amr_gene_detection/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/amr_gene_detection" ; + schema1:version "1.1" . + + a schema1:Dataset ; + dct:conformsTo ; + schema1:assesses "Research Question: How effective have the SARS-CoV-2 vaccination programmes been in preventing SARS-CoV-2 infections?" ; + schema1:author , + , + , + , + ; + schema1:codeRepository "https://github.com/by-covid/BY-COVID_WP5_T5.2_baseline-use-case" ; + schema1:datePublished "2023-04-19" ; + schema1:description "This publication corresponds to the Research Objects (RO) of the Baseline Use Case proposed in T.5.2 (WP5) in the BY-COVID project on “COVID-19 Vaccine(s) effectiveness in preventing SARS-CoV-2 infection”." ; + schema1:funder ; + schema1:funding ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.6913045" ; + schema1:isBasedOn , + , + , + ; + schema1:keywords "COVID-19, vaccines, comparative effectiveness, causal inference, international comparison, SARS-CoV-2, common data model, directed acyclic graph, synthetic data" ; + schema1:license ; + schema1:material "Cohort definition: All individuals (from 5 to 115 years old, included) vaccinated with at least one dose of the SARS-CoV-2 vaccine (any of the available brands) and all individuals eligible to be vaccinated with a documented positive diagnosis (irrespective of the type of test) for a SARS-CoV-2 infection during the data extraction period." 
; + schema1:materialExtent """Inclusion criteria: + All people vaccinated with at least one dose of the COVID-19 vaccine (any of the available brands) in an area of residence. Any person eligible to be vaccinated (from 5 to 115 years old, included) with a positive diagnosis (irrespective of the type of test) for SARS-CoV-2 infection (COVID-19) during the period of data extraction. + + Exclusion criteria: + People not eligible for the vaccine (from 0 to 4 years old, included)""" ; + schema1:mentions , + , + , + , + , + ; + schema1:name "BY-COVID WP5 T5.2 Baseline Use Case" ; + schema1:publisher ; + schema1:publishingPrinciples """Study Design: An observational retrospective longitudinal study to assess the effectiveness of the SARS-CoV-2 vaccines in preventing SARS-CoV-2 infections using routinely collected social, health and care data from several countries. + +A causal model was established using Directed Acyclic Graphs (DAGs) to map domain knowledge, theories and assumptions about the causal relationship between exposure and outcome. """ ; + schema1:releaseNotes """- Updated Causal model to eliminate the consideration of 'vaccination_schedule_cd' as a mediator +- Adjusted the study period to be consistent with the Study Protocol +- Updated 'sex_cd' as a required variable +- Added 'chronic_liver_disease_bl' as a comorbidity at the individual level +- Updated 'socecon_lvl_cd' at the area level as a recommended variable + -Added crosswalks for the definition of 'chronic_liver_disease_bl' in a separate sheet + -Updated the 'vaccination_schedule_cd' reference to the 'Vaccine' node in the updated DAG + -Updated the description of the 'confirmed_case_dt' and 'previous_infection_dt' variables to clarify the definition and the need for a single registry per person""" ; + schema1:temporalCoverage "Study Period: From the date of the first documented SARS-CoV-2 infection in each country to the most recent date in which data is available at the time of analysis. 
Roughly from 01-03-2020 to 30-06-2022, depending on the country." ; + schema1:url "https://by-covid.github.io/BY-COVID_WP5_T5.2_baseline-use-case/" ; + schema1:usageInfo "The scripts (software) included in the publication are offered \"as-is\", without warranty, and disclaiming liability for damages resulting from using it. The software is released under the CC-BY-4.0 licence, which gives you permission to use the content for almost any purpose (but does not grant you any trademark permissions), so long as you note the license and give credit." ; + schema1:version "1.2.0" . + + a schema1:Dataset ; + schema1:datePublished "2023-01-26" ; + schema1:description "following the causal model" ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.6913045" ; + schema1:name "Common data model specification" ; + schema1:url "https://zenodo.org/record/7572373" ; + schema1:version "1.1.0" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-11-16T14:25:14+00:00" ; + schema1:description """**Contact Person:** support-compss@bsc.es +**Access Level:** public +**License Agreement:** Apache2 +**Platform:** COMPSs + +# Description + +Simple is an application that takes one value and increases it by five units. The purpose of this application is to show how tasks are managed by COMPSs. + +# Execution instructions +Usage: +``` +runcompss --lang=python src/simple.py initValue +``` + +where: +* initValue: Initial value for counter + +# Execution Examples +``` +runcompss --lang=python src/simple.py 1 +runcompss src/simple.py 1 +python -m pycompss src/simple.py 1 +``` + +# Build +No build is required +""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "PyCOMPSs simple example (ran on macOS laptop, input generated by the code, INOUT file example)" ; + schema1:publisher . 
+ + a schema1:MediaObject ; + schema1:contentSize 1 ; + schema1:dateModified "2023-11-16T14:25:10" ; + schema1:name "counter" ; + schema1:sdDatePublished "2023-11-16T14:25:14+00:00" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:version "0.2.4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "openms-metaprosip/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/openms-metaprosip" ; + schema1:version "0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "openms-metaprosip/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/openms-metaprosip" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "parallel-accession-download/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:version "0.1.3" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-03-28T09:14:42+00:00" ; + schema1:description "Calculates the Fibonacci series up to a specified length." ; + schema1:hasPart , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "My COMPSs Fibonacci Series" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicates-one-haplotype-VGP6b/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b" ; + schema1:version "0.4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "polish-with-long-reads/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/polish-with-long-reads" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "gromacs-mmgbsa/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/gromacs-mmgbsa" ; + schema1:version "0.1.4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicates-one-haplotype-VGP6b/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-only-VGP3/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:version "0.1.4" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-only-VGP3/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:version "0.1.5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Scaffolding-Bionano-VGP7/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Scaffolding-Bionano-VGP7" ; + schema1:version "0.1.3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:version "0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:version "0.1.9" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.5" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-02-14T13:47:39+00:00" ; + schema1:description """**Name:** Random Forest +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs +**Machine**: MareNostrum4 +This is an example of Random Forest algorithm from dislib. To show the usage, the code generates a synthetical input matrix. +The results are printed by screen. +This application used [dislib-0.9.0](https://github.com/bsc-wdc/dislib/tree/release-0.9) +""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Random Forest executed in 3 worker nodes, with a generated dataset, using 1 Million rows x 100 features" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "rnaseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:version "0.3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "average-bigwig-between-replicates/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/average-bigwig-between-replicates" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "baredsc/baredSC-1d-logNorm" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/baredsc" ; + schema1:version "0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:version "0.3" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-04-19T08:05:56+00:00" ; + schema1:description """**Name:** Incrementation and Fibonacci +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs + +# Description +**Brief Overview:** Demonstrates COMPSs task parallelism with increment and Fibonacci computations. + +**Detailed Description:** + Performs multiple increments of input values in parallel using COMPSs. + Concurrently calculates Fibonacci numbers using recursive COMPSs tasks. + Demonstrates task synchronization via `compss_wait_on`. + +# Execution instructions +Usage: +``` +runcompss src/increment_fibonacci.py value1 Value2 Value3 +#add more values if you want + +``` + +# Execution Examples +``` +runcompss src/increment_fibonacci.py 1 4 3 9 6 9 + +``` + +# Build +No build is required +""" ; + schema1:hasPart , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "COMPSs Incrementation and Fibonacci series example" ; + schema1:publisher . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "baredsc/baredSC-1d-logNorm" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/baredsc" ; + schema1:version "0.4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:image ; + schema1:name "" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/crs4/fair-crcc-send-data" ; + schema1:version "1.0" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicates-one-haplotype-VGP6b/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b" ; + schema1:version "0.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + ; + schema1:name "bacterial_genome_annotation/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/bacterial_genome_annotation" ; + schema1:version "1.1.1" . + + a schema1:Dataset ; + dct:conformsTo ; + schema1:assesses "Research Question: How effective have the SARS-CoV-2 vaccination programmes been in preventing SARS-CoV-2 infections?" ; + schema1:author , + , + , + , + ; + schema1:codeRepository "https://github.com/by-covid/BY-COVID_WP5_T5.2_baseline-use-case" ; + schema1:dateModified "2023-10-05" ; + schema1:datePublished "2023-04-19" ; + schema1:description "This publication corresponds to the Research Objects (RO) of the Baseline Use Case proposed in T.5.2 (WP5) in the BY-COVID project on “COVID-19 Vaccine(s) effectiveness in preventing SARS-CoV-2 infection”." 
; + schema1:distribution ; + schema1:funder ; + schema1:funding ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.6913045" ; + schema1:isBasedOn , + , + , + ; + schema1:keywords "COVID-19, vaccines, comparative effectiveness, causal inference, international comparison, SARS-CoV-2, common data model, directed acyclic graph, synthetic data" ; + schema1:license ; + schema1:material "Cohort definition: All individuals (from 5 to 115 years old, included) vaccinated with at least one dose of the SARS-CoV-2 vaccine (any of the available brands) and all individuals eligible to be vaccinated with a documented positive diagnosis (irrespective of the type of test) for a SARS-CoV-2 infection during the data extraction period." ; + schema1:materialExtent """Inclusion criteria: + All people vaccinated with at least one dose of the COVID-19 vaccine (any of the available brands) in an area of residence. Any person eligible to be vaccinated (from 5 to 115 years old, included) with a positive diagnosis (irrespective of the type of test) for SARS-CoV-2 infection (COVID-19) during the period of data extraction. + + Exclusion criteria: + People not eligible for the vaccine (from 0 to 4 years old, included)""" ; + schema1:mentions , + , + , + , + , + ; + schema1:name "BY-COVID WP5 T5.2 Baseline Use Case" ; + schema1:publisher ; + schema1:publishingPrinciples """Study Design: An observational retrospective longitudinal study to assess the effectiveness of the SARS-CoV-2 vaccines in preventing SARS-CoV-2 infections using routinely collected social, health and care data from several countries. + +A causal model was established using Directed Acyclic Graphs (DAGs) to map domain knowledge, theories and assumptions about the causal relationship between exposure and outcome. 
""" ; + schema1:releaseNotes """- Updated Causal model to eliminate the consideration of 'vaccination_schedule_cd' as a mediator +- Adjusted the study period to be consistent with the Study Protocol +- Updated 'sex_cd' as a required variable +- Added 'chronic_liver_disease_bl' as a comorbidity at the individual level +- Updated 'socecon_lvl_cd' at the area level as a recommended variable + -Added crosswalks for the definition of 'chronic_liver_disease_bl' in a separate sheet + -Updated the 'vaccination_schedule_cd' reference to the 'Vaccine' node in the updated DAG + -Updated the description of the 'confirmed_case_dt' and 'previous_infection_dt' variables to clarify the definition and the need for a single registry per person""" ; + schema1:temporalCoverage "Study Period: From the date of the first documented SARS-CoV-2 infection in each country to the most recent date in which data is available at the time of analysis. Roughly from 01-03-2020 to 30-06-2022, depending on the country." ; + schema1:url "https://by-covid.github.io/BY-COVID_WP5_T5.2_baseline-use-case/" ; + schema1:usageInfo "The scripts (software) included in the publication are offered \"as-is\", without warranty, and disclaiming liability for damages resulting from using it. The software is released under the CC-BY-4.0 licence, which gives you permission to use the content for almost any purpose (but does not grant you any trademark permissions), so long as you note the license and give credit." ; + schema1:version "1.2.0" ; + rel:cite-as . + + a schema1:Dataset ; + schema1:datePublished "2023-01-26" ; + schema1:description "following the causal model" ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.6913045" ; + schema1:name "Common data model specification" ; + schema1:url "https://zenodo.org/record/7572373" ; + schema1:version "1.1.0" . 
+ + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-11-24T08:24:54+00:00" ; + schema1:description """**Name:** Increment +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs + +# Description +Increment is an application that takes three different values and increases them a number of given times. + +The purpose of this application is to show parallelism between the different increments. + +# Execution instructions +Usage: +``` +runcompss --lang=python src/increment.py N initValue1 initValue2 initValue3 +``` + +where: +* N: Number of times to increase the counters +* initValue1: Initial value for counter 1 +* initValue2: Initial value for counter 2 +* initValue3: Initial value for counter 3 + +# Execution Examples +``` +runcompss --lang=python src/increment.py 10 1 2 3 +runcompss src/wordcount.py src/increment.py 10 1 2 3 +python -m pycompss src/wordcount.py src/increment.py 10 1 2 3 +``` + +# Build +No build is required +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "PyCOMPSs Increment example, ran at Marenostrum IV supercomputer, example of INOUT file and compss_open usage" ; + schema1:publisher . + + a schema1:MediaObject ; + schema1:contentSize 2 ; + schema1:dateModified "2023-11-24T08:24:50" ; + schema1:name "file1" ; + schema1:sdDatePublished "2023-11-24T08:24:54+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2 ; + schema1:dateModified "2023-11-24T08:24:50" ; + schema1:name "file2" ; + schema1:sdDatePublished "2023-11-24T08:24:54+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2 ; + schema1:dateModified "2023-11-24T08:24:50" ; + schema1:name "file3" ; + schema1:sdDatePublished "2023-11-24T08:24:54+00:00" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "fragment-based-docking-scoring/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/fragment-based-docking-scoring" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-only-VGP3/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:version "0.1" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator , + ; + schema1:datePublished "2024-01-22T15:53:19+00:00" ; + schema1:description "K-means COMPSs application" ; + schema1:hasPart , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "K-means" ; + schema1:publisher , + . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-Trio-phasing-VGP5/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5" ; + schema1:version "0.1.3" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-06-06T10:12:50+00:00" ; + schema1:description "Small program using PyCOMPSs to perform a Monte Carlo simulation to estimate the value of Pi. The idea is to randomly generate points in a unit square and count how many fall inside the unit circle. The ratio of the points inside the circle to the total number of points gives an approximation of Pi/4." 
; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Monte Carlo Simulation for Pi Estimation" ; + schema1:publisher . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-01-25T16:09:26+00:00" ; + schema1:description "Lysozyme in water sample COMPSs application" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Lysozyme in water sample" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Scaffolding-HiC-VGP8/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:version "0.2.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "cutandrun/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/cutandrun" ; + schema1:version "0.5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "rnaseq-sr/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/rnaseq-sr" ; + schema1:version "0.5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:version "0.2" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-03-22T17:53:30+00:00" ; + schema1:description """**Name:** K-Means GPU +**Contact Person**: cristian.tatu@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs +**Machine**: Minotauro-MN4 + +K-Means running on the GPU leveraging COMPSs GPU Cache for deserialization speedup. +Launched using 32 GPUs (16 nodes). Parameters used: K=40 and 32 blocks of size (1_000_000, 1200). +It creates a block for each GPU. Total dataset shape is (32_000_000, 1200). +Version dislib-0.9.0 (https://github.com/bsc-wdc/dislib/tree/release-0.9) +""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "COMPSs GPU Cache K-Means" ; + schema1:publisher . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator , + ; + schema1:datePublished "2024-01-22T16:19:50+00:00" ; + schema1:description "Wordcount COMPSs application" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Wordcount" ; + schema1:publisher , + . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "parallel-accession-download/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:version "0.1.14" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "baredsc/baredSC-1d-logNorm" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/baredsc" ; + schema1:version "0.5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "kmer-profiling-hifi-VGP1/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/kmer-profiling-hifi-VGP1" ; + schema1:version "0.1.5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "COVID-19: variation analysis on ARTIC PE data" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling" ; + schema1:version "0.4.1" . + + a schema1:Dataset ; + schema1:author , + ; + schema1:citation ; + schema1:contactPoint ; + schema1:datePublished "2021-09-28T17:27:00+0100" ; + schema1:description "This PyCOMPSs workflow tutorial aims to illustrate the process of setting up a simulation system containing a protein, step by step, using the BioExcel Building Blocks library (biobb) in PyCOMPSs for execution on HPC. Three variants of the MD Setup workflows are included, supporting a list of structures, a list of mutations, or a cumulative set of mutations. " ; + schema1:hasPart , + , + ; + schema1:license ; + schema1:mainEntity ; + schema1:name "Protein MD Setup HPC tutorial using BioExcel Building Blocks (biobb) in PyCOMPSs" ; + schema1:publisher ; + schema1:url "https://github.com/bioexcel/biobb_hpc_workflows" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "amr_gene_detection/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/amr_gene_detection" ; + schema1:version "1.0" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "hic-hicup-cooler/hic-fastq-to-cool-hicup-cooler" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:version "0.2.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-sr/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:version "0.5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Plot-Nx-Size/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Plot-Nx-Size" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "COVID-19: variation analysis on WGS SE data" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-se-illumina-wgs-variant-calling" ; + schema1:version "0.1.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:version "0.5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "baredsc/baredSC-1d-logNorm" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/baredsc" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "fluorescence-nuclei-segmentation-and-counting/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/fluorescence-nuclei-segmentation-and-counting" ; + schema1:version "0.1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "rnaseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-pe-illumina-artic-variant-calling/COVID-19-PE-ARTIC-ILLUMINA" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling" ; + schema1:version "0.5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-consensus-from-variation/COVID-19-CONSENSUS-CONSTRUCTION" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-consensus-from-variation" ; + schema1:version "0.4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "rnaseq-sr/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/rnaseq-sr" ; + schema1:version "0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "rnaseq-sr/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/rnaseq-sr" ; + schema1:version "0.1" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator , + ; + schema1:datePublished "2024-01-22T15:55:23+00:00" ; + schema1:description "Cholesky COMPSs application" ; + schema1:hasPart , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Cholesky" ; + schema1:publisher , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-11-03T08:10:10+00:00" ; + schema1:description """**Name:** TruncatedSVD (Randomized SVD) +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs +**Machine**: MareNostrum4 + +TruncatedSVD (Randomized SVD) for computing just 456 singular values out of a (3.6M x 1200) size matrix. +The input matrix represents a CFD transient simulation of air moving past a cylinder. +This application used dislib-0.9.0 (https://github.com/bsc-wdc/dislib/tree/release-0.9) +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Randomized SVD" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Parallel Accession Download" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:version "0.1.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:version "0.1.8" . 
+ + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator , + ; + schema1:datePublished "2024-01-22T16:32:29+00:00" ; + schema1:description "Wordcount COMPSs application" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Wordcount" ; + schema1:publisher , + . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-sr/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:version "0.4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + ; + schema1:name "bacterial_genome_annotation/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/bacterial_genome_annotation" ; + schema1:version "1.1" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.2" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:version "0.1.4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:version "0.2.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "gromacs-mmgbsa/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/gromacs-mmgbsa" ; + schema1:version "0.1.5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Plot-Nx-Size/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Plot-Nx-Size" ; + schema1:version "0.1.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "atacseq/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/atacseq" ; + schema1:version "0.5.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:version "0.2.1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "VGP-meryldb-creation/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/VGP-meryldb-creation" ; + schema1:version "0.1" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-01-24T15:00:23+00:00" ; + schema1:description "Lysozyme in water full COMPSs application, using dataset_small" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Lysozyme in water full version, using dataset_small, data_persistence False" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-pe-illumina-artic-ivar-analysis/SARS-COV-2-ILLUMINA-AMPLICON-IVAR-PANGOLIN-NEXTCLADE" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-ivar-analysis" ; + schema1:version "0.2.1" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-10-27T13:17:52+00:00" ; + schema1:description """**Name:** Matrix multiplication with Objects +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs + +# Description +Matrix multiplication is a binary operation that takes a pair of matrices and produces another matrix. + +If A is an n×m matrix and B is an m×p matrix, the result AB of their multiplication is an n×p matrix defined only if the number of columns m in A is equal to the number of rows m in B. When multiplying A and B, the elements of the rows in A are multiplied with corresponding columns in B. 
+ +In this implementation, A and B are square matrices (same number of rows and columns), and so it is the result matrix C. Each matrix is divided in N blocks of M doubles. The multiplication of two blocks is done by a multiply task method with a simple three-nested-loop implementation. When executed with COMPSs, the main program generates N^3^ tasks arranged as N^2^ chains of N tasks in the dependency graph. + +# Execution instructions +Usage: +``` +runcompss --lang=python src/matmul_objects.py numberOfBlocks blockSize +``` + +where: +* numberOfBlocks: Number of blocks inside each matrix +* blockSize: Size of each block + + +# Execution Examples +``` +runcompss --lang=python src/matmul_objects.py 16 4 +runcompss src/matmul_objects.py 16 4 +python -m pycompss src/matmul_objects.py 16 4 +``` + +# Build +No build is required +""" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "PyCOMPSs Matrix Multiplication with Objects (inputs generated by the code)" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "rnaseq-sr/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/rnaseq-sr" ; + schema1:version "0.4.1" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-12-15T14:53:21+00:00" ; + schema1:description """**Name:** Word Count +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs + +# Description +Wordcount is an application that counts the number of words for a given set of files. + +To allow parallelism the file is divided in blocks that are treated separately and merged afterwards. 
+ +Results are printed to a Pickle binary file, so they can be checked using: python -mpickle result.txt + +This example also shows how to manually add input or output datasets to the workflow provenance recording (using the 'input' and 'output' terms in the ro-crate-info.yaml file). + +# Execution instructions +Usage: +``` +runcompss --lang=python $(pwd)/application_sources/src/wordcount_blocks.py filePath resultPath blockSize +``` + +where: +* filePath: Absolute path of the file to parse +* resultPath: Absolute path to the result file +* blockSize: Size of each block. The lower the number, the more tasks will be generated in the workflow + +# Execution Examples +``` +runcompss --lang=python $(pwd)/application_sources/src/wordcount_blocks.py $(pwd)/dataset/data/compss.txt result.txt 300 +runcompss $(pwd)/application_sources/src/wordcount_blocks.py $(pwd)/dataset/data/compss.txt result.txt 300 +python -m pycompss $(pwd)/application_sources/src/wordcount.py $(pwd)/dataset/data/compss.txt result.txt 300 +``` + +# Build +No build is required +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "PyCOMPSs Wordcount test, dividing input file in blocks, only Python dictionaries used as task parameters (run at MareNostrum IV)" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-variation-reporting/COVID-19-VARIATION-REPORTING" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-variation-reporting" ; + schema1:version "0.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + ; + schema1:name "quality-and-contamination-control/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/quality-and-contamination-control" ; + schema1:version "1.1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "Assembly-decontamination-VGP9/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-decontamination-VGP9" ; + schema1:version "0.1.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "cutandrun/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/cutandrun" ; + schema1:version "0.3" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-11-03T08:10:10+00:00" ; + schema1:description """**Name:** TruncatedSVD (Randomized SVD) +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs +**Machine**: MareNostrum4 + +TruncatedSVD (Randomized SVD) for computing just 456 singular values out of a (3.6M x 1200) size matrix. +The input matrix represents a CFD transient simulation of air moving past a cylinder. +This application used dislib-0.9.0 (https://github.com/bsc-wdc/dislib/tree/release-0.9) +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Randomized SVD" ; + schema1:publisher . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:version "0.4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "COVID-19: consensus construction" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-consensus-from-variation" ; + schema1:version "0.2.1" . + + a schema1:MediaObject ; + schema1:contentSize 304 ; + schema1:dateModified "2024-06-18T13:22:30+00:00" ; + schema1:name "A.0.0" ; + schema1:sdDatePublished "2024-06-18T13:22:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 303 ; + schema1:dateModified "2024-06-18T13:22:30+00:00" ; + schema1:name "A.0.1" ; + schema1:sdDatePublished "2024-06-18T13:22:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 306 ; + schema1:dateModified "2024-06-18T13:22:30+00:00" ; + schema1:name "A.0.2" ; + schema1:sdDatePublished "2024-06-18T13:22:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 311 ; + schema1:dateModified "2024-06-18T13:22:30+00:00" ; + schema1:name "A.1.0" ; + schema1:sdDatePublished "2024-06-18T13:22:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 320 ; + schema1:dateModified "2024-06-18T13:22:30+00:00" ; + schema1:name "A.1.1" ; + schema1:sdDatePublished "2024-06-18T13:22:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 312 ; + schema1:dateModified "2024-06-18T13:22:30+00:00" ; + schema1:name "A.1.2" ; + schema1:sdDatePublished "2024-06-18T13:22:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 319 ; + schema1:dateModified "2024-06-18T13:22:30+00:00" ; + schema1:name "A.2.0" ; + schema1:sdDatePublished "2024-06-18T13:22:31+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 323 ; + schema1:dateModified "2024-06-18T13:22:30+00:00" ; + schema1:name "A.2.1" ; + schema1:sdDatePublished "2024-06-18T13:22:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 311 ; + schema1:dateModified "2024-06-18T13:22:30+00:00" ; + schema1:name "A.2.2" ; + schema1:sdDatePublished "2024-06-18T13:22:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 303 ; + schema1:dateModified "2024-06-18T13:22:30+00:00" ; + schema1:name "A.2.3" ; + schema1:sdDatePublished "2024-06-18T13:22:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 320 ; + schema1:dateModified "2024-06-18T13:22:30+00:00" ; + schema1:name "A.3.2" ; + schema1:sdDatePublished "2024-06-18T13:22:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 310 ; + schema1:dateModified "2024-06-18T13:22:30+00:00" ; + schema1:name "A.3.3" ; + schema1:sdDatePublished "2024-06-18T13:22:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 320 ; + schema1:dateModified "2024-04-30T13:04:24+00:00" ; + schema1:name "C.0.0" ; + schema1:sdDatePublished "2024-04-30T13:04:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 320 ; + schema1:dateModified "2024-04-30T13:04:24+00:00" ; + schema1:name "C.0.1" ; + schema1:sdDatePublished "2024-04-30T13:04:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 320 ; + schema1:dateModified "2024-04-30T13:04:24+00:00" ; + schema1:name "C.1.0" ; + schema1:sdDatePublished "2024-04-30T13:04:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 320 ; + schema1:dateModified "2024-04-30T13:04:24+00:00" ; + schema1:name "C.1.1" ; + schema1:sdDatePublished "2024-04-30T13:04:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 29586272 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshot_matrix_at_final_time.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_10.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_100.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1000.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1010.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1020.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1030.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1040.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1050.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1060.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1070.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1080.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1090.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_110.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1100.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1110.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1120.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1130.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1140.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1150.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1160.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1170.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1180.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1190.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_120.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1200.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_130.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_140.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_150.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_160.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_170.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_180.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_190.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_20.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_200.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_210.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_220.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_230.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_240.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_250.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_260.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_270.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_280.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_290.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_30.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_300.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_310.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_320.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_330.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_340.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_350.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_360.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_370.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_380.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_390.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_40.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_400.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_410.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_420.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_430.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_440.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_450.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_460.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_470.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_480.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_490.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_50.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_500.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_510.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_520.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_530.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_540.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_550.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_560.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_570.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_580.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_590.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_60.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_600.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_610.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_620.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_630.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_640.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_650.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_660.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_670.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_680.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_690.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_70.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_700.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_710.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_720.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_730.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_740.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_750.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_760.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_770.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_780.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_790.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_80.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_800.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_810.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_820.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_830.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_840.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_850.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_860.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_870.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_880.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_890.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_90.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_900.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_910.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_920.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_930.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_940.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_950.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_960.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_970.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_980.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_990.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:Project ; + schema1:description "BeYond-COVID, make data from COVID-19 and other infectious diseases open and accessible to everyone" ; + schema1:funding ; + schema1:name "BY-COVID" ; + schema1:url "https://by-covid.eu/" . + + a schema1:WebPage ; + schema1:about ; + schema1:name "dataspice CSV template" . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . 
+ + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:SoftwareApplication ; + schema1:name "pandas-profiling v3.1.0" ; + schema1:url "https://github.com/pandas-profiling/pandas-profiling" ; + schema1:version "3.1.0" . + + a schema1:SoftwareApplication ; + schema1:name "Quarto" ; + schema1:url "https://quarto.org/" ; + schema1:version "1.0.8" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-5980-8002" ; + schema1:name "Ozan Ozisik" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-6353-0808" ; + schema1:name "Volodymyr Savchenko" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-6744-996X" ; + schema1:name "Helge Hecht" . + + a schema1:Person ; + schema1:affiliation ; + schema1:name "Bruno P. Kinoshita" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-9091-257X" ; + schema1:name "Jordi Rambla" . 
+ + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-9818-9320" ; + schema1:name "Johannes Köster" . + + a schema1:Person ; + schema1:name "Bugra Oezdemir" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-0613-3651" ; + schema1:name "Cenna Doornbos" . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-0637-9950" ; + schema1:name "Juma Bayjan" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-2853-6742" ; + schema1:name "Arnau Soler Costa" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-2961-9670" ; + schema1:name "Michael R. Crusoe" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-4405-6802" ; + schema1:name "Haris Zafeiropoulos" . + + a schema1:Person ; + schema1:name "Andrii Neronov" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-8080-9170" ; + schema1:name "Jeanette Reinshagen" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-9009-7453" ; + schema1:name "Katherine Farquharson" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-1086-2483" ; + schema1:name "Amy Curwin" . + + a schema1:Person ; + schema1:contactPoint ; + schema1:name "Ashish Bhawel" . + + a schema1:SoftwareApplication ; + schema1:description "Bio Building Blocks to analyse Molecular Dynamics trajectories" ; + schema1:downloadUrl "https://quay.io/biocontainers/biobb_analysis:3.6.0--pyhdfd78af_0" ; + schema1:installUrl "https://anaconda.org/bioconda/biobb_analysis" ; + schema1:isPartOf "https://pypi.org/project/biobb/" ; + schema1:license ; + schema1:name "biobb_analysis" ; + schema1:publisher ; + schema1:softwareHelp "https://biobb-analysis.readthedocs.io/en/latest/" ; + schema1:softwareRequirements , + ; + schema1:url "https://github.com/bioexcel/biobb_analysis" ; + schema1:version "3.6.0" . 
+ + a schema1:SoftwareApplication ; + schema1:description "Bio Building Blocks to setup and run Molecular Dynamics simulations." ; + schema1:downloadUrl "https://quay.io/biocontainers/biobb_md:3.6.0--pyhdfd78af_0" ; + schema1:installUrl "https://anaconda.org/bioconda/biobb_md" ; + schema1:isPartOf "https://pypi.org/project/biobb/" ; + schema1:license ; + schema1:name "biobb_md" ; + schema1:publisher ; + schema1:softwareHelp "https://biobb-md.readthedocs.io/en/latest/" ; + schema1:softwareRequirements ; + schema1:url "https://github.com/bioexcel/biobb_md" ; + schema1:version "3.6.0" . + + a schema1:SoftwareApplication ; + schema1:description "Bio Building Blocks to setup and run Alchemical Free Energy calculations" ; + schema1:downloadUrl "https://quay.io/biocontainers/biobb_pmx:3.6.0--pyhdfd78af_0" ; + schema1:installUrl "https://anaconda.org/bioconda/biobb_pmx" ; + schema1:isPartOf "https://pypi.org/project/biobb/" ; + schema1:license ; + schema1:name "biobb_pmx" ; + schema1:publisher ; + schema1:softwareHelp "https://pypi.org/project/biobb-pmx/3.6.0/en/latest/" ; + schema1:softwareRequirements ; + schema1:url "https://github.com/bioexcel/biobb_pmx" ; + schema1:version "3.6.0" . + + a schema1:SoftwareApplication ; + schema1:description "Bio Building Blocks to modify or extract information from a PDB structure file" ; + schema1:downloadUrl "https://quay.io/biocontainers/biobb_structure_utils:3.6.1--pyhdfd78af_0" ; + schema1:installUrl "https://anaconda.org/bioconda/biobb_structure_utils" ; + schema1:isPartOf "https://pypi.org/project/biobb/" ; + schema1:license ; + schema1:name "biobb_structure_utils" ; + schema1:publisher ; + schema1:softwareHelp "https://biobb-structure-utils.readthedocs.io/en/latest/" ; + schema1:softwareRequirements , + ; + schema1:url "https://github.com/bioexcel/biobb_structure_utils" ; + schema1:version "3.6.1" . 
+ + a schema1:ComputerLanguage, + schema1:SoftwareApplication ; + schema1:description "Python Binding for COMP Superscalar Runtime" ; + schema1:name "PyCOMPSs" ; + schema1:publisher ; + schema1:softwareRequirements ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "2.9" . + + a schema1:WebPage ; + schema1:name "Quarto Markdown" . + + a schema1:Organization ; + schema1:name "Institut de Physique du Globe de Paris (IPGP)" . + + a schema1:Organization ; + schema1:name "Universitat Politècnica de Catalunya" . + + a schema1:Organization ; + schema1:name "University of Naples Federico II" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Juma Bayjan" . + + a schema1:Person ; + schema1:name "Ozan Ozisik" . - a schema1:HowTo, - schema1:MediaObject, - schema1:SoftwareSourceCode ; - schema1:contentSize 6391 ; - schema1:programmingLanguage . + a schema1:Person ; + schema1:name "Patrick Durand" . + + a schema1:Person ; + schema1:name "Laura Leroi" . + + a schema1:Person ; + schema1:name "Volodymyr Savchenko" . + + a schema1:Person ; + schema1:name "Cenna Doornbos" . + + a schema1:Person ; + schema1:name "Haris Zafeiropoulos" . + + a schema1:Person ; + schema1:name "Ambarish Kumar" . + + a schema1:Person ; + schema1:name "Marvin Martens" . + + a schema1:Person ; + schema1:name "Jeanette Reinshagen" . + + a schema1:Person ; + schema1:name "Katherine Farquharson" . + + a schema1:Person ; + schema1:name "Vasiliki Panagi" . + + a schema1:Person ; + schema1:name "Konstantinos Kyritsis" . + + a schema1:Person ; + schema1:name "Denys Savchenko" . + + a schema1:Person ; + schema1:name "Michael R. Crusoe" . + + a schema1:Person ; + schema1:name "Amy Curwin" . + + a schema1:Person ; + schema1:name "Arnau Soler Costa" . + + a schema1:Person ; + schema1:name "Helge Hecht" . + + a schema1:Person ; + schema1:name "Zargham Ahmad" . + + a schema1:Person ; + schema1:name "Jordi Rambla" . 
+ + a schema1:Organization, + schema1:Project ; + schema1:name "TRON gGmbH" . + + a schema1:Organization, + schema1:Project ; + schema1:name "NGFF Tools" . + + a schema1:Organization, + schema1:Project ; + schema1:name "NHM Clark group" . + + a schema1:Organization, + schema1:Project ; + schema1:name "RECETOX SpecDatRI" . + + a schema1:Organization, + schema1:Project ; + schema1:name "EGA QC" . + + a schema1:Organization, + schema1:Project ; + schema1:name "SARS-CoV-2 Data Hubs" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Snakemake-Workflows" . + + a schema1:Organization, + schema1:Project ; + schema1:name "emo-bon" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Toxicology community" . + + a schema1:Organization, + schema1:Project ; + schema1:name "EU-Openscreen" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 9085 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "ptf_workflow.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 9085 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "ptf_workflow.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 3683 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "Wordcount.java" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 1948 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "matmul_files.py" ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 5243 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "cch_matmul_test.py" ; + schema1:programmingLanguage ; + schema1:softwareRequirements . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 2271 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "main.py" ; + schema1:programmingLanguage ; + schema1:softwareRequirements . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 46258 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "Workflow.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 4503 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "Matmul.java" ; + schema1:programmingLanguage . + + a schema1:Person ; + schema1:name "Debjyoti Ghosh" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 8973 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "lysozyme_in_water_full.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 9298 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "random_svd_compss.py" ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 1254 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "lanczos_dislib_version.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 7564 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "KMeans.java" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 1959 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "matmul_files.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 2897 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "wordcount.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 6602 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "SparseLU.java" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 6602 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "SparseLU.java" ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 7601 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "lysozyme_in_water.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 5344 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "cc.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 2312 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "matmul_directory.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 1101 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "cch_kmeans_test.py" ; + schema1:programmingLanguage ; + schema1:softwareRequirements . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 5243 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "cch_matmul_test.py" ; + schema1:programmingLanguage ; + schema1:softwareRequirements . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 2107 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "main_pytorch_sync_5_nodes.py" ; + schema1:programmingLanguage ; + schema1:softwareRequirements , + . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 2107 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "main_pytorch_sync_5_nodes.py" ; + schema1:programmingLanguage ; + schema1:softwareRequirements , + . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 2415 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "my_workflow_multiple.py" ; + schema1:programmingLanguage . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 1959 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "matmul_files.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 1765 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "simple.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 480 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "fibonacci.py" ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 1276 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "main_rf.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 1380 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "increment_fibonacci.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 2836 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "increment.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 6421 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "kmeans.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 2277 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "monte_carlo_pi.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 7601 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "lysozyme_in_water.py" ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 1101 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "cch_kmeans_test.py" ; + schema1:programmingLanguage ; + schema1:softwareRequirements . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 2168 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "wc_reduce.py" ; + schema1:programmingLanguage . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 4293 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "cholesky.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 9301 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "random_svd_compss.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 2782 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "wc_merge.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 7601 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "lysozyme_in_water.py" ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 1955 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "matmul_objects.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 2285 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "wordcount_blocks.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 9301 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "random_svd_compss.py" ; + schema1:programmingLanguage . + + a schema1:Dataset ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:name "MakeTrainingMatricesSerial" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:SoftwareApplication ; + schema1:name "dataspice" ; + schema1:url "https://docs.ropensci.org/dataspice/" ; + schema1:version "1.1.0" . + + a schema1:Grant ; + schema1:funder ; + schema1:identifier "https://doi.org/10.3030/101046203" ; + schema1:name "HORIZON-INFRA-2021-EMERGENCY-01 101046203" . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . 
+ + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-6867-2039" ; + schema1:name "Ekaterina Sakharova" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-8035-341X" ; + schema1:name "Konstantinos Kyritsis" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-8222-008X" ; + schema1:name "Samuel Lambert" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-8449-1502" ; + schema1:name "Cali Willet" . + + a schema1:Person ; + schema1:name "Fotis Psomopoulos" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-2913-0779" ; + schema1:name "Benjamin Wingfield" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-4159-6096" ; + schema1:name "Asier Gonzalez-Uriarte" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-6454-2680" ; + schema1:name "Bryan Raubenolt" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-7341-1842" ; + schema1:name "Laurence Livermore" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-7664-8490" ; + schema1:name "Denys Savchenko" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-7770-620X" ; + schema1:name "Friederike Ehrhart" . + + a schema1:Person ; + schema1:name "Matthieu Muffato" . + + a schema1:Person ; + schema1:name "Laurent Gil" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-9984-4689" ; + schema1:name "Melchior du Lac" . 
+ + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-0914-2470" ; + schema1:name "Anthony Bretaudeau" . + + a schema1:Person ; + schema1:name "Nikolaos Pechlivanis" . + + a schema1:Organization ; + schema1:name "European Commission" . + + a schema1:Organization ; + schema1:name "Euro-Mediterranean Center for Climate Change" . + + a schema1:Organization ; + schema1:name "Norwegian Geotechnical Institute" . + + a schema1:Organization ; + schema1:alternateName "IACS" ; + schema1:legalName "Instituto Aragonés de Ciencias de la Salud" ; + schema1:name "Instute for Health Science in Aragon (IACS)" ; + schema1:url "https://www.iacs.es/" . + + a schema1:Collection ; + schema1:name "CEITEC CryoEM Facility Workflows" . + + a schema1:Person ; + schema1:name "Friederike Ehrhart" . + + a schema1:Person ; + schema1:name "Melchior du Lac" . + + a schema1:Person ; + schema1:name "Asier Gonzalez-Uriarte" . + + a schema1:Person ; + schema1:name "Laurence Livermore" . + + a schema1:Person ; + schema1:name "Cali Willet" . + + a schema1:Person ; + schema1:name "Benjamin Wingfield" . + + a schema1:Person ; + schema1:name "Samuel Lambert" . + + a schema1:Person ; + schema1:name "Ekaterina Sakharova" . + + a schema1:Person ; + schema1:name "Bryan Raubenolt" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Seq4AMR" . + + a schema1:Organization, + schema1:Project ; + schema1:name "NanoGalaxy" . + + a schema1:Organization, + schema1:Project ; + schema1:name "SANBI Pathogen Bioinformatics" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Biodata Analysis Group" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Polygenic Score Catalog" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Scipion CNB" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Tree of Life Genome Analysis" . 
+ + a schema1:Organization, + schema1:Project ; + schema1:name "Generalized Open-Source Workflows for Atomistic Molecular Dynamics Simulations of Viral Helicases" . + + a schema1:Organization, + schema1:Project ; + schema1:name "EOSC-Life - Demonstrator 7: Rare Diseases" . + + a schema1:Organization, + schema1:Project ; + schema1:name "ODA" . + + a schema1:Organization, + schema1:Project ; + schema1:name "OpenEBench" . + + a schema1:WebSite ; + schema1:name "Java Archive Format" . + + a schema1:ComputerLanguage, + schema1:SoftwareApplication ; + schema1:name "Python 3.8.0" ; + schema1:url "https://www.python.org/downloads/release/python-380/" ; + schema1:version "3.8.0" . - a schema1:MediaObject, + a schema1:MediaObject, schema1:SoftwareSourceCode, ; - dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; - schema1:contentSize 33953 ; - schema1:creator , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - ; - schema1:dateCreated "2020-04-10T12:54:10Z" ; - schema1:dateModified "2023-01-16T13:40:12Z" ; - schema1:description "Analysis of variation within individual COVID-19 samples using Illumina Single End data. More info can be found at https://covid19.galaxyproject.org/genomics/" ; - schema1:image ; - schema1:keywords "covid-19" ; - schema1:license "https://spdx.org/licenses/MIT" ; - schema1:name "Genomics - SE Variation" ; - schema1:producer ; - schema1:programmingLanguage ; - schema1:sdPublisher ; - schema1:subjectOf ; - schema1:url "https://workflowhub.eu/workflows/8?version=1" ; - schema1:version 1 ; - ns1:input , - . + schema1:author , + ; + schema1:dateModified "2021-09-03T00:00:00.000Z" ; + schema1:description "Performs a system setup and runs a molecular dynamics simulation on each one of the structures listed in the YAML properties file." ; + schema1:hasPart , + , + , + ; + schema1:isBasedOn ; + schema1:name "md_list.py" ; + schema1:programmingLanguage ; + schema1:runtimePlatform . 
- a , - schema1:ImageObject, - schema1:MediaObject ; - schema1:contentSize 11562 . + a schema1:Organization ; + schema1:name "Nadolina Brajuka" . - a schema1:Dataset ; - dct:conformsTo , - ; - schema1:author , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - ; - schema1:description "This workflow employs a recombination detection algorithm (GARD) developed by Kosakovsky Pond et al. and implemented in the hyphy package. More info can be found at https://covid19.galaxyproject.org/genomics/" ; - schema1:hasPart , - , - ; - schema1:identifier "https://workflowhub.eu/workflows/10?version=1" ; - schema1:license "MIT" ; - schema1:mainEntity ; - schema1:name "Research Object Crate for Genomics - Recombination and selection analysis" ; - schema1:sdDatePublished "2024-06-17 10:59:50 +0100" ; - schema1:url "https://workflowhub.eu/workflows/10/ro_crate?version=1" . + a schema1:MediaObject . - a schema1:HowTo, - schema1:MediaObject, - schema1:SoftwareSourceCode ; - schema1:contentSize 2947 ; - schema1:programmingLanguage . + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-3877-4170" ; + schema1:name "Sergi Sayols" . - a schema1:MediaObject, - schema1:SoftwareSourceCode, - ; - dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; - schema1:contentSize 14263 ; - schema1:creator , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - ; - schema1:dateCreated "2020-04-10T13:30:37Z" ; - schema1:dateModified "2023-01-16T13:40:25Z" ; - schema1:description "This workflow employs a recombination detection algorithm (GARD) developed by Kosakovsky Pond et al. and implemented in the hyphy package. 
More info can be found at https://covid19.galaxyproject.org/genomics/" ; - schema1:image ; - schema1:keywords "covid-19" ; - schema1:license "https://spdx.org/licenses/MIT" ; - schema1:name "Genomics - Recombination and selection analysis" ; - schema1:producer ; - schema1:programmingLanguage ; - schema1:sdPublisher ; - schema1:subjectOf ; - schema1:url "https://workflowhub.eu/workflows/10?version=1" ; - schema1:version 1 ; - ns1:input . + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-4166-4343" ; + schema1:name "Kim Philipp Jablonski" . - a , - schema1:ImageObject, - schema1:MediaObject ; - schema1:contentSize 6047 . + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-4319-260X" ; + schema1:name "Jorrit Mesman" . - a schema1:Dataset ; - dct:conformsTo , - ; - schema1:author , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - ; - schema1:description "Analysis of S-protein polymorphism. This workflow includes: obtaining coding sequences of S proteins from a diverse group of coronaviruses and generating amino acid alignments to assess conservation of the polymorphic location. More info can be found at https://covid19.galaxyproject.org/genomics/" ; - schema1:hasPart , - , - ; - schema1:identifier "https://workflowhub.eu/workflows/9?version=1" ; - schema1:license "MIT" ; - schema1:mainEntity ; - schema1:name "Research Object Crate for Genomics - Analysis of S-protein polymorphism" ; - schema1:sdDatePublished "2024-06-17 10:59:51 +0100" ; - schema1:url "https://workflowhub.eu/workflows/9/ro_crate?version=1" . + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-5516-8391" ; + schema1:name "Germán Royval" . + + a schema1:Person ; + schema1:name " Priyanka Surana", + "Priyanka Surana" . + + a schema1:Person ; + schema1:name "Petr Walczysko" . + + a schema1:Person ; + schema1:name "Pau Andrio" . 
+ + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-4308-5617" ; + schema1:name "Coline Royaux" . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Oliver Woolland" . + + a schema1:Person ; + schema1:name "Germán Royval" . + + a schema1:Person ; + schema1:name "Kim Philipp Jablonski" . + + a schema1:Person ; + schema1:name "Jorrit Mesman" . + + a schema1:Person ; + schema1:name "Coline Royaux" . + + a schema1:Person ; + schema1:name "Sergi Sayols" . + + a schema1:Organization, + schema1:Project ; + schema1:name "TRE-FX" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Euro-BioImaging" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Lake Erken modelling setup" . + + a schema1:Organization, + schema1:Project ; + schema1:name "IMBforge" . + + a schema1:Organization, + schema1:Project ; + schema1:name "iPC: individualizedPaediatricCure" . + + a schema1:Organization, + schema1:Project ; + schema1:name "CO2MICS Lab" . + + a schema1:MediaObject . + + a schema1:Organization ; + schema1:memberOf , + , + , + , + ; + schema1:name "Molecular Modeling and Bioinformatics unit" ; + schema1:url "https://mmb.irbbarcelona.org/" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Fernando Vázquez-Novoa" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-8293-4816" ; + schema1:name "Tom Brown" . + + a schema1:Person ; + schema1:affiliation ; + schema1:name "Alexander Degelsegger-Marquez" . + + a schema1:Person ; + schema1:affiliation ; + schema1:name "Simon Saldner" . 
+ + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-7561-0810" ; + schema1:name "Ivan Topolsky" . + + a schema1:Person ; + schema1:affiliation ; + schema1:name "Lorenz Dolanski-Aghamanoukjan" . + + a schema1:Organization ; + schema1:name "Universidad de Málaga" . + + a schema1:CreativeWork ; + schema1:description """Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.""" ; + schema1:identifier "https://www.apache.org/licenses/LICENSE-2.0" ; + schema1:name "Apache License 2.0" . + + a schema1:CreativeWork, + schema1:Thing ; + schema1:identifier "CC-BY-4.0" ; + schema1:name "Creative Commons Attribution 4.0 International" ; + schema1:url "https://creativecommons.org/licenses/by/4.0/" . + + a schema1:Collection ; + schema1:name "ERGA Assembly Galaxy HiFi & HiC Pipelines (Hifiasm-HiC + Purge_Dups + YaHS)" . + + a schema1:Collection ; + schema1:name "16S Microbial Analysis with mothur (on Galaxy Australia)" . + + a schema1:Collection ; + schema1:name "ERGA Assembly Galaxy ONT+Illumina & HiC Pipelines (NextDenovo-HyPo + Purge_Dups + YaHS)" . + + a schema1:Collection ; + schema1:name "ERGA Assembly Galaxy ONT+Illumina & HiC Pipelines (Flye-HyPo + Purge_Dups + YaHS)" . + + a schema1:Collection ; + schema1:name "ERGA Assembly Snakemake HiFi & HiC Pipelines" . + + a schema1:Collection ; + schema1:name "IDR" . + + a schema1:Person ; + schema1:name "Ivan Topolsky" . + + a schema1:Person ; + schema1:name "Delphine Lariviere" . + + a schema1:Person ; + schema1:name "Tom Brown" . 
+ + a schema1:Organization, + schema1:Project ; + schema1:name "Integrated and Urban Plant Pathology Laboratory" . + + a schema1:Organization, + schema1:Project ; + schema1:name "CWL workflow SARS-CoV-2" . + + a schema1:Organization, + schema1:Project ; + schema1:name "V-Pipe" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Specimen Data Refinery" . + + a schema1:Dataset ; + schema1:contributor , + , + , + ; + schema1:creator , + , + , + , + ; + schema1:datePublished "2023-01-26" ; + schema1:description "Synthetic data set to test the causal model proposed by the baseline use case in BY-COVID WP5 assessing the effectiveness of the COVID-19 vaccine(s)" ; + schema1:distribution ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.6913045" ; + schema1:keywords "causal model", + "covid-19", + "dataset", + "effectiveness", + "synthetic data", + "vaccines" ; + schema1:license ; + schema1:name "COVID-19 vaccine(s) effectiveness assessment (synthetic dataset)" ; + schema1:spatialCoverage _:b0 ; + schema1:subjectOf ; + schema1:temporalCoverage "01-01-2020/31-12-2024" ; + schema1:url "https://zenodo.org/record/7572373" ; + schema1:variableMeasured _:b1, + _:b10, + _:b11, + _:b12, + _:b13, + _:b14, + _:b15, + _:b16, + _:b17, + _:b18, + _:b19, + _:b2, + _:b20, + _:b21, + _:b22, + _:b23, + _:b24, + _:b25, + _:b26, + _:b27, + _:b28, + _:b29, + _:b3, + _:b30, + _:b31, + _:b32, + _:b33, + _:b34, + _:b35, + _:b36, + _:b37, + _:b38, + _:b39, + _:b4, + _:b40, + _:b41, + _:b5, + _:b6, + _:b7, + _:b8, + _:b9 ; + schema1:version "1.1.0" . 
+ + a schema1:Dataset ; + schema1:contributor , + , + , + ; + schema1:creator , + , + , + , + ; + schema1:datePublished "2023-01-26" ; + schema1:description "Synthetic data set to test the causal model proposed by the baseline use case in BY-COVID WP5 assessing the effectiveness of the COVID-19 vaccine(s)" ; + schema1:distribution ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.6913045" ; + schema1:keywords "causal model", + "covid-19", + "dataset", + "effectiveness", + "synthetic data", + "vaccines" ; + schema1:name "COVID-19 vaccine(s) effectiveness assessment (synthetic dataset)" ; + schema1:spatialCoverage _:b0 ; + schema1:subjectOf ; + schema1:temporalCoverage "01-01-2020/31-12-2024" ; + schema1:url "https://zenodo.org/record/7572373" ; + schema1:variableMeasured _:b1, + _:b10, + _:b11, + _:b12, + _:b13, + _:b14, + _:b15, + _:b16, + _:b17, + _:b18, + _:b19, + _:b2, + _:b20, + _:b21, + _:b22, + _:b23, + _:b24, + _:b25, + _:b26, + _:b27, + _:b28, + _:b29, + _:b3, + _:b30, + _:b31, + _:b32, + _:b33, + _:b34, + _:b35, + _:b36, + _:b37, + _:b38, + _:b39, + _:b4, + _:b40, + _:b41, + _:b5, + _:b6, + _:b7, + _:b8, + _:b9 ; + schema1:version "1.1.0" . 
+ + a schema1:Dataset ; + schema1:contributor , + , + , + ; + schema1:creator , + , + , + , + ; + schema1:datePublished "2023-01-26" ; + schema1:description "Synthetic data set to test the causal model proposed by the baseline use case in BY-COVID WP5 assessing the effectiveness of the COVID-19 vaccine(s)" ; + schema1:distribution ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.6913045" ; + schema1:keywords "causal model", + "covid-19", + "dataset", + "effectiveness", + "synthetic data", + "vaccines" ; + schema1:license ; + schema1:name "COVID-19 vaccine(s) effectiveness assessment (synthetic dataset)" ; + schema1:spatialCoverage _:b0 ; + schema1:subjectOf ; + schema1:temporalCoverage "01-01-2020/31-12-2024" ; + schema1:url "https://zenodo.org/record/7572373" ; + schema1:variableMeasured _:b1, + _:b10, + _:b11, + _:b12, + _:b13, + _:b14, + _:b15, + _:b16, + _:b17, + _:b18, + _:b19, + _:b2, + _:b20, + _:b21, + _:b22, + _:b23, + _:b24, + _:b25, + _:b26, + _:b27, + _:b28, + _:b29, + _:b3, + _:b30, + _:b31, + _:b32, + _:b33, + _:b34, + _:b35, + _:b36, + _:b37, + _:b38, + _:b39, + _:b4, + _:b40, + _:b41, + _:b5, + _:b6, + _:b7, + _:b8, + _:b9 ; + schema1:version "1.1.0" . + + a schema1:Person ; + schema1:name "Mridul Johari" . + + a schema1:Person ; + schema1:name "Yasmmin Martins" . + + a schema1:Person ; + schema1:name "Daphne Wijnbergen" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Jorge Ejarque" . + + a schema1:Organization ; + schema1:name "IRB Barcelona", + "Institute for Research in Biomedicine" ; + schema1:url "https://www.irbbarcelona.org/" . + + a schema1:Person ; + schema1:name "Saskia Hiltemann" . + + a schema1:Organization, + schema1:Project ; + schema1:name "IBISBA Workflows" . + + a schema1:Organization, + schema1:Project ; + schema1:name "yPublish - Bioinfo tools" . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Organization ; + schema1:name "IWC" . 
+ + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-1789-1861" ; + schema1:name "Jean-Marie Burel" . + + a schema1:Person ; + schema1:name "Hans-Rudolf Hotz" . + + a schema1:Person ; + schema1:name "Mehmet Tekman" . + + a schema1:Person ; + schema1:name "Daniel Blankenberg" . + + a schema1:Person ; + schema1:name "Wendi Bacon" . + + a schema1:Person ; + schema1:name "Marius van den Beek" . + + a schema1:Organization ; + schema1:name "National Institute of Geophysics and Volcanology" . + + a schema1:Person ; + schema1:name "Jean-Marie Burel" . + + a schema1:Organization, + schema1:Project ; + schema1:name "PerMedCoE" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Galaxy Climate" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Biodiversity Genomics Europe (general)" . + + a schema1:Organization, + schema1:Project ; + schema1:name "OME" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint , + ; + schema1:name "Javier Conejero" . + + a schema1:Person ; + schema1:affiliation ; + schema1:identifier "https://orcid.org/0000-0001-9842-9718" ; + schema1:name "Stian Soiland-Reyes" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-2439-8650" ; + schema1:name "Gareth Price" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-2488-953X" ; + schema1:name "Rosemarie Sadsad" . + + a schema1:Person ; + schema1:name "Clea Siguret" . + + a schema1:Collection ; + schema1:name "HiFi genome assembly on Galaxy" . + + a schema1:Person ; + schema1:name "Rosemarie Sadsad" . - a schema1:HowTo, - schema1:MediaObject, - schema1:SoftwareSourceCode ; - schema1:contentSize 1619 ; - schema1:programmingLanguage . + a schema1:Person ; + schema1:name "Gareth Price" . 
- a schema1:MediaObject, - schema1:SoftwareSourceCode, - ; - dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; - schema1:contentSize 6982 ; - schema1:creator , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - ; - schema1:dateCreated "2020-04-10T13:00:36Z" ; - schema1:dateModified "2023-01-16T13:40:19Z" ; - schema1:description "Analysis of S-protein polymorphism. This workflow includes: obtaining coding sequences of S proteins from a diverse group of coronaviruses and generating amino acid alignments to assess conservation of the polymorphic location. More info can be found at https://covid19.galaxyproject.org/genomics/" ; - schema1:image ; - schema1:keywords "covid-19" ; - schema1:license "https://spdx.org/licenses/MIT" ; - schema1:name "Genomics - Analysis of S-protein polymorphism" ; - schema1:producer ; - schema1:programmingLanguage ; - schema1:sdPublisher ; - schema1:subjectOf ; - schema1:url "https://workflowhub.eu/workflows/9?version=1" ; - schema1:version 1 ; - ns1:input . + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-8504-068X" ; + schema1:name "Yvan Le Bras" . - a , - schema1:ImageObject, - schema1:MediaObject ; - schema1:contentSize 3531 . + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-0419-1476" ; + schema1:name "Georgina Samaha" . - a schema1:Dataset ; - dct:conformsTo , - ; - schema1:author , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - ; - schema1:description "Preprocessing of raw SARS-CoV-2 reads. More info can be found at https://covid19.galaxyproject.org/genomics/" ; - schema1:hasPart , - , - ; - schema1:identifier "https://workflowhub.eu/workflows/2?version=1" ; - schema1:license "MIT" ; - schema1:mainEntity ; - schema1:name "Research Object Crate for Genomics - Read pre-processing" ; - schema1:sdDatePublished "2024-06-17 10:59:53 +0100" ; - schema1:url "https://workflowhub.eu/workflows/2/ro_crate?version=1" . 
+ a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-3472-3736" ; + schema1:name "Martin Beracochea" . - a schema1:HowTo, - schema1:MediaObject, - schema1:SoftwareSourceCode ; - schema1:contentSize 7753 ; - schema1:programmingLanguage . + a schema1:CreativeWork ; + schema1:name "Process Run Crate" ; + schema1:version "0.4" . - a schema1:MediaObject, - schema1:SoftwareSourceCode, - ; - dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; - schema1:contentSize 41191 ; - schema1:creator , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - ; - schema1:dateCreated "2020-04-10T10:20:13Z" ; - schema1:dateModified "2023-01-16T13:39:40Z" ; - schema1:description "Preprocessing of raw SARS-CoV-2 reads. More info can be found at https://covid19.galaxyproject.org/genomics/" ; - schema1:image ; - schema1:keywords "covid-19" ; - schema1:license "https://spdx.org/licenses/MIT" ; - schema1:name "Genomics - Read pre-processing" ; - schema1:producer ; - schema1:programmingLanguage ; - schema1:sdPublisher ; - schema1:subjectOf ; - schema1:url "https://workflowhub.eu/workflows/2?version=1" ; - schema1:version 1 ; - ns1:input , - . + a schema1:CreativeWork ; + schema1:name "Workflow Run Crate" ; + schema1:version "0.4" . - a , - schema1:ImageObject, - schema1:MediaObject ; - schema1:contentSize 12854 . + a schema1:Collection ; + schema1:name "Workflows in EuroScienceGateway" . - a schema1:Dataset ; - dct:conformsTo , - ; - schema1:author , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - ; - schema1:description "Analysis of variation within individual COVID-19 samples using Illumina Paired End data. 
More info can be found at https://covid19.galaxyproject.org/genomics/" ; - schema1:hasPart , - , - ; - schema1:identifier "https://workflowhub.eu/workflows/7?version=1" ; - schema1:license "MIT" ; - schema1:mainEntity ; - schema1:name "Research Object Crate for Genomics - PE Variation" ; - schema1:sdDatePublished "2024-06-17 10:59:52 +0100" ; - schema1:url "https://workflowhub.eu/workflows/7/ro_crate?version=1" . + a schema1:Person ; + schema1:name "Georgina Samaha" . - a schema1:HowTo, - schema1:MediaObject, - schema1:SoftwareSourceCode ; - schema1:contentSize 7268 ; - schema1:programmingLanguage . + a schema1:Person ; + schema1:name "Martin Beracochea" . - a schema1:MediaObject, - schema1:SoftwareSourceCode, - ; - dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; - schema1:contentSize 37131 ; - schema1:creator , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - ; - schema1:dateCreated "2020-04-10T12:52:22Z" ; - schema1:dateModified "2023-05-30T12:07:57Z" ; - schema1:description "Analysis of variation within individual COVID-19 samples using Illumina Paired End data. More info can be found at https://covid19.galaxyproject.org/genomics/" ; - schema1:image ; - schema1:keywords "covid-19" ; - schema1:license "https://spdx.org/licenses/MIT" ; - schema1:name "Genomics - PE Variation" ; - schema1:producer ; - schema1:programmingLanguage ; - schema1:sdPublisher ; - schema1:subjectOf ; - schema1:url "https://workflowhub.eu/workflows/7?version=1" ; - schema1:version 1 ; - ns1:input , - . + a schema1:Person ; + schema1:name "Yvan Le Bras" . - a , - schema1:ImageObject, - schema1:MediaObject ; - schema1:contentSize 13428 . + a schema1:Organization, + schema1:Project ; + schema1:name "HoloFood at MGnify" . - a schema1:Dataset ; - dct:conformsTo , - ; - schema1:author , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - ; - schema1:description "Preprocessing of raw SARS-CoV-2 reads. 
This workflow contains an alternate starting point to avoid the data to be downloaded from the NCBI SRA. More info can be found at https://covid19.galaxyproject.org/genomics/" ; - schema1:hasPart , - , - ; - schema1:identifier "https://workflowhub.eu/workflows/4?version=1" ; - schema1:license "MIT" ; - schema1:mainEntity ; - schema1:name "Research Object Crate for Genomics - Read pre-processing without downloading from SRA" ; - schema1:sdDatePublished "2024-06-17 10:59:53 +0100" ; - schema1:url "https://workflowhub.eu/workflows/4/ro_crate?version=1" . + a schema1:Organization, + schema1:Project ; + schema1:name "MGnify" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-4806-5140" ; + schema1:name "José Mª Fernández" . + + a schema1:Person ; + schema1:name "Pierre Osteil" . + + a schema1:Collection ; + schema1:name "BioExcel Building Blocks (BioBB) Protein MD Setup Tutorials" . + + a schema1:Person ; + schema1:name "José Mª Fernández" . + + a schema1:Organization, + schema1:Project ; + schema1:name "PNDB" . + + a schema1:Organization, + schema1:Project ; + schema1:name "EJPRD WP13 case-studies workflows" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-5752-2155" ; + schema1:name "Cristóbal Gallardo" . + + a schema1:Person ; + schema1:name "Clea Siguret", + "Pierre Marin" . + + a schema1:Person ; + schema1:affiliation ; + schema1:name "Martínez-Lizaga, Natalia" . + + a schema1:Collection ; + schema1:name "TSI annotation workflows" . + + a schema1:Person ; + schema1:name "Cristóbal Gallardo" . + + a schema1:Person ; + schema1:name "Diego De Panis" . + + a schema1:Organization, + schema1:Project ; + schema1:name "EuroScienceGateway" . + + a schema1:Organization ; + schema1:name "abromics-consortium" . + + a schema1:Person ; + schema1:name "Tracy Chew" . - a schema1:HowTo, - schema1:MediaObject, - schema1:SoftwareSourceCode ; - schema1:contentSize 6333 ; - schema1:programmingLanguage . 
+ a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint , + ; + schema1:name "Rosa M Badia", + "Rosa M. Badia" . - a schema1:MediaObject, - schema1:SoftwareSourceCode, - ; - dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; - schema1:contentSize 35202 ; - schema1:creator , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - ; - schema1:dateCreated "2020-04-10T12:06:00Z" ; - schema1:dateModified "2023-01-16T13:39:49Z" ; - schema1:description "Preprocessing of raw SARS-CoV-2 reads. This workflow contains an alternate starting point to avoid the data to be downloaded from the NCBI SRA. More info can be found at https://covid19.galaxyproject.org/genomics/" ; - schema1:image ; - schema1:keywords "covid-19" ; - schema1:license "https://spdx.org/licenses/MIT" ; - schema1:name "Genomics - Read pre-processing without downloading from SRA" ; - schema1:producer ; - schema1:programmingLanguage ; - schema1:sdPublisher ; - schema1:subjectOf ; - schema1:url "https://workflowhub.eu/workflows/4?version=1" ; - schema1:version 1 ; - ns1:input , - . + a schema1:Organization, + schema1:Project ; + schema1:name "usegalaxy-eu" . - a , - schema1:ImageObject, - schema1:MediaObject ; - schema1:contentSize 10958 . + a schema1:Organization, + schema1:Project ; + schema1:name "TransBioNet" . - a schema1:Dataset ; - dct:conformsTo , - ; - schema1:creativeWorkStatus "Stable" ; - schema1:description """Virus genome assembly with Unicycler and Spades,\r -The 2 assemblers works in parallel. 
The graph visualization is made with Bandage.\r -workflow git repository : https://github.com/fjrmoreews/cwl-workflow-SARS-CoV-2/blob/master/Assembly/workflow/assembly-wf-virus.cwl\r -Based on https://github.com/galaxyproject/SARS-CoV-2/blob/master/genomics/2-Assembly/as_wf.png\r -""" ; - schema1:hasPart , - ; - schema1:identifier "https://workflowhub.eu/workflows/3?version=1" ; - schema1:license "MIT" ; - schema1:mainEntity ; - schema1:name "Research Object Crate for Virus genome assembly with Unicycler and Spades." ; - schema1:sdDatePublished "2024-06-17 10:59:50 +0100" ; - schema1:url "https://workflowhub.eu/workflows/3/ro_crate?version=1" . + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-6421-3484" ; + schema1:name "Delphine Lariviere" . - a schema1:MediaObject, - schema1:SoftwareSourceCode, - ; - dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; - schema1:contentSize 8602 ; - schema1:dateCreated "2020-04-10T10:45:00Z" ; - schema1:dateModified "2023-01-16T13:39:45Z" ; - schema1:description """Virus genome assembly with Unicycler and Spades,\r -The 2 assemblers works in parallel. The graph visualization is made with Bandage.\r -workflow git repository : https://github.com/fjrmoreews/cwl-workflow-SARS-CoV-2/blob/master/Assembly/workflow/assembly-wf-virus.cwl\r -Based on https://github.com/galaxyproject/SARS-CoV-2/blob/master/genomics/2-Assembly/as_wf.png\r -""" ; - schema1:image ; - schema1:keywords "covid-19, Assembly" ; - schema1:license "https://spdx.org/licenses/MIT" ; - schema1:name "Virus genome assembly with Unicycler and Spades." ; - schema1:producer ; - schema1:programmingLanguage ; - schema1:sdPublisher ; - schema1:url "https://workflowhub.eu/workflows/3?version=1" ; - schema1:version 1 ; - ns1:input , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - ; - ns1:output , - , - , - , - , - , - , - , - , - , - , - , - . 
+ a schema1:Person ; + schema1:affiliation ; + schema1:email "jgonzalezga.iacs@aragon.es" ; + schema1:name "Javier González-Galindo" . - a , - schema1:ImageObject, - schema1:MediaObject ; - schema1:contentSize 34311 . + a schema1:Organization, + schema1:Project ; + schema1:name "Sydney Informatics Hub" . - a schema1:Dataset ; - dct:conformsTo , - ; - schema1:author , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - ; - schema1:description "Dating the most recent common ancestor (MRCA) of SARS-CoV-2. The workflow is used to extract full length sequences of SARS-CoV-2, tidy up their names in FASTA files, produce a multiple sequences alignment and compute a maximum likelihood tree. More info can be found at https://covid19.galaxyproject.org/genomics/" ; - schema1:hasPart , - , - ; - schema1:identifier "https://workflowhub.eu/workflows/6?version=1" ; - schema1:license "MIT" ; - schema1:mainEntity ; - schema1:name "Research Object Crate for Genomics - MRCA analysis" ; - schema1:sdDatePublished "2024-06-17 10:59:52 +0100" ; - schema1:url "https://workflowhub.eu/workflows/6/ro_crate?version=1" . + a schema1:Person ; + schema1:affiliation ; + schema1:email "ebernal.iacs@aragon.es" ; + schema1:name "Enrique Bernal-Delgado" . - a schema1:HowTo, - schema1:MediaObject, - schema1:SoftwareSourceCode ; - schema1:contentSize 3760 ; - schema1:programmingLanguage . + a schema1:Person ; + schema1:affiliation ; + schema1:email "festupinnan.iacs@aragon.es" ; + schema1:name "Francisco Estupiñán-Romero" . - a schema1:MediaObject, - schema1:SoftwareSourceCode, - ; - dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; - schema1:contentSize 17202 ; - schema1:creator , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - , - ; - schema1:dateCreated "2020-04-10T12:44:38Z" ; - schema1:dateModified "2023-01-16T13:39:57Z" ; - schema1:description "Dating the most recent common ancestor (MRCA) of SARS-CoV-2. 
The workflow is used to extract full length sequences of SARS-CoV-2, tidy up their names in FASTA files, produce a multiple sequences alignment and compute a maximum likelihood tree. More info can be found at https://covid19.galaxyproject.org/genomics/" ; - schema1:image ; - schema1:keywords "covid-19" ; - schema1:license "https://spdx.org/licenses/MIT" ; - schema1:name "Genomics - MRCA analysis" ; - schema1:producer ; - schema1:programmingLanguage ; - schema1:sdPublisher ; - schema1:subjectOf ; - schema1:url "https://workflowhub.eu/workflows/6?version=1" ; - schema1:version 1 ; - ns1:input . + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-8172-8981" ; + schema1:name "Jasper Koehorst" . - a , - schema1:ImageObject, - schema1:MediaObject ; - schema1:contentSize 7247 . + a schema1:Person ; + schema1:name "Jasper Koehorst" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Galaxy Training Network" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-9524-5964" ; + schema1:name "Bart Nijsse" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Cristian Tatu" . + + a schema1:Person ; + schema1:name "Bart Nijsse" . + + a schema1:Organization, + schema1:Project ; + schema1:name "UNLOCK" . + + a schema1:Organization, + schema1:Project ; + schema1:name "ERGA Assembly" . + + a schema1:WebSite ; + schema1:name "Java Compiled Object Code" . + + a schema1:Person ; + schema1:affiliation ; + schema1:email "Nina.VanGoethem@sciensano.be" ; + schema1:name "Nina Van Goethem" . + + a schema1:Person ; + schema1:affiliation ; + schema1:email "Marjan.Meurisse@sciensano.be" ; + schema1:name "Marjan Meurisse" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-4929-1219" ; + schema1:name "Laura Rodriguez-Navas" . + + a schema1:Person ; + schema1:name "Laura Rodriguez-Navas" . a schema1:Organization, schema1:Project ; schema1:name "GalaxyProject SARS-CoV-2" . 
+ a schema1:Collection ; + schema1:name "scRNAseq processing in galaxy" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-1248-3594" ; + schema1:name "Douglas Lowe" . + + a schema1:Person ; + schema1:name "Douglas Lowe" . + + a schema1:Person ; + schema1:name "Anna Syme" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-9906-0673" ; + schema1:name "Anna Syme" . + + a schema1:CreativeWork ; + schema1:name "Workflow Run Crate" ; + schema1:version "0.1" . + + a schema1:Organization, + schema1:Project ; + schema1:name "QCIF Bioinformatics" . + + a schema1:Person ; + schema1:name "Wolfgang Maier" . + + a schema1:CreativeWork ; + schema1:name "Process Run Crate" ; + schema1:version "0.1" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint , + , + ; + schema1:name "Ashish Bhawel", + "Raül Sirvent" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Galaxy Australia" . + + a schema1:WebSite ; + schema1:name "YAML" . + + a schema1:WebSite ; + schema1:name "Scalable Vector Graphics" . + + a schema1:Collection ; + schema1:name "BioCommons ‘Bring Your Own Data’ Expansion Project" . + + a schema1:WebSite ; + schema1:name "JSON Data Interchange Format" . + + a schema1:Organization ; + schema1:name "Barcelona Supercomputing Center", + "Barcelona Supercomputing Centre" ; + schema1:url "https://www.bsc.es/" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Australian BioCommons" . + + a schema1:Collection ; + schema1:name "Tutorials for BioExcel Building Blocks (BioBB)" . + + a schema1:Organization ; + schema1:name "VGP" . + + a schema1:Collection ; + schema1:name "Interactive Jupyter Notebooks for FAIR and reproducible biomolecular simulation workflows" . + + a schema1:Person ; + schema1:name "Lucille Delisle" . + + a schema1:ComputerLanguage ; + schema1:alternateName "Jupyter" ; + schema1:identifier ; + schema1:name "Jupyter Notebook" ; + schema1:url . 
+ + a schema1:Person ; + schema1:name "Adam Hospital" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-0513-0288" ; + schema1:name "Genís Bayarri" . + + a schema1:Person ; + schema1:name "Genís Bayarri" . + + a schema1:Organization, + schema1:Project ; + schema1:name "BioBB Building Blocks" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:identifier "https://orcid.org/0000-0002-8291-8071" ; + schema1:name "Adam Hospital" . + +ns2:PlanemoEngine a schema1:SoftwareApplication ; + schema1:name "Planemo" ; + schema1:url . + +ns2:GithubService a ns2:TestService ; + schema1:name "Github Actions" ; + schema1:url . + + a schema1:Organization, + schema1:Project ; + schema1:name "nf-core" . + a schema1:Organization ; schema1:name "WorkflowHub" ; schema1:url "https://about.workflowhub.eu/" . + a schema1:CreativeWork ; + schema1:name "Workflow RO-Crate" ; + schema1:version "1.0" . + diff --git a/poetry.lock b/poetry.lock new file mode 100644 index 0000000..93f6511 --- /dev/null +++ b/poetry.lock @@ -0,0 +1,1404 @@ +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. + +[[package]] +name = "appdirs" +version = "1.4.4" +description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
+optional = false +python-versions = "*" +files = [ + {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"}, + {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"}, +] + +[[package]] +name = "arcp" +version = "0.2.1" +description = "arcp (Archive and Package) URI parser and generator" +optional = false +python-versions = "*" +files = [ + {file = "arcp-0.2.1-py2.py3-none-any.whl", hash = "sha256:4e09b2d8a9fc3fda7ec112b553498ff032ea7de354e27dbeb1acc53667122444"}, + {file = "arcp-0.2.1.tar.gz", hash = "sha256:5c17ac7972c9ef82979cc2caf2b3a87c1aefd3fefe9adb8a5dd728ada57715dd"}, +] + +[[package]] +name = "argparse-dataclass" +version = "2.0.0" +description = "Declarative CLIs with argparse and dataclasses" +optional = false +python-versions = ">=3.8" +files = [ + {file = "argparse_dataclass-2.0.0-py3-none-any.whl", hash = "sha256:3ffc8852a88d9d98d1364b4441a712491320afb91fb56049afd8a51d74bb52d2"}, + {file = "argparse_dataclass-2.0.0.tar.gz", hash = "sha256:09ab641c914a2f12882337b9c3e5086196dbf2ee6bf0ef67895c74002cc9297f"}, +] + +[[package]] +name = "astroid" +version = "3.2.2" +description = "An abstract syntax tree for Python with inference support." 
+optional = false +python-versions = ">=3.8.0" +files = [ + {file = "astroid-3.2.2-py3-none-any.whl", hash = "sha256:e8a0083b4bb28fcffb6207a3bfc9e5d0a68be951dd7e336d5dcf639c682388c0"}, + {file = "astroid-3.2.2.tar.gz", hash = "sha256:8ead48e31b92b2e217b6c9733a21afafe479d52d6e164dd25fb1a770c7c3cf94"}, +] + +[[package]] +name = "attrs" +version = "23.2.0" +description = "Classes Without Boilerplate" +optional = false +python-versions = ">=3.7" +files = [ + {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"}, + {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"}, +] + +[package.extras] +cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] +dev = ["attrs[tests]", "pre-commit"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] +tests = ["attrs[tests-no-zope]", "zope-interface"] +tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] +tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] + +[[package]] +name = "certifi" +version = "2024.7.4" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, + {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, +] + +[[package]] +name = "charset-normalizer" +version = "3.3.2" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
+optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = 
"sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, + {file = 
"charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, + {file = 
"charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, + {file = 
"charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, + {file = 
"charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = 
"sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, + {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, +] + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "conda-inject" +version = "1.3.2" +description = "Helper functions for injecting a conda environment into the current python environment (by modifying sys.path, without actually changing the current python environment)." 
+optional = false +python-versions = "<4.0,>=3.9" +files = [ + {file = "conda_inject-1.3.2-py3-none-any.whl", hash = "sha256:6e641b408980c2814e3e527008c30749117909a21ff47392f07ef807da93a564"}, + {file = "conda_inject-1.3.2.tar.gz", hash = "sha256:0b8cde8c47998c118d8ff285a04977a3abcf734caf579c520fca469df1cd0aac"}, +] + +[package.dependencies] +pyyaml = ">=6.0,<7.0" + +[[package]] +name = "configargparse" +version = "1.7" +description = "A drop-in replacement for argparse that allows options to also be set via config files and/or environment variables." +optional = false +python-versions = ">=3.5" +files = [ + {file = "ConfigArgParse-1.7-py3-none-any.whl", hash = "sha256:d249da6591465c6c26df64a9f73d2536e743be2f244eb3ebe61114af2f94f86b"}, + {file = "ConfigArgParse-1.7.tar.gz", hash = "sha256:e7067471884de5478c58a511e529f0f9bd1c66bfef1dea90935438d6c23306d1"}, +] + +[package.extras] +test = ["PyYAML", "mock", "pytest"] +yaml = ["PyYAML"] + +[[package]] +name = "connection-pool" +version = "0.0.3" +description = "thread safe connection pool" +optional = false +python-versions = "*" +files = [ + {file = "connection_pool-0.0.3.tar.gz", hash = "sha256:bf429e7aef65921c69b4ed48f3d48d3eac1383b05d2df91884705842d974d0dc"}, +] + +[[package]] +name = "datrie" +version = "0.8.2" +description = "Super-fast, efficiently stored Trie for Python." 
+optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "datrie-0.8.2-cp27-cp27m-macosx_10_7_x86_64.whl", hash = "sha256:53969643e2794c37f024d5edaa42d5e6e2627d9937ddcc18d99128e9df700e4c"}, + {file = "datrie-0.8.2-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:6c9b333035312b79e6e9a10356d033e3d29aadbae6365007f706c854b3a94674"}, + {file = "datrie-0.8.2-cp27-cp27m-win32.whl", hash = "sha256:c783e2c1e28964b2b045a951eb9606833a188c4bd4a780da68d22f557e03e429"}, + {file = "datrie-0.8.2-cp27-cp27m-win_amd64.whl", hash = "sha256:f826e843138698501cbf1a21233f724b851b1e475fad532b638ac5904e115f10"}, + {file = "datrie-0.8.2-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:bf5c956c0a9a9d0f07e3c8923746279171096de18a8a51685e22d9817f8755a6"}, + {file = "datrie-0.8.2-cp34-cp34m-manylinux1_x86_64.whl", hash = "sha256:2de594d84a2f43a09ddc15316a8afd48aae0fdc456f9279d0940aa59c473d9d5"}, + {file = "datrie-0.8.2-cp35-cp35m-macosx_10_6_x86_64.whl", hash = "sha256:651c63325056347b86c5de7ffeea8529230a5787c61ee6dcabc5b6c644bd3252"}, + {file = "datrie-0.8.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:0e3b76676abbae2368cce6bf605bb0ba7cfd11f2c420b96d67959f353d5d423f"}, + {file = "datrie-0.8.2-cp35-cp35m-win32.whl", hash = "sha256:3a3e360a765cc95410898dc222f8585ea1b1bba0538a1af4d8630a5bc3ad6ee7"}, + {file = "datrie-0.8.2-cp35-cp35m-win_amd64.whl", hash = "sha256:fa9f39ac88dc6286672b9dd286fe459646da48133c877a927af24803eaea441e"}, + {file = "datrie-0.8.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b6fd6c7c149b410a87d46072c1c98f6e87ec557802e1d0e09db7b858746e8550"}, + {file = "datrie-0.8.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:327d9c17efaebc66d1956dca047b76fdd0e5b989d63cb55b9038ec09d8769089"}, + {file = "datrie-0.8.2-cp36-cp36m-win32.whl", hash = "sha256:ee7cd8470a982356e104e62148f2dbe2d3e17545cafaa3ada29f2548984f1e89"}, + {file = "datrie-0.8.2-cp36-cp36m-win_amd64.whl", hash = 
"sha256:31e316ba305cdd7b8a42f8e4af5a0a15a628aee270d2f392c41329a709eeda6d"}, + {file = "datrie-0.8.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe04704eb41b8440ca61416d3670ca6ddeea847d19731cf121889bac2962d07"}, + {file = "datrie-0.8.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:e1d704ee4fdc03f02d7dacc4d92052dbd490dba551509fccfd8ee52c9039d4ad"}, + {file = "datrie-0.8.2-cp37-cp37m-win32.whl", hash = "sha256:25e9e07ecfceaef78d23bde8d7278e4d6f63e1e3dc5ac00ccb4bec3062f0a8e0"}, + {file = "datrie-0.8.2-cp37-cp37m-win_amd64.whl", hash = "sha256:bf9f34f7c63797219b32713b561c4f94e777ff6c22beecfcd6bdf6b6c25b8518"}, + {file = "datrie-0.8.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e0582435a4adef1a2fce53aeedb656bf769b0f113b524f98be51d3e3d40720cb"}, + {file = "datrie-0.8.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:b2d80fa687173cb8f8bae224ef00d1ad6bda8f8597bbb1a63f85182c7d91aeb3"}, + {file = "datrie-0.8.2-cp38-cp38-win32.whl", hash = "sha256:67603594f5db5c0029b1cf86a08e89cde015fe64cf0c4ae4e539c61114396729"}, + {file = "datrie-0.8.2-cp38-cp38-win_amd64.whl", hash = "sha256:f61cf2726f04c08828bfb4e7af698b0b16bdf2777c3993d042f2898b8e118f21"}, + {file = "datrie-0.8.2-pp273-pypy_73-win32.whl", hash = "sha256:b07bd5fdfc3399a6dab86d6e35c72b1dbd598e80c97509c7c7518ab8774d3fda"}, + {file = "datrie-0.8.2-pp373-pypy36_pp73-win32.whl", hash = "sha256:89ff3d41df4f899387aa07b4b066f5da36e3a10b67b8aeae631c950502ff4503"}, + {file = "datrie-0.8.2.tar.gz", hash = "sha256:525b08f638d5cf6115df6ccd818e5a01298cd230b2dac91c8ff2e6499d18765d"}, +] + +[[package]] +name = "dill" +version = "0.3.8" +description = "serialize all of Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"}, + {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"}, +] + +[package.extras] +graph = ["objgraph (>=1.7.2)"] 
+profile = ["gprof2dot (>=2022.7.29)"] + +[[package]] +name = "docutils" +version = "0.21.2" +description = "Docutils -- Python Documentation Utilities" +optional = false +python-versions = ">=3.9" +files = [ + {file = "docutils-0.21.2-py3-none-any.whl", hash = "sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2"}, + {file = "docutils-0.21.2.tar.gz", hash = "sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f"}, +] + +[[package]] +name = "dpath" +version = "2.2.0" +description = "Filesystem-like pathing and searching for dictionaries" +optional = false +python-versions = ">=3.7" +files = [ + {file = "dpath-2.2.0-py3-none-any.whl", hash = "sha256:b330a375ded0a0d2ed404440f6c6a715deae5313af40bbb01c8a41d891900576"}, + {file = "dpath-2.2.0.tar.gz", hash = "sha256:34f7e630dc55ea3f219e555726f5da4b4b25f2200319c8e6902c394258dd6a3e"}, +] + +[[package]] +name = "fastjsonschema" +version = "2.20.0" +description = "Fastest Python implementation of JSON schema" +optional = false +python-versions = "*" +files = [ + {file = "fastjsonschema-2.20.0-py3-none-any.whl", hash = "sha256:5875f0b0fa7a0043a91e93a9b8f793bcbbba9691e7fd83dca95c28ba26d21f0a"}, + {file = "fastjsonschema-2.20.0.tar.gz", hash = "sha256:3d48fc5300ee96f5d116f10fe6f28d938e6008f59a6a025c2649475b87f76a23"}, +] + +[package.extras] +devel = ["colorama", "json-spec", "jsonschema", "pylint", "pytest", "pytest-benchmark", "pytest-cache", "validictory"] + +[[package]] +name = "gitdb" +version = "4.0.11" +description = "Git Object Database" +optional = false +python-versions = ">=3.7" +files = [ + {file = "gitdb-4.0.11-py3-none-any.whl", hash = "sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4"}, + {file = "gitdb-4.0.11.tar.gz", hash = "sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b"}, +] + +[package.dependencies] +smmap = ">=3.0.1,<6" + +[[package]] +name = "gitpython" +version = "3.1.43" +description = "GitPython is a Python library 
used to interact with Git repositories" +optional = false +python-versions = ">=3.7" +files = [ + {file = "GitPython-3.1.43-py3-none-any.whl", hash = "sha256:eec7ec56b92aad751f9912a73404bc02ba212a23adb2c7098ee668417051a1ff"}, + {file = "GitPython-3.1.43.tar.gz", hash = "sha256:35f314a9f878467f5453cc1fee295c3e18e52f1b99f10f6cf5b1682e968a9e7c"}, +] + +[package.dependencies] +gitdb = ">=4.0.1,<5" + +[package.extras] +doc = ["sphinx (==4.3.2)", "sphinx-autodoc-typehints", "sphinx-rtd-theme", "sphinxcontrib-applehelp (>=1.0.2,<=1.0.4)", "sphinxcontrib-devhelp (==1.0.2)", "sphinxcontrib-htmlhelp (>=2.0.0,<=2.0.1)", "sphinxcontrib-qthelp (==1.0.3)", "sphinxcontrib-serializinghtml (==1.1.5)"] +test = ["coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", "pytest (>=7.3.1)", "pytest-cov", "pytest-instafail", "pytest-mock", "pytest-sugar", "typing-extensions"] + +[[package]] +name = "humanfriendly" +version = "10.0" +description = "Human friendly output for text interfaces using Python" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "humanfriendly-10.0-py2.py3-none-any.whl", hash = "sha256:1697e1a8a8f550fd43c2865cd84542fc175a61dcb779b6fee18cf6b6ccba1477"}, + {file = "humanfriendly-10.0.tar.gz", hash = "sha256:6b0b831ce8f15f7300721aa49829fc4e83921a9a301cc7f606be6686a2288ddc"}, +] + +[package.dependencies] +pyreadline3 = {version = "*", markers = "sys_platform == \"win32\" and python_version >= \"3.8\""} + +[[package]] +name = "idna" +version = "2.8" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "idna-2.8-py2.py3-none-any.whl", hash = "sha256:ea8b7f6188e6fa117537c3df7da9fc686d485087abf6ac197f9c46432f7e4a3c"}, + {file = "idna-2.8.tar.gz", hash = "sha256:c357b3f628cf53ae2c4c05627ecc484553142ca23264e593d327bcde5e9c3407"}, +] + +[[package]] +name = "immutables" +version = 
"0.20" +description = "Immutable Collections" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "immutables-0.20-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dea0ae4d7f31b145c18c16badeebc2f039d09411be4a8febb86e1244cf7f1ce0"}, + {file = "immutables-0.20-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2dd0dcef2f8d4523d34dbe1d2b7804b3d2a51fddbd104aad13f506a838a2ea15"}, + {file = "immutables-0.20-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:393dde58ffd6b4c089ffdf4cef5fe73dad37ce4681acffade5f5d5935ec23c93"}, + {file = "immutables-0.20-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1214b5a175df783662b7de94b4a82db55cc0ee206dd072fa9e279fb8895d8df"}, + {file = "immutables-0.20-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:2761e3dc2a6406943ce77b3505e9b3c1187846de65d7247548dc7edaa202fcba"}, + {file = "immutables-0.20-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2bcea81e7516bd823b4ed16f4f794531097888675be13e833b1cc946370d5237"}, + {file = "immutables-0.20-cp310-cp310-win32.whl", hash = "sha256:d828e7580f1fa203ddeab0b5e91f44bf95706e7f283ca9fbbcf0ae08f63d3084"}, + {file = "immutables-0.20-cp310-cp310-win_amd64.whl", hash = "sha256:380e2957ba3d63422b2f3fbbff0547c7bbe6479d611d3635c6411005a4264525"}, + {file = "immutables-0.20-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:532be32c7a25dae6cade28825c76d3004cf4d166a0bfacf04bda16056d59ba26"}, + {file = "immutables-0.20-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5302ce9c7827f8300f3dc34a695abb71e4a32bab09e65e5ad6e454785383347f"}, + {file = "immutables-0.20-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b51aec54b571ae466113509d4dc79a2808dc2ae9263b71fd6b37778cb49eb292"}, + {file = "immutables-0.20-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:47f56aea56e597ecf6631f24a4e26007b6a5f4fe30278b96eb90bc1f60506164"}, + {file = "immutables-0.20-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:085ac48ee3eef7baf070f181cae574489bbf65930a83ec5bbd65c9940d625db3"}, + {file = "immutables-0.20-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f063f53b5c0e8f541ae381f1d828f3d05bbed766a2d6c817f9218b8b37a4cb66"}, + {file = "immutables-0.20-cp311-cp311-win32.whl", hash = "sha256:b0436cc831b47e26bef637bcf143cf0273e49946cfb7c28c44486d70513a3080"}, + {file = "immutables-0.20-cp311-cp311-win_amd64.whl", hash = "sha256:5bb32aee1ea16fbb90f58f8bd96016bca87aba0a8e574e5fa218d0d83b142851"}, + {file = "immutables-0.20-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:4ba726b7a3a696b9d4b122fa2c956bc68e866f3df1b92765060c88c64410ff82"}, + {file = "immutables-0.20-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5a88adf1dcc9d8ab07dba5e74deefcd5b5e38bc677815cbf9365dc43b69f1f08"}, + {file = "immutables-0.20-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1009a4e00e2e69a9b40c2f1272795f5a06ad72c9bf4638594d518e9cbd7a721a"}, + {file = "immutables-0.20-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96899994842c37cf4b9d6d2bedf685aae7810bd73f1538f8cba5426e2d65cb85"}, + {file = "immutables-0.20-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a606410b2ccb6ae339c3f26cccc9a92bcb16dc06f935d51edfd8ca68cf687e50"}, + {file = "immutables-0.20-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e8e82754f72823085643a2c0e6a4c489b806613e94af205825fa81df2ba147a0"}, + {file = "immutables-0.20-cp312-cp312-win32.whl", hash = "sha256:525fb361bd7edc8a891633928d549713af8090c79c25af5cc06eb90b48cb3c64"}, + {file = "immutables-0.20-cp312-cp312-win_amd64.whl", hash = "sha256:a82afc3945e9ceb9bcd416dc4ed9b72f92760c42787e26de50610a8b81d48120"}, + {file = "immutables-0.20-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:f17f25f21e82a1c349a61191cfb13e442a348b880b74cb01b00e0d1e848b63f4"}, + {file = "immutables-0.20-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:65954eb861c61af48debb1507518d45ae7d594b4fba7282785a70b48c5f51f9b"}, + {file = "immutables-0.20-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62f8a7a22939278127b7a206d05679b268b9cf665437125625348e902617cbad"}, + {file = "immutables-0.20-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac86f4372f4cfaa00206c12472fd3a78753092279e0552b7e1880944d71b04fe"}, + {file = "immutables-0.20-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e771198edc11a9e02ffa693911b3918c6cde0b64ad2e6672b076dbe005557ad8"}, + {file = "immutables-0.20-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fc739fc07cff5df2e4f31addbd48660b5ac0da56e9f719f8bb45da8ddd632c63"}, + {file = "immutables-0.20-cp38-cp38-win32.whl", hash = "sha256:c086ccb44d9d3824b9bf816365d10b1b82837efc7119f8bab56bd7a27ed805a9"}, + {file = "immutables-0.20-cp38-cp38-win_amd64.whl", hash = "sha256:9cd2ee9c10bf00be3c94eb51854bc0b761326bd0a7ea0dad4272a3f182269ae6"}, + {file = "immutables-0.20-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d4f78cb748261f852953620ed991de74972446fd484ec69377a41e2f1a1beb75"}, + {file = "immutables-0.20-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d6449186ea91b7c17ec8e7bd9bf059858298b1db5c053f5d27de8eba077578ce"}, + {file = "immutables-0.20-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85dd9765b068f7beb297553fddfcf7f904bd58a184c520830a106a58f0c9bfb4"}, + {file = "immutables-0.20-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f349a7e0327b92dcefb863e49ace086f2f26e6689a4e022c98720c6e9696e763"}, + {file = "immutables-0.20-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e3a5462f6d3549bbf7d02ce929fb0cb6df9539445f0589105de4e8b99b906e69"}, + {file = 
"immutables-0.20-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cc51a01a64a6d2cd7db210a49ad010c2ac2e9e026745f23fd31e0784096dcfff"}, + {file = "immutables-0.20-cp39-cp39-win32.whl", hash = "sha256:83794712f0507416f2818edc63f84305358b8656a93e5b9e2ab056d9803c7507"}, + {file = "immutables-0.20-cp39-cp39-win_amd64.whl", hash = "sha256:2837b1078abc66d9f009bee9085cf62515d5516af9a5c9ea2751847e16efd236"}, + {file = "immutables-0.20.tar.gz", hash = "sha256:1d2f83e6a6a8455466cd97b9a90e2b4f7864648616dfa6b19d18f49badac3876"}, +] + +[package.extras] +test = ["flake8 (>=5.0,<6.0)", "mypy (>=1.4,<2.0)", "pycodestyle (>=2.9,<3.0)", "pytest (>=7.4,<8.0)"] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.7" +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "isodate" +version = "0.6.1" +description = "An ISO 8601 date/time/duration parser and formatter" +optional = false +python-versions = "*" +files = [ + {file = "isodate-0.6.1-py2.py3-none-any.whl", hash = "sha256:0751eece944162659049d35f4f549ed815792b38793f07cf73381c1c87cbed96"}, + {file = "isodate-0.6.1.tar.gz", hash = "sha256:48c5881de7e8b0a0d648cb024c8062dc84e7b840ed81e864c7614fd3c127bde9"}, +] + +[package.dependencies] +six = "*" + +[[package]] +name = "isort" +version = "5.13.2" +description = "A Python utility / library to sort Python imports." 
+optional = false +python-versions = ">=3.8.0" +files = [ + {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, + {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, +] + +[package.extras] +colors = ["colorama (>=0.4.6)"] + +[[package]] +name = "jinja2" +version = "3.1.4" +description = "A very fast and expressive template engine." +optional = false +python-versions = ">=3.7" +files = [ + {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"}, + {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[[package]] +name = "jsonschema" +version = "4.22.0" +description = "An implementation of JSON Schema validation for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jsonschema-4.22.0-py3-none-any.whl", hash = "sha256:ff4cfd6b1367a40e7bc6411caec72effadd3db0bbe5017de188f2d6108335802"}, + {file = "jsonschema-4.22.0.tar.gz", hash = "sha256:5b22d434a45935119af990552c862e5d6d564e8f6601206b305a61fdf661a2b7"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +jsonschema-specifications = ">=2023.03.6" +referencing = ">=0.28.4" +rpds-py = ">=0.7.1" + +[package.extras] +format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] + +[[package]] +name = "jsonschema-specifications" +version = "2023.12.1" +description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" +optional = false +python-versions = ">=3.8" +files = [ + {file = 
"jsonschema_specifications-2023.12.1-py3-none-any.whl", hash = "sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c"}, + {file = "jsonschema_specifications-2023.12.1.tar.gz", hash = "sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc"}, +] + +[package.dependencies] +referencing = ">=0.31.0" + +[[package]] +name = "jupyter-core" +version = "5.7.2" +description = "Jupyter core package. A base package on which Jupyter projects rely." +optional = false +python-versions = ">=3.8" +files = [ + {file = "jupyter_core-5.7.2-py3-none-any.whl", hash = "sha256:4f7315d2f6b4bcf2e3e7cb6e46772eba760ae459cd1f59d29eb57b0a01bd7409"}, + {file = "jupyter_core-5.7.2.tar.gz", hash = "sha256:aa5f8d32bbf6b431ac830496da7392035d6f61b4f54872f15c4bd2a9c3f536d9"}, +] + +[package.dependencies] +platformdirs = ">=2.5" +pywin32 = {version = ">=300", markers = "sys_platform == \"win32\" and platform_python_implementation != \"PyPy\""} +traitlets = ">=5.3" + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "traitlets"] +test = ["ipykernel", "pre-commit", "pytest (<8)", "pytest-cov", "pytest-timeout"] + +[[package]] +name = "markupsafe" +version = "2.1.5" +description = "Safely add untrusted strings to HTML/XML markup." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"}, + {file = 
"MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"}, + {file = 
"MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = 
"sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"}, + {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, +] + +[[package]] +name = "mccabe" +version = "0.7.0" +description = "McCabe checker, plugin for flake8" +optional = false +python-versions = ">=3.6" +files = [ + {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, + {file = "mccabe-0.7.0.tar.gz", hash = 
"sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, +] + +[[package]] +name = "nbformat" +version = "5.10.4" +description = "The Jupyter Notebook format" +optional = false +python-versions = ">=3.8" +files = [ + {file = "nbformat-5.10.4-py3-none-any.whl", hash = "sha256:3b48d6c8fbca4b299bf3982ea7db1af21580e4fec269ad087b9e81588891200b"}, + {file = "nbformat-5.10.4.tar.gz", hash = "sha256:322168b14f937a5d11362988ecac2a4952d3d8e3a2cbeb2319584631226d5b3a"}, +] + +[package.dependencies] +fastjsonschema = ">=2.15" +jsonschema = ">=2.6" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" +traitlets = ">=5.1" + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"] +test = ["pep440", "pre-commit", "pytest", "testpath"] + +[[package]] +name = "packaging" +version = "24.1" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, + {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, +] + +[[package]] +name = "plac" +version = "1.4.3" +description = "The smartest command line arguments parser in the world" +optional = false +python-versions = "*" +files = [ + {file = "plac-1.4.3-py2.py3-none-any.whl", hash = "sha256:8a84fde8f950c9de6588a2d53c9deeac3ba1ddb456d887a33228460cf6549750"}, + {file = "plac-1.4.3.tar.gz", hash = "sha256:d4cb3387b2113a28aebd509433d0264a4e5d9bb7c1a86db4fbd0a8f11af74eb3"}, +] + +[[package]] +name = "platformdirs" +version = "4.2.2" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, + {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, +] + +[package.extras] +docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] +type = ["mypy (>=1.8)"] + +[[package]] +name = "pluggy" +version = "1.5.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "psutil" +version = "6.0.0" +description = "Cross-platform lib for process and system monitoring in Python." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +files = [ + {file = "psutil-6.0.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a021da3e881cd935e64a3d0a20983bda0bb4cf80e4f74fa9bfcb1bc5785360c6"}, + {file = "psutil-6.0.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:1287c2b95f1c0a364d23bc6f2ea2365a8d4d9b726a3be7294296ff7ba97c17f0"}, + {file = "psutil-6.0.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:a9a3dbfb4de4f18174528d87cc352d1f788b7496991cca33c6996f40c9e3c92c"}, + {file = "psutil-6.0.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:6ec7588fb3ddaec7344a825afe298db83fe01bfaaab39155fa84cf1c0d6b13c3"}, + {file = "psutil-6.0.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:1e7c870afcb7d91fdea2b37c24aeb08f98b6d67257a5cb0a8bc3ac68d0f1a68c"}, + {file = "psutil-6.0.0-cp27-none-win32.whl", hash = "sha256:02b69001f44cc73c1c5279d02b30a817e339ceb258ad75997325e0e6169d8b35"}, + {file = "psutil-6.0.0-cp27-none-win_amd64.whl", hash = "sha256:21f1fb635deccd510f69f485b87433460a603919b45e2a324ad65b0cc74f8fb1"}, + {file = "psutil-6.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:c588a7e9b1173b6e866756dde596fd4cad94f9399daf99ad8c3258b3cb2b47a0"}, + {file = "psutil-6.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ed2440ada7ef7d0d608f20ad89a04ec47d2d3ab7190896cd62ca5fc4fe08bf0"}, + {file = "psutil-6.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fd9a97c8e94059b0ef54a7d4baf13b405011176c3b6ff257c247cae0d560ecd"}, + {file = "psutil-6.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e8d0054fc88153ca0544f5c4d554d42e33df2e009c4ff42284ac9ebdef4132"}, + {file = "psutil-6.0.0-cp36-cp36m-win32.whl", hash = "sha256:fc8c9510cde0146432bbdb433322861ee8c3efbf8589865c8bf8d21cb30c4d14"}, + {file = "psutil-6.0.0-cp36-cp36m-win_amd64.whl", hash = 
"sha256:34859b8d8f423b86e4385ff3665d3f4d94be3cdf48221fbe476e883514fdb71c"}, + {file = "psutil-6.0.0-cp37-abi3-win32.whl", hash = "sha256:a495580d6bae27291324fe60cea0b5a7c23fa36a7cd35035a16d93bdcf076b9d"}, + {file = "psutil-6.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:33ea5e1c975250a720b3a6609c490db40dae5d83a4eb315170c4fe0d8b1f34b3"}, + {file = "psutil-6.0.0-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:ffe7fc9b6b36beadc8c322f84e1caff51e8703b88eee1da46d1e3a6ae11b4fd0"}, + {file = "psutil-6.0.0.tar.gz", hash = "sha256:8faae4f310b6d969fa26ca0545338b21f73c6b15db7c4a8d934a5482faa818f2"}, +] + +[package.extras] +test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] + +[[package]] +name = "pulp" +version = "2.8.0" +description = "PuLP is an LP modeler written in python. PuLP can generate MPS or LP files and call GLPK, COIN CLP/CBC, CPLEX, and GUROBI to solve linear problems." +optional = false +python-versions = ">=3.7" +files = [ + {file = "PuLP-2.8.0-py3-none-any.whl", hash = "sha256:4a19814a5b0a4392d788ac2315263435293579b0583c3469943fe0c6a586f263"}, + {file = "PuLP-2.8.0.tar.gz", hash = "sha256:4903bf96110bbab8ed2c68533f90565ebb76aa367d9e4df38e51bf727927c125"}, +] + +[[package]] +name = "pylint" +version = "3.2.4" +description = "python code static checker" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "pylint-3.2.4-py3-none-any.whl", hash = "sha256:43b8ffdf1578e4e4439fa1f6ace402281f5dd61999192280fa12fe411bef2999"}, + {file = "pylint-3.2.4.tar.gz", hash = "sha256:5753d27e49a658b12a48c2883452751a2ecfc7f38594e0980beb03a6e77e6f86"}, +] + +[package.dependencies] +astroid = ">=3.2.2,<=3.3.0-dev0" +colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} +dill = [ + {version = ">=0.3.7", markers = "python_version >= \"3.12\""}, + {version = ">=0.3.6", markers = "python_version >= \"3.11\" and python_version < \"3.12\""}, +] +isort = ">=4.2.5,<5.13.0 || >5.13.0,<6" +mccabe = ">=0.6,<0.8" +platformdirs = ">=2.2.0" +tomlkit = 
">=0.10.1" + +[package.extras] +spelling = ["pyenchant (>=3.2,<4.0)"] +testutils = ["gitpython (>3)"] + +[[package]] +name = "pyparsing" +version = "3.1.2" +description = "pyparsing module - Classes and methods to define and execute parsing grammars" +optional = false +python-versions = ">=3.6.8" +files = [ + {file = "pyparsing-3.1.2-py3-none-any.whl", hash = "sha256:f9db75911801ed778fe61bb643079ff86601aca99fcae6345aa67292038fb742"}, + {file = "pyparsing-3.1.2.tar.gz", hash = "sha256:a1bac0ce561155ecc3ed78ca94d3c9378656ad4c94c1270de543f621420f94ad"}, +] + +[package.extras] +diagrams = ["jinja2", "railroad-diagrams"] + +[[package]] +name = "pyreadline3" +version = "3.4.1" +description = "A python implementation of GNU readline." +optional = false +python-versions = "*" +files = [ + {file = "pyreadline3-3.4.1-py3-none-any.whl", hash = "sha256:b0efb6516fd4fb07b45949053826a62fa4cb353db5be2bbb4a7aa1fdd1e345fb"}, + {file = "pyreadline3-3.4.1.tar.gz", hash = "sha256:6f3d1f7b8a31ba32b73917cefc1f28cc660562f39aea8646d30bd6eff21f7bae"}, +] + +[[package]] +name = "pytest" +version = "8.2.2" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-8.2.2-py3-none-any.whl", hash = "sha256:c434598117762e2bd304e526244f67bf66bbd7b5d6cf22138be51ff661980343"}, + {file = "pytest-8.2.2.tar.gz", hash = "sha256:de4bb8104e201939ccdc688b27a89a7be2079b22e2bd2b07f806b6ba71117977"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=1.5,<2.0" + +[package.extras] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pywin32" +version = "306" +description = "Python for Window Extensions" +optional = false +python-versions = "*" +files = [ + {file = "pywin32-306-cp310-cp310-win32.whl", hash = 
"sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"}, + {file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"}, + {file = "pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407"}, + {file = "pywin32-306-cp311-cp311-win_amd64.whl", hash = "sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e"}, + {file = "pywin32-306-cp311-cp311-win_arm64.whl", hash = "sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a"}, + {file = "pywin32-306-cp312-cp312-win32.whl", hash = "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b"}, + {file = "pywin32-306-cp312-cp312-win_amd64.whl", hash = "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e"}, + {file = "pywin32-306-cp312-cp312-win_arm64.whl", hash = "sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040"}, + {file = "pywin32-306-cp37-cp37m-win32.whl", hash = "sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65"}, + {file = "pywin32-306-cp37-cp37m-win_amd64.whl", hash = "sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36"}, + {file = "pywin32-306-cp38-cp38-win32.whl", hash = "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a"}, + {file = "pywin32-306-cp38-cp38-win_amd64.whl", hash = "sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"}, + {file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"}, + {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"}, +] + +[[package]] +name = "pyyaml" +version = "6.0.1" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.6" +files = [ + {file = 
"PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, + {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, + {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, + {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = 
"PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, + {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, + {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, + {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", 
hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, + {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, + {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, + {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, + {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, + {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, + {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, + {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, +] + +[[package]] +name = "rdflib" +version = "6.3.2" +description = "RDFLib is a Python library for working with RDF, a simple yet powerful language for representing information." 
+optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "rdflib-6.3.2-py3-none-any.whl", hash = "sha256:36b4e74a32aa1e4fa7b8719876fb192f19ecd45ff932ea5ebbd2e417a0247e63"}, + {file = "rdflib-6.3.2.tar.gz", hash = "sha256:72af591ff704f4caacea7ecc0c5a9056b8553e0489dd4f35a9bc52dbd41522e0"}, +] + +[package.dependencies] +isodate = ">=0.6.0,<0.7.0" +pyparsing = ">=2.1.0,<4" + +[package.extras] +berkeleydb = ["berkeleydb (>=18.1.0,<19.0.0)"] +html = ["html5lib (>=1.0,<2.0)"] +lxml = ["lxml (>=4.3.0,<5.0.0)"] +networkx = ["networkx (>=2.0.0,<3.0.0)"] + +[[package]] +name = "referencing" +version = "0.35.1" +description = "JSON Referencing + Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "referencing-0.35.1-py3-none-any.whl", hash = "sha256:eda6d3234d62814d1c64e305c1331c9a3a6132da475ab6382eaa997b21ee75de"}, + {file = "referencing-0.35.1.tar.gz", hash = "sha256:25b42124a6c8b632a425174f24087783efb348a6f1e0008e63cd4466fedf703c"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +rpds-py = ">=0.7.0" + +[[package]] +name = "requests" +version = "2.32.3" +description = "Python HTTP for Humans." +optional = false +python-versions = ">=3.8" +files = [ + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "reretry" +version = "0.11.8" +description = "An easy to use, but functional decorator for retrying on exceptions." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "reretry-0.11.8-py2.py3-none-any.whl", hash = "sha256:5ec1084cd9644271ee386d34cd5dd24bdb3e91d55961b076d1a31d585ad68a79"}, + {file = "reretry-0.11.8.tar.gz", hash = "sha256:f2791fcebe512ea2f1d153a2874778523a8064860b591cd90afc21a8bed432e3"}, +] + +[[package]] +name = "rpds-py" +version = "0.18.1" +description = "Python bindings to Rust's persistent data structures (rpds)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "rpds_py-0.18.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:d31dea506d718693b6b2cffc0648a8929bdc51c70a311b2770f09611caa10d53"}, + {file = "rpds_py-0.18.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:732672fbc449bab754e0b15356c077cc31566df874964d4801ab14f71951ea80"}, + {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a98a1f0552b5f227a3d6422dbd61bc6f30db170939bd87ed14f3c339aa6c7c9"}, + {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7f1944ce16401aad1e3f7d312247b3d5de7981f634dc9dfe90da72b87d37887d"}, + {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38e14fb4e370885c4ecd734f093a2225ee52dc384b86fa55fe3f74638b2cfb09"}, + {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08d74b184f9ab6289b87b19fe6a6d1a97fbfea84b8a3e745e87a5de3029bf944"}, + {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d70129cef4a8d979caa37e7fe957202e7eee8ea02c5e16455bc9808a59c6b2f0"}, + {file = "rpds_py-0.18.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ce0bb20e3a11bd04461324a6a798af34d503f8d6f1aa3d2aa8901ceaf039176d"}, + {file = "rpds_py-0.18.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:81c5196a790032e0fc2464c0b4ab95f8610f96f1f2fa3d4deacce6a79852da60"}, + {file = 
"rpds_py-0.18.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:f3027be483868c99b4985fda802a57a67fdf30c5d9a50338d9db646d590198da"}, + {file = "rpds_py-0.18.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:d44607f98caa2961bab4fa3c4309724b185b464cdc3ba6f3d7340bac3ec97cc1"}, + {file = "rpds_py-0.18.1-cp310-none-win32.whl", hash = "sha256:c273e795e7a0f1fddd46e1e3cb8be15634c29ae8ff31c196debb620e1edb9333"}, + {file = "rpds_py-0.18.1-cp310-none-win_amd64.whl", hash = "sha256:8352f48d511de5f973e4f2f9412736d7dea76c69faa6d36bcf885b50c758ab9a"}, + {file = "rpds_py-0.18.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6b5ff7e1d63a8281654b5e2896d7f08799378e594f09cf3674e832ecaf396ce8"}, + {file = "rpds_py-0.18.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8927638a4d4137a289e41d0fd631551e89fa346d6dbcfc31ad627557d03ceb6d"}, + {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:154bf5c93d79558b44e5b50cc354aa0459e518e83677791e6adb0b039b7aa6a7"}, + {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:07f2139741e5deb2c5154a7b9629bc5aa48c766b643c1a6750d16f865a82c5fc"}, + {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8c7672e9fba7425f79019db9945b16e308ed8bc89348c23d955c8c0540da0a07"}, + {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:489bdfe1abd0406eba6b3bb4fdc87c7fa40f1031de073d0cfb744634cc8fa261"}, + {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c20f05e8e3d4fc76875fc9cb8cf24b90a63f5a1b4c5b9273f0e8225e169b100"}, + {file = "rpds_py-0.18.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:967342e045564cef76dfcf1edb700b1e20838d83b1aa02ab313e6a497cf923b8"}, + {file = "rpds_py-0.18.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:2cc7c1a47f3a63282ab0f422d90ddac4aa3034e39fc66a559ab93041e6505da7"}, + {file = "rpds_py-0.18.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f7afbfee1157e0f9376c00bb232e80a60e59ed716e3211a80cb8506550671e6e"}, + {file = "rpds_py-0.18.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9e6934d70dc50f9f8ea47081ceafdec09245fd9f6032669c3b45705dea096b88"}, + {file = "rpds_py-0.18.1-cp311-none-win32.whl", hash = "sha256:c69882964516dc143083d3795cb508e806b09fc3800fd0d4cddc1df6c36e76bb"}, + {file = "rpds_py-0.18.1-cp311-none-win_amd64.whl", hash = "sha256:70a838f7754483bcdc830444952fd89645569e7452e3226de4a613a4c1793fb2"}, + {file = "rpds_py-0.18.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:3dd3cd86e1db5aadd334e011eba4e29d37a104b403e8ca24dcd6703c68ca55b3"}, + {file = "rpds_py-0.18.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:05f3d615099bd9b13ecf2fc9cf2d839ad3f20239c678f461c753e93755d629ee"}, + {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35b2b771b13eee8729a5049c976197ff58a27a3829c018a04341bcf1ae409b2b"}, + {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ee17cd26b97d537af8f33635ef38be873073d516fd425e80559f4585a7b90c43"}, + {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b646bf655b135ccf4522ed43d6902af37d3f5dbcf0da66c769a2b3938b9d8184"}, + {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19ba472b9606c36716062c023afa2484d1e4220548751bda14f725a7de17b4f6"}, + {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e30ac5e329098903262dc5bdd7e2086e0256aa762cc8b744f9e7bf2a427d3f8"}, + {file = "rpds_py-0.18.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d58ad6317d188c43750cb76e9deacf6051d0f884d87dc6518e0280438648a9ac"}, + {file = 
"rpds_py-0.18.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e1735502458621921cee039c47318cb90b51d532c2766593be6207eec53e5c4c"}, + {file = "rpds_py-0.18.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:f5bab211605d91db0e2995a17b5c6ee5edec1270e46223e513eaa20da20076ac"}, + {file = "rpds_py-0.18.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2fc24a329a717f9e2448f8cd1f960f9dac4e45b6224d60734edeb67499bab03a"}, + {file = "rpds_py-0.18.1-cp312-none-win32.whl", hash = "sha256:1805d5901779662d599d0e2e4159d8a82c0b05faa86ef9222bf974572286b2b6"}, + {file = "rpds_py-0.18.1-cp312-none-win_amd64.whl", hash = "sha256:720edcb916df872d80f80a1cc5ea9058300b97721efda8651efcd938a9c70a72"}, + {file = "rpds_py-0.18.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:c827576e2fa017a081346dce87d532a5310241648eb3700af9a571a6e9fc7e74"}, + {file = "rpds_py-0.18.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:aa3679e751408d75a0b4d8d26d6647b6d9326f5e35c00a7ccd82b78ef64f65f8"}, + {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0abeee75434e2ee2d142d650d1e54ac1f8b01e6e6abdde8ffd6eeac6e9c38e20"}, + {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed402d6153c5d519a0faf1bb69898e97fb31613b49da27a84a13935ea9164dfc"}, + {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:338dee44b0cef8b70fd2ef54b4e09bb1b97fc6c3a58fea5db6cc083fd9fc2724"}, + {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7750569d9526199c5b97e5a9f8d96a13300950d910cf04a861d96f4273d5b104"}, + {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:607345bd5912aacc0c5a63d45a1f73fef29e697884f7e861094e443187c02be5"}, + {file = "rpds_py-0.18.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:207c82978115baa1fd8d706d720b4a4d2b0913df1c78c85ba73fe6c5804505f0"}, + {file 
= "rpds_py-0.18.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:6d1e42d2735d437e7e80bab4d78eb2e459af48c0a46e686ea35f690b93db792d"}, + {file = "rpds_py-0.18.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:5463c47c08630007dc0fe99fb480ea4f34a89712410592380425a9b4e1611d8e"}, + {file = "rpds_py-0.18.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:06d218939e1bf2ca50e6b0ec700ffe755e5216a8230ab3e87c059ebb4ea06afc"}, + {file = "rpds_py-0.18.1-cp38-none-win32.whl", hash = "sha256:312fe69b4fe1ffbe76520a7676b1e5ac06ddf7826d764cc10265c3b53f96dbe9"}, + {file = "rpds_py-0.18.1-cp38-none-win_amd64.whl", hash = "sha256:9437ca26784120a279f3137ee080b0e717012c42921eb07861b412340f85bae2"}, + {file = "rpds_py-0.18.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:19e515b78c3fc1039dd7da0a33c28c3154458f947f4dc198d3c72db2b6b5dc93"}, + {file = "rpds_py-0.18.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a7b28c5b066bca9a4eb4e2f2663012debe680f097979d880657f00e1c30875a0"}, + {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:673fdbbf668dd958eff750e500495ef3f611e2ecc209464f661bc82e9838991e"}, + {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d960de62227635d2e61068f42a6cb6aae91a7fe00fca0e3aeed17667c8a34611"}, + {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:352a88dc7892f1da66b6027af06a2e7e5d53fe05924cc2cfc56495b586a10b72"}, + {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4e0ee01ad8260184db21468a6e1c37afa0529acc12c3a697ee498d3c2c4dcaf3"}, + {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4c39ad2f512b4041343ea3c7894339e4ca7839ac38ca83d68a832fc8b3748ab"}, + {file = "rpds_py-0.18.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:aaa71ee43a703c321906813bb252f69524f02aa05bf4eec85f0c41d5d62d0f4c"}, + {file = 
"rpds_py-0.18.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:6cd8098517c64a85e790657e7b1e509b9fe07487fd358e19431cb120f7d96338"}, + {file = "rpds_py-0.18.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:4adec039b8e2928983f885c53b7cc4cda8965b62b6596501a0308d2703f8af1b"}, + {file = "rpds_py-0.18.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:32b7daaa3e9389db3695964ce8e566e3413b0c43e3394c05e4b243a4cd7bef26"}, + {file = "rpds_py-0.18.1-cp39-none-win32.whl", hash = "sha256:2625f03b105328729f9450c8badda34d5243231eef6535f80064d57035738360"}, + {file = "rpds_py-0.18.1-cp39-none-win_amd64.whl", hash = "sha256:bf18932d0003c8c4d51a39f244231986ab23ee057d235a12b2684ea26a353590"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cbfbea39ba64f5e53ae2915de36f130588bba71245b418060ec3330ebf85678e"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:a3d456ff2a6a4d2adcdf3c1c960a36f4fd2fec6e3b4902a42a384d17cf4e7a65"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7700936ef9d006b7ef605dc53aa364da2de5a3aa65516a1f3ce73bf82ecfc7ae"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:51584acc5916212e1bf45edd17f3a6b05fe0cbb40482d25e619f824dccb679de"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:942695a206a58d2575033ff1e42b12b2aece98d6003c6bc739fbf33d1773b12f"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b906b5f58892813e5ba5c6056d6a5ad08f358ba49f046d910ad992196ea61397"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6f8e3fecca256fefc91bb6765a693d96692459d7d4c644660a9fff32e517843"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:7732770412bab81c5a9f6d20aeb60ae943a9b36dcd990d876a773526468e7163"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:bd1105b50ede37461c1d51b9698c4f4be6e13e69a908ab7751e3807985fc0346"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:618916f5535784960f3ecf8111581f4ad31d347c3de66d02e728de460a46303c"}, + {file = "rpds_py-0.18.1-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:17c6d2155e2423f7e79e3bb18151c686d40db42d8645e7977442170c360194d4"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6c4c4c3f878df21faf5fac86eda32671c27889e13570645a9eea0a1abdd50922"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:fab6ce90574645a0d6c58890e9bcaac8d94dff54fb51c69e5522a7358b80ab64"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:531796fb842b53f2695e94dc338929e9f9dbf473b64710c28af5a160b2a8927d"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:740884bc62a5e2bbb31e584f5d23b32320fd75d79f916f15a788d527a5e83644"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:998125738de0158f088aef3cb264a34251908dd2e5d9966774fdab7402edfab7"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e2be6e9dd4111d5b31ba3b74d17da54a8319d8168890fbaea4b9e5c3de630ae5"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0cee71bc618cd93716f3c1bf56653740d2d13ddbd47673efa8bf41435a60daa"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2c3caec4ec5cd1d18e5dd6ae5194d24ed12785212a90b37f5f7f06b8bedd7139"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = 
"sha256:27bba383e8c5231cd559affe169ca0b96ec78d39909ffd817f28b166d7ddd4d8"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:a888e8bdb45916234b99da2d859566f1e8a1d2275a801bb8e4a9644e3c7e7909"}, + {file = "rpds_py-0.18.1-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:6031b25fb1b06327b43d841f33842b383beba399884f8228a6bb3df3088485ff"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:48c2faaa8adfacefcbfdb5f2e2e7bdad081e5ace8d182e5f4ade971f128e6bb3"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:d85164315bd68c0806768dc6bb0429c6f95c354f87485ee3593c4f6b14def2bd"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6afd80f6c79893cfc0574956f78a0add8c76e3696f2d6a15bca2c66c415cf2d4"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa242ac1ff583e4ec7771141606aafc92b361cd90a05c30d93e343a0c2d82a89"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d21be4770ff4e08698e1e8e0bce06edb6ea0626e7c8f560bc08222880aca6a6f"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c45a639e93a0c5d4b788b2613bd637468edd62f8f95ebc6fcc303d58ab3f0a8"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:910e71711d1055b2768181efa0a17537b2622afeb0424116619817007f8a2b10"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b9bb1f182a97880f6078283b3505a707057c42bf55d8fca604f70dedfdc0772a"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:1d54f74f40b1f7aaa595a02ff42ef38ca654b1469bef7d52867da474243cc633"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = 
"sha256:8d2e182c9ee01135e11e9676e9a62dfad791a7a467738f06726872374a83db49"}, + {file = "rpds_py-0.18.1-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:636a15acc588f70fda1661234761f9ed9ad79ebed3f2125d44be0862708b666e"}, + {file = "rpds_py-0.18.1.tar.gz", hash = "sha256:dc48b479d540770c811fbd1eb9ba2bb66951863e448efec2e2c102625328e92f"}, +] + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "smart-open" +version = "7.0.4" +description = "Utils for streaming large files (S3, HDFS, GCS, Azure Blob Storage, gzip, bz2...)" +optional = false +python-versions = "<4.0,>=3.7" +files = [ + {file = "smart_open-7.0.4-py3-none-any.whl", hash = "sha256:4e98489932b3372595cddc075e6033194775165702887216b65eba760dfd8d47"}, + {file = "smart_open-7.0.4.tar.gz", hash = "sha256:62b65852bdd1d1d516839fcb1f6bc50cd0f16e05b4ec44b52f43d38bcb838524"}, +] + +[package.dependencies] +wrapt = "*" + +[package.extras] +all = ["azure-common", "azure-core", "azure-storage-blob", "boto3", "google-cloud-storage (>=2.6.0)", "paramiko", "requests", "zstandard"] +azure = ["azure-common", "azure-core", "azure-storage-blob"] +gcs = ["google-cloud-storage (>=2.6.0)"] +http = ["requests"] +s3 = ["boto3"] +ssh = ["paramiko"] +test = ["azure-common", "azure-core", "azure-storage-blob", "boto3", "google-cloud-storage (>=2.6.0)", "moto[server]", "paramiko", "pytest", "pytest-rerunfailures", "requests", "responses", "zstandard"] +webhdfs = ["requests"] +zst = ["zstandard"] + +[[package]] +name = "smmap" +version = "5.0.1" +description = "A pure Python implementation of a sliding window memory map manager" 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "smmap-5.0.1-py3-none-any.whl", hash = "sha256:e6d8668fa5f93e706934a62d7b4db19c8d9eb8cf2adbb75ef1b675aa332b69da"}, + {file = "smmap-5.0.1.tar.gz", hash = "sha256:dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62"}, +] + +[[package]] +name = "snakemake" +version = "8.14.0" +description = "Workflow management system to create reproducible and scalable data analyses" +optional = false +python-versions = ">=3.11" +files = [ + {file = "snakemake-8.14.0-py3-none-any.whl", hash = "sha256:4a7cff5fa6150097047a1a95d0825657789a00582c3cb278f53a669c1df067a4"}, + {file = "snakemake-8.14.0.tar.gz", hash = "sha256:f7ea6ce0fc7544ef4329fa808d6e2e1c1b3adff2b4658dcc096a41741a7e8d81"}, +] + +[package.dependencies] +appdirs = "*" +conda-inject = ">=1.3.1,<2.0" +configargparse = "*" +connection-pool = ">=0.0.3" +datrie = "*" +docutils = "*" +dpath = ">=2.1.6,<3.0.0" +gitpython = "*" +humanfriendly = "*" +immutables = "*" +jinja2 = ">=3.0,<4.0" +jsonschema = "*" +nbformat = "*" +packaging = "*" +psutil = "*" +pulp = ">=2.3.1,<2.9" +pyyaml = "*" +requests = ">=2.8.1,<3.0" +reretry = "*" +smart-open = ">=4.0,<8.0" +snakemake-interface-common = ">=1.17.0,<2.0" +snakemake-interface-executor-plugins = ">=9.1.0,<10.0" +snakemake-interface-report-plugins = ">=1.0.0,<2.0.0" +snakemake-interface-storage-plugins = ">=3.1.0,<4.0" +stopit = "*" +tabulate = "*" +throttler = "*" +toposort = ">=1.10,<2.0" +wrapt = "*" +yte = ">=1.5.1,<2.0" + +[package.extras] +messaging = ["slack-sdk"] +pep = ["eido", "peppy"] +reports = ["pygments"] + +[[package]] +name = "snakemake-interface-common" +version = "1.17.2" +description = "Common functions and classes for Snakemake and its plugins" +optional = false +python-versions = "<4.0,>=3.8" +files = [ + {file = "snakemake_interface_common-1.17.2-py3-none-any.whl", hash = "sha256:ca5043ee707c071d9fed7e659df4803e8eeeaf1fe0d0bdb6716deb0141208748"}, + {file = 
"snakemake_interface_common-1.17.2.tar.gz", hash = "sha256:7a2bba88df98c1a0a5cec89b835c62dd2e6e72c1fb8fd104fe73405c800b87c0"}, +] + +[package.dependencies] +argparse-dataclass = ">=2.0.0,<3.0.0" +ConfigArgParse = ">=1.7,<2.0" + +[[package]] +name = "snakemake-interface-executor-plugins" +version = "9.1.1" +description = "This package provides a stable interface for interactions between Snakemake and its executor plugins." +optional = false +python-versions = "<4.0,>=3.11" +files = [ + {file = "snakemake_interface_executor_plugins-9.1.1-py3-none-any.whl", hash = "sha256:8f164978829e11c821d31b380501a80956bff67023935d0320d4cc00c9b0aa3b"}, + {file = "snakemake_interface_executor_plugins-9.1.1.tar.gz", hash = "sha256:357c3b1d633b26241693a4e5ce291fbe198c03a54a30acfa86dd97dc252fa2c6"}, +] + +[package.dependencies] +argparse-dataclass = ">=2.0.0,<3.0.0" +snakemake-interface-common = ">=1.12.0,<2.0.0" +throttler = ">=1.2.2,<2.0.0" + +[[package]] +name = "snakemake-interface-report-plugins" +version = "1.0.0" +description = "The interface for Snakemake report plugins." +optional = false +python-versions = ">=3.11,<4.0" +files = [ + {file = "snakemake_interface_report_plugins-1.0.0-py3-none-any.whl", hash = "sha256:e39cf2f27a36bda788dd97ede8fd056f887e00dca2d14ffea91dbc696d1f17cd"}, + {file = "snakemake_interface_report_plugins-1.0.0.tar.gz", hash = "sha256:02311cdc4bebab2a1c28469b5e6d5c6ac6e9c66998ad4e4b3229f1472127490f"}, +] + +[package.dependencies] +snakemake-interface-common = ">=1.16.0,<2.0.0" + +[[package]] +name = "snakemake-interface-storage-plugins" +version = "3.2.2" +description = "This package provides a stable interface for interactions between Snakemake and its storage plugins." 
+optional = false +python-versions = "<4.0,>=3.11" +files = [ + {file = "snakemake_interface_storage_plugins-3.2.2-py3-none-any.whl", hash = "sha256:d06211b965c165db719cfe598e7aee3c153081ad6c79a3819380fbd2e4fe80c1"}, + {file = "snakemake_interface_storage_plugins-3.2.2.tar.gz", hash = "sha256:fc8a70ef5b1fd054bc64270925228e2054158da9bcf8fa8bd4be36d93a82678b"}, +] + +[package.dependencies] +reretry = ">=0.11.8,<0.12.0" +snakemake-interface-common = ">=1.12.0,<2.0.0" +throttler = ">=1.2.2,<2.0.0" +wrapt = ">=1.15.0,<2.0.0" + +[[package]] +name = "stopit" +version = "1.1.2" +description = "Timeout control decorator and context managers, raise any exception in another thread" +optional = false +python-versions = "*" +files = [ + {file = "stopit-1.1.2.tar.gz", hash = "sha256:f7f39c583fd92027bd9d06127b259aee7a5b7945c1f1fa56263811e1e766996d"}, +] + +[[package]] +name = "tabulate" +version = "0.9.0" +description = "Pretty-print tabular data" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"}, + {file = "tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"}, +] + +[package.extras] +widechars = ["wcwidth"] + +[[package]] +name = "throttler" +version = "1.2.2" +description = "Zero-dependency Python package for easy throttling with asyncio support" +optional = false +python-versions = "*" +files = [ + {file = "throttler-1.2.2-py3-none-any.whl", hash = "sha256:fc6ae612a2529e01110b32335af40375258b98e3b81232ec77cd07f51bf71392"}, + {file = "throttler-1.2.2.tar.gz", hash = "sha256:d54db406d98e1b54d18a9ba2b31ab9f093ac64a0a59d730c1cf7bb1cdfc94a58"}, +] + +[package.extras] +dev = ["aiohttp (>=3.8)", "codecov (>=2.1)", "flake8 (>=4.0)", "pytest (>=7.0)", "pytest-asyncio (>=0.16)", "pytest-cov (>=3.0)"] + +[[package]] +name = "tomlkit" +version = "0.12.5" +description = "Style preserving TOML library" 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "tomlkit-0.12.5-py3-none-any.whl", hash = "sha256:af914f5a9c59ed9d0762c7b64d3b5d5df007448eb9cd2edc8a46b1eafead172f"}, + {file = "tomlkit-0.12.5.tar.gz", hash = "sha256:eef34fba39834d4d6b73c9ba7f3e4d1c417a4e56f89a7e96e090dd0d24b8fb3c"}, +] + +[[package]] +name = "toposort" +version = "1.10" +description = "Implements a topological sort algorithm." +optional = false +python-versions = "*" +files = [ + {file = "toposort-1.10-py3-none-any.whl", hash = "sha256:cbdbc0d0bee4d2695ab2ceec97fe0679e9c10eab4b2a87a9372b929e70563a87"}, + {file = "toposort-1.10.tar.gz", hash = "sha256:bfbb479c53d0a696ea7402601f4e693c97b0367837c8898bc6471adfca37a6bd"}, +] + +[[package]] +name = "traitlets" +version = "5.14.3" +description = "Traitlets Python configuration system" +optional = false +python-versions = ">=3.8" +files = [ + {file = "traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f"}, + {file = "traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7"}, +] + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] +test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<8.2)", "pytest-mock", "pytest-mypy-testing"] + +[[package]] +name = "urllib3" +version = "2.2.2" +description = "HTTP library with thread-safe connection pooling, file post, and more." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, + {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "wrapt" +version = "1.16.0" +description = "Module for decorators, wrappers and monkey patching." +optional = false +python-versions = ">=3.6" +files = [ + {file = "wrapt-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4"}, + {file = "wrapt-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136"}, + {file = 
"wrapt-1.16.0-cp310-cp310-win32.whl", hash = "sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d"}, + {file = "wrapt-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2"}, + {file = "wrapt-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1a5db485fe2de4403f13fafdc231b0dbae5eca4359232d2efc79025527375b09"}, + {file = "wrapt-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75ea7d0ee2a15733684badb16de6794894ed9c55aa5e9903260922f0482e687d"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a452f9ca3e3267cd4d0fcf2edd0d035b1934ac2bd7e0e57ac91ad6b95c0c6389"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:43aa59eadec7890d9958748db829df269f0368521ba6dc68cc172d5d03ed8060"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72554a23c78a8e7aa02abbd699d129eead8b147a23c56e08d08dfc29cfdddca1"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d2efee35b4b0a347e0d99d28e884dfd82797852d62fcd7ebdeee26f3ceb72cf3"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6dcfcffe73710be01d90cae08c3e548d90932d37b39ef83969ae135d36ef3956"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:eb6e651000a19c96f452c85132811d25e9264d836951022d6e81df2fff38337d"}, + {file = "wrapt-1.16.0-cp311-cp311-win32.whl", hash = "sha256:66027d667efe95cc4fa945af59f92c5a02c6f5bb6012bff9e60542c74c75c362"}, + {file = "wrapt-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:aefbc4cb0a54f91af643660a0a150ce2c090d3652cf4052a5397fb2de549cd89"}, + {file = "wrapt-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5eb404d89131ec9b4f748fa5cfb5346802e5ee8836f57d516576e61f304f3b7b"}, + {file = 
"wrapt-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9090c9e676d5236a6948330e83cb89969f433b1943a558968f659ead07cb3b36"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94265b00870aa407bd0cbcfd536f17ecde43b94fb8d228560a1e9d3041462d73"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2058f813d4f2b5e3a9eb2eb3faf8f1d99b81c3e51aeda4b168406443e8ba809"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98b5e1f498a8ca1858a1cdbffb023bfd954da4e3fa2c0cb5853d40014557248b"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:14d7dc606219cdd7405133c713f2c218d4252f2a469003f8c46bb92d5d095d81"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:49aac49dc4782cb04f58986e81ea0b4768e4ff197b57324dcbd7699c5dfb40b9"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:418abb18146475c310d7a6dc71143d6f7adec5b004ac9ce08dc7a34e2babdc5c"}, + {file = "wrapt-1.16.0-cp312-cp312-win32.whl", hash = "sha256:685f568fa5e627e93f3b52fda002c7ed2fa1800b50ce51f6ed1d572d8ab3e7fc"}, + {file = "wrapt-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:dcdba5c86e368442528f7060039eda390cc4091bfd1dca41e8046af7c910dda8"}, + {file = "wrapt-1.16.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d462f28826f4657968ae51d2181a074dfe03c200d6131690b7d65d55b0f360f8"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a33a747400b94b6d6b8a165e4480264a64a78c8a4c734b62136062e9a248dd39"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3646eefa23daeba62643a58aac816945cadc0afaf21800a1421eeba5f6cfb9c"}, + {file = 
"wrapt-1.16.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ebf019be5c09d400cf7b024aa52b1f3aeebeff51550d007e92c3c1c4afc2a40"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0d2691979e93d06a95a26257adb7bfd0c93818e89b1406f5a28f36e0d8c1e1fc"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:1acd723ee2a8826f3d53910255643e33673e1d11db84ce5880675954183ec47e"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc57efac2da352a51cc4658878a68d2b1b67dbe9d33c36cb826ca449d80a8465"}, + {file = "wrapt-1.16.0-cp36-cp36m-win32.whl", hash = "sha256:da4813f751142436b075ed7aa012a8778aa43a99f7b36afe9b742d3ed8bdc95e"}, + {file = "wrapt-1.16.0-cp36-cp36m-win_amd64.whl", hash = "sha256:6f6eac2360f2d543cc875a0e5efd413b6cbd483cb3ad7ebf888884a6e0d2e966"}, + {file = "wrapt-1.16.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a0ea261ce52b5952bf669684a251a66df239ec6d441ccb59ec7afa882265d593"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bd2d7ff69a2cac767fbf7a2b206add2e9a210e57947dd7ce03e25d03d2de292"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9159485323798c8dc530a224bd3ffcf76659319ccc7bbd52e01e73bd0241a0c5"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a86373cf37cd7764f2201b76496aba58a52e76dedfaa698ef9e9688bfd9e41cf"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:73870c364c11f03ed072dda68ff7aea6d2a3a5c3fe250d917a429c7432e15228"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b935ae30c6e7400022b50f8d359c03ed233d45b725cfdd299462f41ee5ffba6f"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = 
"sha256:db98ad84a55eb09b3c32a96c576476777e87c520a34e2519d3e59c44710c002c"}, + {file = "wrapt-1.16.0-cp37-cp37m-win32.whl", hash = "sha256:9153ed35fc5e4fa3b2fe97bddaa7cbec0ed22412b85bcdaf54aeba92ea37428c"}, + {file = "wrapt-1.16.0-cp37-cp37m-win_amd64.whl", hash = "sha256:66dfbaa7cfa3eb707bbfcd46dab2bc6207b005cbc9caa2199bcbc81d95071a00"}, + {file = "wrapt-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1dd50a2696ff89f57bd8847647a1c363b687d3d796dc30d4dd4a9d1689a706f0"}, + {file = "wrapt-1.16.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:44a2754372e32ab315734c6c73b24351d06e77ffff6ae27d2ecf14cf3d229202"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e9723528b9f787dc59168369e42ae1c3b0d3fadb2f1a71de14531d321ee05b0"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbed418ba5c3dce92619656802cc5355cb679e58d0d89b50f116e4a9d5a9603e"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:941988b89b4fd6b41c3f0bfb20e92bd23746579736b7343283297c4c8cbae68f"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6a42cd0cfa8ffc1915aef79cb4284f6383d8a3e9dcca70c445dcfdd639d51267"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ca9b6085e4f866bd584fb135a041bfc32cab916e69f714a7d1d397f8c4891ca"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5e49454f19ef621089e204f862388d29e6e8d8b162efce05208913dde5b9ad6"}, + {file = "wrapt-1.16.0-cp38-cp38-win32.whl", hash = "sha256:c31f72b1b6624c9d863fc095da460802f43a7c6868c5dda140f51da24fd47d7b"}, + {file = "wrapt-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:490b0ee15c1a55be9c1bd8609b8cecd60e325f0575fc98f50058eae366e01f41"}, + {file = "wrapt-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:9b201ae332c3637a42f02d1045e1d0cccfdc41f1f2f801dafbaa7e9b4797bfc2"}, + {file = "wrapt-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2076fad65c6736184e77d7d4729b63a6d1ae0b70da4868adeec40989858eb3fb"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5cd603b575ebceca7da5a3a251e69561bec509e0b46e4993e1cac402b7247b8"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b47cfad9e9bbbed2339081f4e346c93ecd7ab504299403320bf85f7f85c7d46c"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8212564d49c50eb4565e502814f694e240c55551a5f1bc841d4fcaabb0a9b8a"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5f15814a33e42b04e3de432e573aa557f9f0f56458745c2074952f564c50e664"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db2e408d983b0e61e238cf579c09ef7020560441906ca990fe8412153e3b291f"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:edfad1d29c73f9b863ebe7082ae9321374ccb10879eeabc84ba3b69f2579d537"}, + {file = "wrapt-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed867c42c268f876097248e05b6117a65bcd1e63b779e916fe2e33cd6fd0d3c3"}, + {file = "wrapt-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:eb1b046be06b0fce7249f1d025cd359b4b80fc1c3e24ad9eca33e0dcdb2e4a35"}, + {file = "wrapt-1.16.0-py3-none-any.whl", hash = "sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1"}, + {file = "wrapt-1.16.0.tar.gz", hash = "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d"}, +] + +[[package]] +name = "yte" +version = "1.5.4" +description = "A YAML template engine with Python expressions" +optional = false +python-versions = ">=3.7" +files = [ + {file = "yte-1.5.4-py3-none-any.whl", hash = 
"sha256:14ccfcb57d60b7652041b606129851423805140b22f52f5152f7c2692cd7b905"}, + {file = "yte-1.5.4.tar.gz", hash = "sha256:d2d77e53eafca74f58234fcd3fea28cc0a719e4f3784911511e35e86594bc880"}, +] + +[package.dependencies] +dpath = ">=2.1,<3.0" +plac = ">=1.3.4,<2.0.0" +pyyaml = ">=6.0,<7.0" + +[metadata] +lock-version = "2.0" +python-versions = "^3.11" +content-hash = "a5d9f7005f925ae65fec076340d8ab32cdf23ce9c6955dee49fc993a5bcbc75d" diff --git a/pyproject.toml b/pyproject.toml index 2e93d06..562d2b6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,6 +16,8 @@ rdflib = "^6.3.2" requests = "^2.32.2" certifi = "^2024.2.2" pytest = ">=8.2.1" +snakemake = "^8.14.0" +urllib3 = "2.2.2" [tool.poetry.scripts] source-crates = "workflowhub_graph.source_crates:main" diff --git a/ro-crate-metadata/.snakemake_timestamp b/ro-crate-metadata/.snakemake_timestamp new file mode 100644 index 0000000..e69de29 diff --git a/ro-crate-metadata/Snakefile b/ro-crate-metadata/Snakefile new file mode 100644 index 0000000..08b8f45 --- /dev/null +++ b/ro-crate-metadata/Snakefile @@ -0,0 +1,101 @@ +from snakemake.io import directory + +VERSIONS = ['1'] +OUTPUT_DIRS = "data" +MERGED_FILE = "merged.ttl" +ro_crate_metadata_dir = "ro-crate-metadata/" + +rule all: + input: + "ro-crate-metadata" + +rule source_ro_crates: + output: + "created_files.json" + shell: + """ + # Create the output directory if it doesn't exist: + mkdir -p {OUTPUT_DIRS} + + # Add the current directory to PYTHONPATH, creating it if it doesn't exist + export PYTHONPATH="${{PYTHONPATH:+$PYTHONPATH:}}$(pwd)" + + # Run the source_crates script to download the RO Crate metadata, + # then check the output files and generate created_files.json: + + # - all versions of all workflows: + python workflowhub_graph/source_crates.py --prod --all-versions + python workflowhub_graph/check_outputs.py --versions {VERSIONS} --output-dir {OUTPUT_DIRS} + + # - all versions of first 10 workflows: + # python workflowhub_graph/source_crates.py 
--workflow-ids 1-20 --prod --all-versions + # python workflowhub_graph/check_outputs.py --workflow-ids 1-20 --versions {VERSIONS} --output-dir {OUTPUT_DIRS} + """ + +rule report_created_files: + input: + "created_files.json" + shell: + """ + echo "Files created:" + cat created_files.json + """ + +rule merge_files: + input: + "created_files.json" + output: + MERGED_FILE + run: + import json + import os + + # Load the list of created files: + with open("created_files.json") as f: + created_files = json.load(f) + + files_to_merge = [f"data/{os.path.basename(file)}" for file in created_files] + + # If no files are available to merge, raise an exception: + if not files_to_merge: + raise ValueError("No files in to merge in data directory.") + + file_patterns = " ".join(files_to_merge) + + # Merge the JSON-LD files into a single RDF graph and output as a TTL file + shell(f""" + python workflowhub_graph/merge.py {output[0]} -p "data/*.json" + """) + +rule create_ro_crate: + input: + MERGED_FILE + params: + workflow_file = "Snakefile" + output: + directory("ro-crate-metadata/") + shell: + """ + # Create a new virtual environment + python -m venv rocrate_env + + # Activate the virtual environment + source rocrate_env/bin/activate + + # Upgrade pip to avoid any potential issues + pip install --upgrade pip + + # pip uninstall urllib3 + + # Install required packages + pip install requests urllib3 rocrate rocrate-zenodo + + # Run the create_ro_crate script + python workflowhub_graph/create_ro_crate.py {input} {params.workflow_file} {output} + + # Deactivate the virtual environment + deactivate + + # Remove the virtual environment to clean up + rm -rf rocrate_env + """ diff --git a/ro-crate-metadata/merged.ttl b/ro-crate-metadata/merged.ttl new file mode 100644 index 0000000..05d8962 --- /dev/null +++ b/ro-crate-metadata/merged.ttl @@ -0,0 +1,239261 @@ +@prefix dct: . +@prefix ns1: . +@prefix ns2: . +@prefix rel: . +@prefix schema1: . +@prefix xsd: . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/MaxMTpc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/MinCountPerCell" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/MinGenesPerCell" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/genecount_qc_plot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/mito_qc_plot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/qc_anndata_object" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/top_genes_plot" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:Person ; + schema1:name "Anna Syme" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#Amel_4.5_scaffolds.fa.gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#amel_OGSv3.2.gff3.gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#forager.bw" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#forager_Amel4.5_accepted_hits.bam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_9" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "bruno.depaulakinoshita@bsc.es" ; + schema1:identifier "bruno.depaulakinoshita@bsc.es" ; + schema1:url "https://orcid.org/0000-0001-8250-4074" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about ; + schema1:author . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/myamber_to_pdb.pdb" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/mycanonical_fasta.fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/myextract_chain.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/myextract_model.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/myfix_altlocs.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/myfix_amides.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/myfix_backbone.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/myfix_chirality.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/myfix_side_chain.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/myfix_ssbonds.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/myleap_gen_top.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/myleap_gen_top.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/myleap_gen_top.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/mypdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/myreduce_remove_hydrogens.pdb" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/myremove_molecules.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/myremove_pdb_water.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/mysander_mdrun.cpout" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/mysander_mdrun.cprst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/mysander_mdrun.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/mysander_mdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/mysander_mdrun.mdinfo" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/mysander_mdrun.rst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/mystructure_check.json" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:endTime "2021-03-22" ; + schema1:name "COMPSs RO-Crate automatically generated for Python applications" ; + schema1:object . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_11" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_24" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_36" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_37" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_40" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_41" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_42" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_43" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_44" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_45" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_46" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_47" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_48" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_49" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_50" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_51" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_52" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_53" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_54" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_9" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/coding_seqs.fasta" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/genome.fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/non_coding_seqs.fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/bed_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/no_orf_seqs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/orf_seqs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/orf_seqs_prob" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/orf_seqs_prob_best" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/out_file1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/out_gtf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output_pos" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:PropertyValue ; + schema1:name "DISLIB_GPU_AVAILABLE" ; + schema1:value "True" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:PropertyValue ; + schema1:name "COMPSS_HOME" ; + schema1:value "/apps/GPP/COMPSs/3.3/" . 
+ + a schema1:PropertyValue ; + schema1:name "COMPSS_MASTER_NODE" ; + schema1:value "gs11r2b04" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_MPIRUN_TYPE" ; + schema1:value "impi" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_WORKER_INIT_TIMEOUT" ; + schema1:value "360000" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_WORKER_NODES" ; + schema1:value "" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_ACCOUNT" ; + schema1:value "bsc19" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_CPUS_PER_NODE" ; + schema1:value "112" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_END_TIME" ; + schema1:value "1720186659" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_GID" ; + schema1:value "50000" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_ID" ; + schema1:value "3644777" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NAME" ; + schema1:value "kmeans_california" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NODELIST" ; + schema1:value "gs11r2b04" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NUM_NODES" ; + schema1:value "1" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_PARTITION" ; + schema1:value "gpp" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_QOS" ; + schema1:value "gp_debug" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_START_TIME" ; + schema1:value "1720179459" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_UID" ; + schema1:value "4486" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_USER" ; + schema1:value "bsc019756" . + + a schema1:PropertyValue ; + schema1:name "SLURM_MEM_PER_CPU" ; + schema1:value "2000" . + + a schema1:PropertyValue ; + schema1:name "SLURM_SUBMIT_DIR" ; + schema1:value "/gpfs/scratch/bsc19/bsc019756/Housing_Clustering" . + + a schema1:PropertyValue ; + schema1:name "SLURM_SUBMIT_HOST" ; + schema1:value "glogin1" . 
+ + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_11" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_9" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/Ploidy" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_12" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/genome_size" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/max_depth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/transition_parameter" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/genome.fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/Repeats_output_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output_log" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output_masked_genome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output_repeat_catalog" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/seeds" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/sequences" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/merged_transcriptomes.fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/busco_sum" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/busco_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/out_bed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/out_cds" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/out_gff3" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/out_lo_cds" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/out_lo_pep" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/out_pep" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:PropertyValue ; + schema1:name "COMPSS_HOME" ; + schema1:value "/apps/GPP/COMPSs/3.3.1/" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_MASTER_NODE" ; + schema1:value "gs12r1b10" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_MPIRUN_TYPE" ; + schema1:value "impi" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_PYTHON_VERSION" ; + schema1:value "3.12.1" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_WORKER_INIT_TIMEOUT" ; + schema1:value "360000" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_WORKER_NODES" ; + schema1:value " gs12r1b50" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_ACCOUNT" ; + schema1:value "bsc19" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_CPUS_PER_NODE" ; + schema1:value "112(x2)" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_END_TIME" ; + schema1:value "1720783388" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_GID" ; + schema1:value "50000" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_ID" ; + schema1:value "3846825" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NAME" ; + schema1:value "COMPSs" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NODELIST" ; + schema1:value "gs12r1b[10,50]" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NUM_NODES" ; + schema1:value "2" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_PARTITION" ; + schema1:value "gpp" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_QOS" ; + schema1:value "gp_debug" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_START_TIME" ; + schema1:value "1720782788" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_UID" ; + schema1:value "2952" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_USER" ; + schema1:value "bsc019057" . 
+ + a schema1:PropertyValue ; + schema1:name "SLURM_MEM_PER_CPU" ; + schema1:value "2000" . + + a schema1:PropertyValue ; + schema1:name "SLURM_SUBMIT_DIR" ; + schema1:value "/gpfs/home/bsc/bsc019057/Tutorial_2024/lysozyme_in_water" . + + a schema1:PropertyValue ; + schema1:name "SLURM_SUBMIT_HOST" ; + schema1:value "glogin2" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/lineage" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/community" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/pure_culture" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/which_figures" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/figures" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/results" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/html_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output_feature_lengths" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output_short" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/conformer_output" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:PropertyValue ; + schema1:name "COMPSS_HOME" ; + schema1:value "/Users/rsirvent/opt/COMPSs/" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/lineage" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_15" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_9" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_12" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_25" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_36" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_37" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_38" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_40" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_41" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_42" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_43" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_44" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_45" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_9" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:PropertyValue ; + schema1:name "COMPSS_HOME" ; + schema1:value "/apps/GPP/COMPSs/Trunk/" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_MASTER_NODE" ; + schema1:value "gs18r2b72" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_MPIRUN_TYPE" ; + schema1:value "impi" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_PYTHON_VERSION" ; + schema1:value "3.12.1" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_WORKER_INIT_TIMEOUT" ; + schema1:value "360000" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_WORKER_NODES" ; + schema1:value "" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_ACCOUNT" ; + schema1:value "bsc19" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_CPUS_PER_NODE" ; + schema1:value "112" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_END_TIME" ; + schema1:value "1722587523" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_GID" ; + schema1:value "50000" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_ID" ; + schema1:value "4500562" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NAME" ; + schema1:value "matmul" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NODELIST" ; + schema1:value "gs18r2b72" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NUM_NODES" ; + schema1:value "1" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_PARTITION" ; + schema1:value "gpp" . 
+ + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_QOS" ; + schema1:value "gp_debug" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_START_TIME" ; + schema1:value "1722587223" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_UID" ; + schema1:value "2952" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_USER" ; + schema1:value "bsc019057" . + + a schema1:PropertyValue ; + schema1:name "SLURM_MEM_PER_CPU" ; + schema1:value "2000" . + + a schema1:PropertyValue ; + schema1:name "SLURM_SUBMIT_DIR" ; + schema1:value "/gpfs/home/bsc/bsc019057/WorkflowHub/matmul_java" . + + a schema1:PropertyValue ; + schema1:name "SLURM_SUBMIT_HOST" ; + schema1:value "glogin2" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/Ploidy" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/genome_size" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/max_depth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/transition_parameter" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:PropertyValue ; + schema1:name "COMPSS_HOME" ; + schema1:value "/apps/GPP/COMPSs/3.3/" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_MASTER_NODE" ; + schema1:value "gs11r2b04" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_MPIRUN_TYPE" ; + schema1:value "impi" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_WORKER_INIT_TIMEOUT" ; + schema1:value "360000" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_WORKER_NODES" ; + schema1:value "" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_ACCOUNT" ; + schema1:value "bsc19" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_CPUS_PER_NODE" ; + schema1:value "112" . 
+ + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_END_TIME" ; + schema1:value "1720186659" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_GID" ; + schema1:value "50000" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_ID" ; + schema1:value "3644777" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NAME" ; + schema1:value "kmeans_california" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NODELIST" ; + schema1:value "gs11r2b04" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NUM_NODES" ; + schema1:value "1" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_PARTITION" ; + schema1:value "gpp" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_QOS" ; + schema1:value "gp_debug" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_START_TIME" ; + schema1:value "1720179459" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_UID" ; + schema1:value "4486" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_USER" ; + schema1:value "bsc019756" . + + a schema1:PropertyValue ; + schema1:name "SLURM_MEM_PER_CPU" ; + schema1:value "2000" . + + a schema1:PropertyValue ; + schema1:name "SLURM_SUBMIT_DIR" ; + schema1:value "/gpfs/scratch/bsc19/bsc019756/Housing_Clustering" . + + a schema1:PropertyValue ; + schema1:name "SLURM_SUBMIT_HOST" ; + schema1:value "glogin1" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_6" . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:Person ; + schema1:name "Anna Syme" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:PropertyValue ; + schema1:name "COMPSS_BINDINGS_DEBUG" ; + schema1:value "1" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_HOME" ; + schema1:value "/apps/GPP/COMPSs/3.3.1/" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_MASTER_NODE" ; + schema1:value "gs05r2b06" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_MPIRUN_TYPE" ; + schema1:value "impi" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_PYTHON_VERSION" ; + schema1:value "3.12.1" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_WORKER_INIT_TIMEOUT" ; + schema1:value "360000" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_WORKER_NODES" ; + schema1:value " gs05r2b07 gs05r2b10" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_ACCOUNT" ; + schema1:value "bsc19" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_CPUS_PER_NODE" ; + schema1:value "112(x3)" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_END_TIME" ; + schema1:value "1718717219" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_GID" ; + schema1:value "50000" . 
+ + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_ID" ; + schema1:value "3166653" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NAME" ; + schema1:value "sparseLU_prov" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NODELIST" ; + schema1:value "gs05r2b[06-07,10]" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NUM_NODES" ; + schema1:value "3" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_PARTITION" ; + schema1:value "gpp" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_QOS" ; + schema1:value "gp_debug" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_START_TIME" ; + schema1:value "1718716919" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_UID" ; + schema1:value "2952" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_USER" ; + schema1:value "bsc019057" . + + a schema1:PropertyValue ; + schema1:name "SLURM_MEM_PER_CPU" ; + schema1:value "2000" . + + a schema1:PropertyValue ; + schema1:name "SLURM_SUBMIT_DIR" ; + schema1:value "/gpfs/home/bsc/bsc019057/COMPSs-DP/tutorial_apps/java/sparseLU" . + + a schema1:PropertyValue ; + schema1:name "SLURM_SUBMIT_HOST" ; + schema1:value "glogin2" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:PropertyValue ; + schema1:name "COMPSS_HOME" ; + schema1:value "/Users/rsirvent/opt/COMPSs/" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:Person ; + schema1:name "Sánchez, I." . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:PropertyValue ; + schema1:name "COMPSS_HOME" ; + schema1:value "/Users/rsirvent/opt/COMPSs/" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:endTime "2021-03-22" ; + schema1:name "COMPSs RO-Crate automatically generated for Python applications" ; + schema1:object . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "javier.conejero@bsc.es" ; + schema1:identifier "javier.conejero@bsc.es" ; + schema1:url "https://orcid.org/0000-0001-6401-6229" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "poiata@ipgp.fr" ; + schema1:identifier "poiata@ipgp.fr" ; + schema1:url "https://orcid.org/0000-0002-6412-3179" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "satriano@ipgp.fr" ; + schema1:identifier "satriano@ipgp.fr" ; + schema1:url "https://orcid.org/0000-0002-3039-2530" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:Person ; + schema1:name "Melero, R." . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/analysis_metadata" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/bamqc_html_normal" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/bamqc_html_tumor" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/bamqc_normal_genome_results" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/bamqc_tumor_genome_results" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/fastqc_html_normal_fw_after" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/fastqc_html_normal_fw_before" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/fastqc_html_normal_rv_after" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/fastqc_html_normal_rv_before" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/fastqc_html_tumor_fw_after" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/fastqc_html_tumor_fw_before" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/fastqc_html_tumor_rv_after" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/fastqc_html_tumor_rv_before" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/multiqc_html_post_trim" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/multiqc_html_pre_trim" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:PropertyValue ; + schema1:name "DISLIB_GPU_AVAILABLE" ; + schema1:value "True" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_14" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/bd_ensemble.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/bd_ensemble.mdcrd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/bd_ensemble.pcz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/bd_ensemble_rmsd.dat" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/bd_ensemble_rmsd.dcd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/bd_ensemble_uncompressed.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/bfactor_all.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/bfactor_all.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/dmd_ensemble.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/dmd_ensemble.mdcrd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/hinges_bfactor_report.json" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/mypdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/nma_ensemble.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/nma_ensemble.mdcrd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/pcz_collectivity.json" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/pcz_evecs.json" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/pcz_proj1.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/pcz_proj1.dcd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/pcz_report.json" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/pcz_stiffness.json" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/structure_ca.pdb" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:PropertyValue ; + schema1:name "COMPSS_HOME" ; + schema1:value "/apps/GPP/COMPSs/TrunkRSP/" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_MASTER_NODE" ; + schema1:value "gs13r3b15" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_MPIRUN_TYPE" ; + schema1:value "impi" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_PYTHON_VERSION" ; + schema1:value "3.12.1" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_WORKER_INIT_TIMEOUT" ; + schema1:value "360000" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_WORKER_NODES" ; + schema1:value " gs13r3b16" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_ACCOUNT" ; + schema1:value "bsc19" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_CPUS_PER_NODE" ; + schema1:value "112(x2)" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_END_TIME" ; + schema1:value "1721302183" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_GID" ; + schema1:value "50000" . 
+ + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_ID" ; + schema1:value "4055071" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NAME" ; + schema1:value "COMPSs" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NODELIST" ; + schema1:value "gs13r3b[15-16]" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NUM_NODES" ; + schema1:value "2" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_PARTITION" ; + schema1:value "gpp" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_QOS" ; + schema1:value "gp_debug" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_START_TIME" ; + schema1:value "1721300983" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_UID" ; + schema1:value "2952" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_USER" ; + schema1:value "bsc019057" . + + a schema1:PropertyValue ; + schema1:name "SLURM_MEM_PER_CPU" ; + schema1:value "2000" . + + a schema1:PropertyValue ; + schema1:name "SLURM_SUBMIT_DIR" ; + schema1:value "/gpfs/home/bsc/bsc019057/Tutorial_2024/lysozyme_in_water" . + + a schema1:PropertyValue ; + schema1:name "SLURM_SUBMIT_HOST" ; + schema1:value "glogin2" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/JBrowse" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/hisat2_summary_file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output_alignments" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output_gtf" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:PropertyValue ; + schema1:name "DISLIB_GPU_AVAILABLE" ; + schema1:value "True" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/Reel_life_survey_fish_modif.tabular" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_6" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_9" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:PropertyValue ; + schema1:name "DISLIB_GPU_AVAILABLE" ; + schema1:value "True" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/Sample" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/Contigs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/Groups" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_8" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:Person ; + schema1:name "Sánchez, I." . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/mygodmd_prep.aln" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/origin-target.godmd.dcd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/origin-target.godmd.ene.out" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/origin-target.godmd.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/origin-target.godmd.mdcrd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/origin-target.godmd.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/origin.chains.nolig.pdb" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/origin.chains.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/origin.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/target.chains.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/target.pdb" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/Sample" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:PropertyValue ; + schema1:name "COMPSS_HOME" ; + schema1:value "/apps/GPP/COMPSs/3.3.1/" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_MASTER_NODE" ; + schema1:value "gs05r2b62" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_MPIRUN_TYPE" ; + schema1:value "impi" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_PYTHON_VERSION" ; + schema1:value "3.12.1" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_WORKER_INIT_TIMEOUT" ; + schema1:value "360000" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_WORKER_NODES" ; + schema1:value " gs05r2b63" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_ACCOUNT" ; + schema1:value "bsc19" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_CPUS_PER_NODE" ; + schema1:value "112(x2)" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_END_TIME" ; + schema1:value "1720779730" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_GID" ; + schema1:value "50000" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_ID" ; + schema1:value "3844353" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NAME" ; + schema1:value "COMPSs" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NODELIST" ; + schema1:value "gs05r2b[62-63]" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NUM_NODES" ; + schema1:value "2" . 
+ + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_PARTITION" ; + schema1:value "gpp" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_QOS" ; + schema1:value "gp_debug" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_START_TIME" ; + schema1:value "1720779130" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_UID" ; + schema1:value "2952" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_USER" ; + schema1:value "bsc019057" . + + a schema1:PropertyValue ; + schema1:name "SLURM_MEM_PER_CPU" ; + schema1:value "2000" . + + a schema1:PropertyValue ; + schema1:name "SLURM_SUBMIT_DIR" ; + schema1:value "/gpfs/home/bsc/bsc019057/Tutorial_2024/lysozyme_in_water" . + + a schema1:PropertyValue ; + schema1:name "SLURM_SUBMIT_HOST" ; + schema1:value "glogin2" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:Place ; + schema1:geo _:b42 ; + schema1:name "Europe" . + + a schema1:PropertyValue ; + schema1:description "Pseudoid of the person included in the cohort" ; + schema1:name "person_id" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort an essential worker? (at the time of entering the cohort)" ; + schema1:name "essential_worker_bl" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort institutionalized? (at the time of entering the cohort)" ; + schema1:name "institutionalized_bl" . + + a schema1:PropertyValue ; + schema1:description "Brand of first dose of the vaccine" ; + schema1:name "dose_1_brand_cd" . + + a schema1:PropertyValue ; + schema1:description "Date of the first dose of the vaccine" ; + schema1:name "dose_1_dt" . + + a schema1:PropertyValue ; + schema1:description "Brand of second dose of the vaccine" ; + schema1:name "dose_2_brand_cd" . + + a schema1:PropertyValue ; + schema1:description "Date of the second dose of the vaccine" ; + schema1:name "dose_2_dt" . + + a schema1:PropertyValue ; + schema1:description "Brand of third dose of the vaccine" ; + schema1:name "dose_3_brand_cd" . + + a schema1:PropertyValue ; + schema1:description "Date of the third dose of the vaccine" ; + schema1:name "dose_3_dt" . + + a schema1:PropertyValue ; + schema1:description "Number of doses (independently of the vaccine brand) received by a person during the period of study" ; + schema1:name "number_doses" . + + a schema1:PropertyValue ; + schema1:description "Date on which the person included in the cohort is considered fully vaccinated" ; + schema1:name "fully_vaccinated_dt" . 
+ + a schema1:PropertyValue ; + schema1:description "Age of the person included in the cohort (at the time of entering the cohort)" ; + schema1:maxValue 115 ; + schema1:minValue 5 ; + schema1:name "age_nm" ; + schema1:unitText "years" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort fully vaccinated?" ; + schema1:name "fully_vaccinated_bl" . + + a schema1:PropertyValue ; + schema1:description "Vaccine types received for 1st and 2nd dose concatenated with hyphen" ; + schema1:name "vaccination_schedule_cd" . + + a schema1:PropertyValue ; + schema1:description "Date of diagnosis or test result positive of the last known/confirmed infection" ; + schema1:name "confirmed_case_dt" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort a confirmed COVID-19 case?" ; + schema1:name "confirmed_case_bl" . + + a schema1:PropertyValue ; + schema1:description "If the person experienced a confirmed infection proceeding the last confirmed infection with more than 60 days, date of diagnosis of this previous infection" ; + schema1:name "previous_infection_dt" . + + a schema1:PropertyValue ; + schema1:description "Did the person experience a confirmed infection proceeding the last confirmed infection?" ; + schema1:name "previous_infection_bl" . + + a schema1:PropertyValue ; + schema1:description "Type of COVID-19 test used to detect the last known SARS-CoV-2 infection (i.e. first positive test for SARS-CoV-2)" ; + schema1:name "test_type_cd" . + + a schema1:PropertyValue ; + schema1:description "Identified variant (WHO label) for last known SARS-CoV-2 infection" ; + schema1:name "variant_cd" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from type II diabetes (at the time of entering the cohort)?" ; + schema1:name "diabetes_bl" . 
+ + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from obesity (at the time of entering the cohort)? " ; + schema1:name "obesity_bl" . + + a schema1:PropertyValue ; + schema1:description "Sex of the person included in the cohort (at the time of entering the cohort)" ; + schema1:name "sex_cd" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from heart failure (at the time of entering the cohort)?" ; + schema1:name "heart_failure_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from chronic lung disease (at the time of entering the cohort)?" ; + schema1:name "copd_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from solid cancer without metastatis (at the time of entering the cohort)?" ; + schema1:name "solid_tumor_without_metastasis_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from chronic kidney disease (at the time of entering the cohort)?" ; + schema1:name "chronic_kidney_disease_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from sickle cell disease (at the time of entering the cohort)?" ; + schema1:name "sickle_cell_disease_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from hypertension (at the time of entering the cohort)?" ; + schema1:name "hypertension_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from blood cancer (leukemia) (at the time of entering the cohort)?" ; + schema1:name "blood_cancer_bl" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort a transplant patient (i.e. solid organ transplantation) (at the time of entering the cohort)?" 
; + schema1:name "transplanted_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from a HIV infection (at the time of entering the cohort)?" ; + schema1:name "hiv_infection_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from a primary immuno deficiency disease (lymphoma) (at the time of entering the cohort)?" ; + schema1:name "primary_immunodeficiency_bl" . + + a schema1:PropertyValue ; + schema1:description "Socioeconomic level of the person in the cohort (at the time of entering the cohort)" ; + schema1:name "socecon_lvl_cd" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from drugs induced immunosuppression (at the time of entering the cohort)?" ; + schema1:name "immunosuppression_bl" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort pregnant (at the time of entering the cohort)?" ; + schema1:name "pregnancy_bl" . + + a schema1:GeoShape ; + schema1:box "66.78398 11.50546 44.4272 -15.84884" . + + a schema1:PropertyValue ; + schema1:description "Area of residence of the person included in the cohort (NUTS 3) (at the time of entering the cohort)" ; + schema1:name "residence_area_cd" . + + a schema1:PropertyValue ; + schema1:description "Country of residence of the person included in the cohort (at the time of entering the cohort)" ; + schema1:name "country_cd" . + + a schema1:PropertyValue ; + schema1:description "Is the country of residence different from the country that is doing the analyses?" ; + schema1:name "foreign_bl" . + + a schema1:PropertyValue ; + schema1:description "Date of death of the person (if the person has died)" ; + schema1:name "exitus_dt" . + + a schema1:PropertyValue ; + schema1:description "Date of death of the person (if the person has died during the period of study)" ; + schema1:name "exitus_bl" . 
+ + a schema1:MediaObject ; + schema1:encodingFormat "" ; + schema1:name "Dockerfile" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/rproj" ; + schema1:name "Analytical pipeline R project file (RPROJ)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/pdf" ; + schema1:name "Documentation analytical pipeline (PDF)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/yml" ; + schema1:name ".yml file listing dependencies (YML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/csv" ; + schema1:name "Synthetic dataset with 10k entries (CSV)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/r" ; + schema1:name "Installation required R packages (R)" . + + a schema1:Dataset ; + schema1:description "Analytical pipeline output using synthetic data with 10k entries" ; + schema1:hasPart , + , + , + , + , + ; + schema1:name "Output analytical pipeline" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report with dataset statistics, missing data profiles, potential alerts, and detailed per-variable information (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report summarizing compliance with the logic validation rules (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report summarizing used imputation methods and number of imputed values (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report to assess matching balance in the obtained study population (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report including the results of survival analysis in the unmatched study population, a table with baseline characteristics of the matched study population and CONSORT diagram (HTML)" . 
+ + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report including the analytic results for the meta-analysis (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/r" ; + schema1:name "Upload the csv file and create a DuckDB database file (R)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Exploration and Data Quality Assessment (DQA) of the original data mapped at each Data Hub (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Validation (applying logic validation rules) of the original data mapped at each Data Hub (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Imputation of missing values in the original data mapped at each Data Hub (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Iterative matching of the exposed and unexposed, and assessing covariate balance in matched population (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/r" ; + schema1:name "Iterative matching of the exposed and unexposed (R)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Descriptive analysis of the matched and unmatched study population (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Survival analysis of the matched study population (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Quarto script sourcing consecutive scripts of the analytical pipeline (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Quarto document providing a log of the individual code block executions (HTML)" . + + a schema1:Dataset ; + schema1:description "Consecutive scripts of the analytical pipeline" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + ; + schema1:name "Scripts analytical pipeline" . 
+ + a schema1:MediaObject ; + schema1:description "Interactive report showcasing the structural causal model (DAG) to answer the research question" ; + schema1:encodingFormat "text/html" ; + schema1:name "COVID-19 vaccine effectiveness causal model v.1.1.0 (HTML)" . + + a schema1:MediaObject ; + schema1:description "Quarto RMarkdown script to produce the structural causal model" ; + schema1:encodingFormat , + "text/markdown" ; + schema1:name "COVID-19 vaccine effectiveness causal model v.1.1.0 (QMD)" . + + a schema1:Dataset ; + schema1:about ; + schema1:hasPart , + , + , + , + ; + schema1:name "Metadata for Common data model specification" . + + a schema1:MediaObject ; + dct:conformsTo ; + schema1:encodingFormat "text/csv" . + + a schema1:MediaObject ; + dct:conformsTo ; + schema1:encodingFormat "text/csv" . + + a schema1:MediaObject ; + dct:conformsTo ; + schema1:encodingFormat "text/csv" . + + a schema1:MediaObject ; + dct:conformsTo ; + schema1:encodingFormat "text/csv" . + + a schema1:MediaObject ; + schema1:description "Machine-readable version" ; + schema1:encodingFormat "application/ld+json" ; + schema1:mainEntity ; + schema1:name "COVID-19 vaccine effectiveness data model specification dataspice (JSON)" . + + a schema1:DataDownload, + schema1:MediaObject ; + schema1:description "synthetic data set to test the causal model proposed by the baseline use case in BY-COVID WP5 assessing the effectiveness of the COVID-19 vaccine(s)" ; + schema1:encodingFormat "text/csv" ; + schema1:name "vaccine_effectiveness_synthetic_pop_10k_v.1.1.0" ; + schema1:sameAs , + . + + a schema1:Dataset ; + schema1:about ; + schema1:hasPart ; + schema1:name "Documentation for Common data model specification" . + + a schema1:MediaObject ; + schema1:about ; + schema1:description "Human-readable version (interactive report)" ; + schema1:encodingFormat "text/html" ; + schema1:name "COVID-19 vaccine effectiveness data model specification dataspice (HTML)" . 
+ + a schema1:MediaObject ; + schema1:description "Human-readable version (Excel)" ; + schema1:encodingFormat "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" ; + schema1:name "vaccine effectiveness data model specification (XLXS)" . + + a schema1:MediaObject ; + schema1:description "SARS-CoV-2 vaccine effectiveness assessment data management plan (machine-readable)" ; + schema1:encodingFormat "text/json" ; + schema1:name "Data management plan v.0.0.1 (JSON)" . + + a schema1:MediaObject ; + schema1:description "SARS-CoV-2 vaccine effectiveness assessment data management plan (human-readable)" ; + schema1:encodingFormat "text/pdf" ; + schema1:name "Data management plan v.0.0.1 (PDF)" . + + a schema1:MediaObject ; + schema1:description "SARS-CoV-2 vaccine effectiveness assessment study protocol" ; + schema1:encodingFormat "text/pdf" ; + schema1:name "Study protocol v.1.0.3 (PDF)" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo ; + schema1:conditionsOfAccess "The scripts (software) accompanying the data model specification are offered \"as-is\", without warranty, and disclaiming liability for damages resulting from using it." ; + schema1:description "Jupyter notebook with Python scripting and commenting to generate the synthetic dataset" ; + schema1:name "COVID-19 vaccine effectiveness synthetic dataset generation script (IPYNB)" ; + schema1:programmingLanguage ; + schema1:url "" ; + ns1:output . + + a schema1:MediaObject ; + schema1:encodingFormat "application/json" ; + schema1:name "pandas-profiling config" . + + a schema1:MediaObject ; + schema1:description "Interactive report of the exploratory data analysis (EDA) of the synthetic dataset" ; + schema1:encodingFormat "text/html" ; + schema1:name "COVID-19 vaccine effectiveness synthetic dataset EDA (HTML)" ; + schema1:version "1.1.0" . 
+ + a schema1:MediaObject ; + schema1:description "Machine-readable version of the exploratory data analysis (EDA) of the synthetic dataset" ; + schema1:encodingFormat "application/json" ; + schema1:name "COVID-19 vaccine effectiveness synthetic dataset EDA (JSON)" ; + schema1:version "1.1.0" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/csv" ; + schema1:name "Synthetic dataset with 650k entries" ; + schema1:version "1.1.1" . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:PropertyValue ; + schema1:name "COMPSS_BINDINGS_DEBUG" ; + schema1:value "1" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_HOME" ; + schema1:value "/apps/GPP/COMPSs/3.3/" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_MASTER_NODE" ; + schema1:value "gs06r3b72" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_MPIRUN_TYPE" ; + schema1:value "impi" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_PYTHON_VERSION" ; + schema1:value "3.12.1" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_WORKER_INIT_TIMEOUT" ; + schema1:value "120000" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_WORKER_NODES" ; + schema1:value "" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_ACCOUNT" ; + schema1:value "bsc19" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_CPUS_PER_NODE" ; + schema1:value "112" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_END_TIME" ; + schema1:value "1714482541" . 
+ + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_GID" ; + schema1:value "50000" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_ID" ; + schema1:value "1236485" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NAME" ; + schema1:value "matmul-DP" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NODELIST" ; + schema1:value "gs06r3b72" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NUM_NODES" ; + schema1:value "1" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_PARTITION" ; + schema1:value "gpp" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_QOS" ; + schema1:value "gp_debug" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_START_TIME" ; + schema1:value "1714482241" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_UID" ; + schema1:value "2952" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_USER" ; + schema1:value "bsc019057" . + + a schema1:PropertyValue ; + schema1:name "SLURM_MEM_PER_CPU" ; + schema1:value "2000" . + + a schema1:PropertyValue ; + schema1:name "SLURM_SUBMIT_DIR" ; + schema1:value "/gpfs/home/bsc/bsc019057/WorkflowHub/reproducible_matmul" . + + a schema1:PropertyValue ; + schema1:name "SLURM_SUBMIT_HOST" ; + schema1:value "glogin2" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:PropertyValue ; + schema1:name "COMPSS_HOME" ; + schema1:value "/apps/GPP/COMPSs/3.3.1/" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_MASTER_NODE" ; + schema1:value "gs10r3b56" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_MPIRUN_TYPE" ; + schema1:value "impi" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_NUM_CPUS" ; + schema1:value "4" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_PYTHON_VERSION" ; + schema1:value "3.12.1" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_THREADED_DESERIALIZATION" ; + schema1:value "True" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_WORKER_INIT_TIMEOUT" ; + schema1:value "360000" . + + a schema1:PropertyValue ; + schema1:name "COMPSS_WORKER_NODES" ; + schema1:value " gs10r3b61 gs10r3b66" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_ACCOUNT" ; + schema1:value "bsc19" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_CPUS_PER_NODE" ; + schema1:value "112(x3)" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_END_TIME" ; + schema1:value "1720107267" . 
+ + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_GID" ; + schema1:value "50000" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_ID" ; + schema1:value "3618551" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NAME" ; + schema1:value "COMPSs" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NODELIST" ; + schema1:value "gs10r3b[56,61,66]" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_NUM_NODES" ; + schema1:value "3" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_PARTITION" ; + schema1:value "gpp" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_QOS" ; + schema1:value "gp_debug" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_START_TIME" ; + schema1:value "1720100067" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_UID" ; + schema1:value "4373" . + + a schema1:PropertyValue ; + schema1:name "SLURM_JOB_USER" ; + schema1:value "bsc019959" . + + a schema1:PropertyValue ; + schema1:name "SLURM_MEM_PER_CPU" ; + schema1:value "2000" . + + a schema1:PropertyValue ; + schema1:name "SLURM_SUBMIT_DIR" ; + schema1:value "/gpfs/scratch/bsc19/bsc19959/randomsvd" . + + a schema1:PropertyValue ; + schema1:name "SLURM_SUBMIT_HOST" ; + schema1:value "glogin2" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:Place ; + schema1:geo _:b42 ; + schema1:name "Europe" . 
+ + a schema1:PropertyValue ; + schema1:description "Pseudoid of the person included in the cohort" ; + schema1:name "person_id" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort an essential worker? (at the time of entering the cohort)" ; + schema1:name "essential_worker_bl" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort institutionalized? (at the time of entering the cohort)" ; + schema1:name "institutionalized_bl" . + + a schema1:PropertyValue ; + schema1:description "Brand of first dose of the vaccine" ; + schema1:name "dose_1_brand_cd" . + + a schema1:PropertyValue ; + schema1:description "Date of the first dose of the vaccine" ; + schema1:name "dose_1_dt" . + + a schema1:PropertyValue ; + schema1:description "Brand of second dose of the vaccine" ; + schema1:name "dose_2_brand_cd" . + + a schema1:PropertyValue ; + schema1:description "Date of the second dose of the vaccine" ; + schema1:name "dose_2_dt" . + + a schema1:PropertyValue ; + schema1:description "Brand of third dose of the vaccine" ; + schema1:name "dose_3_brand_cd" . + + a schema1:PropertyValue ; + schema1:description "Date of the third dose of the vaccine" ; + schema1:name "dose_3_dt" . + + a schema1:PropertyValue ; + schema1:description "Number of doses (independently of the vaccine brand) received by a person during the period of study" ; + schema1:name "number_doses" . + + a schema1:PropertyValue ; + schema1:description "Date on which the person included in the cohort is considered fully vaccinated" ; + schema1:name "fully_vaccinated_dt" . + + a schema1:PropertyValue ; + schema1:description "Age of the person included in the cohort (at the time of entering the cohort)" ; + schema1:maxValue 115 ; + schema1:minValue 5 ; + schema1:name "age_nm" ; + schema1:unitText "years" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort fully vaccinated?" 
; + schema1:name "fully_vaccinated_bl" . + + a schema1:PropertyValue ; + schema1:description "Vaccine types received for 1st and 2nd dose concatenated with hyphen" ; + schema1:name "vaccination_schedule_cd" . + + a schema1:PropertyValue ; + schema1:description "Date of diagnosis or test result positive of the last known/confirmed infection" ; + schema1:name "confirmed_case_dt" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort a confirmed COVID-19 case?" ; + schema1:name "confirmed_case_bl" . + + a schema1:PropertyValue ; + schema1:description "If the person experienced a confirmed infection proceeding the last confirmed infection with more than 60 days, date of diagnosis of this previous infection" ; + schema1:name "previous_infection_dt" . + + a schema1:PropertyValue ; + schema1:description "Did the person experience a confirmed infection proceeding the last confirmed infection?" ; + schema1:name "previous_infection_bl" . + + a schema1:PropertyValue ; + schema1:description "Type of COVID-19 test used to detect the last known SARS-CoV-2 infection (i.e. first positive test for SARS-CoV-2)" ; + schema1:name "test_type_cd" . + + a schema1:PropertyValue ; + schema1:description "Identified variant (WHO label) for last known SARS-CoV-2 infection" ; + schema1:name "variant_cd" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from type II diabetes (at the time of entering the cohort)?" ; + schema1:name "diabetes_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from obesity (at the time of entering the cohort)? " ; + schema1:name "obesity_bl" . + + a schema1:PropertyValue ; + schema1:description "Sex of the person included in the cohort (at the time of entering the cohort)" ; + schema1:name "sex_cd" . 
+ + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from heart failure (at the time of entering the cohort)?" ; + schema1:name "heart_failure_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from chronic lung disease (at the time of entering the cohort)?" ; + schema1:name "copd_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from solid cancer without metastatis (at the time of entering the cohort)?" ; + schema1:name "solid_tumor_without_metastasis_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from chronic kidney disease (at the time of entering the cohort)?" ; + schema1:name "chronic_kidney_disease_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from sickle cell disease (at the time of entering the cohort)?" ; + schema1:name "sickle_cell_disease_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from hypertension (at the time of entering the cohort)?" ; + schema1:name "hypertension_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from blood cancer (leukemia) (at the time of entering the cohort)?" ; + schema1:name "blood_cancer_bl" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort a transplant patient (i.e. solid organ transplantation) (at the time of entering the cohort)?" ; + schema1:name "transplanted_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from a HIV infection (at the time of entering the cohort)?" ; + schema1:name "hiv_infection_bl" . 
+ + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from a primary immuno deficiency disease (lymphoma) (at the time of entering the cohort)?" ; + schema1:name "primary_immunodeficiency_bl" . + + a schema1:PropertyValue ; + schema1:description "Socioeconomic level of the person in the cohort (at the time of entering the cohort)" ; + schema1:name "socecon_lvl_cd" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from drugs induced immunosuppression (at the time of entering the cohort)?" ; + schema1:name "immunosuppression_bl" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort pregnant (at the time of entering the cohort)?" ; + schema1:name "pregnancy_bl" . + + a schema1:GeoShape ; + schema1:box "66.78398 11.50546 44.4272 -15.84884" . + + a schema1:PropertyValue ; + schema1:description "Area of residence of the person included in the cohort (NUTS 3) (at the time of entering the cohort)" ; + schema1:name "residence_area_cd" . + + a schema1:PropertyValue ; + schema1:description "Country of residence of the person included in the cohort (at the time of entering the cohort)" ; + schema1:name "country_cd" . + + a schema1:PropertyValue ; + schema1:description "Is the country of residence different from the country that is doing the analyses?" ; + schema1:name "foreign_bl" . + + a schema1:PropertyValue ; + schema1:description "Date of death of the person (if the person has died)" ; + schema1:name "exitus_dt" . + + a schema1:PropertyValue ; + schema1:description "Date of death of the person (if the person has died during the period of study)" ; + schema1:name "exitus_bl" . + + a schema1:MediaObject ; + schema1:encodingFormat "" ; + schema1:name "Dockerfile" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/rproj" ; + schema1:name "Analytical pipeline R project file (RPROJ)" . 
+ + a schema1:MediaObject ; + schema1:encodingFormat "text/pdf" ; + schema1:name "Documentation analytical pipeline (PDF)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/yml" ; + schema1:name ".yml file listing dependencies (YML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/csv" ; + schema1:name "Synthetic dataset with 10k entries (CSV)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/r" ; + schema1:name "Installation required R packages (R)" . + + a schema1:Dataset ; + schema1:description "Analytical pipeline output using synthetic data with 10k entries" ; + schema1:hasPart , + , + , + , + , + ; + schema1:name "Output analytical pipeline" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report with dataset statistics, missing data profiles, potential alerts, and detailed per-variable information (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report summarizing compliance with the logic validation rules (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report summarizing used imputation methods and number of imputed values (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report to assess matching balance in the obtained study population (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report including the results of survival analysis in the unmatched study population, a table with baseline characteristics of the matched study population and CONSORT diagram (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report including the analytic results for the meta-analysis (HTML)" . 
+ + a schema1:MediaObject ; + schema1:encodingFormat "text/r" ; + schema1:name "Upload the csv file and create a DuckDB database file (R)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Exploration and Data Quality Assessment (DQA) of the original data mapped at each Data Hub (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Validation (applying logic validation rules) of the original data mapped at each Data Hub (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Imputation of missing values in the original data mapped at each Data Hub (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Iterative matching of the exposed and unexposed, and assessing covariate balance in matched population (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/r" ; + schema1:name "Iterative matching of the exposed and unexposed (R)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Descriptive analysis of the matched and unmatched study population (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Survival analysis of the matched study population (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Quarto script sourcing consecutive scripts of the analytical pipeline (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Quarto document providing a log of the individual code block executions (HTML)" . + + a schema1:Dataset ; + schema1:description "Consecutive scripts of the analytical pipeline" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + ; + schema1:name "Scripts analytical pipeline" . 
+ + a schema1:MediaObject ; + schema1:description "Interactive report showcasing the structural causal model (DAG) to answer the research question" ; + schema1:encodingFormat "text/html" ; + schema1:name "COVID-19 vaccine effectiveness causal model v.1.1.0 (HTML)" . + + a schema1:MediaObject ; + schema1:description "Quarto RMarkdown script to produce the structural causal model" ; + schema1:encodingFormat , + "text/markdown" ; + schema1:name "COVID-19 vaccine effectiveness causal model v.1.1.0 (QMD)" . + + a schema1:Dataset ; + schema1:about ; + schema1:hasPart , + , + , + , + ; + schema1:name "Metadata for Common data model specification" . + + a schema1:MediaObject ; + dct:conformsTo ; + schema1:encodingFormat "text/csv" . + + a schema1:MediaObject ; + dct:conformsTo ; + schema1:encodingFormat "text/csv" . + + a schema1:MediaObject ; + dct:conformsTo ; + schema1:encodingFormat "text/csv" . + + a schema1:MediaObject ; + dct:conformsTo ; + schema1:encodingFormat "text/csv" . + + a schema1:MediaObject ; + schema1:description "Machine-readable version" ; + schema1:encodingFormat "application/ld+json" ; + schema1:mainEntity ; + schema1:name "COVID-19 vaccine effectiveness data model specification dataspice (JSON)" . + + a schema1:DataDownload, + schema1:MediaObject ; + schema1:description "synthetic data set to test the causal model proposed by the baseline use case in BY-COVID WP5 assessing the effectiveness of the COVID-19 vaccine(s)" ; + schema1:encodingFormat "text/csv" ; + schema1:name "vaccine_effectiveness_synthetic_pop_10k_v.1.1.0" ; + schema1:sameAs , + . + + a schema1:Dataset ; + schema1:about ; + schema1:hasPart ; + schema1:name "Documentation for Common data model specification" . + + a schema1:MediaObject ; + schema1:about ; + schema1:description "Human-readable version (interactive report)" ; + schema1:encodingFormat "text/html" ; + schema1:name "COVID-19 vaccine effectiveness data model specification dataspice (HTML)" . 
+ + a schema1:MediaObject ; + schema1:description "Human-readable version (Excel)" ; + schema1:encodingFormat "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" ; + schema1:name "vaccine effectiveness data model specification (XLXS)" . + + a schema1:MediaObject ; + schema1:description "SARS-CoV-2 vaccine effectiveness assessment data management plan (machine-readable)" ; + schema1:encodingFormat "text/json" ; + schema1:name "Data management plan v.0.0.1 (JSON)" . + + a schema1:MediaObject ; + schema1:description "SARS-CoV-2 vaccine effectiveness assessment data management plan (human-readable)" ; + schema1:encodingFormat "text/pdf" ; + schema1:name "Data management plan v.0.0.1 (PDF)" . + + a schema1:MediaObject ; + schema1:description "SARS-CoV-2 vaccine effectiveness assessment study protocol" ; + schema1:encodingFormat "text/pdf" ; + schema1:name "Study protocol v.1.0.3 (PDF)" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo ; + schema1:conditionsOfAccess "The scripts (software) accompanying the data model specification are offered \"as-is\", without warranty, and disclaiming liability for damages resulting from using it." ; + schema1:description "Jupyter notebook with Python scripting and commenting to generate the synthetic dataset" ; + schema1:name "COVID-19 vaccine effectiveness synthetic dataset generation script (IPYNB)" ; + schema1:programmingLanguage ; + schema1:url "" ; + ns1:output . + + a schema1:MediaObject ; + schema1:encodingFormat "application/json" ; + schema1:name "pandas-profiling config" . + + a schema1:MediaObject ; + schema1:description "Interactive report of the exploratory data analysis (EDA) of the synthetic dataset" ; + schema1:encodingFormat "text/html" ; + schema1:name "COVID-19 vaccine effectiveness synthetic dataset EDA (HTML)" ; + schema1:version "1.1.0" . 
+ + a schema1:MediaObject ; + schema1:description "Machine-readable version of the exploratory data analysis (EDA) of the synthetic dataset" ; + schema1:encodingFormat "application/json" ; + schema1:name "COVID-19 vaccine effectiveness synthetic dataset EDA (JSON)" ; + schema1:version "1.1.0" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/csv" ; + schema1:name "Synthetic dataset with 650k entries" ; + schema1:version "1.1.1" . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/genome.fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/Repeats_output_table" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output_log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output_masked_genome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output_repeat_catalog" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/seeds" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/sequences" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/QCFilteredAnnDataObject" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/1k_cell_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/1k_gene_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/marker_dot_plot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/marker_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/processed_anndata_object" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/umap_cluster_plot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/umap_sample_plot" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:Place ; + schema1:geo _:b42 ; + schema1:name "Europe" . + + a schema1:PropertyValue ; + schema1:description "Pseudoid of the person included in the cohort" ; + schema1:name "person_id" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort an essential worker? 
(at the time of entering the cohort)" ; + schema1:name "essential_worker_bl" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort institutionalized? (at the time of entering the cohort)" ; + schema1:name "institutionalized_bl" . + + a schema1:PropertyValue ; + schema1:description "Brand of first dose of the vaccine" ; + schema1:name "dose_1_brand_cd" . + + a schema1:PropertyValue ; + schema1:description "Date of the first dose of the vaccine" ; + schema1:name "dose_1_dt" . + + a schema1:PropertyValue ; + schema1:description "Brand of second dose of the vaccine" ; + schema1:name "dose_2_brand_cd" . + + a schema1:PropertyValue ; + schema1:description "Date of the second dose of the vaccine" ; + schema1:name "dose_2_dt" . + + a schema1:PropertyValue ; + schema1:description "Brand of third dose of the vaccine" ; + schema1:name "dose_3_brand_cd" . + + a schema1:PropertyValue ; + schema1:description "Date of the third dose of the vaccine" ; + schema1:name "dose_3_dt" . + + a schema1:PropertyValue ; + schema1:description "Number of doses (independently of the vaccine brand) received by a person during the period of study" ; + schema1:name "number_doses" . + + a schema1:PropertyValue ; + schema1:description "Date on which the person included in the cohort is considered fully vaccinated" ; + schema1:name "fully_vaccinated_dt" . + + a schema1:PropertyValue ; + schema1:description "Age of the person included in the cohort (at the time of entering the cohort)" ; + schema1:maxValue 115 ; + schema1:minValue 5 ; + schema1:name "age_nm" ; + schema1:unitText "years" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort fully vaccinated?" ; + schema1:name "fully_vaccinated_bl" . + + a schema1:PropertyValue ; + schema1:description "Vaccine types received for 1st and 2nd dose concatenated with hyphen" ; + schema1:name "vaccination_schedule_cd" . 
+ + a schema1:PropertyValue ; + schema1:description "Date of diagnosis or test result positive of the last known/confirmed infection" ; + schema1:name "confirmed_case_dt" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort a confirmed COVID-19 case?" ; + schema1:name "confirmed_case_bl" . + + a schema1:PropertyValue ; + schema1:description "If the person experienced a confirmed infection proceeding the last confirmed infection with more than 60 days, date of diagnosis of this previous infection" ; + schema1:name "previous_infection_dt" . + + a schema1:PropertyValue ; + schema1:description "Did the person experience a confirmed infection proceeding the last confirmed infection?" ; + schema1:name "previous_infection_bl" . + + a schema1:PropertyValue ; + schema1:description "Type of COVID-19 test used to detect the last known SARS-CoV-2 infection (i.e. first positive test for SARS-CoV-2)" ; + schema1:name "test_type_cd" . + + a schema1:PropertyValue ; + schema1:description "Identified variant (WHO label) for last known SARS-CoV-2 infection" ; + schema1:name "variant_cd" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from type II diabetes (at the time of entering the cohort)?" ; + schema1:name "diabetes_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from obesity (at the time of entering the cohort)? " ; + schema1:name "obesity_bl" . + + a schema1:PropertyValue ; + schema1:description "Sex of the person included in the cohort (at the time of entering the cohort)" ; + schema1:name "sex_cd" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from heart failure (at the time of entering the cohort)?" ; + schema1:name "heart_failure_bl" . 
+ + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from chronic lung disease (at the time of entering the cohort)?" ; + schema1:name "copd_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from solid cancer without metastatis (at the time of entering the cohort)?" ; + schema1:name "solid_tumor_without_metastasis_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from chronic kidney disease (at the time of entering the cohort)?" ; + schema1:name "chronic_kidney_disease_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from sickle cell disease (at the time of entering the cohort)?" ; + schema1:name "sickle_cell_disease_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from hypertension (at the time of entering the cohort)?" ; + schema1:name "hypertension_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from blood cancer (leukemia) (at the time of entering the cohort)?" ; + schema1:name "blood_cancer_bl" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort a transplant patient (i.e. solid organ transplantation) (at the time of entering the cohort)?" ; + schema1:name "transplanted_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from a HIV infection (at the time of entering the cohort)?" ; + schema1:name "hiv_infection_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from a primary immuno deficiency disease (lymphoma) (at the time of entering the cohort)?" ; + schema1:name "primary_immunodeficiency_bl" . 
+ + a schema1:PropertyValue ; + schema1:description "Socioeconomic level of the person in the cohort (at the time of entering the cohort)" ; + schema1:name "socecon_lvl_cd" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from drugs induced immunosuppression (at the time of entering the cohort)?" ; + schema1:name "immunosuppression_bl" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort pregnant (at the time of entering the cohort)?" ; + schema1:name "pregnancy_bl" . + + a schema1:GeoShape ; + schema1:box "66.78398 11.50546 44.4272 -15.84884" . + + a schema1:PropertyValue ; + schema1:description "Area of residence of the person included in the cohort (NUTS 3) (at the time of entering the cohort)" ; + schema1:name "residence_area_cd" . + + a schema1:PropertyValue ; + schema1:description "Country of residence of the person included in the cohort (at the time of entering the cohort)" ; + schema1:name "country_cd" . + + a schema1:PropertyValue ; + schema1:description "Is the country of residence different from the country that is doing the analyses?" ; + schema1:name "foreign_bl" . + + a schema1:PropertyValue ; + schema1:description "Date of death of the person (if the person has died)" ; + schema1:name "exitus_dt" . + + a schema1:PropertyValue ; + schema1:description "Date of death of the person (if the person has died during the period of study)" ; + schema1:name "exitus_bl" . + + a schema1:MediaObject ; + schema1:encodingFormat "" ; + schema1:name "Dockerfile" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/rproj" ; + schema1:name "Analytical pipeline R project file (RPROJ)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/pdf" ; + schema1:name "Documentation analytical pipeline (PDF)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/yml" ; + schema1:name ".yml file listing dependencies (YML)" . 
+ + a schema1:MediaObject ; + schema1:encodingFormat "text/csv" ; + schema1:name "Synthetic dataset with 10k entries (CSV)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/r" ; + schema1:name "Installation required R packages (R)" . + + a schema1:Dataset ; + schema1:description "Analytical pipeline output using synthetic data with 10k entries" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:name "Output analytical pipeline" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report with dataset statistics, missing data profiles, potential alerts, and detailed per-variable information (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report summarizing compliance with the logic validation rules (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report summarizing used imputation methods and number of imputed values (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report to assess matching balance in the obtained study population (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report including the results of survival analysis in the unmatched study population, a table with baseline characteristics of the matched study population and CONSORT diagram (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report including the analytic results for the meta-analysis (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" ; + schema1:name "Aggregated non-sensitive results for meta-analysis (Excel sheet)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/r" ; + schema1:name "Upload the csv file and create a DuckDB database file (R)" . 
+ + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Exploration and Data Quality Assessment (DQA) of the original data mapped at each Data Hub (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Validation (applying logic validation rules) of the original data mapped at each Data Hub (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Imputation of missing values in the original data mapped at each Data Hub (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Iterative matching of the exposed and unexposed, and assessing covariate balance in matched population (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/r" ; + schema1:name "Iterative matching of the exposed and unexposed (R)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Descriptive analysis of the matched and unmatched study population (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Survival analysis of the matched study population (QMD)" . + + a schema1:Dataset ; + schema1:description "Consecutive scripts of the analytical pipeline" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + ; + schema1:name "Scripts analytical pipeline" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:encodingFormat "text/qmd" ; + schema1:image ; + schema1:name "Quarto script sourcing consecutive scripts of the analytical pipeline (QMD)" ; + schema1:programmingLanguage . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Quarto document providing a log of the individual code block executions (HTML)" . 
+ + a schema1:MediaObject ; + schema1:description "Interactive report showcasing the structural causal model (DAG) to answer the research question" ; + schema1:encodingFormat "text/html" ; + schema1:name "COVID-19 vaccine effectiveness causal model v.1.1.0 (HTML)" . + + a schema1:MediaObject ; + schema1:description "Quarto RMarkdown script to produce the structural causal model" ; + schema1:encodingFormat , + "text/markdown" ; + schema1:name "COVID-19 vaccine effectiveness causal model v.1.1.0 (QMD)" . + + a schema1:Dataset ; + schema1:about ; + schema1:hasPart , + , + , + , + ; + schema1:name "Metadata for Common data model specification" . + + a schema1:MediaObject ; + dct:conformsTo ; + schema1:encodingFormat "text/csv" . + + a schema1:MediaObject ; + dct:conformsTo ; + schema1:encodingFormat "text/csv" . + + a schema1:MediaObject ; + dct:conformsTo ; + schema1:encodingFormat "text/csv" . + + a schema1:MediaObject ; + dct:conformsTo ; + schema1:encodingFormat "text/csv" . + + a schema1:MediaObject ; + schema1:description "Machine-readable version" ; + schema1:encodingFormat "application/ld+json" ; + schema1:mainEntity ; + schema1:name "COVID-19 vaccine effectiveness data model specification dataspice (JSON)" . + + a schema1:DataDownload, + schema1:MediaObject ; + schema1:description "synthetic data set to test the causal model proposed by the baseline use case in BY-COVID WP5 assessing the effectiveness of the COVID-19 vaccine(s)" ; + schema1:encodingFormat "text/csv" ; + schema1:name "vaccine_effectiveness_synthetic_pop_10k_v.1.1.0" ; + schema1:sameAs , + . + + a schema1:Dataset ; + schema1:about ; + schema1:hasPart ; + schema1:name "Documentation for Common data model specification" . + + a schema1:MediaObject ; + schema1:about ; + schema1:description "Human-readable version (interactive report)" ; + schema1:encodingFormat "text/html" ; + schema1:name "COVID-19 vaccine effectiveness data model specification dataspice (HTML)" . 
+ + a schema1:MediaObject ; + schema1:description "Human-readable version (Excel)" ; + schema1:encodingFormat "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" ; + schema1:name "vaccine effectiveness data model specification (XLXS)" . + + a schema1:MediaObject ; + schema1:description "SARS-CoV-2 vaccine effectiveness assessment data management plan (machine-readable)" ; + schema1:encodingFormat "text/json" ; + schema1:name "Data management plan v.0.0.1 (JSON)" . + + a schema1:MediaObject ; + schema1:description "SARS-CoV-2 vaccine effectiveness assessment data management plan (human-readable)" ; + schema1:encodingFormat "text/pdf" ; + schema1:name "Data management plan v.0.0.1 (PDF)" . + + a schema1:MediaObject ; + schema1:description "SARS-CoV-2 vaccine effectiveness assessment study protocol" ; + schema1:encodingFormat "text/pdf" ; + schema1:name "Study protocol v.1.0.3 (PDF)" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo ; + schema1:conditionsOfAccess "The scripts (software) accompanying the data model specification are offered \"as-is\", without warranty, and disclaiming liability for damages resulting from using it." ; + schema1:description "Jupyter notebook with Python scripting and commenting to generate the synthetic dataset" ; + schema1:name "COVID-19 vaccine effectiveness synthetic dataset generation script (IPYNB)" ; + schema1:programmingLanguage ; + schema1:url "" ; + ns1:output . + + a schema1:MediaObject ; + schema1:encodingFormat "application/json" ; + schema1:name "pandas-profiling config" . + + a schema1:MediaObject ; + schema1:description "Interactive report of the exploratory data analysis (EDA) of the synthetic dataset" ; + schema1:encodingFormat "text/html" ; + schema1:name "COVID-19 vaccine effectiveness synthetic dataset EDA (HTML)" ; + schema1:version "1.1.0" . 
+ + a schema1:MediaObject ; + schema1:description "Machine-readable version of the exploratory data analysis (EDA) of the synthetic dataset" ; + schema1:encodingFormat "application/json" ; + schema1:name "COVID-19 vaccine effectiveness synthetic dataset EDA (JSON)" ; + schema1:version "1.1.0" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/csv" ; + schema1:name "Synthetic dataset with 650k entries" ; + schema1:version "1.1.1" . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/SENTINEL2A_20230210-111817-461_L2A_T30TWS_D.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/SENTINEL2A_20230214-105638-781_L2A_T31UET_D.zip" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_9" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:Place ; + schema1:geo _:b42 ; + schema1:name "Europe" . + + a schema1:PropertyValue ; + schema1:description "Pseudoid of the person included in the cohort" ; + schema1:name "person_id" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort an essential worker? (at the time of entering the cohort)" ; + schema1:name "essential_worker_bl" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort institutionalized? (at the time of entering the cohort)" ; + schema1:name "institutionalized_bl" . + + a schema1:PropertyValue ; + schema1:description "Brand of first dose of the vaccine" ; + schema1:name "dose_1_brand_cd" . + + a schema1:PropertyValue ; + schema1:description "Date of the first dose of the vaccine" ; + schema1:name "dose_1_dt" . + + a schema1:PropertyValue ; + schema1:description "Brand of second dose of the vaccine" ; + schema1:name "dose_2_brand_cd" . + + a schema1:PropertyValue ; + schema1:description "Date of the second dose of the vaccine" ; + schema1:name "dose_2_dt" . + + a schema1:PropertyValue ; + schema1:description "Brand of third dose of the vaccine" ; + schema1:name "dose_3_brand_cd" . 
+ + a schema1:PropertyValue ; + schema1:description "Date of the third dose of the vaccine" ; + schema1:name "dose_3_dt" . + + a schema1:PropertyValue ; + schema1:description "Number of doses (independently of the vaccine brand) received by a person during the period of study" ; + schema1:name "number_doses" . + + a schema1:PropertyValue ; + schema1:description "Date on which the person included in the cohort is considered fully vaccinated" ; + schema1:name "fully_vaccinated_dt" . + + a schema1:PropertyValue ; + schema1:description "Age of the person included in the cohort (at the time of entering the cohort)" ; + schema1:maxValue 115 ; + schema1:minValue 5 ; + schema1:name "age_nm" ; + schema1:unitText "years" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort fully vaccinated?" ; + schema1:name "fully_vaccinated_bl" . + + a schema1:PropertyValue ; + schema1:description "Vaccine types received for 1st and 2nd dose concatenated with hyphen" ; + schema1:name "vaccination_schedule_cd" . + + a schema1:PropertyValue ; + schema1:description "Date of diagnosis or test result positive of the last known/confirmed infection" ; + schema1:name "confirmed_case_dt" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort a confirmed COVID-19 case?" ; + schema1:name "confirmed_case_bl" . + + a schema1:PropertyValue ; + schema1:description "If the person experienced a confirmed infection proceeding the last confirmed infection with more than 60 days, date of diagnosis of this previous infection" ; + schema1:name "previous_infection_dt" . + + a schema1:PropertyValue ; + schema1:description "Did the person experience a confirmed infection proceeding the last confirmed infection?" ; + schema1:name "previous_infection_bl" . + + a schema1:PropertyValue ; + schema1:description "Type of COVID-19 test used to detect the last known SARS-CoV-2 infection (i.e. 
first positive test for SARS-CoV-2)" ; + schema1:name "test_type_cd" . + + a schema1:PropertyValue ; + schema1:description "Identified variant (WHO label) for last known SARS-CoV-2 infection" ; + schema1:name "variant_cd" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from type II diabetes (at the time of entering the cohort)?" ; + schema1:name "diabetes_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from obesity (at the time of entering the cohort)? " ; + schema1:name "obesity_bl" . + + a schema1:PropertyValue ; + schema1:description "Sex of the person included in the cohort (at the time of entering the cohort)" ; + schema1:name "sex_cd" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from heart failure (at the time of entering the cohort)?" ; + schema1:name "heart_failure_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from chronic lung disease (at the time of entering the cohort)?" ; + schema1:name "copd_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from solid cancer without metastatis (at the time of entering the cohort)?" ; + schema1:name "solid_tumor_without_metastasis_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from chronic kidney disease (at the time of entering the cohort)?" ; + schema1:name "chronic_kidney_disease_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from sickle cell disease (at the time of entering the cohort)?" ; + schema1:name "sickle_cell_disease_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from hypertension (at the time of entering the cohort)?" ; + schema1:name "hypertension_bl" . 
+ + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from blood cancer (leukemia) (at the time of entering the cohort)?" ; + schema1:name "blood_cancer_bl" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort a transplant patient (i.e. solid organ transplantation) (at the time of entering the cohort)?" ; + schema1:name "transplanted_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from a HIV infection (at the time of entering the cohort)?" ; + schema1:name "hiv_infection_bl" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from a primary immuno deficiency disease (lymphoma) (at the time of entering the cohort)?" ; + schema1:name "primary_immunodeficiency_bl" . + + a schema1:PropertyValue ; + schema1:description "Socioeconomic level of the person in the cohort (at the time of entering the cohort)" ; + schema1:name "socecon_lvl_cd" . + + a schema1:PropertyValue ; + schema1:description "Does the person included in the cohort suffer from drugs induced immunosuppression (at the time of entering the cohort)?" ; + schema1:name "immunosuppression_bl" . + + a schema1:PropertyValue ; + schema1:description "Is the person included in the cohort pregnant (at the time of entering the cohort)?" ; + schema1:name "pregnancy_bl" . + + a schema1:GeoShape ; + schema1:box "66.78398 11.50546 44.4272 -15.84884" . + + a schema1:PropertyValue ; + schema1:description "Area of residence of the person included in the cohort (NUTS 3) (at the time of entering the cohort)" ; + schema1:name "residence_area_cd" . + + a schema1:PropertyValue ; + schema1:description "Country of residence of the person included in the cohort (at the time of entering the cohort)" ; + schema1:name "country_cd" . 
+ + a schema1:PropertyValue ; + schema1:description "Is the country of residence different from the country that is doing the analyses?" ; + schema1:name "foreign_bl" . + + a schema1:PropertyValue ; + schema1:description "Date of death of the person (if the person has died)" ; + schema1:name "exitus_dt" . + + a schema1:PropertyValue ; + schema1:description "Date of death of the person (if the person has died during the period of study)" ; + schema1:name "exitus_bl" . + + a schema1:MediaObject ; + schema1:encodingFormat "" ; + schema1:name "Dockerfile" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/rproj" ; + schema1:name "Analytical pipeline R project file (RPROJ)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/pdf" ; + schema1:name "Documentation analytical pipeline (PDF)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/yml" ; + schema1:name ".yml file listing dependencies (YML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/csv" ; + schema1:name "Synthetic dataset with 10k entries (CSV)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/r" ; + schema1:name "Installation required R packages (R)" . + + a schema1:Dataset ; + schema1:description "Analytical pipeline output using synthetic data with 10k entries" ; + schema1:hasPart , + , + , + , + , + ; + schema1:name "Output analytical pipeline" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report with dataset statistics, missing data profiles, potential alerts, and detailed per-variable information (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report summarizing compliance with the logic validation rules (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report summarizing used imputation methods and number of imputed values (HTML)" . 
+ + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report to assess matching balance in the obtained study population (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report including the results of survival analysis in the unmatched study population, a table with baseline characteristics of the matched study population and CONSORT diagram (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Interactive report including the analytic results for the meta-analysis (HTML)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/r" ; + schema1:name "Upload the csv file and create a DuckDB database file (R)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Exploration and Data Quality Assessment (DQA) of the original data mapped at each Data Hub (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Validation (applying logic validation rules) of the original data mapped at each Data Hub (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Imputation of missing values in the original data mapped at each Data Hub (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Iterative matching of the exposed and unexposed, and assessing covariate balance in matched population (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/r" ; + schema1:name "Iterative matching of the exposed and unexposed (R)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Descriptive analysis of the matched and unmatched study population (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Survival analysis of the matched study population (QMD)" . 
+ + a schema1:MediaObject ; + schema1:encodingFormat "text/qmd" ; + schema1:name "Quarto script sourcing consecutive scripts of the analytical pipeline (QMD)" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/html" ; + schema1:name "Quarto document providing a log of the individual code block executions (HTML)" . + + a schema1:Dataset ; + schema1:description "Consecutive scripts of the analytical pipeline" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + ; + schema1:name "Scripts analytical pipeline" . + + a schema1:MediaObject ; + schema1:description "Interactive report showcasing the structural causal model (DAG) to answer the research question" ; + schema1:encodingFormat "text/html" ; + schema1:name "COVID-19 vaccine effectiveness causal model v.1.1.0 (HTML)" . + + a schema1:MediaObject ; + schema1:description "Quarto RMarkdown script to produce the structural causal model" ; + schema1:encodingFormat , + "text/markdown" ; + schema1:name "COVID-19 vaccine effectiveness causal model v.1.1.0 (QMD)" . + + a schema1:Dataset ; + schema1:about ; + schema1:hasPart , + , + , + , + ; + schema1:name "Metadata for Common data model specification" . + + a schema1:MediaObject ; + dct:conformsTo ; + schema1:encodingFormat "text/csv" . + + a schema1:MediaObject ; + dct:conformsTo ; + schema1:encodingFormat "text/csv" . + + a schema1:MediaObject ; + dct:conformsTo ; + schema1:encodingFormat "text/csv" . + + a schema1:MediaObject ; + dct:conformsTo ; + schema1:encodingFormat "text/csv" . + + a schema1:MediaObject ; + schema1:description "Machine-readable version" ; + schema1:encodingFormat "application/ld+json" ; + schema1:mainEntity ; + schema1:name "COVID-19 vaccine effectiveness data model specification dataspice (JSON)" . 
+ + a schema1:DataDownload, + schema1:MediaObject ; + schema1:description "synthetic data set to test the causal model proposed by the baseline use case in BY-COVID WP5 assessing the effectiveness of the COVID-19 vaccine(s)" ; + schema1:encodingFormat "text/csv" ; + schema1:name "vaccine_effectiveness_synthetic_pop_10k_v.1.1.0" ; + schema1:sameAs , + . + + a schema1:Dataset ; + schema1:about ; + schema1:hasPart ; + schema1:name "Documentation for Common data model specification" . + + a schema1:MediaObject ; + schema1:about ; + schema1:description "Human-readable version (interactive report)" ; + schema1:encodingFormat "text/html" ; + schema1:name "COVID-19 vaccine effectiveness data model specification dataspice (HTML)" . + + a schema1:MediaObject ; + schema1:description "Human-readable version (Excel)" ; + schema1:encodingFormat "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" ; + schema1:name "vaccine effectiveness data model specification (XLXS)" . + + a schema1:MediaObject ; + schema1:description "SARS-CoV-2 vaccine effectiveness assessment data management plan (machine-readable)" ; + schema1:encodingFormat "text/json" ; + schema1:name "Data management plan v.0.0.1 (JSON)" . + + a schema1:MediaObject ; + schema1:description "SARS-CoV-2 vaccine effectiveness assessment data management plan (human-readable)" ; + schema1:encodingFormat "text/pdf" ; + schema1:name "Data management plan v.0.0.1 (PDF)" . + + a schema1:MediaObject ; + schema1:description "SARS-CoV-2 vaccine effectiveness assessment study protocol" ; + schema1:encodingFormat "text/pdf" ; + schema1:name "Study protocol v.1.0.3 (PDF)" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo ; + schema1:conditionsOfAccess "The scripts (software) accompanying the data model specification are offered \"as-is\", without warranty, and disclaiming liability for damages resulting from using it." 
; + schema1:description "Jupyter notebook with Python scripting and commenting to generate the synthetic dataset" ; + schema1:name "COVID-19 vaccine effectiveness synthetic dataset generation script (IPYNB)" ; + schema1:programmingLanguage ; + schema1:url "" ; + ns1:output . + + a schema1:MediaObject ; + schema1:encodingFormat "application/json" ; + schema1:name "pandas-profiling config" . + + a schema1:MediaObject ; + schema1:description "Interactive report of the exploratory data analysis (EDA) of the synthetic dataset" ; + schema1:encodingFormat "text/html" ; + schema1:name "COVID-19 vaccine effectiveness synthetic dataset EDA (HTML)" ; + schema1:version "1.1.0" . + + a schema1:MediaObject ; + schema1:description "Machine-readable version of the exploratory data analysis (EDA) of the synthetic dataset" ; + schema1:encodingFormat "application/json" ; + schema1:name "COVID-19 vaccine effectiveness synthetic dataset EDA (JSON)" ; + schema1:version "1.1.0" . + + a schema1:MediaObject ; + schema1:encodingFormat "text/csv" ; + schema1:name "Synthetic dataset with 650k entries" ; + schema1:version "1.1.1" . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:PropertyValue ; + schema1:name "DISLIB_GPU_AVAILABLE" ; + schema1:value "True" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/genome.fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/Repeats_output_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output_log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output_masked_genome" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/output_repeat_catalog" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/seeds" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/sequences" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:MediaObject ; + schema1:dateModified "2021-09-28T10:02:17.833Z" ; + schema1:description "Launcher for the md_list workflow." ; + schema1:name "md_launch.py" . + + a schema1:MediaObject ; + schema1:dateModified "2021-09-28T10:02:17.833Z" ; + schema1:description "Launcher for the md_muts_sets and md_add_muts_wt workflows" ; + schema1:name "mdmut_launch.py" . + + a schema1:Dataset ; + schema1:dateModified "2021-09-28T10:02:17.833Z" ; + schema1:description "Molecular dynamics workflows" ; + schema1:hasPart , + , + , + , + ; + schema1:name "MD" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:author , + ; + schema1:dateModified "2021-09-03T00:00:00.000Z" ; + schema1:description "Applies a list of mutations over the initial structure obtaining a set of structures (initial structure + one mutation, initial structure + two mutations, initial structure + three mutations, ...). Finally performs a system setup and runs a molecular dynamics simulation for each of the structures in the set." ; + schema1:hasPart , + , + , + ; + schema1:isBasedOn ; + schema1:name "md_add_muts_wt.py" ; + schema1:programmingLanguage ; + schema1:runtimePlatform . + + a schema1:MediaObject ; + schema1:dateModified "2021-09-28T10:02:17.833Z" ; + schema1:description "Input and path configuration for md_list.py workflow" ; + schema1:name "md_list.yaml" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:author , + ; + schema1:dateModified "2021-09-03T00:00:00.000Z" ; + schema1:description "Performs a system setup and runs a molecular dynamics simulation for each one of the listed mutations in a given structure" ; + schema1:hasPart , + , + , + ; + schema1:isBasedOn ; + schema1:name "md_muts_sets.py" ; + schema1:programmingLanguage ; + schema1:runtimePlatform . + + a schema1:MediaObject ; + schema1:dateModified "2021-09-28T10:02:17.833Z" ; + schema1:description "Input and path configuration for md_muts_sets.py workflow" ; + schema1:name "md_muts_sets.yaml" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about ; + schema1:author ; + schema1:license . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_15" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_28" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_36" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_37" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_40" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_41" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_42" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_43" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_44" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_45" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_9" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_16" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_29" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_36" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_37" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_40" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_41" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_42" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_43" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_44" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_45" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_46" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_47" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_48" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_49" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_50" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_51" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_52" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_53" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_54" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_9" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/Sequences" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/Taxonomy" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_11" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "#main/_anonymous_output_9" . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:Person ; + schema1:name "Anna Syme" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "KNIME" ; + schema1:url . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . 
+ + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . 
+ + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about . + + a schema1:CreativeWork ; + schema1:about . + + a schema1:DefinedTerm ; + schema1:description "Tabular data represented as comma-separated values in a text file." ; + schema1:name "CSV" . + + a schema1:CreativeWork ; + schema1:name "Workflow RO-Crate Profile" ; + schema1:version "0.2.0" . + + a schema1:Organization ; + schema1:name "eScience Lab" ; + schema1:parentOrganization ; + schema1:url "https://esciencelab.org.uk/" . 
+ + a schema1:SoftwareApplication ; + schema1:license ; + schema1:name "BioExcel Building Blocks" ; + schema1:publisher ; + schema1:softwareHelp "https://mmb.irbbarcelona.org/biobb/documentation/source" ; + schema1:url "https://github.com/bioexcel/biobb" ; + schema1:version "3.6.0" . + + a schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:creator , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2021-06-23T10:42:46Z" ; + schema1:dateModified "2022-10-27T16:39:25Z" ; + schema1:description """ Joint multi-omics dimensionality reduction approaches for CAKUT data using peptidome and proteome data\r + \r + **Brief description**\r + In (Cantini et al. 2020), Cantini et al. evaluated 9 representative joint dimensionality reduction (jDR) methods for multi-omics integration and analysis and . The methods are Regularized Generalized Canonical Correlation Analysis (RGCCA), Multiple co-inertia analysis (MCIA), Multi-Omics Factor Analysis (MOFA), Multi-Study Factor Analysis (MSFA), iCluster, Integrative NMF (intNMF), Joint and Individual Variation Explained (JIVE), tensorial Independent Component Analysis (tICA), and matrix-tri-factorization (scikit-fusion) (Tenenhaus, Tenenhaus, and Groenen 2017; Bady et al. 2004; Argelaguet et al. 2018; De Vito et al. 2019; Shen, Olshen, and Ladanyi 2009; Chalise and Fridley 2017; Lock et al. 2013; Teschendorff et al. 2018; Žitnik and Zupan 2015).\r +\r +The authors provided their benchmarking procedure, multi-omics mix (momix), as Jupyter Notebook on GitHub (https://github.com/ComputationalSystemsBiology/momix-notebook) and project environment through Conda. In momix, the factorization methods are called from an R script, and parameters of the methods are also set in that script. We did not modify the parameters of the methods in the provided script. 
We set factor number to 2.\r +""" ; + schema1:keywords "rare diseases, workflow, Proteomics, protein, mirna prediction" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "EJP-RD WP13 case-study CAKUT momix analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/126?version=1" ; + schema1:version 1 . + + a schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:creator , + ; + schema1:dateCreated "2022-11-22T09:57:32Z" ; + schema1:dateModified "2022-11-22T09:59:30Z" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-protein-ligand-complex-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). 
\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/295?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Protein Ligand Complex MD Setup" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/295?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:creator , + , + ; + schema1:dateCreated "2022-12-14T16:05:01Z" ; + schema1:dateModified "2022-12-14T16:06:43Z" ; + schema1:description "An example workflow to allow users to run the Specimen Data Refinery tools on data provided in an input CSV file." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/373?version=1" ; + schema1:keywords "Default-SDR" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "De novo digitisation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/373?version=2" ; + schema1:version 2 ; + ns1:input , + ; + ns1:output , + , + , + . + + a schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:dateCreated "2024-07-10T14:17:34Z" ; + schema1:dateModified "2024-07-12T10:53:48Z" ; + schema1:description """The second-level complexity workflow is one among a collection of workflows designed to address tasks up to CTF estimation. In addition to the functionalities provided by the layer 0 workflow, this workflow aims to enhance the quality of acquisition images using quality protocols.\r +\r +**Quality control protocols**\r +\r +* **Movie max shift**: automatic reject those movies whose frames move more than a given threshold. \r +\r +* **Tilt analysis**: quality score based in the Power Spectrum Density (astigmatism and tilt) \r +\r +* **CTF consensus**: acts as a filter discarding micrographs based on their CTF (limit resolution, defocus, astigmatism, etc.).\r +\r +**Advantages:** \r +\r +* More control of the acquisition quality\r +\r +* Reduce unnecessary processing time and storage""" ; + schema1:image "https://workflowhub.eu/workflows/2273/diagram?version=2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/599?version=1" ; + schema1:isPartOf ; + schema1:keywords "image processing, cryoem, spa, scipion" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "CEITEC layer 1 workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/599?version=2" ; + schema1:version 2 . 
+ +<#ont____assembly_flye_ahrenslab-inputs-ftp://biftp.informatik.uni-freiburg.de/pub/T0/Ahrens/SRR6982805.fastq> a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ftp://biftp.informatik.uni-freiburg.de/pub/T0/Ahrens/SRR6982805.fastq" . + +<#ont___workflow_wick_et_al_-inputs-https://ndownloader.figshare.com/files/8811145> a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "https://ndownloader.figshare.com/files/8811145" . + +<#ont___workflow_wick_et_al_-inputs-https://ndownloader.figshare.com/files/8811148> a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "https://ndownloader.figshare.com/files/8811148" . + +<#ont___workflow_wick_et_al_-inputs-https://ndownloader.figshare.com/files/8812159> a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "https://ndownloader.figshare.com/files/8812159" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/476" ; + schema1:name "Vasiliki Panagi" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "body" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "is_availability" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "result_modifiers" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "results" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_file" . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BridgeDB cache" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Differential gene expression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Differential miRNA expression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING identifier mapping" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mRNA expression correlation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Correlation limit" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "miRNA mRNA correlation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "miRTarBase data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "miRTarBase limit" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MOGAMUN cores" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MOGAMUN generations" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Maximum subnetwork size" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Subnetwork merge threshold" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Minimum subnetwork size" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MOGAMUN runs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING filter" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING limit" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Variant Burden" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Full network" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Subnetworks" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux p9r1n05 4.14.0-115.el7a.ppc64le #1 SMP Tue Sep 25 12:28:39 EDT 2018 ppc64le ppc64le ppc64le GNU/Linux SLURM_JOB_NAME=COMPSs SLURM_JOB_QOS=bsc_cs SLURM_JOB_GPUS=0,1,2,3 SLURM_MEM_PER_CPU=3000 COMPSS_BINDINGS_DEBUG=1 SLURM_JOB_ID=12794885 SLURM_JOB_USER=bsc44973 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=4586 SLURM_SUBMIT_DIR=/gpfs/projects/bsc44/PTF_WF_clean SLURM_JOB_NODELIST=p9r1n[05,07-11] SLURM_JOB_GID=17215 SLURM_JOB_CPUS_PER_NODE=160(x6) SLURM_SUBMIT_HOST=p9login1 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc44 SLURM_JOB_NUM_NODES=6 COMPSS_MASTER_NODE=p9r1n05 COMPSS_WORKER_NODES= p9r1n07 p9r1n08 p9r1n09 p9r1n10 p9r1n11" ; + schema1:endTime "2024-03-04T20:06:30+00:00" ; + schema1:instrument ; + schema1:name "COMPSs ptf_workflow.py execution at cte-power9 with JOB_ID 12794885" ; + schema1:object , + , + , + , + , + ; + schema1:result ; + schema1:subjectOf "https://userportal.bsc.es/" . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "null" . + + a schema1:MediaObject ; + schema1:contentSize 5459 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:description "Auxiliary File" ; + schema1:name "out_run.txt" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 5029 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_ellipsoids.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 7365 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_ensemble_kagan.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9604 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_ensemble_mare.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9561 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_ensemble_mare_bu.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 20681 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_ensemble_sampling.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 16765 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_ensemble_sampling_MC.py" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 42717 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_ensemble_sampling_RS.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 11254 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_figures.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 27146 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_hazard_curves.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 20921 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_hazard_curves_sim.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 28417 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_hazard_curves_sim_mc.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 13430 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_lambda_bsps_load.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 15145 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_lambda_bsps_sep.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 13058 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_load_event.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 5827 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_mix_utilities.py" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 11156 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_parser.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 14598 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_pre_selection.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 48135 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_preload.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 7853 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_preload_curves.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 48133 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_preload_save.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 18922 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_probability_scenarios.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 28266 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_save.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 3214 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_scaling_laws.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 23223 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_short_term.py" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 11144 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_sptha_utilities.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 734 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_time_tracker.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 6302 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Step2_create_ts_input_for_ptf_mod.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 6584 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Step2_extract_ts_mod.py" . + + a schema1:MediaObject ; + schema1:contentSize 8440 ; + schema1:description "Auxiliary File" ; + schema1:name "launch_pycompss.py.bk" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9120 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "run_kagan.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 10139 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "run_mare.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 20845 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "run_step1.py" . + + a schema1:MediaObject ; + schema1:contentSize 19355 ; + schema1:description "Auxiliary File" ; + schema1:name "run_step1.py.old" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 15324 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "run_step3.py" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2695 ; + schema1:description "Auxiliary File" ; + schema1:name "Create_config.sh" . + + a schema1:MediaObject ; + schema1:contentSize 2465 ; + schema1:description "Auxiliary File" ; + schema1:name "Create_config.sh.bk" . + + a schema1:MediaObject ; + schema1:contentSize 2514 ; + schema1:description "Auxiliary File" ; + schema1:name "Step2_config_simul.sh" . + + a schema1:MediaObject ; + schema1:contentSize 2257 ; + schema1:description "Auxiliary File" ; + schema1:name "Step2_simul_BS.sh" . + + a schema1:MediaObject ; + schema1:contentSize 628 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 4873 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Gareth Price" . + + a schema1:MediaObject ; + schema1:description "Workflow documentation" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Tim Dudgeon" . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/fragment-based-docking-scoring/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "canuConcurrency" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "corMaxEvidenceErate" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "diploidOrganism" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "falseValue" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fix" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "genomeSize" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "illuminaClip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "leading" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "maxFragmentLens" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "minReadLen" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "minThreads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "minlen" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "orientation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pacBioDataDir" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pacBioInBam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pacBioTmpDir" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "partialMatch" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "phredsPe" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "polishedAssembly" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "prefix" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "readsPe1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "readsPe2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "repBaseLibrary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "slidingWindow" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "taxons" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trailing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trueValue" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "arrowAssembly" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "assemblyMasked" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "assemblyMerged" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "canuAssembly" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "contaminatedReads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "correctedReads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deconClassification" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deconReport" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "decontaminatedReads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pilonAssembly" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sortedBamIndexFileOut" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmedReadFiles1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmedReadFiles2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmedReads" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/261" ; + schema1:name "Pasi Korhonen" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_31" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_36" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_37" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_40" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_41" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_42" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_43" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_44" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fasta" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-only-VGP3/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "annotation_metadata" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cancer_hotspots" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cgi_biomarkers" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cgi_genes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "civic_genes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "civic_variants" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "dbsnp_vcf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "final_variants" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gene_cards_germline" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gene_cards_loh" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gene_cards_somatic" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gene_reports_tabular" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "germline_cancer_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "loh_cancer_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "loh_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "maf_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mutations_summary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "somatic_cancer_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "somatic_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "uniprot_cancer_genes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "variant_reports_tabular" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "wolf_tutorial.zip?download=1" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myeditconf.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfix_side_chain.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygenion.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygenion.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_energy.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_image.xtc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_rgyr.xvg" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_rms.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_trjconv_str.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygrompp.tpr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.cpt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.edr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.xtc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb2gmx.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb2gmx.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysolvate.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysolvate.zip" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux p9r2n12 4.14.0-115.el7a.ppc64le #1 SMP Tue Sep 25 12:28:39 EDT 2018 ppc64le ppc64le ppc64le GNU/Linux SLURM_JOB_NAME=COMPSs SLURM_JOB_QOS=bsc_cs SLURM_JOB_GPUS=0,1,2,3 SLURM_MEM_PER_CPU=3000 COMPSS_BINDINGS_DEBUG=1 SLURM_JOB_ID=12783189 SLURM_JOB_USER=bsc44973 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=4586 SLURM_SUBMIT_DIR=/gpfs/projects/bsc44/PTF_WF_clean SLURM_JOB_NODELIST=p9r2n[12-13,15],p9r3n[13-15] SLURM_JOB_GID=17215 SLURM_JOB_CPUS_PER_NODE=160(x6) SLURM_SUBMIT_HOST=p9login1 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc44 SLURM_JOB_NUM_NODES=6 COMPSS_MASTER_NODE=p9r2n12 COMPSS_WORKER_NODES= p9r2n13 p9r2n15 p9r3n13 p9r3n14 p9r3n15" ; + schema1:endTime "2024-03-01T16:01:15+00:00" ; + schema1:instrument ; + schema1:name "COMPSs ptf_workflow.py execution at cte-power9 with JOB_ID 12783189" ; + schema1:object , + , + , + , + , + ; + schema1:result ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "null" . + + a schema1:MediaObject ; + schema1:contentSize 5459 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:description "Auxiliary File" ; + schema1:name "out_run.txt" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 5029 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_ellipsoids.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 7365 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_ensemble_kagan.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9604 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_ensemble_mare.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9561 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_ensemble_mare_bu.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 20681 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_ensemble_sampling.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 16765 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_ensemble_sampling_MC.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 42717 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_ensemble_sampling_RS.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 11254 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_figures.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 27146 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_hazard_curves.py" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 20921 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_hazard_curves_sim.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 28417 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_hazard_curves_sim_mc.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 13430 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_lambda_bsps_load.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 15145 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_lambda_bsps_sep.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 13058 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_load_event.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 5827 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_mix_utilities.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 11156 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_parser.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 14598 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_pre_selection.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 48135 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_preload.py" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 7853 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_preload_curves.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 48133 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_preload_save.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 18922 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_probability_scenarios.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 28266 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_save.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 3214 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_scaling_laws.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 23223 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_short_term.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 11144 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_sptha_utilities.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 734 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ptf_time_tracker.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 6302 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Step2_create_ts_input_for_ptf_mod.py" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 6584 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Step2_extract_ts_mod.py" . + + a schema1:MediaObject ; + schema1:contentSize 8440 ; + schema1:description "Auxiliary File" ; + schema1:name "launch_pycompss.py.bk" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9120 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "run_kagan.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 10139 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "run_mare.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 20845 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "run_step1.py" . + + a schema1:MediaObject ; + schema1:contentSize 19355 ; + schema1:description "Auxiliary File" ; + schema1:name "run_step1.py.old" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 15324 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "run_step3.py" . + + a schema1:MediaObject ; + schema1:contentSize 2695 ; + schema1:description "Auxiliary File" ; + schema1:name "Create_config.sh" . + + a schema1:MediaObject ; + schema1:contentSize 2465 ; + schema1:description "Auxiliary File" ; + schema1:name "Create_config.sh.bk" . + + a schema1:MediaObject ; + schema1:contentSize 2514 ; + schema1:description "Auxiliary File" ; + schema1:name "Step2_config_simul.sh" . + + a schema1:MediaObject ; + schema1:contentSize 2257 ; + schema1:description "Auxiliary File" ; + schema1:name "Step2_simul_BS.sh" . 
+ + a schema1:MediaObject ; + schema1:contentSize 629 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 4873 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Darwin MacBook-Pro-Raul-2018.local 23.1.0 Darwin Kernel Version 23.1.0: Mon Oct 9 21:27:27 PDT 2023; root:xnu-10002.41.9~6/RELEASE_X86_64 x86_64 COMPSS_HOME=/Users/rsirvent/opt/COMPSs/" ; + schema1:endTime "2023-12-04T14:19:45+00:00" ; + schema1:instrument ; + schema1:name "COMPSs Wordcount.java execution at MacBook-Pro-Raul-2018.local" ; + schema1:object ; + schema1:result . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 566 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 1843 ; + schema1:description "Auxiliary File" ; + schema1:name "Readme" . + + a schema1:MediaObject ; + schema1:contentSize 9568 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "application/java-archive" ; + schema1:name "wordcount.jar" . + + a schema1:MediaObject ; + schema1:contentSize 4235 ; + schema1:description "Auxiliary File" ; + schema1:name "pom.xml" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 2653 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Wordcount.java" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 703 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "WordcountItf.java" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 743 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "WordcountItf.java" . + + a schema1:MediaObject ; + schema1:contentSize 289 ; + schema1:description "Auxiliary File" ; + schema1:name "project.xml" . + + a schema1:MediaObject ; + schema1:contentSize 983 ; + schema1:description "Auxiliary File" ; + schema1:name "resources.xml" . 
+ + a schema1:MediaObject ; + schema1:contentSize 138 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 2407 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Javier Garrayo-Ventas" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Scaffolding-HiC-VGP8/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Michael Franklin; Jiaan Yu; Juny Kesumadewi" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Janis" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "align_and_sort_sortsam_tmpDir" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cutadapt_adapters" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gatk_intervals" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "known_indels" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mills_indels" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sample_name" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "snps_1000gp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "snps_dbsnp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "out_bam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "out_fastqc_reports" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "out_performance_summary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "out_variants_bamstats" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "out_variants_gatk" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "out_variants_gatk_split" . + + a schema1:Person ; + schema1:name "Lucille Delisle" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/rnaseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Dannon Baker" . + + a schema1:Person ; + schema1:name "Björn Grüning" . + + a schema1:Person ; + schema1:name "Delphine Larivière" . + + a schema1:Person ; + schema1:name "Gildas Le Corguillé" . + + a schema1:Person ; + schema1:name "Andrew Lonie" . + + a schema1:Person ; + schema1:name "Nicholas Keener" . + + a schema1:Person ; + schema1:name "Sergei Kosakovsky Pond" . 
+ + a schema1:Person ; + schema1:name "Wolfgang Maier" . + + a schema1:Person ; + schema1:name "Anton Nekrutenko" . + + a schema1:Person ; + schema1:name "James Taylor" . + + a schema1:Person ; + schema1:name "Steven Weaver" . + + a schema1:Person ; + schema1:name "Marius van den Beek" . + + a schema1:Person ; + schema1:name "Dave Bouvier" . + + a schema1:Person ; + schema1:name "John Chilton" . + + a schema1:Person ; + schema1:name "Nate Coraor" . + + a schema1:Person ; + schema1:name "Frederik Coppens" . + + a schema1:Person ; + schema1:name "Bert Droesbeke" . + + a schema1:Person ; + schema1:name "Ignacio Eguinoa" . + + a schema1:Person ; + schema1:name "Simon Gladman" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "CONFIG.AUTOSUBMIT_VERSION" ; + schema1:value "4.0.98" . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "CONFIG.MAXWAITINGJOBS" ; + schema1:value 20 . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "CONFIG.TOTALJOBS" ; + schema1:value 20 . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "DEFAULT.CUSTOM_CONFIG" ; + schema1:value "/home/kinow/autosubmit/a000/proj/git_project/conf/bootstrap" . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "DEFAULT.EXPID" ; + schema1:value "a000" . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "DEFAULT.HPCARCH" ; + schema1:value "local" . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "EXPERIMENT.CALENDAR" ; + schema1:value "standard" . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "EXPERIMENT.CHUNKSIZE" ; + schema1:value 0 . 
+ + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "EXPERIMENT.CHUNKSIZEUNIT" ; + schema1:value "year" . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "EXPERIMENT.DATELIST" ; + schema1:value 19910101 . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "EXPERIMENT.MEMBERS" ; + schema1:value "fc0" . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "EXPERIMENT.NUMCHUNKS" ; + schema1:value 0 . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "GIT.FETCH_SINGLE_BRANCH" ; + schema1:value true . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "GIT.PROJECT_BRANCH" ; + schema1:value "master" . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "GIT.PROJECT_COMMIT" ; + schema1:value "" . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "GIT.PROJECT_ORIGIN" ; + schema1:value "https://github.com/kinow/auto-mhm-test-domains.git" . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "GIT.PROJECT_SUBMODULES" ; + schema1:value "" . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "MHM.BRANCH_NAME" ; + schema1:value "develop" . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "MHM.DOMAIN" ; + schema1:value 1 . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "MHM.EVAL_PERIOD_DURATION_YEARS" ; + schema1:value 2 . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "MHM.SINGULARITY_CONTAINER" ; + schema1:value "/tmp/mhm.sif" . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "PROJECT.PROJECT_DESTINATION" ; + schema1:value "git_project" . + + a schema1:PropertyValue ; + schema1:exampleOfWork ; + schema1:name "PROJECT.PROJECT_TYPE" ; + schema1:value "git" . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "AS" ; + schema1:citation "https://doi.org/10.1109/HPCSim.2016.7568429" ; + schema1:name "Autosubmit" ; + schema1:url "https://autosubmit.readthedocs.io/" ; + schema1:version "4.0.98" . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Autosubmit mHM test domains" ; + schema1:endTime "2023-11-03T23:41:48" ; + schema1:instrument ; + schema1:name "Run mHM" ; + schema1:object , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:result ; + schema1:startTime "2023-11-03T23:42:31" . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 780 ; + schema1:dateModified "2023-11-03T22:41:19" ; + schema1:name "minimal.yml" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 820 ; + schema1:dateModified "2023-11-03T22:43:08" ; + schema1:encodingFormat "application/binary" ; + schema1:name "job_list_a000.pkl" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-11-03T22:41:31" ; + schema1:encodingFormat "application/binary" ; + schema1:name "job_packages_a000.db" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 17902 ; + schema1:dateModified "2023-11-03T22:41:31" ; + schema1:encodingFormat "application/pdf" ; + schema1:name "a000_20231103_2341.pdf" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 139 ; + schema1:dateModified "2023-11-03T22:41:31" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_20231103_2341.txt" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:Dataset . 
+ + a schema1:MediaObject ; + schema1:contentSize 4241 ; + schema1:dateModified "2023-11-03T22:41:34" ; + schema1:encodingFormat "text/plain" ; + schema1:name "20231103_234125_create.log" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2023-11-03T22:41:27" ; + schema1:encodingFormat "text/plain" ; + schema1:name "20231103_234125_create_err.log" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 22671 ; + schema1:dateModified "2023-11-03T22:43:18" ; + schema1:encodingFormat "text/plain" ; + schema1:name "20231103_234138_run.log" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2023-11-03T22:41:41" ; + schema1:encodingFormat "text/plain" ; + schema1:name "20231103_234138_run_err.log" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 607 ; + schema1:dateModified "2023-11-03T22:43:08" ; + schema1:encodingFormat "text/plain" ; + schema1:name "jobs_active_status.log" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2023-11-03T22:43:08" ; + schema1:encodingFormat "text/plain" ; + schema1:name "jobs_failed_status.log" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3635 ; + schema1:dateModified "2023-11-03T22:43:08" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_COPY_GRAPH.20231103234308.err" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 20 ; + schema1:dateModified "2023-11-03T22:43:08" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_COPY_GRAPH.20231103234308.out" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3028 ; + schema1:dateModified "2023-11-03T22:42:56" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_COPY_GRAPH.cmd" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2023-11-03T22:42:56" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_COPY_GRAPH_COMPLETED" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 22 ; + schema1:dateModified "2023-11-03T22:42:56" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_COPY_GRAPH_STAT" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 5082 ; + schema1:dateModified "2023-11-03T22:42:56" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_GRAPH.20231103234243.err" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1356 ; + schema1:dateModified "2023-11-03T22:42:56" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_GRAPH.20231103234243.out" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 5843 ; + schema1:dateModified "2023-11-03T22:41:54" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_LOCAL_SETUP.20231103234154.err" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4324 ; + schema1:dateModified "2023-11-03T22:41:54" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_LOCAL_SETUP.20231103234154.out" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4802 ; + schema1:dateModified "2023-11-03T22:41:42" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_LOCAL_SETUP.cmd" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2023-11-03T22:41:48" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_LOCAL_SETUP_COMPLETED" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 22 ; + schema1:dateModified "2023-11-03T22:41:48" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_LOCAL_SETUP_STAT" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2550 ; + schema1:dateModified "2023-11-03T22:42:07" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_REMOTE_SETUP.20231103234206.err" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 20 ; + schema1:dateModified "2023-11-03T22:42:07" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_REMOTE_SETUP.20231103234206.out" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3127 ; + schema1:dateModified "2023-11-03T22:42:31" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_SIM.20231103234231.err" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 5264 ; + schema1:dateModified "2023-11-03T22:42:31" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_SIM.20231103234231.out" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 19235 ; + schema1:dateModified "2023-11-03T22:42:19" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_SYNC.20231103234219.err" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116 ; + schema1:dateModified "2023-11-03T22:42:19" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_SYNC.20231103234219.out" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3028 ; + schema1:dateModified "2023-11-03T22:42:56" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_COPY_GRAPH.cmd" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2023-11-03T22:43:08" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_COPY_GRAPH_COMPLETED" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 22 ; + schema1:dateModified "2023-11-03T22:43:08" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_COPY_GRAPH_STAT" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 54 ; + schema1:dateModified "2023-11-03T22:43:08" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_COPY_GRAPH_TOTAL_STATS" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3677 ; + schema1:dateModified "2023-11-03T22:42:31" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_GRAPH.cmd" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2023-11-03T22:42:55" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_GRAPH_COMPLETED" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 22 ; + schema1:dateModified "2023-11-03T22:42:55" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_GRAPH_STAT" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 54 ; + schema1:dateModified "2023-11-03T22:42:56" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_GRAPH_TOTAL_STATS" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 4802 ; + schema1:dateModified "2023-11-03T22:41:42" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_LOCAL_SETUP.cmd" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2023-11-03T22:41:54" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_LOCAL_SETUP_COMPLETED" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 22 ; + schema1:dateModified "2023-11-03T22:41:54" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_LOCAL_SETUP_STAT" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 54 ; + schema1:dateModified "2023-11-03T22:41:54" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_LOCAL_SETUP_TOTAL_STATS" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2643 ; + schema1:dateModified "2023-11-03T22:41:54" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_REMOTE_SETUP.cmd" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2023-11-03T22:42:06" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_REMOTE_SETUP_COMPLETED" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 22 ; + schema1:dateModified "2023-11-03T22:42:06" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_REMOTE_SETUP_STAT" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 54 ; + schema1:dateModified "2023-11-03T22:42:06" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_REMOTE_SETUP_TOTAL_STATS" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2872 ; + schema1:dateModified "2023-11-03T22:42:19" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_SIM.cmd" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2023-11-03T22:42:31" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_SIM_COMPLETED" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 22 ; + schema1:dateModified "2023-11-03T22:42:31" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_SIM_STAT" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 54 ; + schema1:dateModified "2023-11-03T22:42:31" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_SIM_TOTAL_STATS" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3065 ; + schema1:dateModified "2023-11-03T22:42:06" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_SYNC.cmd" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2023-11-03T22:42:18" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_SYNC_COMPLETED" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 22 ; + schema1:dateModified "2023-11-03T22:42:19" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_SYNC_STAT" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 54 ; + schema1:dateModified "2023-11-03T22:42:19" ; + schema1:encodingFormat "text/plain" ; + schema1:name "a000_SYNC_TOTAL_STATS" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "QCFilteredAnnDataObject" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "1k_cell_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "1k_gene_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "marker_dot_plot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "marker_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "processed_anndata_object" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "umap_cluster_plot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "umap_sample_plot" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Sample" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject ; + schema1:contentSize 183 . + + a schema1:MediaObject ; + schema1:contentSize 486 . + + a schema1:MediaObject ; + schema1:contentSize 558 . + + a schema1:MediaObject ; + schema1:contentSize 1302 . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 43925 . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-only-VGP3/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/VGP-meryldb-creation-trio/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Matthias Bernt" . + + a schema1:Organization ; + schema1:name "UFZ Leipzig" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/dada2/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_energy_min_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_energy_min_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_gppnvt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_energy_nvt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_energy_nvt_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_gppnpt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_energy_npt_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_energy_npt_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_gppmd_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_rmsfirst_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_rmsfirst_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_pdb_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_pdb_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_rmsexp_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_rmsexp_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_rgyr_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_image_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_dry_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_editconf_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_gppion_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_genion_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_gppmin_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Checkpoint file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Structures - Raw structure" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Resulting protein structure" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GROMACS topology file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Topologies GROMACS portable binary run" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Trajectories - Raw trajectory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Trajectories - Post-processed trajectory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "System Setup Observables - Potential Energy" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "System Setup Observables - Pressure and density" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "System Setup Observables - Temperature" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Simulation Analysis" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Simulation Analysis" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Simulation Analysis" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/357" ; + schema1:name "Tatiana Gurbich" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Scipion" ; + schema1:url . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 53166 . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Engy Nasr" . + + a schema1:Person ; + schema1:name "Bérénice Batut" . + + a schema1:Person ; + schema1:name "Paul Zierep" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/allele-based-pathogen-identification/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_baredSC-1d-logNorm_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_baredSC-1d-logNorm_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/baredsc/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_baredSC-2d-logNorm_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_baredSC-2d-logNorm_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/baredsc/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Smitha Sukumar" . + + a schema1:Person ; + schema1:name "Elena Martinez" . + + a schema1:Person ; + schema1:name "Christina Adler" . + + a schema1:Person ; + schema1:name "Henry Lydecker" . + + a schema1:Person ; + schema1:name "Fang Wang" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/141" ; + schema1:name "Tracy Chew" . + + a schema1:ComputerLanguage ; + schema1:name "Shell Script" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-decontamination-VGP9/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STAR parameter" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "turtle file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "maximum memory usage in megabytes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Run STAR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "number of threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STAR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bowtie2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Genome fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GTF" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "kallisto" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Proteins" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Transcripts" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "WDL" ; + schema1:identifier ; + schema1:name "Workflow Description Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/galaxyproject/iwc/actions/workflows/workflow_test.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-atac-cutandrun_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-atac-cutandrun_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-pe_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-pe_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-sr_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-sr_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Scipion" ; + schema1:url . + + a schema1:Person ; + schema1:name "GAPARS Horizon 2020 European project" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "classifications-000312899389-000316591628.csv.part_00000" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Cali Willet" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/141" ; + schema1:name "Tracy Chew" . + + a schema1:ComputerLanguage ; + schema1:name "Shell Script" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject ; + schema1:contentSize 187 . + + a schema1:MediaObject ; + schema1:contentSize 74 . + + a schema1:MediaObject ; + schema1:contentSize 324 . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 5131 . + + a schema1:MediaObject ; + schema1:contentSize 8805 . + + a schema1:MediaObject ; + schema1:contentSize 6645 . + + a schema1:MediaObject ; + schema1:contentSize 44006 . + + a schema1:MediaObject ; + schema1:contentSize 21291 . + + a schema1:MediaObject ; + schema1:contentSize 75358 . + + a schema1:MediaObject ; + schema1:contentSize 847 . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.0" . + + a schema1:MediaObject ; + schema1:contentSize 2165 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . 
+ + a schema1:MediaObject ; + schema1:contentSize 133 ; + schema1:description "Parameters passed as arguments to the COMPSs application through the command line" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_command_line_arguments.txt" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Perl" ; + schema1:url . + + a schema1:Person ; + schema1:name "Sarai Varona and Miguel Juliá and Sara Monzon and Alexander Peltzer and Alison Meynert and Edgar Garriga Nogales and Erik Garrison and Gisela Gabernet and Harshil Patel and Joao Curado and Jose Espinosa-Carrasco and Katrin Sameith and Marta Pozuelo and Maxime Garcia and Michael Heuer and Phil Ewels and Simon Heumos and Stephen Kelly and Thanh Le Viet and Isabel Cuesta" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "" . + + a schema1:Person ; + schema1:name "Susana Posada Céspedes" . + + a schema1:Person ; + schema1:name "Niko Beerenwinkel" . + + a schema1:Person ; + schema1:name "" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-consensus-from-variation/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:Person ; + schema1:name "Lucille Delisle" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/rnaseq-sr/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_energy_min_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_energy_min_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_gppnvt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_energy_nvt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_energy_nvt_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_gppnpt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_energy_npt_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_energy_npt_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_gppmd_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_rmsfirst_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_rmsfirst_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_pdb_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_pdb_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_rmsexp_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_rmsexp_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_rgyr_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_image_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_dry_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_editconf_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_gppion_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_genion_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_gppmin_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Checkpoint file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Structures - Raw structure" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Resulting protein structure" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GROMACS topology file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Topologies GROMACS portable binary run" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Trajectories - Raw trajectory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Trajectories - Post-processed trajectory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "System Setup Observables - Potential Energy" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "System Setup Observables - Pressure and density" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "System Setup Observables - Temperature" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Simulation Analysis" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Simulation Analysis" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Simulation Analysis" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Helmholtz-Zentrum für Umweltforschung - UFZ" . + + a schema1:Organization ; + schema1:name "Helmholtz-Zentrum für Umweltforschung - UFZ" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_IIa-denoising-se_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_IIa-denoising-se_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/qiime2-II-denoising/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_IIb-denoising-pe_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_IIb-denoising-pe_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/qiime2-II-denoising/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reference filters files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output Destination" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filter rRNA" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Forward reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Identifier" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "kallisto index file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Maximum memory in MB" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reverse reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filtered statistics" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "kallisto output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-sr/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/445" ; + schema1:name "Diego De Panis" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "consensus_genome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "drug_resistance_report_html" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "html_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_txt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "report_output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "variants_report_html" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Dannon Baker" . + + a schema1:Person ; + schema1:name "Björn Grüning" . + + a schema1:Person ; + schema1:name "Delphine Larivière" . + + a schema1:Person ; + schema1:name "Gildas Le Corguillé" . + + a schema1:Person ; + schema1:name "Andrew Lonie" . + + a schema1:Person ; + schema1:name "Nicholas Keener" . + + a schema1:Person ; + schema1:name "Sergei Kosakovsky Pond" . + + a schema1:Person ; + schema1:name "Wolfgang Maier" . + + a schema1:Person ; + schema1:name "Anton Nekrutenko" . + + a schema1:Person ; + schema1:name "James Taylor" . + + a schema1:Person ; + schema1:name "Steven Weaver" . + + a schema1:Person ; + schema1:name "Marius van den Beek" . + + a schema1:Person ; + schema1:name "Dave Bouvier" . + + a schema1:Person ; + schema1:name "John Chilton" . + + a schema1:Person ; + schema1:name "Nate Coraor" . + + a schema1:Person ; + schema1:name "Frederik Coppens" . + + a schema1:Person ; + schema1:name "Bert Droesbeke" . + + a schema1:Person ; + schema1:name "Ignacio Eguinoa" . 
+ + a schema1:Person ; + schema1:name "Simon Gladman" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "NC_045512" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "basecalling model" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "identifier used" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "kraken_database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "nanopore FASTQ reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "number of threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Flye de novo assembler for single-molecule reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken2 reports" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Medaka polisher" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "QUAlity assessment" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "KNIME" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-cellplex_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-cellplex_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/fastq-to-matrix-10x/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-v3_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-v3_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/fastq-to-matrix-10x/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Wolfgang Maier" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "called_variants" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastp_html_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastp_pe" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "filtered_mapped_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mapped_reads_stats" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "markduplicates_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "markduplicates_stats" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "preprocessing_and_mapping_reports" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "realigned_deduplicated_filtered_mapped_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "realigned_deduplicated_filtered_mapped_reads_with_indel_quals" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "soft_filtered_variants" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Darwin MacBook-Pro-Raul-2018.local 22.5.0 Darwin Kernel Version 22.5.0: Mon Apr 24 20:51:50 PDT 2023; root:xnu-8796.121.2~5/RELEASE_X86_64 x86_64 COMPSS_HOME=/Users/rsirvent/opt/COMPSs/" ; + schema1:endTime "2023-05-30T08:48:00+00:00" ; + schema1:instrument ; + schema1:name "COMPSs matmul_files.py execution at MacBook-Pro-Raul-2018.local" ; + schema1:object , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:result , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.2" . + + a schema1:MediaObject ; + schema1:contentSize 250 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1429 ; + schema1:description "Auxiliary File" ; + schema1:name "README" . + + a schema1:MediaObject ; + schema1:contentSize 4414 ; + schema1:description "Auxiliary File" ; + schema1:name "pom.xml" . + + a schema1:MediaObject ; + schema1:contentSize 2061 ; + schema1:description "Auxiliary File" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 0 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "__init__.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1627 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "matmul_tasks.py" . + + a schema1:MediaObject ; + schema1:contentSize 25 ; + schema1:description "COMPSs command line execution command, including parameters passed" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_command_line_arguments.txt" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Gareth Price" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "metrics" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/assembly-with-flye/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux nvb4 2.6.32-642.6.2.el6.x86_64 #1 SMP Mon Oct 24 10:22:33 EDT 2016 x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs SLURM_JOB_QOS=bsc_cs SLURM_JOB_GPUS=0,1,2,3 SLURM_MEM_PER_CPU=8000 SLURM_JOB_ID=1930225 SLURM_JOB_USER=bsc19959 COMPSS_HOME=/apps/COMPSs/TrunkCTCache/ SLURM_JOB_UID=4373 SLURM_SUBMIT_DIR=/gpfs/scratch/bsc19/bsc19959/cache SLURM_JOB_NODELIST=nvb[4-9,12-21] SLURM_JOB_CPUS_PER_NODE=16(x16) SLURM_SUBMIT_HOST=nvblogin1 SLURM_JOB_PARTITION=projects SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=16 COMPSS_MASTER_NODE=nvb4 COMPSS_WORKER_NODES= nvb5 nvb6 nvb7 nvb8 nvb9 nvb12 nvb13 nvb14 nvb15 nvb16 nvb17 nvb18 nvb19 nvb20 nvb21" ; + schema1:endTime "2024-03-22T11:45:33+00:00" ; + schema1:instrument ; + schema1:name "COMPSs cch_matmul_test.py execution at bsc_nvidia with JOB_ID 1930225" ; + schema1:result , + , + , + ; + schema1:startTime "2024-03-22T11:39:34+00:00" ; + 
schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:SoftwareApplication ; + schema1:name "Dislib" ; + schema1:version "0.9.x" . + + a schema1:MediaObject ; + schema1:contentSize 4113 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_bsc_nvidia_SLURM_JOB_ID_1930225" ; + schema1:contentSize 64 ; + schema1:description "COMPSs console standard error log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-1930225.err" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_bsc_nvidia_SLURM_JOB_ID_1930225" ; + schema1:contentSize 2864 ; + schema1:description "COMPSs console standard output log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-1930225.out" . + + a schema1:MediaObject ; + schema1:contentSize 716 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 1203 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Romane Libouban" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/repeatmasking/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-decontamination-VGP9/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:ComputerLanguage ; + schema1:name "Bpipe" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-only-VGP3/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:name "Matthias Bernt" . + + a schema1:Organization ; + schema1:name "UFZ Leipzig" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/dada2/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-cellplex_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-cellplex_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/fastq-to-matrix-10x/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-v3_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-v3_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/fastq-to-matrix-10x/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Plasmids" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux gs11r2b04 5.14.0-284.30.1.el9_2.x86_64 #1 SMP PREEMPT_DYNAMIC Fri Aug 25 09:13:12 EDT 2023 x86_64 x86_64 x86_64 GNU/Linux" ; + schema1:endTime "2024-07-05T11:38:51+00:00" ; + schema1:instrument ; + schema1:name "COMPSs main.py execution at marenostrum5 with JOB_ID 3644777" ; + schema1:object , + ; + schema1:result , + , + , + ; + schema1:startTime "2024-07-05T11:37:39+00:00" ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 1388 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_marenostrum5_SLURM_JOB_ID_3644777" ; + schema1:contentSize 100 ; + schema1:description "COMPSs console standard error log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-3644777.err" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_marenostrum5_SLURM_JOB_ID_3644777" ; + schema1:contentSize 1723 ; + schema1:description "COMPSs console standard output log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-3644777.out" . 
+ + a schema1:MediaObject ; + schema1:contentSize 590 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 996 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/lcms-preprocessing/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path_gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path_itp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path_top" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bits_set" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blacklist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bwt_algorithm" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chromosome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_lratio" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_exomeDepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_manta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_bf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "generate_bwa_indexes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_len" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_exome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_mapping_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "read_group" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_amb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_ann" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_bwt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fai" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_pac" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_sa" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_bwa_mem" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastqc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_samtools" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "html_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "json_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_all" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_bam_filtering" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_exomedepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_manta" . 
+ + a schema1:Person ; + schema1:name "Daniel López-López" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-sr/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s01r2b26 4.18.0-305.19.1.el8_4.x86_64 #1 SMP Tue Sep 7 07:07:31 EDT 2021 x86_64 x86_64 x86_64 GNU/Linux COMPSS_CONTAINER_ENGINE=SINGULARITY SLURM_JOB_ID=2497335 COMPSS_HOME=/apps/COMPSs/3.3.pr/ COMPSS_MASTER_WORKING_DIR=" ; + schema1:endTime "2023-10-20T11:05:06+00:00" ; + schema1:instrument ; + schema1:name "COMPSs Workflow.py execution at s01r2b26 with JOB_ID 2497335" ; + schema1:object , + , + ; + schema1:result , + ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.2.rc2310" . + + a schema1:MediaObject ; + schema1:contentSize 4057 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 612 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1497 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:Person ; + schema1:name "Dannon Baker" . + + a schema1:Person ; + schema1:name "Björn Grüning" . + + a schema1:Person ; + schema1:name "Delphine Larivière" . + + a schema1:Person ; + schema1:name "Gildas Le Corguillé" . + + a schema1:Person ; + schema1:name "Andrew Lonie" . + + a schema1:Person ; + schema1:name "Nicholas Keener" . + + a schema1:Person ; + schema1:name "Sergei Kosakovsky Pond" . + + a schema1:Person ; + schema1:name "Wolfgang Maier" . + + a schema1:Person ; + schema1:name "Anton Nekrutenko" . + + a schema1:Person ; + schema1:name "James Taylor" . + + a schema1:Person ; + schema1:name "Steven Weaver" . + + a schema1:Person ; + schema1:name "Marius van den Beek" . + + a schema1:Person ; + schema1:name "Dave Bouvier" . + + a schema1:Person ; + schema1:name "John Chilton" . + + a schema1:Person ; + schema1:name "Nate Coraor" . + + a schema1:Person ; + schema1:name "Frederik Coppens" . + + a schema1:Person ; + schema1:name "Bert Droesbeke" . + + a schema1:Person ; + schema1:name "Ignacio Eguinoa" . + + a schema1:Person ; + schema1:name "Simon Gladman" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Scaffolding-HiC-VGP8/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/atacseq/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Wolfgang Maier" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "1_based_masking_regions" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "called_variant_sites" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chrom_pos_ref_called_variants" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chrom_pos_ref_called_variants_with_0_based_start" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chrom_pos_ref_called_variants_with_0_based_start_end" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chrom_pos_ref_failed_variants" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chrom_pos_ref_failed_variants_with_0_based_start" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chrom_pos_ref_failed_variants_with_0_based_start_end" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "consensus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "consensus_variants" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "coverage_depth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "failed_variant_sites" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "filter_failed_variants" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "low_cov_regions" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "low_cov_regions_plus_filter_failed" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "low_cov_regions_plus_filter_failed_combined" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "masking_regions" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "masking_regions_with_1_based_start" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "multisample_consensus_fasta" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "VCF" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-decontamination-VGP9/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/445" ; + schema1:name "Diego De Panis" . + + a schema1:Person ; + schema1:name "Engy Nasr" . + + a schema1:Person ; + schema1:name "Bérénice Batut" . + + a schema1:Person ; + schema1:name "Paul Zierep" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/pathogen-detection-pathogfair-samples-aggregation-and-visualisation/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a schema1:Person ; + schema1:name "Javier Garrayo-Ventas" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_host" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_password" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_user" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "filters" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_file" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bowtie2 index" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filer rRNA" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "forward reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GTF file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "kallisto index" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Max memory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename prefix" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reverse reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "number of threads" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bowtie2 output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "FASTQC" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "FeatureCounts output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filtered reads folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "kallisto output" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/gcms-metams/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Alex L Mitchell" . + + a schema1:Person ; + schema1:name "Lorna J Richardson" . 
+ + a schema1:Person ; + schema1:name "Ekaterina Sakharova" . + + a schema1:Person ; + schema1:name "Maxim Scheremetjew" . + + a schema1:Person ; + schema1:name "Anton Korobeynikov" . + + a schema1:Person ; + schema1:name "Alex Shlemov" . + + a schema1:Person ; + schema1:name "Olga Kunyavskaya" . + + a schema1:Person ; + schema1:name "Alla Lapidus" . + + a schema1:Person ; + schema1:name "Robert D Finn" . + + a schema1:Person ; + schema1:name "Alexandre Almeida" . + + a schema1:Person ; + schema1:name "Martin Beracochea" . + + a schema1:Person ; + schema1:name "Miguel Boland" . + + a schema1:Person ; + schema1:name "Josephine Burgin" . + + a schema1:Person ; + schema1:name "Guy Cochrane" . + + a schema1:Person ; + schema1:name "Michael R Crusoe" . + + a schema1:Person ; + schema1:name "Varsha Kale" . + + a schema1:Person ; + schema1:name "Simon C Potter" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "5.8s_pattern" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "5s_pattern" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CGC_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CGC_postfixes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "EggNOG_data_dir" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "EggNOG_db" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "EggNOG_diamond_db" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "HMM_gathering_bit_score" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "HMM_name_database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "HMM_omit_alignment" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "InterProScan_applications" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "InterProScan_databases" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "InterProScan_outputFormat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cgc_chunk_size" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "forward_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "func_ann_names_hmmer" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "func_ann_names_ips" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "go_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hmmsearch_header" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ips_header" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ko_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_db" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_label" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_otus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_tax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "other_ncRNA_models" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "protein_chunk_size_IPS" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "protein_chunk_size_hmm" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc_min_length" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reverse_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rfam_model_clans" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rfam_models" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "single_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_db" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_label" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_otus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_tax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chunking_nucleotides" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chunking_proteins" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "completed_flag_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "compressed_files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastp_filtering_json_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "functional_annotation_folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hashsum_paired" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hashsum_single" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "motus_output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_cds_flag_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_tax_flag_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc-statistics" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc-status" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc_summary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rna-count" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sequence-categorisation_folder" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stats" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "taxonomy-summary_folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "data_matrix" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gmt_filepath" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "index_col" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "outdir" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samples_on_rows" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "separator" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "resdir" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "ABRomics" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/amr_gene_detection/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_index_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_mdp_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_ndx_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_solvent_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dhdl_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_heteroatom_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_itp_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_molecule_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_ndx_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path_gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path_itp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path_top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_str_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xtc_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a schema1:Person ; + schema1:name "Cali Willet" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/141" ; + schema1:name "Tracy Chew" . + + a schema1:ComputerLanguage ; + schema1:name "Shell Script" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_molecule_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Darwin MacBook-Pro-Raul-2018.local 22.5.0 Darwin Kernel Version 22.5.0: Mon Apr 24 20:51:50 PDT 2023; root:xnu-8796.121.2~5/RELEASE_X86_64 x86_64 COMPSS_HOME=/Users/rsirvent/opt/COMPSs/" ; + schema1:endTime "2023-05-30T07:15:34+00:00" ; + schema1:instrument ; + schema1:name "COMPSs Matmul.java execution at MacBook-Pro-Raul-2018.local" ; + schema1:object , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:result , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.2" . + + a schema1:MediaObject ; + schema1:contentSize 342 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 2092 ; + schema1:description "Auxiliary File" ; + schema1:name "Readme" . + + a schema1:MediaObject ; + schema1:contentSize 4804 ; + schema1:description "Auxiliary File" ; + schema1:name "pom.xml" . + + a schema1:MediaObject ; + schema1:contentSize 2823 ; + schema1:description "Auxiliary File" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 3615 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Matmul.java" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1182 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "MatmulImpl.java" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1162 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "MatmulItf.java" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 4341 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Block.java" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 940 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "MatmulImpl.java" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1464 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "MatmulItf.java" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 2598 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Block.java" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 3844 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Matmul.java" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 932 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "MatmulItf.java" . + + a schema1:MediaObject ; + schema1:contentSize 289 ; + schema1:description "Auxiliary File" ; + schema1:name "project.xml" . + + a schema1:MediaObject ; + schema1:contentSize 983 ; + schema1:description "Auxiliary File" ; + schema1:name "resources.xml" . + + a schema1:MediaObject ; + schema1:contentSize 24 ; + schema1:description "COMPSs command line execution command, including parameters passed" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_command_line_arguments.txt" . + + a schema1:Organization ; + schema1:name "Galaxy" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "concat_traj.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybd_flexserv_bd_ensemble.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybd_flexserv_bd_ensemble.mdcrd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myconcoord_disco_bfactor.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myconcoord_disco_rmsd.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myconcoord_disco_traj.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myconcoord_dist.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myconcoord_dist.gro" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myconcoord_dist.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_disco_traj.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_flexserv_bd_rmsd.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_flexserv_bd_traj_fitted.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_flexserv_dmd_rmsd.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_flexserv_dmd_traj_fitted.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_flexserv_nma_ensemble.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_flexserv_nma_rmsd.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_imods_ensemble.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_mask_backbone" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_mask_ca.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_meta_traj_fitted.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_meta_traj_rmsd.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_nolb_ensemble.trr" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_nolb_rmsd.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_pcz_proj1.dcd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_prody_anm_traj.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_prody_rms.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mydmd_flexserv_dmd_ensemble.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mydmd_flexserv_dmd_ensemble.mdcrd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_model.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_monomer.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_cluster.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_cluster.xpm" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_cluster.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_concat.cluster.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myimod_imc.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myimod_imode_evecs.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymake_gmx_ndx.ndx" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mynma_flexserv_nma_ensemble.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mynma_flexserv_nma_ensemble.mdcrd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mynolb_ensemble.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypcz_bfactor_all.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypcz_bfactor_all.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypcz_collectivity.json" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypcz_evecs.json" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypcz_hinges_bfactor_report.json" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypcz_hinges_fcte_report.json" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypcz_proj1.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypcz_report.json" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypcz_stiffness.json" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprody_anm_traj.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mytrjcat_concat_traj.trr" . 
+ + a schema1:Person ; + schema1:name "Peter van Heusden" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-pe-illumina-artic-ivar-analysis/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux gs12r1b10 5.14.0-284.30.1.el9_2.x86_64 #1 SMP PREEMPT_DYNAMIC Fri Aug 25 09:13:12 EDT 2023 x86_64 x86_64 x86_64 GNU/Linux" ; + schema1:endTime "2024-07-12T11:16:08+00:00" ; + schema1:instrument ; + schema1:name "COMPSs lysozyme_in_water_full.py execution at marenostrum5 with JOB_ID 3846825" ; + schema1:object , + , + , + , + , + , + ; + schema1:result , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:startTime "2024-07-12T11:13:08+00:00" ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 3064 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . 
+ + a schema1:MediaObject ; + schema1:contentSize 599 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "FULL.yaml" . + + a schema1:MediaObject ; + schema1:contentSize 1558 ; + schema1:description "Auxiliary File" ; + schema1:name "launch_full.sh" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 7601 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 8868 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water_full_no_mpi.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 10194 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water_full_singularity.py" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_marenostrum5_SLURM_JOB_ID_3846825" ; + schema1:contentSize 100 ; + schema1:description "COMPSs console standard error log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-3846825.err" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_marenostrum5_SLURM_JOB_ID_3846825" ; + schema1:contentSize 4609 ; + schema1:description "COMPSs console standard output log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-3846825.out" . + + a schema1:MediaObject ; + schema1:contentSize 434 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.itp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myappend_ligand.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybabel_minimize.mol2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycat_pdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myeditconf.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_heteroatoms.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_molecule.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfix_side_chain.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygenion.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygenion.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygenrestr.itp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_energy.xvg" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_image.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_rgyr.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_rms_exp.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_trjconv_str_lig.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_trjconv_str_prot.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygrompp_ion.tpr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymake_ndx.ndx" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.cpt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.edr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.xtc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb2gmx.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb2gmx.zip" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb_prot.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myreduce_add_hydrogens.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysolvate.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysolvate.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Alignment score from Kpax to analyse structures" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CATH family ids" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Obsolete and inconsistent CATH domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename for residue-mapped CATH domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Core domain structure (.pdb)" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Database to select to compute core average structure" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "To store all the domain-like StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "To store all failed domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename to store family ids per iteration" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Iteration number" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Threshold for minimum domain length" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Parameter file for current iteration" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The directory for storing all PDB files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pfam family ids" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Obsolete and inconsistent Pfam domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename for residue-mapped Pfam domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CATH cross-mapped domain StIs from previous iteration" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pfam cross-mapped domain StIs from previous iteration" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Score threshold for given alignment score from Kpax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Directory for storing all SIFTS files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "To store all the true domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename to store unmapped and not properly aligned instances from CATH" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename to store unmapped but structurally well aligned instances from CATH" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename to store unmapped and not properly aligned instances from Pfam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename to store unmapped but structurally well aligned instances from Pfam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename with alignment scores for unmapped instances" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Alignment results for CATH unmapped instances" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Alignment results for Pfam unmapped instances" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Domain-like StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Failed domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "All CATH cross-mapped domin StIs family-wise together" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "All Pfam domain StIs cross-mapped to CATH family-wise" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Alignment results from Kpax for all cross-mapped families" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Average structures per cross-mapped Pfam family for CATH StIs at family level" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Core domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Core domain structure (.pdb)" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Cross-mapped families with CATH domain StIs passing the threshold" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Cross-mapped families with Pfam domain StIs passing the threshold" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Merged cross-mapped and residue-mapped domain StIs from CATH" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Merged cross-mapped and residue-mapped domain StIs from Pfam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Family ids per iteration" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Parameter file for next iteration of the workflow" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Average structures per cross-mapped CATH family for Pfam StIs at family level" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Obsolete and inconsistent domain StIs from CATH" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Obsolete and inconsistent domain StIs from Pfam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "All CATH residue-mapped domain StIs with domain labels" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "All Pfam residue-mapped domain StIs with domain labels" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "True domain StIs per iteration" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "All un-mapped domin StIs from CATH" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Failed domain StIs from CATH" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Domain-like StIs from CATH" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "All Pfam un-mapped domin StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Failed domain StIs from Pfam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Domain-like StIs from Pfam" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BridgeDB cache" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Differential gene expression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Differential miRNA expression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING identifier mapping" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mRNA expression correlation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of correlation edges to use, USE FOR TESTING ONLY" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "miRNA mRNA correlation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "miRTarBase data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of miRTarBase edges to use, USE FOR TESTING ONLY" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of cores to let MOGAMUN use" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of generation to let the genetic algorithm in MOGAMUN evolve" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The maximum size of subnetworks during postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "the minimum Jaccard Index overlap between two subnetworks to allow them to be merged" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The minimum size of subnetworks during postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of parallel runs to let MOGAMUN do, these parallel runs are combined in postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING data" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The minimum score for a STRING edge to be included in the analysis" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of STRING edges to use, USE FOR TESTING ONLY" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Variant Burden" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "full_graph" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "subnetworks" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq_files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "readgroup" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sample_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sorted_bam" . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "metrics" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken_taxonomy_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken_taxonomy_table_modified" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Taxonomic_prediction_report" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "filters" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_file" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:name "Oliver Woolland" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/159" ; + schema1:name "Oliver Woolland" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . 
+ + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/445" ; + schema1:name "Diego De Panis" . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rnaseq_left_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rnaseq_right_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sample_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sars_cov_2_reference_genome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "indels" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "snps" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/26" ; + schema1:name "Ambarish Kumar" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "basecalling model" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "configuration_command" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "nanopore reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "identifier used" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "kraken_database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "number of threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Flye de novo assembler for single-molecule reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Guppy for CPU" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken2 reports" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Krona taxonomy visualization" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Medaka polisher" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "FASTQ files merged" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MinION-Quality-Check" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "QUAlity assessment" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Scaffolding-HiC-VGP8/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "KNIME" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myautodock_vina_run.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myautodock_vina_run.pdbqt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybabel_convert.ent" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybox.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycat_pdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_model_pdbqt.pdbqt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_molecule.pdb" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_filter.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_run.json" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_run.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_select.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_select.pqr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myideal_sdf.sdf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mystr_check_add_hydrogens.pdb" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Javier Garrayo-Ventas" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject ; + schema1:contentSize 177 . + + a schema1:MediaObject ; + schema1:contentSize 91 . + + a schema1:MediaObject ; + schema1:contentSize 724 . + + a schema1:MediaObject ; + schema1:contentSize 1043 . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 6645 . + + a schema1:Organization ; + schema1:name "Helmholtz-Zentrum für Umweltforschung - UFZ" . + + a schema1:Organization ; + schema1:name "Helmholtz-Zentrum für Umweltforschung - UFZ" . + + a schema1:Organization ; + schema1:name "Helmholtz-Zentrum für Umweltforschung - UFZ" . 
+ + a schema1:Organization ; + schema1:name "Helmholtz-Zentrum für Umweltforschung - UFZ" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_Ia-import-multiplexed-se_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_Ia-import-multiplexed-se_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/qiime2-I-import/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_Ib-import-multiplexed-pe_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_Ib-import-multiplexed-pe_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/qiime2-I-import/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_Ic-import-demultiplexed-se_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_Ic-import-demultiplexed-se_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/qiime2-I-import/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_Id-import-demultiplexed-pe_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_Id-import-demultiplexed-pe_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/qiime2-I-import/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Egon Willighagen" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/262" ; + schema1:name "Marvin Martens" . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s01r1b45 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=31897997 SLURM_JOB_USER=bsc19057 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=2952 SLURM_SUBMIT_DIR=/gpfs/home/bsc19/bsc19057/Tutorial_2024/lysozyme_in_water SLURM_JOB_NODELIST=s01r1b[45,47] SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48(x2) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=2 COMPSS_MASTER_NODE=s01r1b45 COMPSS_WORKER_NODES= s01r1b47" ; + schema1:endTime "2024-03-05T11:04:52+00:00" ; + schema1:instrument ; + schema1:name "COMPSs lysozyme_in_water_full.py execution at marenostrum4 with JOB_ID 31897997" ; + schema1:object , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:result , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 2912 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 1581 ; + schema1:description "Auxiliary File" ; + schema1:name "launch_full.sh" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 7601 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 8868 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water_full_no_mpi.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 10194 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water_full_singularity.py" . + + a schema1:MediaObject ; + schema1:contentSize 764 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 727 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BDD_Kakila_v2_2021021_observateur.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BDD_Kakila_v2_20210221_observation.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BDD_Kakila_v2_20210221_organisme.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BDD_Kakila_v2_20210221_secteur_geog.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BDD_Kakila_v2_20210221_sortie.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BDD_Kakila_v2_20210221_taxon.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kakila_database_of_marine_mammal_observation_data_in_the_AGOA_sanctuary_-_French_Antilles.xml" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:Person ; + schema1:name "Engy Nasr" . 
+ + a schema1:Person ; + schema1:name "Bérénice Batut" . + + a schema1:Person ; + schema1:name "Paul Zierep" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/gene-based-pathogen-identification/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Run binning workflow" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BUSCO dataset" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Deduplicate reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output Destination (prov only)" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Contamination reference file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gtdbtk data directory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Identifier used" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Forward reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reverse reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Keep filtered reads" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken2 database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Memory usage (MB)" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "When working with metagenomes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "PacBio reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ONT Basecalling model" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "PacBio reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pilon fix list" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Run GEM workflow" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Use Flye" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Use Pilon" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Run SMETANA" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Use SPAdes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Number of threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Keep mapped reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Assembly output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Binning output" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Community GEM output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken2 reports" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Read filtering output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Read filtering output" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s11r2b43 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs COMPSS_PYTHON_VERSION=3.8.2 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=30498011 SLURM_JOB_USER=bsc19959 COMPSS_HOME=/apps/COMPSs/3.3.pr/ SLURM_JOB_UID=4373 SLURM_SUBMIT_DIR=/gpfs/scratch/bsc19/bsc19959/randomsvd SLURM_JOB_NODELIST=s11r2b[43-44] SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48(x2) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login1 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=2 COMPSS_MASTER_NODE=s11r2b43 COMPSS_WORKER_NODES= s11r2b44" ; + schema1:endTime "2023-11-02T10:54:22+00:00" ; + schema1:instrument ; + schema1:name "COMPSs random_svd_compss.py execution at marenostrum4 with JOB_ID 30498011" ; + schema1:result ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.2.rc2310" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3183 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 482 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 1456 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Fernando Cruz (CNAG)" . + + a schema1:Person ; + schema1:name "Francisco Camara (CNAG)" . + + a schema1:Person ; + schema1:name "Tyler Alioto (CNAG)" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/parallel-accession-download/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-variation-reporting/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Bpipe" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CPAT_header_tab" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GRCh38_p13_genome_fa_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pfam-A_hmm_dat_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pfam-A_hmm_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "active_site_dat_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_annotation_gtf_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_lncRNA_transcripts_fa_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_pc_transcripts_fa_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_transcripts_fa_gz" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Mitogenome-assembly-VGP0/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-atac-cutandrun_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-atac-cutandrun_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-pe_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-pe_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-sr_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-sr_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/612" ; + schema1:name "Zafran Hussain Shah" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/galaxyproject/iwc/actions/workflows/workflow_test.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s02r2b54 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=svd_lanczos COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=31198136 SLURM_JOB_USER=bsc19756 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=4486 SLURM_SUBMIT_DIR=/gpfs/scratch/bsc19/bsc19756/Variable_nsv_lanczos SLURM_JOB_NODELIST=s02r2b[54,56,58] SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48(x3) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login1 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=3 COMPSS_MASTER_NODE=s02r2b54 COMPSS_WORKER_NODES= s02r2b56 s02r2b58" ; + schema1:endTime "2023-12-19T11:21:53+00:00" ; + schema1:instrument ; + schema1:name "COMPSs lanczos_dislib_version.py execution at marenostrum4 with JOB_ID 31198136" ; + schema1:result ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 4624 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 487 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 898 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "David F. Nieuwenhuijse" . + + a schema1:Person ; + schema1:name "Alexey Sokolov" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Clinical Bioinformatics Unit" . + + a schema1:Person ; + schema1:name "Pathology Department" . + + a schema1:Person ; + schema1:name "Eramus Medical Center" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Roberto Melero" . + + a schema1:Person ; + schema1:name "Marta Martinez" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/83" ; + schema1:name "Carlos Oscar Sorzano Sanchez" . 
+ + a schema1:ComputerLanguage ; + schema1:name "Scipion" ; + schema1:url . + + a schema1:Person ; + schema1:name "Tim Dudgeon" . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:Person ; + schema1:name "Gianmauro Cuccuru" . + + a schema1:Person ; + schema1:name "Björn Grüning" . + + a schema1:Person ; + schema1:name "Rachael Skyner" . + + a schema1:Person ; + schema1:name "Jack Scantlebury" . + + a schema1:Person ; + schema1:name "Susan Leung" . + + a schema1:Person ; + schema1:name "Frank von Delft" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bits_set" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blacklist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bwt_algorithm" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chromosome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_lratio" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_exomeDepth" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_manta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_bf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_exome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_mapping_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "read_group" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_bwa_mem" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastqc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_samtools" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "html_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "json_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_all" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_bam_filtering" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_codex" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_exomedepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_manta" . + + a schema1:Person ; + schema1:name "Daniel López-López" . + + a schema1:ComputerLanguage ; + schema1:name "Bpipe" ; + schema1:url . + + a schema1:Person ; + schema1:name "Dannon Baker" . + + a schema1:Person ; + schema1:name "Björn Grüning" . + + a schema1:Person ; + schema1:name "Delphine Larivière" . + + a schema1:Person ; + schema1:name "Gildas Le Corguillé" . + + a schema1:Person ; + schema1:name "Andrew Lonie" . + + a schema1:Person ; + schema1:name "Nicholas Keener" . + + a schema1:Person ; + schema1:name "Sergei Kosakovsky Pond" . + + a schema1:Person ; + schema1:name "Wolfgang Maier" . + + a schema1:Person ; + schema1:name "Anton Nekrutenko" . + + a schema1:Person ; + schema1:name "James Taylor" . + + a schema1:Person ; + schema1:name "Steven Weaver" . + + a schema1:Person ; + schema1:name "Marius van den Beek" . + + a schema1:Person ; + schema1:name "Dave Bouvier" . + + a schema1:Person ; + schema1:name "John Chilton" . + + a schema1:Person ; + schema1:name "Nate Coraor" . + + a schema1:Person ; + schema1:name "Frederik Coppens" . + + a schema1:Person ; + schema1:name "Bert Droesbeke" . + + a schema1:Person ; + schema1:name "Ignacio Eguinoa" . + + a schema1:Person ; + schema1:name "Simon Gladman" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "IndexName" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "alignments_are_sorted" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bankfile" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "barcode_tag" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "base_correction" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bonferroni" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bq2_handling" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "call_indels" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cancer" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cancerSamples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "canon" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "classic" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "comment" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "count" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "csvFile" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "def_alt_bq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "def_alt_jq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "defqual" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "del_baq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "disable_trim_poly_g" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "duplicate_scoring_strategy" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "empty_text" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_source_qual" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exclude_unmapped" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "extractFields" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "filterInterval" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "force_polyg_tail_trimming" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "formatEff" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "geneId" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "genome_reference" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hgvs" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "html_report_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ignore_vcf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "illumina_1_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "importGenome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "interval" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "keepflags" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lof" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "max_depth_cov" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "max_mapping_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_alt_bq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_alt_jq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_bq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_cov" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_jq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_length_required" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_mq" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "motif" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "nextProt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "noGenome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "noHgvs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "noLof" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "noMotif" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "noNextProt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "noShiftHgvs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "noStats" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_EffectType" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_baq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_default_filter" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_downstream" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_ext_base_alignment_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_idaq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_intergenic" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_intron" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_mapping_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_upstream" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_utr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "oicr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "onlyProtein" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "onlyReg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "only_indels" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "optical_duplicate_pixel_distance" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "outputFormat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pvalue_cutoff" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qualified_phred_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reads_forward" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reads_reverse" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_in" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "region" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "remove_duplicates" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "replace_non_match" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "separator" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sequenceOntology" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sort_order" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "spliceRegionExonSize" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "spliceRegionIntronMax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "spliceRegionIntronMin" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "spliceSiteSize" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "strict" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_lf_call" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "transcripts" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "udLength" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "unqualified_phred_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "use_orphan" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "validation_stringency" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "validation_stringency_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "multiqc_fastp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "multiqc_markdups" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "multiqc_samtoolsstats" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "out_snpsift" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "statsFile_snpeff" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stats_bam" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "All the Sarek team" . + + a schema1:Person ; + schema1:name "nf-core comunity and people in the IMPaCT-Data project." . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/parallel-accession-download/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-atac-cutandrun_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-atac-cutandrun_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-pe_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-pe_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-sr_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-sr_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Muon Spectroscopy Computational Project" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Copper-out.cell" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Copper.castep" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Copper.den_fmt" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "endpoint" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "query_file" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mutations_list" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step00_cat_pdb_input_structure2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step00_cat_pdb_output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_reduce_remove_hydrogens_input_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_reduce_remove_hydrogens_output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step100_make_ndx_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step100_make_ndx_output_ndx_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_mdrun_min_output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_mdrun_min_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_mdrun_min_output_log_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_mdrun_min_output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_grompp_nvt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_grompp_nvt_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_mdrun_nvt_output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_mdrun_nvt_output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_mdrun_nvt_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_mdrun_nvt_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_mdrun_nvt_output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_grompp_npt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_grompp_npt_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_mdrun_npt_output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_mdrun_npt_output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_mdrun_npt_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_mdrun_npt_output_log_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_mdrun_npt_output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_grompp_md_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_grompp_md_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_mdrun_md_output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_mdrun_md_output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_mdrun_md_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_mdrun_md_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_mdrun_md_output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_gmx_image1_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_gmx_image1_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_gmx_image2_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_gmx_image2_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_gmx_trjconv_str_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_gmx_trjconv_str_output_str_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_extract_molecule_output_molecule_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_gmx_energy_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_gmx_energy_output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_gmx_rgyr_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_gmx_rgyr_output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_rmsd_first_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_rmsd_first_output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_rmsd_exp_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_rmsd_exp_output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_grompp_md_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_grompp_md_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_fix_side_chain_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_mutate_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_pdb2gmx_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_pdb2gmx_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_pdb2gmx_output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_editconf_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_editconf_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_solvate_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_solvate_output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_grompp_genion_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_grompp_genion_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_genion_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_genion_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_genion_output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_grompp_min_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_grompp_min_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Collected Simulation Data" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Sample" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sra-manifest-to-concatenated-fastqs/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Milad Miladi" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Milad Miladi" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fast5-Signals-Raw.tar.gz" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/fragment-based-docking-scoring/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "RECETOX SpecDat" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/753" ; + schema1:name "Zargham Ahmad" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CPAT_header_tab" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GRCh38_p13_genome_fa_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pfam-A_hmm_dat_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pfam-A_hmm_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "active_site_dat_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_annotation_gtf_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_lncRNA_transcripts_fa_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_pc_transcripts_fa_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_transcripts_fa_gz" . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s01r1b41 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=kmeans_prov COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 COMPSS_BINDINGS_DEBUG=1 SLURM_JOB_ID=30650595 SLURM_JOB_USER=bsc19057 COMPSS_HOME=/apps/COMPSs/3.3.pr/ SLURM_JOB_UID=2952 SLURM_SUBMIT_DIR=/gpfs/home/bsc19/bsc19057/COMPSs-DP/tutorial_apps/java/kmeans SLURM_JOB_NODELIST=s01r1b41 SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48 COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=1 COMPSS_MASTER_NODE=s01r1b41 COMPSS_WORKER_NODES=" ; + schema1:endTime "2023-11-10T13:57:34+00:00" ; + schema1:instrument ; + schema1:name "COMPSs KMeans.java execution at marenostrum4 with JOB_ID 30650595" ; + schema1:result ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.2.rc2311" . + + a schema1:MediaObject ; + schema1:contentSize 811 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 1802 ; + schema1:description "Auxiliary File" ; + schema1:name "Readme" . + + a schema1:MediaObject ; + schema1:contentSize 10022 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "application/java-archive" ; + schema1:name "kmeans.jar" . + + a schema1:MediaObject ; + schema1:contentSize 4349 ; + schema1:description "Auxiliary File" ; + schema1:name "pom.xml" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 6565 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "KMeansDataSet.java" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1638 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "KMeansItf.java" . + + a schema1:MediaObject ; + schema1:contentSize 289 ; + schema1:description "Auxiliary File" ; + schema1:name "project.xml" . + + a schema1:MediaObject ; + schema1:contentSize 983 ; + schema1:description "Auxiliary File" ; + schema1:name "resources.xml" . + + a schema1:MediaObject ; + schema1:contentSize 189 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 2801 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_Velocyto-on10X-filtered-barcodes_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_Velocyto-on10X-filtered-barcodes_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/velocyto/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_Velocyto-on10X-from-bundled_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_Velocyto-on10X-from-bundled_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/velocyto/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Darwin MacBook-Pro-Raul-2018.local 23.4.0 Darwin Kernel Version 23.4.0: Fri Mar 15 00:11:05 PDT 2024; root:xnu-10063.101.17~1/RELEASE_X86_64 x86_64" ; + schema1:endTime "2024-04-30T10:49:33+00:00" ; + schema1:instrument ; + schema1:name "COMPSs matmul_files.py execution at MacBook-Pro-Raul-2018.local" ; + schema1:object , + , + , + , + , + , + , + , + , + , + , + ; + schema1:result , + , + , + , + ; + schema1:startTime "2024-04-30T10:49:25+00:00" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3.rc2402" . + + a schema1:MediaObject ; + schema1:contentSize 244 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1627 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "matmul_tasks.py" . + + a schema1:MediaObject ; + schema1:contentSize 170 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2279 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "matmul_reproducibility.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:Person ; + schema1:name "Special thanks to Sujeevan Ratnasingham and the team at CBG for the creation of the BCDM data exchange format that this pipeline operates on" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s18r2b06 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=wordcount_files COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 COMPSS_BINDINGS_DEBUG=1 SLURM_JOB_ID=30498188 SLURM_JOB_USER=bsc19057 COMPSS_HOME=/apps/COMPSs/3.3.pr/ SLURM_JOB_UID=2952 SLURM_SUBMIT_DIR=/gpfs/home/bsc19/bsc19057/COMPSs-DP/tutorial_apps/python/wordcount SLURM_JOB_NODELIST=s18r2b06 SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48 COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=1 COMPSS_MASTER_NODE=s18r2b06 COMPSS_WORKER_NODES=" ; + schema1:endTime "2023-11-02T10:55:02+00:00" ; + schema1:instrument ; + schema1:name "COMPSs wordcount.py execution at marenostrum4 with JOB_ID 30498188" ; + schema1:object , + , + , + ; + schema1:result ; + schema1:subjectOf "https://userportal.bsc.es/" . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.2.rc2310" . + + a schema1:MediaObject ; + schema1:contentSize 569 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 778 ; + schema1:description "Auxiliary File" ; + schema1:name "README" . + + a schema1:MediaObject ; + schema1:contentSize 4486 ; + schema1:description "Auxiliary File" ; + schema1:name "pom.xml" . + + a schema1:MediaObject ; + schema1:contentSize 284 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 1553 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/445" ; + schema1:name "Diego De Panis" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_grompp_npt_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_grompp_md_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_mdrun_md_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_pdb_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_editconf_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_grompp_genion_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_genion_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_grompp_min_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_make_ndx_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_grompp_nvt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "whole workflow output" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject ; + schema1:contentSize 177 . + + a schema1:MediaObject ; + schema1:contentSize 88 . + + a schema1:MediaObject ; + schema1:contentSize 274 . 
+ + a schema1:MediaObject ; + schema1:contentSize 994 . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 21291 . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/cutandrun/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Nathaniel Butterworth" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/141" ; + schema1:name "Tracy Chew" . + + a schema1:ComputerLanguage ; + schema1:name "Shell Script" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step00_extract_molecule_input_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step00_extract_molecule_output_molecule_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_cat_pdb_input_structure2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_cat_pdb_output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_sander_mdrun_energy_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_sander_mdrun_energy_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_sander_mdrun_energy_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_sander_mdrun_energy_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_process_minout_energy_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_process_minout_energy_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_sander_mdrun_warm_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_sander_mdrun_warm_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_sander_mdrun_warm_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_sander_mdrun_warm_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_process_mdout_warm_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_process_mdout_warm_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_sander_mdrun_nvt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_sander_mdrun_nvt_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_sander_mdrun_nvt_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_sander_mdrun_nvt_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_process_mdout_nvt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_process_mdout_nvt_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_sander_mdrun_npt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_sander_mdrun_npt_output_log_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_sander_mdrun_npt_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_sander_mdrun_npt_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_process_mdout_npt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_process_mdout_npt_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_sander_mdrun_md_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_sander_mdrun_md_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_sander_mdrun_md_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_sander_mdrun_md_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_rmsd_first_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_rmsd_first_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_pdb4amber_run_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_rmsd_exp_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_rmsd_exp_output_cpptraj_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_cpptraj_rgyr_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_cpptraj_rgyr_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_cpptraj_image_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_cpptraj_image_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_leap_gen_top_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_leap_gen_top_input_frcmod_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_leap_gen_top_input_lib_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_leap_gen_top_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_leap_gen_top_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_leap_gen_top_output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_sander_mdrun_minH_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_sander_mdrun_minH_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_sander_mdrun_minH_output_rst_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_sander_mdrun_minH_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_process_minout_minH_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_process_minout_minH_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_sander_mdrun_min_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_sander_mdrun_min_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_sander_mdrun_min_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_sander_mdrun_min_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_process_minout_min_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_process_minout_min_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_amber_to_pdb_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_leap_solvate_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_leap_solvate_input_frcmod_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_leap_solvate_input_lib_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_leap_solvate_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_leap_solvate_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_leap_solvate_output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_leap_add_ions_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_leap_add_ions_input_frcmod_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_leap_add_ions_input_lib_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_leap_add_ions_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_leap_add_ions_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_leap_add_ions_output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_molecule_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_fpocket_select_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_fpocket_select_input_pockets_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_fpocket_select_output_pocket_pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_fpocket_select_output_pocket_pqr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_box_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_box_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_babel_convert_prep_lig_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_babel_convert_prep_lig_input_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_babel_convert_prep_lig_output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_str_check_add_hydrogens_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_str_check_add_hydrogens_input_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_str_check_add_hydrogens_output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_autodock_vina_run_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_autodock_vina_run_output_pdbqt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_babel_convert_pose_pdb_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_babel_convert_pose_pdb_output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pocket_pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pocket_pqr" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdbqt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a schema1:Person ; + schema1:name "" . + + a schema1:Person ; + schema1:name "Jesse van Dam" . + + a schema1:Person ; + schema1:name "Peter Schaap" . + + a schema1:Person ; + schema1:name "" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reverse read length" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Forward primer" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "forward reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reference database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reverse read length" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reverse primer" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reverse reads" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Sample name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "files_to_folder_fastqc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "files_to_folder_ngtax" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output Destination" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Contamination reference file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "identifier used" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Keep mapped reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken2 database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Maximum memory in MB" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Nanopore reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CWL base step number" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Number of threads" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filtered nanopore reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filtering reports folder" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "COMPSs" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_36" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_37" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_40" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myamber_to_pdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_image.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_rgyr.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_rms.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb.pdb" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb4amber_run.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_mdout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_minout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cpout" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cprst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.mdinfo" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.netcdf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.rst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.trj" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:Person ; + schema1:name "ABRomics" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/bacterial_genome_annotation/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:Person ; + schema1:name "ERGA" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lineage" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "assembly_gfa" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "consensus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output1" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/445" ; + schema1:name "Diego De Panis" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/galaxyproject/iwc/actions/workflows/workflow_test.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "champbloc_ivr.csv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "champbloc_qecb.csv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ficheterrain.csv" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Dannon Baker" . + + a schema1:Person ; + schema1:name "Björn Grüning" . + + a schema1:Person ; + schema1:name "Delphine Larivière" . + + a schema1:Person ; + schema1:name "Gildas Le Corguillé" . + + a schema1:Person ; + schema1:name "Andrew Lonie" . + + a schema1:Person ; + schema1:name "Nicholas Keener" . + + a schema1:Person ; + schema1:name "Sergei Kosakovsky Pond" . + + a schema1:Person ; + schema1:name "Wolfgang Maier" . + + a schema1:Person ; + schema1:name "Anton Nekrutenko" . + + a schema1:Person ; + schema1:name "James Taylor" . + + a schema1:Person ; + schema1:name "Steven Weaver" . + + a schema1:Person ; + schema1:name "Marius van den Beek" . + + a schema1:Person ; + schema1:name "Dave Bouvier" . + + a schema1:Person ; + schema1:name "John Chilton" . + + a schema1:Person ; + schema1:name "Nate Coraor" . + + a schema1:Person ; + schema1:name "Frederik Coppens" . + + a schema1:Person ; + schema1:name "Bert Droesbeke" . + + a schema1:Person ; + schema1:name "Ignacio Eguinoa" . 
+ + a schema1:Person ; + schema1:name "Simon Gladman" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "poterlowicz-lab" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "poterlowicz-lab" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Normal_r1.fastq.gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Normal_r2.fastq.gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Tumor_r1.fastq.gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Tumor_r2.fastq.gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "capture_targets_chr5_12_17.bed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "out_chr_sorted_circos" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "out_ratio_log2_circos" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_png" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Occurrence.csv" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . 
+ + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "barcodes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "binning_method" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "block_size" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "clip_max" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "clusterDataR" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "featureSelectionDataR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "features" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "filterDataR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "findNeighborsR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "find_markersR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "k" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "loadDataR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "loess_span" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "margin" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "matrix" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "minCells" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "minFeatures" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "nCountRNAmax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "nCountRNAmin" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "nFeatureRNAmax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "nFeatureRNAmin" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "neighbors_method" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "normalization_method" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "normalizeDataR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "num_bin" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "num_features" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pattern" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "percentMTmax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "percentMTmin" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "projectName" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "runPCAR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "runTSNER" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "runUmapR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "running_step" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "scaleDataR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "scale_factor" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "selection_method" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "verbose" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "clusterDataOutput" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "filterDataOutput" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "filterDataPlot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "findFeaturesOutput" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "findFeaturesPlot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "findMarkersOutput" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "findNeighborsOutput" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "loadDataOutput" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "loadDataPlot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "normalizeDataOutput" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "runPCAOutput" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "runPCAPlot1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "runPCAPlot2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "runPCAPlot3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "runTSNEOutput" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "runUMAPOutput" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "runUMAPOutputPlot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "scaleDataOutput" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/galaxyproject/iwc/actions/workflows/workflow_test.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/cutandrun/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "RMD" ; + schema1:name "R markdown" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Amplicons" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:Person ; + schema1:name "Laure Quintric" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/145" ; + schema1:name "Alexandre Cormier" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/146" ; + schema1:name "Patrick Durand" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/148" ; + schema1:name "Laura Leroi" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Data_R1.fastq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Data_R2.fastq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken_taxonomy_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken_taxonomy_table_modified" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Taxonomic_prediction_report" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/protein-ligand-complex-parameterization/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/159" ; + schema1:name "Oliver Woolland" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux gs18r2b72 5.14.0-284.30.1.el9_2.x86_64 #1 SMP PREEMPT_DYNAMIC Fri Aug 25 09:13:12 EDT 2023 x86_64 x86_64 x86_64 GNU/Linux" ; + schema1:endTime "2024-08-02T08:27:39+00:00" ; + schema1:instrument ; + schema1:name "COMPSs Matmul.java execution at marenostrum5 with JOB_ID 4500562" ; + schema1:object , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:result , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:startTime "2024-08-02T08:27:36+00:00" ; + schema1:subjectOf "https://userportal.bsc.es/" . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3.rc2408" . + + a schema1:MediaObject ; + schema1:contentSize 346 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 9090 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "application/java-archive" ; + schema1:name "matmul.jar" . + + a schema1:MediaObject ; + schema1:contentSize 4804 ; + schema1:description "Auxiliary File" ; + schema1:name "pom.xml" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 4189 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Block.java" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 940 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "MatmulImpl.java" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1464 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "MatmulItf.java" . + + a schema1:MediaObject ; + schema1:contentSize 4540 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "Block.class" . + + a schema1:MediaObject ; + schema1:contentSize 4508 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "Matmul.class" . + + a schema1:MediaObject ; + schema1:contentSize 812 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "MatmulImpl.class" . 
+ + a schema1:MediaObject ; + schema1:contentSize 642 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "MatmulItf.class" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:description "Auxiliary File" ; + schema1:name ".gitkeep" . + + a schema1:MediaObject ; + schema1:contentSize 27687 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "application/java-archive" ; + schema1:name "compss-api-2.8.rc2101.jar" . + + a schema1:MediaObject ; + schema1:contentSize 9090 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "application/java-archive" ; + schema1:name "matmul.jar" . + + a schema1:MediaObject ; + schema1:contentSize 129 ; + schema1:description "Auxiliary File" ; + schema1:name "pom.properties" . + + a schema1:MediaObject ; + schema1:contentSize 110 ; + schema1:description "Auxiliary File" ; + schema1:name "createdFiles.lst" . + + a schema1:MediaObject ; + schema1:contentSize 410 ; + schema1:description "Auxiliary File" ; + schema1:name "inputFiles.lst" . + + a schema1:MediaObject ; + schema1:contentSize 289 ; + schema1:description "Auxiliary File" ; + schema1:name "project.xml" . + + a schema1:MediaObject ; + schema1:contentSize 983 ; + schema1:description "Auxiliary File" ; + schema1:name "resources.xml" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_marenostrum5_SLURM_JOB_ID_4500562" ; + schema1:contentSize 224 ; + schema1:description "COMPSs console standard error log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-4500562.err" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_marenostrum5_SLURM_JOB_ID_4500562" ; + schema1:contentSize 2674 ; + schema1:description "COMPSs console standard output log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-4500562.out" . 
+ + a schema1:MediaObject ; + schema1:contentSize 322 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 2236 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "java_matmul_reproducible_mn5.yaml" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_extract_model_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_extract_model_input_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_extract_model_output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_cpptraj_convert_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_cpptraj_convert_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_bd_run_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_bd_run_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_bd_run_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_cpptraj_rms_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_cpptraj_rms_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_dmd_run_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_dmd_run_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_dmd_run_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_cpptraj_rms_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_cpptraj_rms_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_nma_run_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_nma_run_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_nma_run_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_cpptraj_rms_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_cpptraj_convert_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_cpptraj_convert_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_nolb_nma_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_nolb_nma_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_cpptraj_rms_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_extract_chain_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_extract_chain_output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_cpptraj_convert_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_cpptraj_convert_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_imod_imode_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_imod_imode_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_imod_imc_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_imod_imc_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_cpptraj_rms_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_cpptraj_convert_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_cpptraj_convert_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step26_make_ndx_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step26_make_ndx_output_ndx_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step27_gmx_cluster_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step27_gmx_cluster_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step28_cpptraj_rms_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step28_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step28_cpptraj_rms_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step29_pcz_zip_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step29_pcz_zip_output_pcz_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_cpptraj_mask_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_cpptraj_mask_output_cpptraj_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step30_pcz_zip_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step30_pcz_zip_output_pcz_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step31_pcz_info_output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step32_pcz_evecs_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step32_pcz_evecs_output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step33_pcz_animate_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step33_pcz_animate_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step34_cpptraj_convert_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step34_cpptraj_convert_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step35_pcz_bfactor_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step35_pcz_bfactor_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step35_pcz_bfactor_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step36_pcz_hinges_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step36_pcz_hinges_output_json_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step37_pcz_hinges_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step37_pcz_hinges_output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step38_pcz_hinges_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step38_pcz_hinges_output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step39_pcz_stiffness_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step39_pcz_stiffness_output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_cpptraj_mask_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_cpptraj_mask_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step40_pcz_collectivity_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step40_pcz_collectivity_output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_concoord_disco_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_concoord_dist_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_concoord_dist_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_concoord_dist_output_gro_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_concoord_dist_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_concoord_disco_output_bfactor_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_concoord_disco_output_rmsd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_concoord_disco_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_cpptraj_rms_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_cpptraj_convert_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_cpptraj_convert_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_prody_anm_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_prody_anm_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_cpptraj_rms_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_ndx_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pcz_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pcz_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rmsd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_bfactor_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:name "Tim Dudgeon" . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:Person ; + schema1:name "Gianmauro Cuccuru" . + + a schema1:Person ; + schema1:name "Björn Grüning" . + + a schema1:Person ; + schema1:name "Rachael Skyner" . + + a schema1:Person ; + schema1:name "Jack Scantlebury" . + + a schema1:Person ; + schema1:name "Susan Leung" . + + a schema1:Person ; + schema1:name "Frank von Delft" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Martin Hölzer" . + + a schema1:Person ; + schema1:name "Alexandre Almeida" . + + a schema1:Person ; + schema1:name "Guillermo Rangel-Pineros and Ekaterina Sakharova" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Illumina beadchip array file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Clinical data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Microarray data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Additional network data table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GSEA edge table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GSEA node table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Module eigengene edge data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Module eigengene node data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Network data table" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "COMPSs" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Genome/bin" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output Destination (prov only)" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Identifier used" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "solver" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "number of threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CarveMe GEMs folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GEMstats" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MEMOTE outputs folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Protein files folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "SMETANA output" . + + a schema1:Person ; + schema1:name "ABRomics" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/bacterial-genome-assembly/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/ChIPseq_PE/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . 
+ + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_COVID-19-PE-ARTIC-ILLUMINA_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_COVID-19-PE-ARTIC-ILLUMINA_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Sample" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "barcodes.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "genes.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "matrix.mtx" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bits_set" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blacklist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bwt_algorithm" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chromosome" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_lratio" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_exomeDepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_manta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_bf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "generate_bwa_indexes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_q" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_exome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_mapping_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "read_group" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_amb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_ann" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_bwt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fai" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_pac" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_sa" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_bwa_mem" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastp" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastqc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_samtools" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "html_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "json_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_all" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_bam_filtering" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_exomedepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_manta" . + + a schema1:Person ; + schema1:name "Daniel López-López" . + + a schema1:Organization ; + schema1:name "Galaxy" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/generic-variant-calling-wgs-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject ; + schema1:contentSize 183 . + + a schema1:MediaObject ; + schema1:contentSize 2110 . + + a schema1:MediaObject ; + schema1:contentSize 572 . + + a schema1:MediaObject ; + schema1:contentSize 1442 . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 44222 . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:name "Tim Dudgeon" . + + a schema1:Person ; + schema1:name "Simon Bray" . 
+ + a schema1:Person ; + schema1:name "Gianmauro Cuccuru" . + + a schema1:Person ; + schema1:name "Björn Grüning" . + + a schema1:Person ; + schema1:name "Rachael Skyner" . + + a schema1:Person ; + schema1:name "Jack Scantlebury" . + + a schema1:Person ; + schema1:name "Susan Leung" . + + a schema1:Person ; + schema1:name "Frank von Delft" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s10r1b54 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=sparseLU-java-DP SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=29155949 SLURM_JOB_USER=bsc19057 COMPSS_HOME=/apps/COMPSs/3.2.pr/ SLURM_JOB_UID=2952 SLURM_SUBMIT_DIR=/gpfs/home/bsc19/bsc19057/COMPSs-DP/tutorial_apps/java/sparseLU SLURM_JOB_NODELIST=s10r1b[54,56] SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48(x2) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=2 COMPSS_MASTER_NODE=s10r1b54 COMPSS_WORKER_NODES= s10r1b56" ; + schema1:endTime "2023-06-23T13:59:37+00:00" ; + schema1:instrument ; + schema1:name "COMPSs SparseLU.java execution at marenostrum4 with JOB_ID 29155949" ; + schema1:object , + , + , + , + , + , + , + , + , + , + , + ; + schema1:result , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.1.rc2305" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1584 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 1935 ; + schema1:description "Auxiliary File" ; + schema1:name "Readme" . + + a schema1:MediaObject ; + schema1:contentSize 28758 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "application/java-archive" ; + schema1:name "sparseLU.jar" . + + a schema1:MediaObject ; + schema1:contentSize 4454 ; + schema1:description "Auxiliary File" ; + schema1:name "pom.xml" . + + a schema1:MediaObject ; + schema1:contentSize 2628 ; + schema1:description "Auxiliary File" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:MediaObject ; + schema1:contentSize 3304 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLU.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 4840 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "SparseLU.java" . + + a schema1:MediaObject ; + schema1:contentSize 2430 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLUImpl.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 4114 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "SparseLUImpl.java" . + + a schema1:MediaObject ; + schema1:contentSize 808 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLUItf.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1899 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "SparseLUItf.java" . 
+ + a schema1:MediaObject ; + schema1:contentSize 4135 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "Block.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 5589 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Block.java" . + + a schema1:MediaObject ; + schema1:contentSize 4682 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLU.class" . + + a schema1:MediaObject ; + schema1:contentSize 1310 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLUImpl.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 2431 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "SparseLUImpl.java" . + + a schema1:MediaObject ; + schema1:contentSize 904 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLUItf.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1808 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "SparseLUItf.java" . + + a schema1:MediaObject ; + schema1:contentSize 2991 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "Block.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 4345 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Block.java" . + + a schema1:MediaObject ; + schema1:contentSize 3403 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLU.class" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 4740 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "SparseLU.java" . + + a schema1:MediaObject ; + schema1:contentSize 816 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLUItf.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1529 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "SparseLUItf.java" . + + a schema1:MediaObject ; + schema1:contentSize 289 ; + schema1:description "Auxiliary File" ; + schema1:name "project.xml" . + + a schema1:MediaObject ; + schema1:contentSize 983 ; + schema1:description "Auxiliary File" ; + schema1:name "resources.xml" . + + a schema1:MediaObject ; + schema1:contentSize 695 ; + schema1:description "COMPSs command line execution command (runcompss), including flags and parameters passed" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_command_line_arguments.txt" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_sander_mdrun_eq3_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_sander_mdrun_eq3_input_mdin_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_sander_mdrun_eq3_output_log_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_sander_mdrun_eq3_output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_sander_mdrun_eq3_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_sander_mdrun_eq3_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_process_minout_eq3_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_process_minout_eq3_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_sander_mdrun_eq4_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_sander_mdrun_eq4_input_mdin_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_sander_mdrun_eq4_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_sander_mdrun_eq4_output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_sander_mdrun_eq4_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_sander_mdrun_eq4_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_process_minout_eq4_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_process_minout_eq4_output_dat_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_sander_mdrun_eq5_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_sander_mdrun_eq5_input_mdin_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_sander_mdrun_eq5_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_sander_mdrun_eq5_output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_sander_mdrun_eq5_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_sander_mdrun_eq5_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_process_minout_eq5_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_process_minout_eq5_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_sander_mdrun_eq6_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_sander_mdrun_eq6_input_mdin_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_sander_mdrun_eq6_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_sander_mdrun_eq6_output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_sander_mdrun_eq6_output_rst_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_sander_mdrun_eq6_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_process_mdout_eq6_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_process_mdout_eq6_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_sander_mdrun_eq7_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_sander_mdrun_eq7_input_mdin_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_sander_mdrun_eq7_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_sander_mdrun_eq7_output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_sander_mdrun_eq7_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_sander_mdrun_eq7_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_process_mdout_eq7_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_process_mdout_eq7_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_leap_gen_top_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_leap_gen_top_input_pdb_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_leap_gen_top_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_leap_gen_top_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_leap_gen_top_output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_sander_mdrun_eq8_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_sander_mdrun_eq8_input_mdin_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_sander_mdrun_eq8_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_sander_mdrun_eq8_output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_sander_mdrun_eq8_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_sander_mdrun_eq8_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_process_mdout_eq8_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_process_mdout_eq8_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_sander_mdrun_eq9_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_sander_mdrun_eq9_input_mdin_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_sander_mdrun_eq9_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_sander_mdrun_eq9_output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_sander_mdrun_eq9_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_sander_mdrun_eq9_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_process_mdout_eq9_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_process_mdout_eq9_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_sander_mdrun_eq10_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_sander_mdrun_eq10_input_mdin_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_sander_mdrun_eq10_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_sander_mdrun_eq10_output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_sander_mdrun_eq10_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_sander_mdrun_eq10_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step25_process_mdout_eq10_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step25_process_mdout_eq10_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step26_sander_mdrun_md_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step26_sander_mdrun_md_input_mdin_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step26_sander_mdrun_md_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step26_sander_mdrun_md_output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step26_sander_mdrun_md_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step26_sander_mdrun_md_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_leap_solvate_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_leap_solvate_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_leap_solvate_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_leap_solvate_output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_leap_add_ions_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_leap_add_ions_output_crd_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_leap_add_ions_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_leap_add_ions_output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_cpptraj_randomize_ions_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_cpptraj_randomize_ions_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_cpptraj_randomize_ions_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_parmed_hmassrepartition_output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_sander_mdrun_eq1_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_sander_mdrun_eq1_input_mdin_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_sander_mdrun_eq1_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_sander_mdrun_eq1_output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_sander_mdrun_eq1_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_sander_mdrun_eq1_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_process_minout_eq1_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_process_minout_eq1_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_sander_mdrun_eq2_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_sander_mdrun_eq2_input_mdin_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_sander_mdrun_eq2_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_sander_mdrun_eq2_output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_sander_mdrun_eq2_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_sander_mdrun_eq2_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_process_mdout_eq2_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_process_mdout_eq2_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:name "Tim Dudgeon" . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:Person ; + schema1:name "Gianmauro Cuccuru" . + + a schema1:Person ; + schema1:name "Björn Grüning" . + + a schema1:Person ; + schema1:name "Rachael Skyner" . + + a schema1:Person ; + schema1:name "Jack Scantlebury" . + + a schema1:Person ; + schema1:name "Susan Leung" . + + a schema1:Person ; + schema1:name "Frank von Delft" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . + + a schema1:Person ; + schema1:name "Dannon Baker" . + + a schema1:Person ; + schema1:name "Björn Grüning" . + + a schema1:Person ; + schema1:name "Delphine Larivière" . + + a schema1:Person ; + schema1:name "Gildas Le Corguillé" . + + a schema1:Person ; + schema1:name "Andrew Lonie" . + + a schema1:Person ; + schema1:name "Nicholas Keener" . + + a schema1:Person ; + schema1:name "Sergei Kosakovsky Pond" . + + a schema1:Person ; + schema1:name "Wolfgang Maier" . + + a schema1:Person ; + schema1:name "Anton Nekrutenko" . + + a schema1:Person ; + schema1:name "James Taylor" . + + a schema1:Person ; + schema1:name "Steven Weaver" . + + a schema1:Person ; + schema1:name "Marius van den Beek" . + + a schema1:Person ; + schema1:name "Dave Bouvier" . + + a schema1:Person ; + schema1:name "John Chilton" . + + a schema1:Person ; + schema1:name "Nate Coraor" . + + a schema1:Person ; + schema1:name "Frederik Coppens" . + + a schema1:Person ; + schema1:name "Bert Droesbeke" . + + a schema1:Person ; + schema1:name "Ignacio Eguinoa" . + + a schema1:Person ; + schema1:name "Simon Gladman" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . 
+ + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/141" ; + schema1:name "Tracy Chew" . + + a schema1:ComputerLanguage ; + schema1:name "Shell Script" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bamqc_report_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "combined_coverage" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "combined_multifasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ivar_consensus_genome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ivar_variants_tabular" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "primer_trimmed_bam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "snpeff_annotated_vcf" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/560" ; + schema1:name "Akshay Akshay" . + + a schema1:ComputerLanguage ; + schema1:alternateName "WDL" ; + schema1:identifier ; + schema1:name "Workflow Description Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chromosome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq_files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gqb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "known_indels_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "known_sites_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "readgroup_str" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_genome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sample_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gvcf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "metrics" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-sr/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "ABRomics" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/quality-and-contamination-control/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/atacseq/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Peter van Heusden" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/galaxyproject/iwc/actions/workflows/workflow_test.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/galaxyproject/iwc/actions/workflows/workflow_test.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/assembly-with-flye/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/galaxyproject/iwc/actions/workflows/workflow_test.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/cutandrun/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BridgeDB cache" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Differential gene expression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Differential miRNA expression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING identifier mapping" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mRNA expression correlation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of correlation edges to use, USE FOR TESTING ONLY" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "miRNA mRNA correlation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "miRTarBase data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of miRTarBase edges to use, USE FOR TESTING ONLY" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of cores to let MOGAMUN use" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of generation to let the genetic algorithm in MOGAMUN evolve" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The maximum size of subnetworks during postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "the minimum Jaccard Index overlap between two subnetworks to allow them to be merged" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The minimum size of subnetworks during postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of parallel runs to let MOGAMUN do, these parallel runs are combined in postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The minimum score for a STRING edge to be included in the analysis" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of STRING edges to use, USE FOR TESTING ONLY" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Variant Burden" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "full_graph" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "subnetworks" . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/445" ; + schema1:name "Diego De Panis" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_molecule_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bits_set" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blacklist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bwt_algorithm" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chromosome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_lratio" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_codex" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_exomeDepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_manta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_bf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_exome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_mapping_quality" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "read_group" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_bwa_mem" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastqc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_samtools" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "html_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "json_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_all" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_bam_filtering" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_exomedepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_manta" . + + a schema1:Person ; + schema1:name "Daniel López-López" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "List of mutations" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Collected Simulation Data" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Gareth Price" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-pe-illumina-wgs-variant-calling/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/406" ; + schema1:name "Andrey Prjibelski" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/atacseq/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rRNA filtering" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "forward reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gzip compression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "identifier used" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken2 database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "memory usage (mb)" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pacbio reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reverse reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "number of threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BAM files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "FASTQC" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filtered reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "KRAKEN2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MetaBat2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "QUAST" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "SPADES" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/atacseq/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-variation-reporting/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux gs11r2b04 5.14.0-284.30.1.el9_2.x86_64 #1 SMP PREEMPT_DYNAMIC Fri Aug 25 09:13:12 EDT 2023 x86_64 x86_64 x86_64 GNU/Linux" ; + schema1:endTime "2024-07-05T11:38:51+00:00" ; + schema1:instrument ; + schema1:name "COMPSs main.py execution at marenostrum5 with JOB_ID 3644777" ; + schema1:object , + ; + schema1:result , + , + , + ; + schema1:startTime "2024-07-05T11:37:39+00:00" ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 1388 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . 
+ + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_marenostrum5_SLURM_JOB_ID_3644777" ; + schema1:contentSize 100 ; + schema1:description "COMPSs console standard error log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-3644777.err" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_marenostrum5_SLURM_JOB_ID_3644777" ; + schema1:contentSize 1723 ; + schema1:description "COMPSs console standard output log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-3644777.out" . + + a schema1:MediaObject ; + schema1:contentSize 590 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 996 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Scaffolding-HiC-VGP8/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Payam Emami" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sra-manifest-to-concatenated-fastqs/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:name "ABRomics" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/bacterial_genome_annotation/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Gareth Price" . + + a schema1:MediaObject ; + schema1:description "Workflow documentation" . + + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/203" ; + schema1:name "Luiz Gadelha" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/371" ; + schema1:name "Lucas Cruz" . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux gs05r2b06 5.14.0-284.30.1.el9_2.x86_64 #1 SMP PREEMPT_DYNAMIC Fri Aug 25 09:13:12 EDT 2023 x86_64 x86_64 x86_64 GNU/Linux" ; + schema1:endTime "2024-06-18T13:22:31+00:00" ; + schema1:instrument ; + schema1:name "COMPSs SparseLU.java execution at marenostrum5 with JOB_ID 3166653" ; + schema1:object , + , + , + , + , + , + , + , + , + , + , + ; + schema1:result , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:startTime "2024-06-18T13:21:59+00:00" ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2208 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 1935 ; + schema1:description "Auxiliary File" ; + schema1:name "Readme" . + + a schema1:MediaObject ; + schema1:contentSize 28758 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "application/java-archive" ; + schema1:name "sparseLU.jar" . + + a schema1:MediaObject ; + schema1:contentSize 4454 ; + schema1:description "Auxiliary File" ; + schema1:name "pom.xml" . + + a schema1:MediaObject ; + schema1:contentSize 3304 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLU.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 4840 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "SparseLU.java" . + + a schema1:MediaObject ; + schema1:contentSize 2430 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLUImpl.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 4114 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "SparseLUImpl.java" . + + a schema1:MediaObject ; + schema1:contentSize 808 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLUItf.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1899 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "SparseLUItf.java" . + + a schema1:MediaObject ; + schema1:contentSize 4135 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "Block.class" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 5589 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Block.java" . + + a schema1:MediaObject ; + schema1:contentSize 4682 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLU.class" . + + a schema1:MediaObject ; + schema1:contentSize 1310 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLUImpl.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 2431 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "SparseLUImpl.java" . + + a schema1:MediaObject ; + schema1:contentSize 904 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLUItf.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1808 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "SparseLUItf.java" . + + a schema1:MediaObject ; + schema1:contentSize 2991 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "Block.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 4345 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Block.java" . + + a schema1:MediaObject ; + schema1:contentSize 3403 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLU.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 4740 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "SparseLU.java" . 
+ + a schema1:MediaObject ; + schema1:contentSize 816 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "SparseLUItf.class" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1529 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "SparseLUItf.java" . + + a schema1:MediaObject ; + schema1:contentSize 289 ; + schema1:description "Auxiliary File" ; + schema1:name "project.xml" . + + a schema1:MediaObject ; + schema1:contentSize 983 ; + schema1:description "Auxiliary File" ; + schema1:name "resources.xml" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_marenostrum5_SLURM_JOB_ID_3166653" ; + schema1:contentSize 100 ; + schema1:description "COMPSs console standard error log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-3166653.err" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_marenostrum5_SLURM_JOB_ID_3166653" ; + schema1:contentSize 32821 ; + schema1:description "COMPSs console standard output log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-3166653.out" . + + a schema1:MediaObject ; + schema1:contentSize 192 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 2474 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rnaseq_left_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rnaseq_right_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sampleName" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sars_cov_2_reference_2bit_genome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sars_cov_2_reference_genome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "indels" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "snps" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/26" ; + schema1:name "Ambarish Kumar" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-cellplex_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-cellplex_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/fastq-to-matrix-10x/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-v3_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-v3_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/fastq-to-matrix-10x/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/cutandrun/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-cellplex_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-cellplex_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/fastq-to-matrix-10x/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-v3_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_scrna-seq-fastq-to-matrix-10x-v3_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/fastq-to-matrix-10x/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "S2B_MSIL2A_20200626T095029_N0214_R079_T34VFN_20200626T123234_tar" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sentinel2_tiles_world" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "test_parcels_32635" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/49" ; + schema1:name "Anne Fouilloux" . + + a schema1:Person ; + schema1:name "Tim Dudgeon" . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/galaxyproject/iwc/actions/workflows/workflow_test.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s02r1b45 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=31507118 SLURM_JOB_USER=bsc19057 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=2952 SLURM_SUBMIT_DIR=/gpfs/home/bsc19/bsc19057/Tutorial_2024/lysozyme_in_water SLURM_JOB_NODELIST=s02r1b[45-46] SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48(x2) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=2 COMPSS_MASTER_NODE=s02r1b45 COMPSS_WORKER_NODES= s02r1b46" ; + schema1:endTime "2024-01-24T14:28:04+00:00" ; + schema1:instrument ; + schema1:name "COMPSs lysozyme_in_water.py execution at marenostrum4 with JOB_ID 31507118" ; + schema1:object , + , + , + , + , + , + ; + schema1:result , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , 
+ , + ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 2295 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 1565 ; + schema1:description "Auxiliary File" ; + schema1:name "launch_full.sh" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 8957 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water_full.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 8868 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water_full_no_mpi.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 10194 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water_full_singularity.py" . + + a schema1:MediaObject ; + schema1:contentSize 755 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 651 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s21r1b48 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=31494645 SLURM_JOB_USER=bsc19057 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=2952 SLURM_SUBMIT_DIR=/gpfs/home/bsc19/bsc19057/Tutorial_2024/clustering_comparison SLURM_JOB_NODELIST=s21r1b48,s23r1b70 SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48(x2) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=2 COMPSS_MASTER_NODE=s21r1b48 COMPSS_WORKER_NODES= s23r1b70" ; + schema1:endTime "2024-01-22T16:06:45+00:00" ; + schema1:instrument ; + schema1:name "COMPSs cc.py execution at marenostrum4 with JOB_ID 31494645" ; + schema1:result ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 8534 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 918 ; + schema1:description "Auxiliary File" ; + schema1:name "launch_cc.sh" . + + a schema1:MediaObject ; + schema1:contentSize 555 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . 
+ + a schema1:MediaObject ; + schema1:contentSize 779 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Darwin MacBook-Pro-Raul-2018.local 23.6.0 Darwin Kernel Version 23.6.0: Fri Jul 5 17:54:20 PDT 2024; root:xnu-10063.141.1~2/RELEASE_X86_64 x86_64" ; + schema1:endTime "2024-08-02T07:02:03+00:00" ; + schema1:instrument ; + schema1:name "COMPSs Matmul.java execution at MacBook-Pro-Raul-2018.local" ; + schema1:object , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:result , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:startTime "2024-08-02T07:01:59+00:00" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3.rc2407" . + + a schema1:MediaObject ; + schema1:contentSize 342 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 9090 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "application/java-archive" ; + schema1:name "matmul.jar" . 
+ + a schema1:MediaObject ; + schema1:contentSize 4804 ; + schema1:description "Auxiliary File" ; + schema1:name "pom.xml" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 4189 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Block.java" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 940 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "MatmulImpl.java" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1464 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "MatmulItf.java" . + + a schema1:MediaObject ; + schema1:contentSize 4540 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "Block.class" . + + a schema1:MediaObject ; + schema1:contentSize 4508 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "Matmul.class" . + + a schema1:MediaObject ; + schema1:contentSize 812 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "MatmulImpl.class" . + + a schema1:MediaObject ; + schema1:contentSize 642 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "Java .class" ; + schema1:name "MatmulItf.class" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:description "Auxiliary File" ; + schema1:name ".gitkeep" . + + a schema1:MediaObject ; + schema1:contentSize 27687 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "application/java-archive" ; + schema1:name "compss-api-2.8.rc2101.jar" . + + a schema1:MediaObject ; + schema1:contentSize 9090 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat , + "application/java-archive" ; + schema1:name "matmul.jar" . 
+ + a schema1:MediaObject ; + schema1:contentSize 129 ; + schema1:description "Auxiliary File" ; + schema1:name "pom.properties" . + + a schema1:MediaObject ; + schema1:contentSize 110 ; + schema1:description "Auxiliary File" ; + schema1:name "createdFiles.lst" . + + a schema1:MediaObject ; + schema1:contentSize 410 ; + schema1:description "Auxiliary File" ; + schema1:name "inputFiles.lst" . + + a schema1:MediaObject ; + schema1:contentSize 289 ; + schema1:description "Auxiliary File" ; + schema1:name "project.xml" . + + a schema1:MediaObject ; + schema1:contentSize 983 ; + schema1:description "Auxiliary File" ; + schema1:name "resources.xml" . + + a schema1:MediaObject ; + schema1:contentSize 199 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 2219 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "java_matmul_reproducible.yaml" . + + a schema1:Person ; + schema1:name "Milad Miladi" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "draft.fa" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fast5_files.tar.gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reads.fasta" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GROMACS grompp configuration dictionary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GROMACS grompp configuration dictionary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GROMACS mdrun configuration dictionary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Editconf configuration dictionary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GROMACS grompp configuration dictionary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Genion configuration dictionary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GROMACS grompp configuration dictionary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GROMACS make_ndx configuration dictionary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GROMACS grompp configuration dictionary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "whole workflow output" . + + a schema1:Person ; + schema1:name "Payam Emami" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Scipion" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:Person ; + schema1:name "The workflow is based on the Galaxy Training tutorial Analyses of metagenomics data. Thank you to the Galaxy Australia team" . + + a schema1:Person ; + schema1:name "Igor Makunin and Mike Thang for help with the workflow" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Darwin MacBook-Pro-Raul-2018.local 23.5.0 Darwin Kernel Version 23.5.0: Wed May 1 20:09:52 PDT 2024; root:xnu-10063.121.3~5/RELEASE_X86_64 x86_64" ; + schema1:endTime "2024-06-18T13:37:29+00:00" ; + schema1:instrument ; + schema1:name "COMPSs matmul_directory.py execution at MacBook-Pro-Raul-2018.local" ; + schema1:object , + , + ; + schema1:result , + ; + schema1:startTime "2024-06-18T13:37:20+00:00" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3.rc2402" . + + a schema1:MediaObject ; + schema1:contentSize 244 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 628 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "DIRECTORY_example.yaml" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1966 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "matmul_tasks.py" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.0" . + + a schema1:MediaObject ; + schema1:contentSize 11372 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 918 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "AsyncPlotter.py" . + + a schema1:MediaObject ; + schema1:contentSize 559 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "Config.py" . + + a schema1:MediaObject ; + schema1:contentSize 2166 ; + schema1:description "Auxiliary File" ; + schema1:name "IA_Ealloc.c" . + + a schema1:MediaObject ; + schema1:contentSize 12895 ; + schema1:description "Auxiliary File" ; + schema1:name "IA_Kdiag.c" . + + a schema1:MediaObject ; + schema1:contentSize 4171 ; + schema1:description "Auxiliary File" ; + schema1:name "IA_Kdiag.h" . 
+ + a schema1:MediaObject ; + schema1:contentSize 6634 ; + schema1:description "Auxiliary File" ; + schema1:name "IA_R2upd.c" . + + a schema1:MediaObject ; + schema1:contentSize 1699 ; + schema1:description "Auxiliary File" ; + schema1:name "LICENSE" . + + a schema1:MediaObject ; + schema1:contentSize 819 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "LocalCC.py" . + + a schema1:MediaObject ; + schema1:contentSize 29992 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "NLLGrid.py" . + + a schema1:MediaObject ; + schema1:contentSize 3411 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "RosenbergerAlgorithm.py" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "__init__.py" . + + a schema1:MediaObject ; + schema1:contentSize 44 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "_version.py" . + + a schema1:MediaObject ; + schema1:contentSize 9838 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "bp_types.py" . + + a schema1:MediaObject ; + schema1:contentSize 4798 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "bt2eventdata.py" . + + a schema1:MediaObject ; + schema1:contentSize 10085 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "btbb.py" . + + a schema1:MediaObject ; + schema1:contentSize 18148 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "btbb_distrostream.py" . + + a schema1:MediaObject ; + schema1:contentSize 2155 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "btbb_utils.py" . 
+ + a schema1:MediaObject ; + schema1:contentSize 319 ; + schema1:description "Parameters passed as arguments to the COMPSs application through the command line" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_command_line_arguments.txt" . + + a schema1:MediaObject ; + schema1:contentSize 3586 ; + schema1:description "Auxiliary File" ; + schema1:name "configspec.conf" . + + a schema1:MediaObject ; + schema1:contentSize 13075 ; + schema1:description "Auxiliary File" ; + schema1:name "coord_convert.c" . + + a schema1:MediaObject ; + schema1:contentSize 2224 ; + schema1:description "Auxiliary File" ; + schema1:name "coord_convert.h" . + + a schema1:MediaObject ; + schema1:contentSize 4168 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "ellipsoid.py" . + + a schema1:MediaObject ; + schema1:contentSize 2419 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "generate_signal.py" . + + a schema1:MediaObject ; + schema1:contentSize 1055 ; + schema1:description "Auxiliary File" ; + schema1:name "geo.h" . + + a schema1:MediaObject ; + schema1:contentSize 1165 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "grid_projection.py" . + + a schema1:MediaObject ; + schema1:contentSize 1259 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "group_triggers.py" . + + a schema1:MediaObject ; + schema1:contentSize 1232 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "init_filter.py" . + + a schema1:MediaObject ; + schema1:contentSize 171 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "input_parameters.py" . + + a schema1:MediaObject ; + schema1:contentSize 519 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lib_names.py" . 
+ + a schema1:MediaObject ; + schema1:contentSize 4049 ; + schema1:description "Auxiliary File" ; + schema1:name "lib_rec_cc.c" . + + a schema1:MediaObject ; + schema1:contentSize 2512 ; + schema1:description "Auxiliary File" ; + schema1:name "lib_rec_filter.c" . + + a schema1:MediaObject ; + schema1:contentSize 1786 ; + schema1:description "Auxiliary File" ; + schema1:name "lib_rec_hos.c" . + + a schema1:MediaObject ; + schema1:contentSize 892 ; + schema1:description "Auxiliary File" ; + schema1:name "lib_rec_rms.c" . + + a schema1:MediaObject ; + schema1:contentSize 9038 ; + schema1:description "Auxiliary File" ; + schema1:name "map_project.c" . + + a schema1:MediaObject ; + schema1:contentSize 269 ; + schema1:description "Auxiliary File" ; + schema1:name "map_project.h" . + + a schema1:MediaObject ; + schema1:contentSize 2282 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "map_project.py" . + + a schema1:MediaObject ; + schema1:contentSize 6658 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "mbf_plot.py" . + + a schema1:MediaObject ; + schema1:contentSize 14692 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "mod_btbb.py" . + + a schema1:MediaObject ; + schema1:contentSize 12605 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "mod_btbb_orig.py" . + + a schema1:MediaObject ; + schema1:contentSize 11696 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "mod_filter_picker.py" . + + a schema1:MediaObject ; + schema1:contentSize 1286 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "mod_group_trigs.py" . + + a schema1:MediaObject ; + schema1:contentSize 7422 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "mod_setup.py" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2493 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "mod_utils.py" . + + a schema1:MediaObject ; + schema1:contentSize 18536 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "plot.py" . + + a schema1:MediaObject ; + schema1:contentSize 2556 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "readNLL_grid.py" . + + a schema1:MediaObject ; + schema1:contentSize 966 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "read_grids.py" . + + a schema1:MediaObject ; + schema1:contentSize 7812 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "read_traces.py" . + + a schema1:MediaObject ; + schema1:contentSize 2053 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "rec_cc.py" . + + a schema1:MediaObject ; + schema1:contentSize 4537 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "rec_filter.py" . + + a schema1:MediaObject ; + schema1:contentSize 1115 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "rec_gauss_filter.py" . + + a schema1:MediaObject ; + schema1:contentSize 2526 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "rec_hos.py" . + + a schema1:MediaObject ; + schema1:contentSize 973 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "rec_memory.py" . + + a schema1:MediaObject ; + schema1:contentSize 1562 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "rec_rms.py" . 
+ + a schema1:MediaObject ; + schema1:contentSize 4069 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "recursive_cc.py" . + + a schema1:MediaObject ; + schema1:contentSize 845 ; + schema1:description "Auxiliary File" ; + schema1:name "rosenberger.c" . + + a schema1:MediaObject ; + schema1:contentSize 4750 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "rosenberger.py" . + + a schema1:MediaObject ; + schema1:contentSize 4202 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "summary_cf.py" . + + a schema1:MediaObject ; + schema1:contentSize 3466 ; + schema1:description "Auxiliary File" ; + schema1:name "util.c" . + + a schema1:MediaObject ; + schema1:contentSize 1896 ; + schema1:description "Auxiliary File" ; + schema1:name "util.h" . + + a schema1:MediaObject ; + schema1:contentSize 47273 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "validate.py" . + + a schema1:MediaObject ; + schema1:contentSize 3605 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "version.py" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "daily_barchart" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "daily_mean_timeseries" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "monthly_mean_timeseries" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stripes_daily_temperatures" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stripes_monthly_temperatures" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bits_set" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blacklist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bwt_algorithm" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_lratio" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_exomeDepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_manta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_bf" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "generate_bwa_indexes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_exome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_mapping_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "read_group" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_amb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_ann" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_bwt" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fai" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_pac" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_sa" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_bwa_mem" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastqc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_samtools" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "html_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "json_report" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_all" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_bam_filtering" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_exomedepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_manta" . + + a schema1:Person ; + schema1:name "Daniel López-López" . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "contamination reference file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "forward reads" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "identifier used" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "memory usage (mb)" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pacbio reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reverse reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filer rRNA" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "number of threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BAM files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CheckM" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "FASTQC" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filtered reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MetaBat2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "QUAST" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "SPADES" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "FilterVariantTranches_resource_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "FilterVariantTranches_resource_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "FilterVariantTranches_resource_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "VariantFiltration_cluster" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "VariantFiltration_filter_indel" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "VariantFiltration_filter_name_indel" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "VariantFiltration_filter_name_snp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "VariantFiltration_filter_snp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "VariantFiltration_window" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bcftools_norm_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bcftools_view_include_CNN_filters" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bcftools_view_include_hard_filters" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bcftools_view_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bcftoomls_norm_multiallelics" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bwa_mem_num_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bwa_mem_sec_shorter_split_hits" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gatk_splitintervals_exclude_intervalList" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gatk_splitintervals_include_intervalList" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gatk_splitintervals_scatter_count" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_file_split" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_file_split_fwd_single" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_file_split_rev" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_qc_check" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_trimming_check" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "picard_addorreplacereadgroups_rgpl" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "raw_files_directory" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_genome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_fixmate_output_format" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_fixmate_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_flagstat_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_sort_compression_level" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_sort_memory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_sort_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_cigar" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_collapsecigar" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_count" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_fastcompression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_iscram" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_randomseed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readsingroup" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readsinlibrary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readsquality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readswithbits" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readswithoutbits" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readtagtostrip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_region" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_samheader" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_uncompressed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sub_bqsr_interval_padding" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sub_bqsr_known_sites_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sub_bqsr_known_sites_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sub_bqsr_known_sites_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sub_hc_java_options" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sub_hc_native_pairHMM_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_build_over" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_convert_arg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_database_location" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_na_string" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_operation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_otherinfo" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_protocol" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_remove" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_vcfinput" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_compression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_do_not_compress" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_length" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_strigency" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_trim_suffix" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_bcftools_concat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_bcftools_norm_cnn" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_bcftools_norm_hard_filter" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_bcftools_view_filter_cnn" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_bcftools_view_hard_filter" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_paired_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_paired_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_raw_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_single_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_single_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gather_bwa_sam_files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_ApplyBQSR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_CNNScoreVariants" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_FilterVariantTranches" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_HaplotypeCaller" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_bqsr_subworkflow" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_splitintervals" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_picard_addorreplacereadgroups" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_picard_markduplicates" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_picard_markduplicates_metrics" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_fixmate" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_flagstat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_index" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_index_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_sort" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_sort_by_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_view_conversion" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_view_count_total" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_tabix_indels" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_tabix_snps" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_table_annovar_cnn_filtered_avinput" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_table_annovar_cnn_filtered_multianno_txt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_table_annovar_cnn_filtered_multianno_vcf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_table_annovar_hard_filtered_avinput" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_table_annovar_hard_filtered_multianno_txt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_table_annovar_hard_filtered_multianno_vcf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trim_galore_paired_fq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trim_galore_paired_reports" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trim_galore_single_fq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trim_galore_single_reports" . + + a schema1:Person ; + schema1:name "Miguel Roncoroni" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/145" ; + schema1:name "Alexandre Cormier" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/146" ; + schema1:name "Patrick Durand" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/148" ; + schema1:name "Laura Leroi" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/150" ; + schema1:name "Pierre Cuzin" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-atac-cutandrun_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-atac-cutandrun_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-pe_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-pe_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-sr_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-sr_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bits_set" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blacklist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chromosome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_lratio" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_codex" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_exomeDepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_manta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_bf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_exome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_mapping_quality" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "read_group" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_bwa_mem" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastqc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_samtools" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "html_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "json_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_all" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_bam_filtering" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_exomedepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_manta" . + + a schema1:Person ; + schema1:name "Daniel López-López" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_31" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fast" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Leah W Roberts" . + + a schema1:Person ; + schema1:name "Scott A Beatson " . + + a schema1:Person ; + schema1:name " Brian M Forde" . + + a schema1:Person ; + schema1:name "Minh-Duy Phan" . + + a schema1:Person ; + schema1:name "Nguyen Thi Khanh Nhu" . + + a schema1:Person ; + schema1:name "Adam D Irwin" . 
+ + a schema1:Person ; + schema1:name "Patrick N A Harris" . + + a schema1:Person ; + schema1:name "David L Paterson" . + + a schema1:Person ; + schema1:name "Mark A Schembri" . + + a schema1:Person ; + schema1:name "David M Whiley" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Alignment score from Kpax to analyse structures" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CATH family ids" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Obsolete and inconsistent CATH domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename for residue-mapped CATH domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Core domain structure (.pdb)" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Database to select to compute core average structure" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "To store all the domain-like StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "To store all failed domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename to store family ids per iteration" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Iteration number starting from 0" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Threshold for minimum domain length" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Parameter file for current iteration" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The directory for storing all PDB files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pfam family ids" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Obsolete and inconsistent Pfam domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename for residue-mapped Pfam domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CATH cross-mapped domain StIs from previous iteration" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pfam cross-mapped domain StIs from previous iteration" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Score threshold for given alignment score from Kpax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Directory for storing all SIFTS files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "To store all the true domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename to store unmapped and not properly aligned instances from CATH" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename to store unmapped but structurally well aligned instances from CATH" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename to store unmapped and not properly aligned instances from Pfam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename to store unmapped but structurally well aligned instances from Pfam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filename with alignment scores for unmapped instances" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Alignment results for CATH unmapped instances" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Alignment results for Pfam unmapped instances" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Domain-like StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Failed domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "All CATH cross-mapped domin StIs family-wise together" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "All Pfam domain StIs cross-mapped to CATH family-wise" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Alignment results from Kpax for all cross-mapped families" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Average structures per cross-mapped Pfam family for CATH StIs at family level" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Core domain StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Core domain structure (.pdb)" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CATH domain StIs cross-mapped to Pfam family-wise" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pfam domin StIs cross-mapped to CATH family-wise" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Cross-mapped families with CATH domain StIs passing the threshold" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Cross-mapped families with Pfam domain StIs passing the threshold" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Merged cross-mapped and residue-mapped domain StIs from CATH" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Merged cross-mapped and residue-mapped domain StIs from Pfam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Family ids per iteration" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Parameter file for next iteration of the workflow" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Average structures per cross-mapped CATH family for Pfam StIs at family level" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Obsolete and inconsistent domain StIs from CATH" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Obsolete and inconsistent domain StIs from Pfam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "All CATH residue-mapped domain StIs with domain labels" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "All Pfam residue-mapped domain StIs with domain labels" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "True domain StIs per iteration" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "All un-mapped domin StIs from CATH" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Failed domain StIs from CATH" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Domain-like StIs from CATH" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "All Pfam un-mapped domin StIs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Failed domain StIs from Pfam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Domain-like StIs from Pfam" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/kmer-profiling-hifi-trio-VGP2/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chrom_sizes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode.v46.annotation.gtf.gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "anndata_harmony_leiden" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "collection_without_first_entry" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "out_png" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tsse-plots" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "x_spectral_harmony" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "x_spectral_mnn-correct" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "x_spectral_scanorma" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-atac-cutandrun_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-atac-cutandrun_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-pe_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-pe_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-sr_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-sr_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Lucille Delisle" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/rnaseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Research Infrastructure RECETOX RI (No LM2018121) financed by the Ministry of Education" . + + a schema1:Person ; + schema1:name "Youth and Sports" . + + a schema1:Person ; + schema1:name "and Operational Programme Research" . + + a schema1:Person ; + schema1:name "Development and Innovation - project CETOCOEN EXCELLENCE (No CZ.02.1.01/0.0/0.0/17_043/0009632)." . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/753" ; + schema1:name "Zargham Ahmad" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sra-manifest-to-concatenated-fastqs/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "contour_levels" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "dec" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "do_cone_search" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "level_threshold" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ra" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "radius" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "t1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "t2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "asciicat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "contours" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "image" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "skymap_files" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Martin Hölzer" . + + a schema1:Person ; + schema1:name "Alexandre Almeida" . + + a schema1:Person ; + schema1:name "Guillermo Rangel-Pineros and Ekaterina Sakharova" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "add_hmms_tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hmmscan_database_dir" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "img_blast_database_dir" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_fasta_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mashmap_reference_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ncbi_tax_db_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pprmeta_simg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "virsorter_data_dir" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "virsorter_virome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blast_merged_tsvs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blast_result_filtereds" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blast_results" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "filtered_contigs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "high_confidence_contigs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "high_confidence_faa" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "krona_plot_all" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "krona_plots" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "low_confidence_contigs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "low_confidence_faa" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mashmap_hits" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "parse_prophages_contigs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "prophages_faa" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "taxonomy_assignations" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "virfinder_output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "virsorter_output_fastas" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "KNIME" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux nvb4 2.6.32-642.6.2.el6.x86_64 #1 SMP Mon Oct 24 10:22:33 EDT 2016 x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs SLURM_JOB_QOS=bsc_cs SLURM_JOB_GPUS=0,1,2,3 SLURM_MEM_PER_CPU=8000 SLURM_JOB_ID=1930260 SLURM_JOB_USER=bsc19959 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=4373 SLURM_SUBMIT_DIR=/gpfs/scratch/bsc19/bsc19959/cache SLURM_JOB_NODELIST=nvb[4-9,12-21] SLURM_JOB_CPUS_PER_NODE=16(x16) SLURM_SUBMIT_HOST=nvblogin1 SLURM_JOB_PARTITION=projects SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=16 COMPSS_MASTER_NODE=nvb4 COMPSS_WORKER_NODES= nvb5 nvb6 nvb7 nvb8 nvb9 nvb12 nvb13 nvb14 nvb15 nvb16 nvb17 nvb18 nvb19 nvb20 nvb21" ; + schema1:endTime "2024-03-22T17:53:30+00:00" ; + schema1:instrument ; + schema1:name "COMPSs cch_kmeans_test.py execution at bsc_nvidia with JOB_ID 1930260" ; + schema1:result , + ; + schema1:startTime "2024-03-22T17:29:38+00:00" ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:ComputerLanguage ; + schema1:alternateName "Dislib" ; + schema1:name "Dislib" ; + schema1:url "https://dislib.readthedocs.io/en/latest/" ; + schema1:version "0.9" . + + a schema1:MediaObject ; + schema1:contentSize 6201 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_bsc_nvidia_SLURM_JOB_ID_1930260" ; + schema1:contentSize 64 ; + schema1:description "COMPSs console standard error log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-1930260.err" . 
+ + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_bsc_nvidia_SLURM_JOB_ID_1930260" ; + schema1:contentSize 2835 ; + schema1:description "COMPSs console standard output log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-1930260.out" . + + a schema1:MediaObject ; + schema1:contentSize 731 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 1009 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/atacseq/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "energy_min.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "energy_npt.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "energy_nvt.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myeditconf.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfix_side_chain.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygenion.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygenion.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_image.xtc" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_rgyr.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_trjconv_str.xtc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygrompp.tpr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.cpt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.edr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.xtc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb2gmx.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb2gmx.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysolvate.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysolvate.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rmsd_exp.xvg" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rmsd_first.xvg" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_fpocket_select_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_fpocket_select_input_pockets_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_fpocket_select_output_pocket_pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_fpocket_select_output_pocket_pqr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_box_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_box_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_babel_convert_prep_lig_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_babel_convert_prep_lig_input_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_babel_convert_prep_lig_output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_str_check_add_hydrogens_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_str_check_add_hydrogens_input_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_str_check_add_hydrogens_output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_autodock_vina_run_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_autodock_vina_run_output_pdbqt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_babel_convert_pose_pdb_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_babel_convert_pose_pdb_output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pocket_pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pocket_pqr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdbqt_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pedigree" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "html_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "list_output_txt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "out_file1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/159" ; + schema1:name "Oliver Woolland" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/89" ; + schema1:name "Paul Brack" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/parallel-accession-download/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "barcodes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "block_size" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "features" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "filterDataR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "loadDataR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "margin" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "matrix" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "minCells" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "minFeatures" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "nCountRNAmax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "nCountRNAmin" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "nFeatureRNAmax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "nFeatureRNAmin" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "normalization_method" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "normalizeDataR" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pattern" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "percentMTmax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "percentMTmin" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "projectName" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "scale_factor" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "verbose" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11utput" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1Output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22Output" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2Output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3Output" . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux gs13r3b15 5.14.0-284.30.1.el9_2.x86_64 #1 SMP PREEMPT_DYNAMIC Fri Aug 25 09:13:12 EDT 2023 x86_64 x86_64 x86_64 GNU/Linux" ; + schema1:endTime "2024-07-18T11:10:51+00:00" ; + schema1:instrument ; + schema1:name "COMPSs lysozyme_in_water_full_no_mpi.py execution at marenostrum5 with JOB_ID 4055071" ; + schema1:object , + , + , + , + , + , + ; + schema1:result , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:startTime "2024-07-18T11:10:02+00:00" ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3.rc2407" . + + a schema1:MediaObject ; + schema1:contentSize 3278 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 613 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "FULL_NO_MPI.yaml" . + + a schema1:MediaObject ; + schema1:contentSize 1675 ; + schema1:description "Auxiliary File" ; + schema1:name "launch_full_no_mpi.sh" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 7601 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water.py" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 8973 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water_full.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 10194 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water_full_singularity.py" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_marenostrum5_SLURM_JOB_ID_4055071" ; + schema1:contentSize 224 ; + schema1:description "COMPSs console standard error log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-4055071.err" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_marenostrum5_SLURM_JOB_ID_4055071" ; + schema1:contentSize 4887 ; + schema1:description "COMPSs console standard output log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-4055071.out" . + + a schema1:MediaObject ; + schema1:contentSize 604 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:Person ; + schema1:name "Lucille Delisle" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/rnaseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CAMS-PM2_5-20211222_netcdf" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "COMPSs" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_babel_minimize_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_babel_minimize_input_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_babel_minimize_output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_acpype_params_gmx_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_acpype_params_gmx_output_path_gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_acpype_params_gmx_output_path_itp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_acpype_params_gmx_output_path_top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path_gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path_itp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path_top" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sra-manifest-to-concatenated-fastqs/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Dannon Baker" . + + a schema1:Person ; + schema1:name "Björn Grüning" . + + a schema1:Person ; + schema1:name "Delphine Larivière" . + + a schema1:Person ; + schema1:name "Gildas Le Corguillé" . + + a schema1:Person ; + schema1:name "Andrew Lonie" . + + a schema1:Person ; + schema1:name "Nicholas Keener" . + + a schema1:Person ; + schema1:name "Sergei Kosakovsky Pond" . + + a schema1:Person ; + schema1:name "Wolfgang Maier" . + + a schema1:Person ; + schema1:name "Anton Nekrutenko" . + + a schema1:Person ; + schema1:name "James Taylor" . + + a schema1:Person ; + schema1:name "Steven Weaver" . + + a schema1:Person ; + schema1:name "Marius van den Beek" . + + a schema1:Person ; + schema1:name "Dave Bouvier" . + + a schema1:Person ; + schema1:name "John Chilton" . + + a schema1:Person ; + schema1:name "Nate Coraor" . + + a schema1:Person ; + schema1:name "Frederik Coppens" . + + a schema1:Person ; + schema1:name "Bert Droesbeke" . + + a schema1:Person ; + schema1:name "Ignacio Eguinoa" . + + a schema1:Person ; + schema1:name "Simon Gladman" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Alex L Mitchell" . + + a schema1:Person ; + schema1:name "Lorna J Richardson" . + + a schema1:Person ; + schema1:name "Ekaterina Sakharova" . + + a schema1:Person ; + schema1:name "Maxim Scheremetjew" . + + a schema1:Person ; + schema1:name "Anton Korobeynikov" . + + a schema1:Person ; + schema1:name "Alex Shlemov" . 
+ + a schema1:Person ; + schema1:name "Olga Kunyavskaya" . + + a schema1:Person ; + schema1:name "Alla Lapidus" . + + a schema1:Person ; + schema1:name "Robert D Finn" . + + a schema1:Person ; + schema1:name "Alexandre Almeida" . + + a schema1:Person ; + schema1:name "Martin Beracochea" . + + a schema1:Person ; + schema1:name "Miguel Boland" . + + a schema1:Person ; + schema1:name "Josephine Burgin" . + + a schema1:Person ; + schema1:name "Guy Cochrane" . + + a schema1:Person ; + schema1:name "Michael R Crusoe" . + + a schema1:Person ; + schema1:name "Varsha Kale" . + + a schema1:Person ; + schema1:name "Simon C Potter" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "5.8s_pattern" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "5s_pattern" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CGC_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CGC_postfixes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "EggNOG_data_dir" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "EggNOG_db" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "EggNOG_diamond_db" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "HMM_gathering_bit_score" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "HMM_name_database" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "HMM_omit_alignment" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "InterProScan_applications" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "InterProScan_databases" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "InterProScan_outputFormat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Uniref90_db_txt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cgc_chunk_size" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "clusters_glossary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "contig_min_length" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "contigs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "diamond_databaseFile" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "diamond_header" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "diamond_maxTargetSeqs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "func_ann_names_hmmer" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "func_ann_names_ips" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "go_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gp_flatfiles_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "graphs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hmmsearch_header" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ips_header" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ko_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_db" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_label" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_otus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_tax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "other_ncrna_models" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pathways_classes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pathways_names" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "protein_chunk_size_IPS" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "protein_chunk_size_eggnog" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "protein_chunk_size_hmm" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rfam_model_clans" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rfam_models" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_db" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_label" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_otus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_tax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bgzip_fasta_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bgzip_index" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chunking_nucleotides" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chunking_proteins" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "completed_flag_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "compressed_files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "functional_annotation_folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hashsum_input" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "index_fasta_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_cds_flag_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_tax_flag_file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pathways_systems_folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pathways_systems_folder_antismash" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pathways_systems_folder_antismash_summary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc-statistics_folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc-status" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc_summary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rna-count" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sequence-categorisation_folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stats" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "taxonomy-summary_folder" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-se-illumina-wgs-variant-calling/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_energy_min_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_energy_min_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_gppnvt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_energy_nvt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_energy_nvt_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_gppnpt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_energy_npt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_energy_npt_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_gppmd_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_rmsfirst_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_rmsfirst_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_pdb_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_pdb_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_rmsexp_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_rmsexp_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_rgyr_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_image_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_dry_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_editconf_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_gppion_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_genion_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_gppmin_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Checkpoint file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Structures - Raw structure" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Resulting protein structure" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GROMACS topology file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Topologies GROMACS portable binary run" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Trajectories - Raw trajectory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Trajectories - Post-processed trajectory" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "System Setup Observables - Potential Energy" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "System Setup Observables - Pressure and density" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "System Setup Observables - Temperature" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Simulation Analysis" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Simulation Analysis" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Simulation Analysis" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/kmer-profiling-hifi-trio-VGP2/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "Ruby bioinformatics toolkit" ; + schema1:name "Rbbt" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Adrián Muñoz-Civico" . + + a schema1:Person ; + schema1:name "Daniel López-López" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Scaffolding-HiC-VGP8/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_36" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_37" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_40" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_41" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_42" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_43" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_44" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_45" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_46" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_47" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_48" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_49" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_50" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_51" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_52" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_53" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_54" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_55" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_56" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_57" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_58" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_59" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_60" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_61" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_62" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_63" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_64" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_65" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_66" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_67" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_68" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_randomize_ions.crd" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_randomize_ions.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.parmtop" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.parmtop" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myparmed_hmassrepartition.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_mdout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_minout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cpout" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cprst" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.mdinfo" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.nc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.ncrst" . + + a schema1:Person ; + schema1:name "Sophie Alain" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/146" ; + schema1:name "Patrick Durand" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/148" ; + schema1:name "Laura Leroi" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/150" ; + schema1:name "Pierre Cuzin" . + + a schema1:Person ; + schema1:name "Engy Nasr" . + + a schema1:Person ; + schema1:name "Bérénice Batut" . + + a schema1:Person ; + schema1:name "Paul Zierep" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/nanopore-pre-processing/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_ens_mean_0_1deg_reg_v20_0e_Paris_daily_csv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ts_cities_csv" . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/49" ; + schema1:name "Anne Fouilloux" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-decontamination-VGP9/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/445" ; + schema1:name "Diego De Panis" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pocket_pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pocket_pqr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdbqt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "workdir_array" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "final_result" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Deduplicate reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output Destination" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filter reference file(s)" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "filter rRNA" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Forward reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "identifier used" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Keep mapped reads" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken2 confidence threshold" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken2 database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken2 standard report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Maximum memory in MB" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Prepare references" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reverse reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Skip QC filtered" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Skip QC unfiltered" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output Step number" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Number of threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filtered forward read" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filtered reverse read" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken2 folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filtering reports folder" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.itp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myappend_ligand.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybabel_minimize.mol2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycat_pdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myeditconf.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_heteroatoms.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_molecule.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfix_side_chain.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygenion.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygenion.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygenrestr.itp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_energy.xvg" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_image.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_rgyr.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_rms_exp.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_trjconv_str_lig.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_trjconv_str_prot.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygrompp_ion.tpr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymake_ndx.ndx" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.cpt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.edr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.xtc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb2gmx.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb2gmx.zip" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb_prot.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myreduce_add_hydrogens.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysolvate.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysolvate.zip" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:Person ; + schema1:name "ERGA" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lineage" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/445" ; + schema1:name "Diego De Panis" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Shell Script" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/kmer-profiling-hifi-trio-VGP2/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "run_idw_interpolation_test_input1.csv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "run_idw_interpolation_test_input2.geojson" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Andrey Bliznyuk" . + + a schema1:Person ; + schema1:name "Ben Menadue" . 
+ + a schema1:Person ; + schema1:name "Rika Kobayashi" . + + a schema1:Person ; + schema1:name "Matthew Downton" . + + a schema1:Person ; + schema1:name "Yue Sun" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/141" ; + schema1:name "Tracy Chew" . + + a schema1:ComputerLanguage ; + schema1:name "Shell Script" . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-only-VGP3/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_Velocyto-on10X-filtered-barcodes_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_Velocyto-on10X-filtered-barcodes_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/velocyto/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_Velocyto-on10X-from-bundled_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_Velocyto-on10X-from-bundled_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/velocyto/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/cutandrun/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Assembly fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Bam file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BUSCO dataset" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output destination (not used in the workflow itself)" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gtdbtk data directory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Identifier used" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "memory usage (MB)" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Run SemiBin" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "SemiBin Environment" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CWL base step number" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Sub workflow Run" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Threads" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Bin files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Assembly/Bin read stats" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Bins summary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BUSCO" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CheckM" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DAS Tool" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "EukRep fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "EukRep stats" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GTDB-Tk" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MaxBin2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MetaBAT2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "SemiBin" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "endpoint" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "query_file" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux nvb4 2.6.32-642.6.2.el6.x86_64 #1 SMP Mon Oct 24 10:22:33 EDT 2016 x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs SLURM_JOB_QOS=bsc_cs SLURM_JOB_GPUS=0,1,2,3 SLURM_MEM_PER_CPU=8000 SLURM_JOB_ID=1930224 SLURM_JOB_USER=bsc19959 COMPSS_HOME=/apps/COMPSs/TrunkCTCache/ SLURM_JOB_UID=4373 SLURM_SUBMIT_DIR=/gpfs/scratch/bsc19/bsc19959/cache SLURM_JOB_NODELIST=nvb[4-9,12-21] SLURM_JOB_CPUS_PER_NODE=16(x16) SLURM_SUBMIT_HOST=nvblogin1 SLURM_JOB_PARTITION=projects SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=16 COMPSS_MASTER_NODE=nvb4 COMPSS_WORKER_NODES= nvb5 nvb6 nvb7 nvb8 nvb9 nvb12 nvb13 nvb14 nvb15 nvb16 nvb17 nvb18 nvb19 nvb20 nvb21" ; + schema1:endTime "2024-03-22T11:39:26+00:00" ; + schema1:instrument ; + schema1:name "COMPSs cch_matmul_test.py execution at bsc_nvidia with JOB_ID 1930224" ; + schema1:result , + , + , + ; + schema1:startTime "2024-03-22T11:31:38+00:00" ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:SoftwareApplication ; + schema1:name "Dislib" ; + schema1:version "0.9.x" . + + a schema1:MediaObject ; + schema1:contentSize 4112 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_bsc_nvidia_SLURM_JOB_ID_1930224" ; + schema1:contentSize 64 ; + schema1:description "COMPSs console standard error log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-1930224.err" . 
+ + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_bsc_nvidia_SLURM_JOB_ID_1930224" ; + schema1:contentSize 2864 ; + schema1:description "COMPSs console standard output log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-1930224.out" . + + a schema1:MediaObject ; + schema1:contentSize 708 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 1203 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-only-VGP3/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bits_set" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blacklist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bwt_algorithm" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chromosome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_lratio" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_exomeDepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_manta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_bf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "generate_bwa_indexes" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_exome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_mapping_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "read_group" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_amb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_ann" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_bwt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fai" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_pac" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_sa" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_bwa_mem" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastqc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_samtools" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "html_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "json_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_all" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_bam_filtering" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_exomedepth" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_manta" . + + a schema1:Person ; + schema1:name "Daniel López-López" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Scipion" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "ABRomics" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/bacterial-genome-assembly/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "auto_kmer_choice" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "careful" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cov_cutoff" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cov_state" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq1_type" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq2_type" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq_file_type" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "iontorrent" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "kmers" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "libraries_fwd_rev" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "libraries_metadata" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "libraries_mono" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mode" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "nanopore_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "onlyassembler" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pacbio_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sanger_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trusted_contigs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "untrusted_contigs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "all_log_spades" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "assembly_graph_spades" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "assembly_graph_unicycler" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "assembly_graph_with_scaffolds_spades" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "assembly_image_spades" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "assembly_image_unicycler" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "assembly_info_spades" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "assembly_info_unicycler" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "assembly_unicycler" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "out_contig_stats_spades" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "out_contigs_spades" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "out_scaffold_stats_spades" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "out_scaffolds_spades" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bits_set" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blacklist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bwt_algorithm" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_lratio" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_exomeDepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_manta" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_bf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "generate_bwa_indexes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_exome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_mapping_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "read_group" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_amb" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_ann" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_bwt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fai" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_pac" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_sa" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_bwa_mem" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastqc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_samtools" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_zip" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "html_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "json_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_all" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_bam_filtering" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_exomedepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_manta" . + + a schema1:Person ; + schema1:name "Daniel López-López" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject ; + schema1:contentSize 183 . + + a schema1:MediaObject ; + schema1:contentSize 90 . + + a schema1:MediaObject ; + schema1:contentSize 558 . + + a schema1:MediaObject ; + schema1:contentSize 1289 . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 44006 . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/445" ; + schema1:name "Diego De Panis" . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux nvb4 2.6.32-642.6.2.el6.x86_64 #1 SMP Mon Oct 24 10:22:33 EDT 2016 x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs SLURM_JOB_QOS=bsc_cs SLURM_JOB_GPUS=0,1,2,3 SLURM_MEM_PER_CPU=8000 SLURM_JOB_ID=1930568 SLURM_JOB_USER=bsc19959 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=4373 SLURM_SUBMIT_DIR=/gpfs/scratch/bsc19/bsc19959/cache/Imagenet SLURM_JOB_NODELIST=nvb[4-9,12-21] SLURM_JOB_CPUS_PER_NODE=16(x16) SLURM_SUBMIT_HOST=nvblogin1 SLURM_JOB_PARTITION=projects SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=16 COMPSS_MASTER_NODE=nvb4 COMPSS_WORKER_NODES= nvb5 nvb6 nvb7 nvb8 nvb9 nvb12 nvb13 nvb14 nvb15 nvb16 nvb17 nvb18 nvb19 nvb20 nvb21" ; + schema1:endTime "2024-03-25T11:02:59+00:00" ; + schema1:instrument ; + schema1:name "COMPSs main_pytorch_sync_5_nodes.py execution at bsc_nvidia with JOB_ID 1930568" ; + schema1:result , + ; + schema1:startTime "2024-03-25T10:49:35+00:00" ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:SoftwareApplication ; + schema1:name "Dislib" ; + schema1:version "0.9.x" . + + a schema1:SoftwareApplication ; + schema1:name "Pytorch" ; + schema1:version "1.7.1+cu101" . + + a schema1:MediaObject ; + schema1:contentSize 4515 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_bsc_nvidia_SLURM_JOB_ID_1930568" ; + schema1:contentSize 64 ; + schema1:description "COMPSs console standard error log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-1930568.err" . 
+ + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_bsc_nvidia_SLURM_JOB_ID_1930568" ; + schema1:contentSize 3012 ; + schema1:description "COMPSs console standard output log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-1930568.out" . + + a schema1:MediaObject ; + schema1:contentSize 769 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 928 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Engy Nasr" . + + a schema1:Person ; + schema1:name "Bérénice Batut" . + + a schema1:Person ; + schema1:name "Paul Zierep" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/allele-based-pathogen-identification/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "metrics" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/parallel-accession-download/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/brew3r/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_36" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_37" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_40" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_41" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_42" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_43" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_44" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_45" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_46" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_47" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_48" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_49" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_50" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_51" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_52" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_53" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_54" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_55" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_56" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_57" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_58" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fast" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Scaffolding-Bionano-VGP7/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_34" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_36" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_37" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_40" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_41" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_ac.frcmod" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_ac.inpcrd" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_ac.lib" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_ac.prmtop" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myamber_to_pdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybabel_minimize.mol2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_image.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_rgyr.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_rms.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_heteroatoms.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.parmtop" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.crd" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.parmtop" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb4amber_run.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_mdout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_minout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myreduce_add_hydrogens.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myremove_ligand.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myremove_pdb_water.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cpout" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cprst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.mdinfo" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.netcdf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.rst" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.trj" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sra-manifest-to-concatenated-fastqs/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Abromics" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/bacterial-genome-assembly/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/atacseq/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myeditconf.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfix_side_chain.pdb" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygenion.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygenion.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_energy.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_image.xtc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_rgyr.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_rms.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_trjconv_str.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygrompp.tpr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.cpt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.edr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.xtc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb.pdb" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb2gmx.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb2gmx.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysolvate.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysolvate.zip" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sra-manifest-to-concatenated-fastqs/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ChIPQC_annotation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ChIPQC_bCount" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ChIPQC_blacklist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ChIPQC_consensus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ChIPQC_facetBy" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_bParallel" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_background" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_blacklist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_consensus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_cores" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_design" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_filterFun" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_greylist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_library" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_low_read_count_filter" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_minOverlap" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_normalization" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_reorderMeta_factor" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_reorderMeta_value" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "DiffBind_retrieve_consensus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bamCoverage_effective_genome_size" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bamCoverage_extendReads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bamCoverage_normalizeUsing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bamCoverage_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blackListFile" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "computeMatrix_downstream" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "computeMatrix_outFileSortedRegions" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "computeMatrix_outputFile" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "computeMatrix_regions" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "computeMatrix_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "computeMatrix_upstream" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hisat2_idx_basename" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hisat2_idx_directory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hisat2_num_of_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_control_samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_file_split" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_file_split_fwd_single" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_file_split_rev" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_qc_check" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_treatment_samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_trimming_check" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "macs2_callpeak_bdg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "macs2_callpeak_broad" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "macs2_callpeak_format" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "macs2_callpeak_gsize" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "macs2_callpeak_nomodel" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "macs2_extsize" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "macs2_pvalue" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "macs2_qvalue" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "macs2_shift" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "metadata_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "multiBamSummary_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "plotCorrelation_color" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "plotCorrelation_method" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "plotCorrelation_numbers" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "plotCorrelation_outFileName" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "plotCorrelation_plotType" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "plotCorrelation_title" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "plotCoverage_outFileName" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "plotCoverage_plotFileFormat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "plotCoverage_threads" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "plotFingerprint_outFileName" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "plotFingerprint_plotFileFormat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "plotFingerprint_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "plotHeatmap_outputFile" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "plotHeatmap_plotFileFormat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "raw_files_directory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rose_genome_build" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rose_stitch_distance" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rose_tss_distance" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_fixmate_output_format" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_fixmate_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_markdup_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_readswithoutbits" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_sort_compression_level" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_sort_memory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_sort_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmomatic_pe_illuminaClip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmomatic_pe_leading" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmomatic_pe_minlen" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmomatic_pe_slidingWindow" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmomatic_pe_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmomatic_pe_trailing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmomatic_se_illuminaClip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmomatic_se_leading" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmomatic_se_minlen" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmomatic_se_slidingWindow" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmomatic_se_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "trimmomatic_se_trailing" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_ChIPQC_macs_ChIPQCexperiment" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_ChIPQC_macs_ChIPQCreport" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_ChIPQC_macs_outdir" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_ChIPQC_rose_ChIPQCexperiment" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_ChIPQC_rose_ChIPQCreport" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_ChIPQC_rose_outdir" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_DiffBind_macs_correlation_heatmap" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_DiffBind_macs_diffbind_consensus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_DiffBind_macs_diffbind_dba_object" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_DiffBind_macs_diffbind_normalized_counts" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_DiffBind_macs_diffbind_results" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_DiffBind_rose_correlation_heatmap" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_DiffBind_rose_diffbind_consensus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_DiffBind_rose_diffbind_dba_object" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_DiffBind_rose_diffbind_normalized_counts" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_DiffBind_rose_diffbind_results" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_append_files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_bamCoverage_norm" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_bed_to_rose_gff_conversion" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_bedtools_coverage" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_bedtools_intersect" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_bedtools_merge" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_computeMatrix_matrix" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_computeMatrix_regions" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_enhancer_bed_processing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_exclude_black_list_regions" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_exclude_black_list_regions_narrowPeak" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_paired_html_fwd" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_paired_html_rev" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_paired_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_raw_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_single_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_single_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_hisat2_for_paired_reads_sam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_hisat2_for_paired_reads_stderr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_hisat2_for_single_reads_sam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_hisat2_for_single_reads_stderr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_macs2_call_peaks_bed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_macs2_call_peaks_broadPeak" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_macs2_call_peaks_cutoff" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_macs2_call_peaks_gappedPeak" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_macs2_call_peaks_lambda" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_macs2_call_peaks_model_r" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_macs2_call_peaks_narrowPeak" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_macs2_call_peaks_pileup" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_macs2_call_peaks_xls" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_multiBamSummary_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_paste_content_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_paste_content_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_plotCorrelation_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_plotCoverage_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_plotFingerprint_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_plotHeatmap" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_printf_header_samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_rose_main_AllEnhancers_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_rose_main_Enhancers_withSuper" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_rose_main_Plot_points" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_rose_main_STITCHED_ENHANCER_REGION_MAP" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_rose_main_SuperEnhancers_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_rose_main_gff_dir_outputs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_rose_main_mappedGFF_dir_outputs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_fixmate" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_index" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_markdup" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_sort" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_sort_by_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_sort_peaks_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_total_peaks_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trimmomatic_paired_end_fwd_paired" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trimmomatic_paired_end_fwd_unpaired" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trimmomatic_paired_end_rev_paired" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trimmomatic_paired_end_rev_unpaired" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trimmomatic_paired_end_stderr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trimmomatic_single_end_fastq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trimmomatic_single_end_stderr" . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux nvb4 2.6.32-642.6.2.el6.x86_64 #1 SMP Mon Oct 24 10:22:33 EDT 2016 x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs SLURM_JOB_QOS=bsc_cs SLURM_JOB_GPUS=0,1,2,3 SLURM_MEM_PER_CPU=8000 SLURM_JOB_ID=1930560 SLURM_JOB_USER=bsc19959 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=4373 SLURM_SUBMIT_DIR=/gpfs/scratch/bsc19/bsc19959/cache/Imagenet SLURM_JOB_NODELIST=nvb[4-9,12-21] SLURM_JOB_CPUS_PER_NODE=16(x16) SLURM_SUBMIT_HOST=nvblogin1 SLURM_JOB_PARTITION=projects SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=16 COMPSS_MASTER_NODE=nvb4 COMPSS_WORKER_NODES= nvb5 nvb6 nvb7 nvb8 nvb9 nvb12 nvb13 nvb14 nvb15 nvb16 nvb17 nvb18 nvb19 nvb20 nvb21" ; + schema1:endTime "2024-03-25T10:49:09+00:00" ; + schema1:instrument ; + schema1:name "COMPSs main_pytorch_sync_5_nodes.py execution at bsc_nvidia with JOB_ID 1930560" ; + schema1:result , + ; + schema1:startTime "2024-03-25T10:30:21+00:00" ; + schema1:subjectOf "https://userportal.bsc.es/" . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:SoftwareApplication ; + schema1:name "Dislib" ; + schema1:version "0.9.x" . + + a schema1:SoftwareApplication ; + schema1:name "Pytorch" ; + schema1:version "1.7.1+cu101" . + + a schema1:MediaObject ; + schema1:contentSize 4523 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_bsc_nvidia_SLURM_JOB_ID_1930560" ; + schema1:contentSize 64 ; + schema1:description "COMPSs console standard error log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-1930560.err" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_bsc_nvidia_SLURM_JOB_ID_1930560" ; + schema1:contentSize 3010 ; + schema1:description "COMPSs console standard output log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-1930560.out" . + + a schema1:MediaObject ; + schema1:contentSize 765 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 928 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step00_cat_pdb_input_structure2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step00_cat_pdb_output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_reduce_remove_hydrogens_input_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_reduce_remove_hydrogens_output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_genrestr_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_genrestr_input_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_genrestr_output_itp_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_gmx_trjconv_str_protein_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_gmx_trjconv_str_protein_output_str_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_gmx_trjconv_str_ligand_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_gmx_trjconv_str_ligand_input_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_gmx_trjconv_str_ligand_input_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_gmx_trjconv_str_ligand_output_str_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_cat_pdb_hydrogens_output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_append_ligand_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_append_ligand_input_itp_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_append_ligand_output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_editconf_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_editconf_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_solvate_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_solvate_output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_grompp_genion_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_grompp_genion_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_genion_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_genion_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_genion_output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_grompp_min_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_grompp_min_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_mdrun_min_output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_mdrun_min_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_mdrun_min_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_mdrun_min_output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_gmx_energy_min_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_gmx_energy_min_output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_make_ndx_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_make_ndx_output_ndx_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_grompp_nvt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_grompp_nvt_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_mdrun_nvt_output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_mdrun_nvt_output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_mdrun_nvt_output_gro_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_mdrun_nvt_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_mdrun_nvt_output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step25_gmx_energy_nvt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step25_gmx_energy_nvt_output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step26_grompp_npt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step26_grompp_npt_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step27_mdrun_npt_output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step27_mdrun_npt_output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step27_mdrun_npt_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step27_mdrun_npt_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step27_mdrun_npt_output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step28_gmx_energy_npt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step28_gmx_energy_npt_output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step29_grompp_md_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step29_grompp_md_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_extract_molecule_output_molecule_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step30_mdrun_md_output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step30_mdrun_md_output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step30_mdrun_md_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step30_mdrun_md_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step30_mdrun_md_output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step31_rmsd_first_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step31_rmsd_first_output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step32_rmsd_exp_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step32_rmsd_exp_output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step33_gmx_rgyr_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step33_gmx_rgyr_output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step34_gmx_image_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step34_gmx_image_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step34b_gmx_image2_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step34b_gmx_image2_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step35_gmx_trjconv_str_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step35_gmx_trjconv_str_output_str_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step36_grompp_md_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step36_grompp_md_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_fix_side_chain_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_pdb2gmx_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_pdb2gmx_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_pdb2gmx_output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_make_ndx_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_make_ndx_input_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_make_ndx_output_ndx_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_itp_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_str_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_str_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_ndx_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_molecule_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_str_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_ndx_path" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-decontamination-VGP9/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Engy Nasr" . + + a schema1:Person ; + schema1:name "Bérénice Batut" . + + a schema1:Person ; + schema1:name "Paul Zierep" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/taxonomy-profiling-and-visualization-with-krona/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bits_set" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blacklist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chromosome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_lratio" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_exomeDepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_manta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_max_len" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_bf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_exome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_mapping_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "read_group" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_bwa_mem" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastqc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_samtools" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "html_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "json_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_all" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_bam_filtering" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_exomedepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_manta" . 
+ + a schema1:Person ; + schema1:name "Daniel López-López" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/cutandrun/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/732" ; + schema1:name "Andrii Neronov" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BridgeDB cache" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Differential gene expression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Differential miRNA expression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING identifier mapping" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mRNA expression correlation" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of correlation edges to use, USE FOR TESTING ONLY" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "miRNA mRNA correlation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "miRTarBase data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of miRTarBase edges to use, USE FOR TESTING ONLY" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of cores to let MOGAMUN use" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of generation to let the genetic algorithm in MOGAMUN evolve" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The maximum size of subnetworks during postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "the minimum Jaccard Index overlap between two subnetworks to allow them to be merged" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The minimum size of subnetworks during postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of parallel runs to let MOGAMUN do, these parallel runs are combined in postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The minimum score for a STRING edge to be included in the analysis" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of STRING edges to use, USE FOR TESTING ONLY" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Variant Burden" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "full_graph" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "subnetworks" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "COMPSs" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_COVID-19-PE-ARTIC-ILLUMINA_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_COVID-19-PE-ARTIC-ILLUMINA_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CPAT_header_tab" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GRCh38_p13_genome_fa_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pfam-A_hmm_dat_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pfam-A_hmm_gz" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "active_site_dat_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_annotation_gtf_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_lncRNA_transcripts_fa_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_pc_transcripts_fa_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_transcripts_fa_gz" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/231" ; + schema1:name "Ryan Patterson-Cross" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject ; + schema1:contentSize 200 . + + a schema1:MediaObject ; + schema1:contentSize 1322 . + + a schema1:MediaObject ; + schema1:contentSize 1297 . + + a schema1:MediaObject ; + schema1:contentSize 718 . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 262005 . + + a schema1:MediaObject ; + schema1:contentSize 21291 . + + a schema1:MediaObject ; + schema1:contentSize 30406 . + + a schema1:ComputerLanguage ; + schema1:name "Shell Script" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/parallel-accession-download/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Lucille Delisle" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/rnaseq-sr/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Shell Script" . + + a schema1:Person ; + schema1:name "Lucille Delisle" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/rnaseq-sr/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Scaffolding-HiC-VGP8/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:Person ; + schema1:name "ABRomics" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/quality-and-contamination-control/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/gromacs-dctmd/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chrom_sizes_new.txt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "marker_regions" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "marker_regions-plot" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Daniel López-López" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/357" ; + schema1:name "Tatiana Gurbich" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-sr/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/atacseq/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Scaffolding-Bionano-VGP7/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s11r1b48 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=29726926 SLURM_JOB_USER=bsc19776 COMPSS_HOME=/apps/COMPSs/Trunk/ SLURM_JOB_UID=7363 SLURM_SUBMIT_DIR=/gpfs/home/bsc19/bsc19776/PyCOMPSs/Galileo_school/example_egu11_v04 SLURM_JOB_NODELIST=s11r1b48 SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48 COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login1 SLURM_JOB_PARTITION=sequential SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=1 COMPSS_MASTER_NODE=s11r1b48 COMPSS_WORKER_NODES=" ; + schema1:endTime "2023-07-20T15:48:28+00:00" ; + schema1:instrument ; + schema1:name "COMPSs my_workflow_multiple.py execution at marenostrum4 with JOB_ID 29726926" ; + schema1:result , + , + , + , + , + , + , + ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.2.rc2307" . + + a schema1:MediaObject ; + schema1:contentSize 839 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 512 ; + schema1:description "Auxiliary File" ; + schema1:name "launch.sh" . + + a schema1:MediaObject ; + schema1:contentSize 1827 ; + schema1:description "Auxiliary File" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:MediaObject ; + schema1:contentSize 12520 ; + schema1:description "Auxiliary File" ; + schema1:name "my_analytic" . 
+ + a schema1:MediaObject ; + schema1:contentSize 770 ; + schema1:description "Auxiliary File" ; + schema1:name "my_analytic.c" . + + a schema1:MediaObject ; + schema1:contentSize 12392 ; + schema1:description "Auxiliary File" ; + schema1:name "my_sim" . + + a schema1:MediaObject ; + schema1:contentSize 355 ; + schema1:description "Auxiliary File" ; + schema1:name "my_sim.c" . + + a schema1:MediaObject ; + schema1:contentSize 690 ; + schema1:description "COMPSs command line execution command (runcompss), including flags and parameters passed" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_command_line_arguments.txt" . + + a schema1:Person ; + schema1:name "Lucille Delisle" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/rnaseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/422" ; + schema1:name "Zavolan Lab" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Lucille Delisle" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/rnaseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Matthew Downton" . + + a schema1:Person ; + schema1:name "Andrey Bliznyuk" . + + a schema1:Person ; + schema1:name "Rika Kobayashi" . + + a schema1:Person ; + schema1:name "Ben Menadue" . + + a schema1:Person ; + schema1:name "Ben Evans" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/141" ; + schema1:name "Tracy Chew" . + + a schema1:ComputerLanguage ; + schema1:name "Shell Script" . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pcz_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pcz_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pcz_path" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:Person ; + schema1:name "Miguel Roncoroni" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Proteinortho_extract_by_orthogroup" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "extracted_ORFs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fasta_header_cleaned" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "funannotate_predicted_proteins" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "headers_shortened" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "proteomes_to_one_file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "repeat_masked" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sample_names_to_headers" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/atacseq/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Sample" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/atacseq/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:name "Tim Dudgeon" . 
+ + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:Person ; + schema1:name "Gianmauro Cuccuru" . + + a schema1:Person ; + schema1:name "Björn Grüning" . + + a schema1:Person ; + schema1:name "Rachael Skyner" . + + a schema1:Person ; + schema1:name "Jack Scantlebury" . + + a schema1:Person ; + schema1:name "Susan Leung" . + + a schema1:Person ; + schema1:name "Frank von Delft" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-sr/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-variation-reporting/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "WDL" ; + schema1:identifier ; + schema1:name "Workflow Description Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_extract_model_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_extract_model_input_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_extract_model_output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_cpptraj_convert_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_cpptraj_convert_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_bd_run_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_bd_run_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_bd_run_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_cpptraj_rms_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_cpptraj_rms_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_dmd_run_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_dmd_run_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_dmd_run_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_cpptraj_rms_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_cpptraj_rms_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_nma_run_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_nma_run_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_nma_run_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_cpptraj_rms_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_cpptraj_convert_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_cpptraj_convert_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_nolb_nma_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_nolb_nma_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_cpptraj_rms_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_extract_chain_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_extract_chain_output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_cpptraj_convert_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_cpptraj_convert_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_imod_imode_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_imod_imode_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_imod_imc_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_imod_imc_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_cpptraj_rms_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_cpptraj_convert_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_cpptraj_convert_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step26_make_ndx_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step26_make_ndx_output_ndx_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step27_gmx_cluster_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step27_gmx_cluster_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step28_cpptraj_rms_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step28_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step28_cpptraj_rms_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step29_pcz_zip_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step29_pcz_zip_output_pcz_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_cpptraj_mask_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_cpptraj_mask_output_cpptraj_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step30_pcz_zip_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step30_pcz_zip_output_pcz_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step31_pcz_info_output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step32_pcz_evecs_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step32_pcz_evecs_output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step33_pcz_animate_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step33_pcz_animate_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step34_cpptraj_convert_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step34_cpptraj_convert_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step35_pcz_bfactor_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step35_pcz_bfactor_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step35_pcz_bfactor_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step36_pcz_hinges_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step36_pcz_hinges_output_json_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step37_pcz_hinges_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step37_pcz_hinges_output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step38_pcz_hinges_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step38_pcz_hinges_output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step39_pcz_stiffness_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step39_pcz_stiffness_output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_cpptraj_mask_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_cpptraj_mask_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step40_pcz_collectivity_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step40_pcz_collectivity_output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_concoord_disco_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_concoord_dist_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_concoord_dist_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_concoord_dist_output_gro_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_concoord_dist_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_concoord_disco_output_bfactor_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_concoord_disco_output_rmsd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_concoord_disco_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_cpptraj_rms_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_cpptraj_convert_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_cpptraj_convert_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_prody_anm_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_prody_anm_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_cpptraj_rms_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_cpptraj_rms_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_ndx_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pcz_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pcz_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_json_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rmsd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_bfactor_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Heatmap" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-consensus-from-variation/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/cutandrun/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/atacseq/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CPAT_header_tab" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GRCh38_p13_genome_fa_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pfam-A_hmm_dat_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pfam-A_hmm_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "active_site_dat_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_annotation_gtf_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_lncRNA_transcripts_fa_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_pc_transcripts_fa_gz" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gencode_v43_transcripts_fa_gz" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "COMPSs" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/141" ; + schema1:name "Tracy Chew" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Basecalling model" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Run binning workflow" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Deduplicate reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Contamination reference file(s)" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Identifier used" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "illumina forward reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "illumina reverse reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken2 database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Maximum memory in MB" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "When working with metagenomes" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Nanopore reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Pilon fix list" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Number of threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Use mapped reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Assembly output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Binning output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filtered statistics" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken2 reports" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Read quality and filtering reports" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/141" ; + schema1:name "Tracy Chew" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Cali Willet" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/141" ; + schema1:name "Tracy Chew" . + + a schema1:ComputerLanguage ; + schema1:name "Shell Script" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_chic-fastq-to-cool-hicup-cooler_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_chic-fastq-to-cool-hicup-cooler_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/hic-hicup-cooler/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_hic-fastq-to-cool-hicup-cooler_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_hic-fastq-to-cool-hicup-cooler_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/hic-hicup-cooler/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_hic-fastq-to-pairs-hicup_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_hic-fastq-to-pairs-hicup_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/hic-hicup-cooler/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_hic-juicermediumtabix-to-cool-cooler_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_hic-juicermediumtabix-to-cool-cooler_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/hic-hicup-cooler/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "KNIME" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Scaffolding-Bionano-VGP7/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/gromacs-mmgbsa/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/159" ; + schema1:name "Oliver Woolland" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/89" ; + schema1:name "Paul Brack" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Andrea Zaliani" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject ; + schema1:contentSize 180 . + + a schema1:MediaObject ; + schema1:contentSize 88 . + + a schema1:MediaObject ; + schema1:contentSize 274 . + + a schema1:MediaObject ; + schema1:contentSize 708 . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 75358 . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/average-bigwig-between-replicates/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-only-VGP3/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux gs05r2b62 5.14.0-284.30.1.el9_2.x86_64 #1 SMP PREEMPT_DYNAMIC Fri Aug 25 09:13:12 EDT 2023 x86_64 x86_64 x86_64 GNU/Linux" ; + schema1:endTime "2024-07-12T10:15:07+00:00" ; + schema1:instrument ; + schema1:name "COMPSs lysozyme_in_water_full.py execution at marenostrum5 with JOB_ID 3844353" ; + schema1:object , + , + , + , + , + , + ; + schema1:result , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:startTime "2024-07-12T10:12:10+00:00" ; + schema1:subjectOf "https://userportal.bsc.es/" . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 3095 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 600 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "FULL.yaml" . + + a schema1:MediaObject ; + schema1:contentSize 1558 ; + schema1:description "Auxiliary File" ; + schema1:name "launch_full.sh" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 7601 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 8868 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water_full_no_mpi.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 10194 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water_full_singularity.py" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_marenostrum5_SLURM_JOB_ID_3844353" ; + schema1:contentSize 100 ; + schema1:description "COMPSs console standard error log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-3844353.err" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_marenostrum5_SLURM_JOB_ID_3844353" ; + schema1:contentSize 4543 ; + schema1:description "COMPSs console standard output log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-3844353.out" . 
+ + a schema1:MediaObject ; + schema1:contentSize 434 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myautodock_vina_run.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myautodock_vina_run.pdbqt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybabel_convert.ent" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybox.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycat_pdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_model_pdbqt.pdbqt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_molecule.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_filter.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_run.json" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_run.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_select.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_select.pqr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myideal_sdf.sdf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mystr_check_add_hydrogens.pdb" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_chic-fastq-to-cool-hicup-cooler_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_chic-fastq-to-cool-hicup-cooler_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/hic-hicup-cooler/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_hic-fastq-to-cool-hicup-cooler_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_hic-fastq-to-cool-hicup-cooler_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/hic-hicup-cooler/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_hic-fastq-to-pairs-hicup_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_hic-fastq-to-pairs-hicup_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/hic-hicup-cooler/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_hic-juicermediumtabix-to-cool-cooler_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_hic-juicermediumtabix-to-cool-cooler_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/hic-hicup-cooler/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/atacseq/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject ; + schema1:contentSize 213 . + + a schema1:MediaObject ; + schema1:contentSize 115 . + + a schema1:MediaObject ; + schema1:contentSize 1240 . + + a schema1:MediaObject ; + schema1:contentSize 1037 . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . 
+ + a schema1:Person ; + schema1:name "Lucille Delisle" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/rnaseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ; + dct:conformsTo ; + schema1:description "Note: The synthetic dataset is saved as a CSV file, UTF-8 format, pipe-separated (|)" ; + schema1:name "vaccines_synthetic_1M.csv" . + + a schema1:CreateAction ; + schema1:endTime "2022-07-27" ; + schema1:instrument ; + schema1:name "dataspice JSON-LD created from CSV templates" ; + schema1:object , + , + , + , + ; + schema1:result , + . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:endTime "2023-01-06" ; + schema1:name "RO-Crate metadata created based on README and dataspice JSON-LD" ; + schema1:object , + ; + schema1:result . + + a schema1:CreateAction ; + schema1:endTime "2022-05-24" ; + schema1:instrument ; + schema1:name "Execution of Jupyter Notebook to generate 10k synthetic dataset" ; + schema1:result . + + a schema1:CreateAction ; + schema1:description "Note that this CSV has only 650k entries, but the Notebook code is meant to crate 1M entries. A modified notebook must have been executed." ; + schema1:endTime "2022-05-24" ; + schema1:instrument ; + schema1:name "Second (?) execution of Jupyter Notebook to generate 650k synthetic dataset" ; + schema1:result . 
+ + a schema1:CreateAction ; + schema1:endTime "2022-05-27T09:52:32.926250" ; + schema1:instrument ; + schema1:name "Execution of pandas-profiling for exploratory data analysis" ; + schema1:object , + ; + schema1:result , + ; + schema1:startTime "2022-05-27T09:52:29.847284" . + + a schema1:CreateAction ; + schema1:endTime "2022-07-27T12:00:00+01:00" ; + schema1:instrument ; + schema1:name "Generating HTML from QMD" ; + schema1:object ; + schema1:result . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about ; + schema1:author ; + schema1:isBasedOn . + + a schema1:Dataset ; + schema1:author , + , + , + , + ; + schema1:datePublished "2023-04-26" ; + schema1:description "Reproducable analytical pipeline to apply causal inference techniques using distributed observational data" ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:name "BY-COVID - WP5 - Baseline Use Case: SARS-CoV-2 vaccine effectiveness assessment - Analytical pipeline" ; + schema1:url "https://github.com/by-covid/BY-COVID_WP5_T5.2_baseline-use-case/tree/main/vaccine_effectiveness_analytical_pipeline" ; + schema1:version "1.0.0" . + + a schema1:MediaObject ; + schema1:contributor , + , + , + ; + schema1:creator , + , + , + , + ; + schema1:datePublished "2023-01-26" ; + schema1:description "Causal model responding to the research question, using a Directed Acyclic Graph" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.6913045" ; + schema1:name "BY-COVID - WP5 - Baseline Use Case: SARS-CoV-2 vaccine effectiveness assessment - Causal Model" ; + schema1:url "https://zenodo.org/record/7572373" ; + schema1:version "1.1.0" . 
+ + a schema1:Dataset ; + schema1:creator , + , + , + , + ; + schema1:datePublished "2023-02-09" ; + schema1:description "Data management plan" ; + schema1:hasPart , + , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.7625783" ; + schema1:name "BY-COVID - WP5 - Baseline Use Case: SARS-CoV-2 vaccine effectiveness assessment - Data Management Plan" ; + schema1:url "https://zenodo.org/record/7625784" ; + schema1:version "0.0.1" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Geofile" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/159" ; + schema1:name "Oliver Woolland" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Data_R1.fastq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Data_R2.fastq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Contigs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Read_mapping_alignement" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mapped_read" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fasta" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/atacseq/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:Person ; + schema1:name "ERGA" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lineage" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/445" ; + schema1:name "Diego De Panis" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Scaffolding-HiC-VGP8/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . 
+ + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "WDL" ; + schema1:identifier ; + schema1:name "Workflow Description Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Bpipe" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-ont-artic-variant-calling/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Egon Willighagen" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/262" ; + schema1:name "Marvin Martens" . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Mpro-x0195_0_apo-desolv_pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hits_frankenstein_17_sdf" . + + a schema1:Person ; + schema1:name "Tim Dudgeon" . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:Person ; + schema1:name "Gianmauro Cuccuru" . + + a schema1:Person ; + schema1:name "Björn Grüning" . + + a schema1:Person ; + schema1:name "Rachael Skyner" . + + a schema1:Person ; + schema1:name "Jack Scantlebury" . + + a schema1:Person ; + schema1:name "Susan Leung" . 
+ + a schema1:Person ; + schema1:name "Frank von Delft" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Mar Batlle" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "LFC" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Molti_Louvain" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Molti_modularity" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "approach" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "control_id" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "counts" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "layers" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "metadata" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_nodes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "multiXrank_r" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "multiXrank_selfloops" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dir" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "padj" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_extract_molecule_input_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_extract_molecule_output_molecule_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_sander_mdrun_energy_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_sander_mdrun_energy_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_sander_mdrun_energy_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_sander_mdrun_energy_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_process_minout_energy_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_process_minout_energy_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_sander_mdrun_warm_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_sander_mdrun_warm_output_log_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_sander_mdrun_warm_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_sander_mdrun_warm_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_process_mdout_warm_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_process_mdout_warm_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_sander_mdrun_nvt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_sander_mdrun_nvt_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_sander_mdrun_nvt_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_sander_mdrun_nvt_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_process_mdout_nvt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_process_mdout_nvt_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_sander_mdrun_npt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_sander_mdrun_npt_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_sander_mdrun_npt_output_rst_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_sander_mdrun_npt_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_process_mdout_npt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_process_mdout_npt_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_sander_mdrun_md_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_sander_mdrun_md_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_sander_mdrun_md_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_sander_mdrun_md_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_rmsd_first_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_rmsd_first_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_pdb4amber_run_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_rmsd_exp_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_rmsd_exp_input_exp_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_rmsd_exp_output_cpptraj_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_cpptraj_rgyr_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_cpptraj_rgyr_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_cpptraj_image_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_cpptraj_image_output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_leap_gen_top_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_leap_gen_top_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_leap_gen_top_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_leap_gen_top_output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_sander_mdrun_minH_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_sander_mdrun_minH_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_sander_mdrun_minH_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step3_sander_mdrun_minH_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_process_minout_minH_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_process_minout_minH_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_sander_mdrun_min_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_sander_mdrun_min_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_sander_mdrun_min_output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_sander_mdrun_min_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_process_minout_min_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_process_minout_min_output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_amber_to_pdb_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_leap_solvate_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_leap_solvate_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_leap_solvate_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_leap_solvate_output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_leap_add_ions_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_leap_add_ions_output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_leap_add_ions_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_leap_add_ions_output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_molecule_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux gs06r3b72 5.14.0-284.30.1.el9_2.x86_64 #1 SMP PREEMPT_DYNAMIC Fri Aug 25 09:13:12 EDT 2023 x86_64 x86_64 x86_64 GNU/Linux" ; + schema1:endTime "2024-04-30T13:04:29+00:00" ; + schema1:instrument ; + schema1:name "COMPSs matmul_files.py execution at marenostrum5 with JOB_ID 1236485" ; + schema1:object , + , + , + , + , + , + , + , + , + , + , + ; + schema1:result , + , + , + , + ; + schema1:startTime "2024-04-30T13:04:01+00:00" ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 248 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1549 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "matmul_tasks.py" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_marenostrum5_SLURM_JOB_ID_1236485" ; + schema1:contentSize 103 ; + schema1:description "COMPSs console standard error log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-1236485.err" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_marenostrum5_SLURM_JOB_ID_1236485" ; + schema1:contentSize 37369 ; + schema1:description "COMPSs console standard output log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-1236485.out" . 
+ + a schema1:MediaObject ; + schema1:contentSize 287 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 2394 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "matmul_reproducibility_no_persistence.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Payam Emami" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/cutandrun/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.gro" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.itp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybabel_minimize.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myligand.pdb" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.itp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybabel_minimize.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myligand.pdb" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_36" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_37" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_40" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_41" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_ac.frcmod" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_ac.inpcrd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_ac.lib" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_ac.prmtop" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myamber_to_pdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybabel_minimize.mol2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_image.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_rgyr.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_rms.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_heteroatoms.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.parmtop" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.parmtop" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.pdb" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb4amber_run.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_mdout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_minout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myreduce_add_hydrogens.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myremove_ligand.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myremove_pdb_water.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cpout" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cprst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.mdinfo" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.netcdf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.rst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.trj" . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux gs10r3b56 5.14.0-284.30.1.el9_2.x86_64 #1 SMP PREEMPT_DYNAMIC Fri Aug 25 09:13:12 EDT 2023 x86_64 x86_64 x86_64 GNU/Linux" ; + schema1:endTime "2024-07-04T13:38:20+00:00" ; + schema1:instrument ; + schema1:name "COMPSs random_svd_compss.py execution at marenostrum5 with JOB_ID 3618551" ; + schema1:object , + ; + schema1:result ; + schema1:startTime "2024-07-04T13:34:27+00:00" ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 7154 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_marenostrum5_SLURM_JOB_ID_3618551" ; + schema1:contentSize 100 ; + schema1:description "COMPSs console standard error log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-3618551.err" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_marenostrum5_SLURM_JOB_ID_3618551" ; + schema1:contentSize 8696 ; + schema1:description "COMPSs console standard output log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-3618551.out" . + + a schema1:MediaObject ; + schema1:contentSize 623 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1046 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rRNA filtering" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "forward reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gzip compression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken2 database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "memory usage (mb)" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pacbio reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reverse reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "number of threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BAM files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "FASTQC" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filtered reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "KRAKEN2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MetaBat2" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "QUAST" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "SPADES" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/269" ; + schema1:name "Marlene Rezk" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "API key for CDS service" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Longitude for right-edge of domain" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "emepcores" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Day for end date" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Month for end date" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Year for end date" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "generate_metdir" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "generate_rundir" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Geographic inputs for geogrid" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Geogrid data table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "HTTPS proxy information, if needed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "EMEP Input Files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Directory name for WRF input Files, should match 'meteo' base-directory in namelist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "EMEP configuration file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Geogrid namelist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "metgrid configuration" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Real preprocessor Configuration File" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Configuration File" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Configuration File" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "WRF Configuration File" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Latitude for top of domain" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "outname_atm" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "outname_sfc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "realcores" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "EMEP run label, for output files, should match 'runlabel1' in namelist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Latitude for bottom of domain" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Day for starting date" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Month for starting date" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Year for starting date" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "grib variable table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "grib variable table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Longitude for left-edge of domain" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "wrfcores" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output files" . + + a schema1:Person ; + schema1:name "ABRomics" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/amr_gene_detection/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ; + dct:conformsTo ; + schema1:description "Note: The synthetic dataset is saved as a CSV file, UTF-8 format, pipe-separated (|)" ; + schema1:name "vaccines_synthetic_1M.csv" . + + a schema1:CreateAction ; + schema1:endTime "2022-07-27" ; + schema1:instrument ; + schema1:name "dataspice JSON-LD created from CSV templates" ; + schema1:object , + , + , + , + ; + schema1:result , + . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:endTime "2023-01-06" ; + schema1:name "RO-Crate metadata created based on README and dataspice JSON-LD" ; + schema1:object , + ; + schema1:result . + + a schema1:CreateAction ; + schema1:endTime "2022-05-24" ; + schema1:instrument ; + schema1:name "Execution of Jupyter Notebook to generate 10k synthetic dataset" ; + schema1:result . + + a schema1:CreateAction ; + schema1:description "Note that this CSV has only 650k entries, but the Notebook code is meant to crate 1M entries. A modified notebook must have been executed." ; + schema1:endTime "2022-05-24" ; + schema1:instrument ; + schema1:name "Second (?) execution of Jupyter Notebook to generate 650k synthetic dataset" ; + schema1:result . + + a schema1:CreateAction ; + schema1:endTime "2022-05-27T09:52:32.926250" ; + schema1:instrument ; + schema1:name "Execution of pandas-profiling for exploratory data analysis" ; + schema1:object , + ; + schema1:result , + ; + schema1:startTime "2022-05-27T09:52:29.847284" . 
+ + a schema1:CreateAction ; + schema1:endTime "2022-07-27T12:00:00+01:00" ; + schema1:instrument ; + schema1:name "Generating HTML from QMD" ; + schema1:object ; + schema1:result . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about ; + schema1:author ; + schema1:isBasedOn . + + a schema1:Dataset ; + schema1:author , + , + , + , + ; + schema1:datePublished "2023-04-26" ; + schema1:description "Reproducable analytical pipeline to apply causal inference techniques using distributed observational data" ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:name "BY-COVID - WP5 - Baseline Use Case: SARS-CoV-2 vaccine effectiveness assessment - Analytical pipeline" ; + schema1:url "https://github.com/by-covid/BY-COVID_WP5_T5.2_baseline-use-case/tree/main/vaccine_effectiveness_analytical_pipeline" ; + schema1:version "1.0.0" . + + a schema1:MediaObject ; + schema1:contributor , + , + , + ; + schema1:creator , + , + , + , + ; + schema1:datePublished "2023-01-26" ; + schema1:description "Causal model responding to the research question, using a Directed Acyclic Graph" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.6913045" ; + schema1:name "BY-COVID - WP5 - Baseline Use Case: SARS-CoV-2 vaccine effectiveness assessment - Causal Model" ; + schema1:url "https://zenodo.org/record/7572373" ; + schema1:version "1.1.0" . + + a schema1:Dataset ; + schema1:creator , + , + , + , + ; + schema1:datePublished "2023-02-09" ; + schema1:description "Data management plan" ; + schema1:hasPart , + , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.7625783" ; + schema1:name "BY-COVID - WP5 - Baseline Use Case: SARS-CoV-2 vaccine effectiveness assessment - Data Management Plan" ; + schema1:url "https://zenodo.org/record/7625784" ; + schema1:version "0.0.1" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MaxMTpc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MinCountPerCell" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MinGenesPerCell" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "genecount_qc_plot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mito_qc_plot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc_anndata_object" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "top_genes_plot" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Darwin bsccs742.int.bsc.es 23.1.0 Darwin Kernel Version 23.1.0: Mon Oct 9 21:27:27 PDT 2023; root:xnu-10002.41.9~6/RELEASE_X86_64 x86_64 COMPSS_HOME=/Users/rsirvent/opt/COMPSs/" ; + schema1:endTime "2023-11-16T14:25:14+00:00" ; + schema1:instrument ; + schema1:name "COMPSs simple.py execution at bsccs742.int.bsc.es" ; + schema1:object ; + schema1:result , + . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 232 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 639 ; + schema1:description "Auxiliary File" ; + schema1:name "README" . + + a schema1:MediaObject ; + schema1:contentSize 4467 ; + schema1:description "Auxiliary File" ; + schema1:name "pom.xml" . + + a schema1:MediaObject ; + schema1:contentSize 121 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 1312 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sra-manifest-to-concatenated-fastqs/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "basecalling model" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "identifier used" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "kraken_database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "nanopore FASTQ reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "number of threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Flye de novo assembler for single-molecule reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Kraken2 reports" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Krona taxonomy visualization" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Medaka polisher" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "QUAlity assessment" . + + a schema1:ComputerLanguage ; + schema1:name "Scipion" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "collision_info" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "file_id" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gnps_file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hmdb_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mbank_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mzml_files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ppmx" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "python_script" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "r_script" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "candidate_files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "result" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Matthias Bernt" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/openms-metaprosip/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Matthias Bernt" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/openms-metaprosip/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Andreas Wilm and October SESSIONS and Paola Florez DE SESSIONS and ZHU Yuan and Shuzhen SIM and CHU Wenhan Collins" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/parallel-accession-download/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux 663d7a747723 5.15.0-101-generic #111~20.04.1-Ubuntu SMP Mon Mar 11 15:44:43 UTC 2024 x86_64 x86_64 x86_64 GNU/Linux COMPSS_HOME=/opt/COMPSs/" ; + schema1:endTime "2024-03-28T09:14:42+00:00" ; + schema1:instrument ; + schema1:name "COMPSs fibonacci.py execution at 663d7a747723" ; + schema1:result . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3.rc2401" . + + a schema1:MediaObject ; + schema1:contentSize 244 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 26 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 340 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:Person ; + schema1:name "All the Sarek team" . + + a schema1:Person ; + schema1:name "nf-core comunity and people in the IMPaCT-Data project." . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "WDL" ; + schema1:identifier ; + schema1:name "Workflow Description Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject ; + schema1:contentSize 183 . + + a schema1:MediaObject ; + schema1:contentSize 2039 . + + a schema1:MediaObject ; + schema1:contentSize 572 . + + a schema1:MediaObject ; + schema1:contentSize 1442 . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 44222 . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/370" ; + schema1:name "Peter Menzel" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:name "Eleni Mina" . + + a schema1:Person ; + schema1:name "Daniël Wijnbergen" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Egon Willighagen" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/262" ; + schema1:name "Marvin Martens" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/polish-with-long-reads/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "All the Sarek team" . + + a schema1:Person ; + schema1:name "nf-core comunity and people in the IMPaCT-data project." . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/gromacs-mmgbsa/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Occurrence_southpacific.csv" . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "KNIME" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-only-VGP3/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject ; + schema1:contentSize 178 . + + a schema1:MediaObject ; + schema1:contentSize 106 . + + a schema1:MediaObject ; + schema1:contentSize 268 . + + a schema1:MediaObject ; + schema1:contentSize 846 . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 1237 . + + a schema1:MediaObject ; + schema1:contentSize 650 . + + a schema1:MediaObject ; + schema1:contentSize 650 . + + a schema1:MediaObject ; + schema1:contentSize 12 . 
+ + a schema1:MediaObject ; + schema1:contentSize 10 . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-atac-cutandrun_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-atac-cutandrun_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-pe_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-pe_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-sr_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-sr_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "body" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_host" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_password" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_user" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "is_availability" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "result_modifiers" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "results" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_file" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/476" ; + schema1:name "Vasiliki Panagi" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/499" ; + schema1:name "Elisabetta Spinazzola" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ; + dct:conformsTo ; + schema1:description "Note: The synthetic dataset is saved as a CSV file, UTF-8 format, pipe-separated (|)" ; + schema1:name "vaccines_synthetic_1M.csv" . + + a schema1:CreateAction ; + schema1:endTime "2022-07-27" ; + schema1:instrument ; + schema1:name "dataspice JSON-LD created from CSV templates" ; + schema1:object , + , + , + , + ; + schema1:result , + . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:endTime "2023-01-06" ; + schema1:name "RO-Crate metadata created based on README and dataspice JSON-LD" ; + schema1:object , + ; + schema1:result . + + a schema1:CreateAction ; + schema1:endTime "2022-05-24" ; + schema1:instrument ; + schema1:name "Execution of Jupyter Notebook to generate 10k synthetic dataset" ; + schema1:result . + + a schema1:CreateAction ; + schema1:description "Note that this CSV has only 650k entries, but the Notebook code is meant to crate 1M entries. A modified notebook must have been executed." 
; + schema1:endTime "2022-05-24" ; + schema1:instrument ; + schema1:name "Second (?) execution of Jupyter Notebook to generate 650k synthetic dataset" ; + schema1:result . + + a schema1:CreateAction ; + schema1:endTime "2022-05-27T09:52:32.926250" ; + schema1:instrument ; + schema1:name "Execution of pandas-profiling for exploratory data analysis" ; + schema1:object , + ; + schema1:result , + ; + schema1:startTime "2022-05-27T09:52:29.847284" . + + a schema1:CreateAction ; + schema1:endTime "2022-07-27T12:00:00+01:00" ; + schema1:instrument ; + schema1:name "Generating HTML from QMD" ; + schema1:object ; + schema1:result . + + a schema1:ImageObject, + schema1:MediaObject ; + schema1:about , + , + , + ; + schema1:description "From Causal model, via a data model spec to generation of synthetic data" ; + schema1:name "Conceptual phases" . + + a schema1:CreativeWork ; + dct:conformsTo , + ; + schema1:about ; + schema1:author ; + schema1:isBasedOn . + + a schema1:Dataset ; + schema1:author , + , + , + , + ; + schema1:datePublished "2023-04-26" ; + schema1:description "Reproducable analytical pipeline to apply causal inference techniques using distributed observational data" ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:name "BY-COVID - WP5 - Baseline Use Case: SARS-CoV-2 vaccine effectiveness assessment - Analytical pipeline" ; + schema1:url "https://github.com/by-covid/BY-COVID_WP5_T5.2_baseline-use-case/tree/main/vaccine_effectiveness_analytical_pipeline" ; + schema1:version "1.0.0" . 
+ + a schema1:MediaObject ; + schema1:contributor , + , + , + ; + schema1:creator , + , + , + , + ; + schema1:datePublished "2023-01-26" ; + schema1:description "Causal model responding to the research question, using a Directed Acyclic Graph" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.6913045" ; + schema1:name "BY-COVID - WP5 - Baseline Use Case: SARS-CoV-2 vaccine effectiveness assessment - Causal Model" ; + schema1:url "https://zenodo.org/record/7572373" ; + schema1:version "1.1.0" . + + a schema1:Dataset ; + schema1:creator , + , + , + , + ; + schema1:datePublished "2023-02-09" ; + schema1:description "Data management plan" ; + schema1:hasPart , + , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.7625783" ; + schema1:name "BY-COVID - WP5 - Baseline Use Case: SARS-CoV-2 vaccine effectiveness assessment - Data Management Plan" ; + schema1:url "https://zenodo.org/record/7625784" ; + schema1:version "0.0.1" . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:Person ; + schema1:name "ERGA" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lineage" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/445" ; + schema1:name "Diego De Panis" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reverse read length" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Forward primer" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "forward reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Subfragment name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Primers are removed" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reference database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reverse read length" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reverse primer" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reverse reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Sample name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "files_to_folder_fastqc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "files_to_folder_ngtax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "files_to_folder_phyloseq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "files_to_folder_picrust2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "turtle" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "WDL" ; + schema1:identifier ; + schema1:name "Workflow Description Language" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-only-VGP3/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Alex L Mitchell" . + + a schema1:Person ; + schema1:name "Lorna J Richardson" . + + a schema1:Person ; + schema1:name "Ekaterina Sakharova" . + + a schema1:Person ; + schema1:name "Maxim Scheremetjew" . + + a schema1:Person ; + schema1:name "Anton Korobeynikov" . + + a schema1:Person ; + schema1:name "Alex Shlemov" . + + a schema1:Person ; + schema1:name "Olga Kunyavskaya" . + + a schema1:Person ; + schema1:name "Alla Lapidus" . + + a schema1:Person ; + schema1:name "Robert D Finn" . + + a schema1:Person ; + schema1:name "Alexandre Almeida" . + + a schema1:Person ; + schema1:name "Martin Beracochea" . + + a schema1:Person ; + schema1:name "Miguel Boland" . + + a schema1:Person ; + schema1:name "Josephine Burgin" . + + a schema1:Person ; + schema1:name "Guy Cochrane" . + + a schema1:Person ; + schema1:name "Michael R Crusoe" . + + a schema1:Person ; + schema1:name "Varsha Kale" . + + a schema1:Person ; + schema1:name "Simon C Potter" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "5.8s_pattern" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "5s_pattern" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "forward_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "itsonedb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "itsonedb_label" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "itsonedb_otu_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "itsonedb_tax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_db" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_label" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_otus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_tax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc_min_length" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reverse_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rfam_model_clans" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rfam_models" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "single_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_db" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_label" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_otus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_tax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stats_file_name" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "unite_db" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "unite_label" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "unite_otu_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "unite_tax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ITS-length" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "completed_flag_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastp_filtering_json_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gz_files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hashsum_paired" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hashsum_single" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_tax_flag_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc-statistics" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc-status" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc_summary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rna-count" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sequence-categorisation_folder" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "suppressed_upload" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "taxonomy-summary_folder" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Scaffolding-Bionano-VGP7/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input File" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Select One Gene" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "File URL" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MetGENE metabolite table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MetGENE Reaction Table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Regulatory Element Set" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GlyGen Protein Products" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Gene Count Matrix" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Scored Genes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Gene" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "LINCS L1000 Reverse Search Dashboard" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Scored Drugs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Scored Genes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Scored Genes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MetGENE Summary" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-atac-cutandrun_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-atac-cutandrun_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-pe_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-pe_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-sr_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-sr_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s14r2b13 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=svd_lanczos COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=31727336 SLURM_JOB_USER=bsc19756 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=4486 SLURM_SUBMIT_DIR=/gpfs/projects/bsc19/bsc19756/RF_Example SLURM_JOB_NODELIST=s14r2b[13,15-16] SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48(x3) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login1 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=3 COMPSS_MASTER_NODE=s14r2b13 COMPSS_WORKER_NODES= s14r2b15 s14r2b16" ; + schema1:endTime "2024-02-14T13:47:39+00:00" ; + schema1:instrument ; + schema1:name "COMPSs main_rf.py execution at marenostrum4 with JOB_ID 31727336" ; + schema1:result ; + schema1:subjectOf "https://userportal.bsc.es/" . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 6801 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 941 ; + schema1:description "Auxiliary File" ; + schema1:name "launch_provenance.sh" . + + a schema1:MediaObject ; + schema1:contentSize 445 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 1032 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MaxMTpc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MinCountPerCell" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MinGenesPerCell" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "genecount_qc_plot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mito_qc_plot" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc_anndata_object" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "top_genes_plot" . + + a schema1:Person ; + schema1:name "Lucille Delisle" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/rnaseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/average-bigwig-between-replicates/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Adrián Muñoz-Civico" . + + a schema1:Person ; + schema1:name "Daniel López-López" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_33" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_36" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_37" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_40" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_41" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_42" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_43" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fasta" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_36" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_37" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_40" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myamber_to_pdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_image.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_rgyr.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_rms.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb.pdb" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb4amber_run.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_mdout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_minout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cpout" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cprst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.mdinfo" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.netcdf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.rst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.trj" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_baredSC-1d-logNorm_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_baredSC-1d-logNorm_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/baredsc/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_baredSC-2d-logNorm_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_baredSC-2d-logNorm_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/baredsc/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . 
+ + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bits_set" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blacklist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bwt_algorithm" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_lratio" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_exomeDepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_manta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_bf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_len" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "generate_bwa_indexes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_exome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_mapping_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "read_group" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_amb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_ann" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_bwt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fai" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_pac" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_sa" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_bwa_mem" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastqc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_samtools" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "html_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "json_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_all" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_bam_filtering" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_exomedepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_manta" . + + a schema1:Person ; + schema1:name "Daniel López-López" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . 
+ + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:Person ; + schema1:name "Milad Miladi" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "PkorrP19E3_ONT_fast5.tar.gz" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Directory with cached BridgeDB data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output of differential mRNA expression testing from DESeq2, with Ensembl gene ID's concatened with gene symbols with \";\" inbetween in the first column" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output of differential miRNA expression testing from DESeq2, with miRBase ID's in the first column" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Table with entrez mappings from STRING protein idenfitiers as downloaded from STRING" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Tab separated edge list with Ensembl gene ID's in the first two columns, and their bi-weight midcorrelation as defined by Langelder et al. in the third column" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of correlation edges to use, USE FOR TESTING ONLY" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Tab separated edge list with Ensembl gene ID's in the first column, miRBase ID's in the second column. and their bi-weight midcorrelation as defined by Langelder et al. in the third column" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Table with miRNA - mRNA target data as downloaded from miRTarBase" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of miRTarBase edges to use, USE FOR TESTING ONLY" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of cores to let MOGAMUN use" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of generation to let the genetic algorithm in MOGAMUN evolve" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The maximum size of subnetworks during postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "the minimum Jaccard Index overlap between two subnetworks to allow them to be merged" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The minimum size of subnetworks during postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of parallel runs to let MOGAMUN do, these parallel runs are combined in postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Table with Protein - protein interaction data as downloaded from STRING" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The minimum score for a STRING edge to be included in the analysis" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of STRING edges to use, USE FOR TESTING ONLY" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output of variant burden testing using SKAT in the rvtests package, with HGNC symbols in the first column" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "full_graph" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "subnetworks" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "annotation_metadata" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cgi_genes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "civic_genes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "final_variants" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gene_cards_germline" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gene_cards_loh" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gene_cards_somatic" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gene_reports_tabular" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "germline_cancer_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "loh_cancer_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "loh_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "maf_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mutations_summary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "somatic_cancer_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "somatic_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "uniprot_cancer_genes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "variant_reports_tabular" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux a5026d668aa6 5.15.0-102-generic #112~20.04.1-Ubuntu SMP Thu Mar 14 14:28:24 UTC 2024 x86_64 x86_64 x86_64 GNU/Linux COMPSS_HOME=/opt/COMPSs/" ; + schema1:endTime "2024-04-19T08:05:56+00:00" ; + schema1:instrument ; + schema1:name "COMPSs increment_fibonacci.py execution at a5026d668aa6" ; + schema1:result . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3.rc2404" . + + a schema1:MediaObject ; + schema1:contentSize 454 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 48 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 1072 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:alternateName "RMD" ; + schema1:name "R markdown" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_36" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_37" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_40" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_41" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_ac.frcmod" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_ac.inpcrd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_ac.lib" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_ac.prmtop" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myamber_to_pdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybabel_minimize.mol2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_image.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_rgyr.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_rms.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_heteroatoms.pdb" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.parmtop" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.parmtop" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb4amber_run.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_mdout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_minout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myreduce_add_hydrogens.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myremove_ligand.pdb" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myremove_pdb_water.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cpout" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cprst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.mdinfo" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.netcdf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.rst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.trj" . + + a schema1:ComputerLanguage ; + schema1:name "Bpipe" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:name "Payam Emami" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ApplyVQSR_ts_filter_level" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "VariantRecalibrator_trust_all_polymorphic" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "VariantRecalibrator_truth_sensitivity_trance_indels" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "VariantRecalibrator_truth_sensitivity_trance_snps" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "VariantRecalibrator_use_annotation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bcftools_norm_multiallelics" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bcftools_norm_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bcftools_view_include_VQSR_filters" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bcftools_view_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bwa_mem_num_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bwa_mem_sec_shorter_split_hits" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gatk_splitintervals_exclude_intervalList" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gatk_splitintervals_include_intervalList" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gatk_splitintervals_scatter_count" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_file_split" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_file_split_fwd_single" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_file_split_rev" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_qc_check" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_trimming_check" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "picard_addorreplacereadgroups_rgpl" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "raw_files_directory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_genome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_fixmate_output_format" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_fixmate_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_flagstat_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_sort_compression_level" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_sort_memory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_sort_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_cigar" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_collapsecigar" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_count" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_fastcompression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_iscram" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_randomseed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readsingroup" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readsinlibrary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readsquality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readswithbits" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readswithoutbits" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readtagtostrip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_region" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_samheader" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_target_bed_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_threads" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_uncompressed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sub_bqsr_hc_java_options" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sub_bqsr_hc_native_pairHMM_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sub_bqsr_interval_padding" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sub_bqsr_known_sites_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sub_bqsr_known_sites_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sub_bqsr_known_sites_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_build_over" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_convert_arg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_database_location" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_na_string" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_operation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_otherinfo" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_protocol" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_remove" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "table_annovar_vcfinput" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_compression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_do_not_compress" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_length" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_strigency" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_trim_suffix" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "vqsr_arguments_indels_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "vqsr_arguments_indels_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "vqsr_arguments_indels_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "vqsr_arguments_snps_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "vqsr_arguments_snps_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "vqsr_arguments_snps_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "vqsr_arguments_snps_4" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "vqsr_known_sites_indels_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "vqsr_known_sites_indels_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "vqsr_known_sites_indels_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "vqsr_known_sites_snps_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "vqsr_known_sites_snps_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "vqsr_known_sites_snps_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "vqsr_known_sites_snps_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_bcftools_norm_vqsr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_bcftools_view_filter_vqsr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_bwa_mem_paired" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_bwa_mem_single" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_paired_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_paired_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_raw_zip" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_single_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_single_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gather_bwa_sam_files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_ApplyVQSR_indel" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_ApplyVQSR_snp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_CombineGVCFs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_GenotypeGVCFs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_MakeSitesOnlyVcf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_VQSR_MergeVCFs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_VariantRecalibrator_indel_recal" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_VariantRecalibrator_indel_tranches" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_VariantRecalibrator_snp_recal" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_VariantRecalibrator_snp_tranches" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_bqsr_subworkflowbqsr_bqsr_bam" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_bqsr_subworkflowbqsr_hc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_bqsr_subworkflowbqsr_mergevcfs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_bqsr_subworkflowbqsr_tables" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_gatk_splitintervals" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_picard_addorreplacereadgroups" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_picard_markduplicates" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_picard_markduplicates_metrics" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_fixmate" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_flagstat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_index" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_sort" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_view_conversion" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_view_count_total" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_table_annovar_filtered_avinput" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_table_annovar_filtered_multianno_txt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_table_annovar_filtered_multianno_vcf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trim_galore_paired_fq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trim_galore_paired_reports" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trim_galore_single_fq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trim_galore_single_reports" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_sort_by_name" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_baredSC-1d-logNorm_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_baredSC-1d-logNorm_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/baredsc/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_baredSC-2d-logNorm_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_baredSC-2d-logNorm_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/baredsc/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name " test suite" ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "GitHub testing workflow for " ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/crs4/fair-crcc-send-data/actions/workflows/main.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:Dataset ; + schema1:description "Integration tests for the workflow" . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset ; + schema1:description "Configuration folder" . + + a schema1:MediaObject . + + a schema1:Dataset ; + schema1:description "Workflow folder" . + + a schema1:Dataset ; + schema1:description "Workflow rule module" . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset ; + schema1:description "Validation files" . + + a schema1:MediaObject . + + a schema1:Dataset ; + schema1:description "Scripts folder" . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Javier Garrayo-Ventas" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myeditconf.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfix_side_chain.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygenion.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygenion.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_energy.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_image.xtc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_rgyr.xvg" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_rms.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygmx_trjconv_str.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mygrompp.tpr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.cpt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.edr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.xtc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mymdrun.xvg" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb2gmx.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb2gmx.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysolvate.gro" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysolvate.zip" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-only-VGP3/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "ABRomics" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/bacterial_genome_annotation/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a ; + dct:conformsTo ; + schema1:description "Note: The synthetic dataset is saved as a CSV file, UTF-8 format, pipe-separated (|)" ; + schema1:name "vaccines_synthetic_1M.csv" . + + a schema1:CreateAction ; + schema1:endTime "2022-07-27" ; + schema1:instrument ; + schema1:name "dataspice JSON-LD created from CSV templates" ; + schema1:object , + , + , + , + ; + schema1:result , + . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:endTime "2023-01-06" ; + schema1:name "RO-Crate metadata created based on README and dataspice JSON-LD" ; + schema1:object , + ; + schema1:result . + + a schema1:CreateAction ; + schema1:endTime "2022-05-24" ; + schema1:instrument ; + schema1:name "Execution of Jupyter Notebook to generate 10k synthetic dataset" ; + schema1:result . + + a schema1:CreateAction ; + schema1:description "Note that this CSV has only 650k entries, but the Notebook code is meant to crate 1M entries. A modified notebook must have been executed." ; + schema1:endTime "2022-05-24" ; + schema1:instrument ; + schema1:name "Second (?) execution of Jupyter Notebook to generate 650k synthetic dataset" ; + schema1:result . + + a schema1:CreateAction ; + schema1:endTime "2022-05-27T09:52:32.926250" ; + schema1:instrument ; + schema1:name "Execution of pandas-profiling for exploratory data analysis" ; + schema1:object , + ; + schema1:result , + ; + schema1:startTime "2022-05-27T09:52:29.847284" . + + a schema1:CreateAction ; + schema1:endTime "2022-07-27T12:00:00+01:00" ; + schema1:instrument ; + schema1:name "Generating HTML from QMD" ; + schema1:object ; + schema1:result . + + a schema1:CreativeWork ; + dct:conformsTo ; + schema1:about ; + schema1:author ; + schema1:isBasedOn . + + a schema1:Dataset ; + schema1:author , + , + , + , + ; + schema1:datePublished "2023-04-26" ; + schema1:description "Reproducable analytical pipeline to apply causal inference techniques using distributed observational data" ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:name "BY-COVID - WP5 - Baseline Use Case: SARS-CoV-2 vaccine effectiveness assessment - Analytical pipeline" ; + schema1:url "https://github.com/by-covid/BY-COVID_WP5_T5.2_baseline-use-case/tree/main/vaccine_effectiveness_analytical_pipeline" ; + schema1:version "1.0.0" . 
+ + a schema1:MediaObject ; + schema1:contributor , + , + , + ; + schema1:creator , + , + , + , + ; + schema1:datePublished "2023-01-26" ; + schema1:description "Causal model responding to the research question, using a Directed Acyclic Graph" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.6913045" ; + schema1:name "BY-COVID - WP5 - Baseline Use Case: SARS-CoV-2 vaccine effectiveness assessment - Causal Model" ; + schema1:url "https://zenodo.org/record/7572373" ; + schema1:version "1.1.0" . + + a schema1:Dataset ; + schema1:creator , + , + , + , + ; + schema1:datePublished "2023-02-09" ; + schema1:description "Data management plan" ; + schema1:hasPart , + , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.7625783" ; + schema1:name "BY-COVID - WP5 - Baseline Use Case: SARS-CoV-2 vaccine effectiveness assessment - Data Management Plan" ; + schema1:url "https://zenodo.org/record/7625784" ; + schema1:version "0.0.1" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s01r2b50 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=Increment COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=30894524 SLURM_JOB_USER=bsc19057 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=2952 SLURM_SUBMIT_DIR=/gpfs/home/bsc19/bsc19057/COMPSs-DP/tutorial_apps/python/increment SLURM_JOB_NODELIST=s01r2b50 SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48 COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=1 COMPSS_MASTER_NODE=s01r2b50 COMPSS_WORKER_NODES=" ; + schema1:endTime "2023-11-24T08:24:54+00:00" ; + schema1:instrument ; + schema1:name "COMPSs increment.py execution at marenostrum4 with JOB_ID 30894524" ; + schema1:object , + , + ; + schema1:result , + , + , + ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 243 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 920 ; + schema1:description "Auxiliary File" ; + schema1:name "README" . + + a schema1:MediaObject ; + schema1:contentSize 4618 ; + schema1:description "Auxiliary File" ; + schema1:name "pom.xml" . 
+ + a schema1:MediaObject ; + schema1:contentSize 207 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 1633 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-sr/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:Person ; + schema1:name "Tim Dudgeon" . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/galaxyproject/iwc/actions/workflows/workflow_test.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Alex L Mitchell" . + + a schema1:Person ; + schema1:name "Lorna J Richardson" . + + a schema1:Person ; + schema1:name "Ekaterina Sakharova" . + + a schema1:Person ; + schema1:name "Maxim Scheremetjew" . + + a schema1:Person ; + schema1:name "Anton Korobeynikov" . + + a schema1:Person ; + schema1:name "Alex Shlemov" . + + a schema1:Person ; + schema1:name "Olga Kunyavskaya" . + + a schema1:Person ; + schema1:name "Alla Lapidus" . + + a schema1:Person ; + schema1:name "Robert D Finn" . + + a schema1:Person ; + schema1:name "Alexandre Almeida" . + + a schema1:Person ; + schema1:name "Martin Beracochea" . + + a schema1:Person ; + schema1:name "Miguel Boland" . + + a schema1:Person ; + schema1:name "Josephine Burgin" . + + a schema1:Person ; + schema1:name "Guy Cochrane" . + + a schema1:Person ; + schema1:name "Michael R Crusoe" . + + a schema1:Person ; + schema1:name "Varsha Kale" . + + a schema1:Person ; + schema1:name "Simon C Potter" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "5.8s_pattern" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "5s_pattern" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CGC_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CGC_postfixes" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "EggNOG_data_dir" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "EggNOG_db" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "EggNOG_diamond_db" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "HMM_gathering_bit_score" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "HMM_name_database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "HMM_omit_alignment" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "InterProScan_applications" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "InterProScan_databases" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "InterProScan_outputFormat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Uniref90_db_txt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cgc_chunk_size" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "clusters_glossary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "contig_min_length" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "contigs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "diamond_databaseFile" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "diamond_header" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "diamond_maxTargetSeqs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "func_ann_names_hmmer" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "func_ann_names_ips" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "go_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gp_flatfiles_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "graphs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hmmsearch_header" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ips_header" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ko_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_db" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_label" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_otus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "lsu_tax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "other_ncrna_models" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pathways_classes" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pathways_names" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "protein_chunk_size_IPS" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "protein_chunk_size_eggnog" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "protein_chunk_size_hmm" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rfam_model_clans" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rfam_models" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_db" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_label" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_otus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ssu_tax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bgzip_fasta_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bgzip_index" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chunking_nucleotides" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chunking_proteins" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "completed_flag_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "compressed_files" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "functional_annotation_folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hashsum_input" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "index_fasta_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_cds_flag_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "no_tax_flag_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pathways_systems_folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pathways_systems_folder_antismash" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pathways_systems_folder_antismash_summary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc-statistics_folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc-status" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc_summary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rna-count" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sequence-categorisation_folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stats" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "taxonomy-summary_folder" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-only-VGP3/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s01r2b53 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=bsc_cs SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=31494577 SLURM_JOB_USER=bsc19057 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=2952 SLURM_SUBMIT_DIR=/gpfs/home/bsc19/bsc19057/Tutorial_2024/kmeans SLURM_JOB_NODELIST=s01r2b[53-54] SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48(x2) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=2 COMPSS_MASTER_NODE=s01r2b53 COMPSS_WORKER_NODES= s01r2b54" ; + schema1:endTime "2024-01-22T15:53:19+00:00" ; + schema1:instrument ; + schema1:name "COMPSs kmeans.py execution at marenostrum4 with JOB_ID 31494577" ; + schema1:result ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1137 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 1017 ; + schema1:description "Auxiliary File" ; + schema1:name "launch_kmeans.sh" . + + a schema1:MediaObject ; + schema1:contentSize 471 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 761 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject ; + schema1:contentSize 177 . + + a schema1:MediaObject ; + schema1:contentSize 422 . + + a schema1:MediaObject ; + schema1:contentSize 724 . + + a schema1:MediaObject ; + schema1:contentSize 1056 . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 6619 . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_molecules_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_aln_orig_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_aln_target_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_ene_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Dannon Baker" . + + a schema1:Person ; + schema1:name "Björn Grüning" . 
+ + a schema1:Person ; + schema1:name "Delphine Larivière" . + + a schema1:Person ; + schema1:name "Gildas Le Corguillé" . + + a schema1:Person ; + schema1:name "Andrew Lonie" . + + a schema1:Person ; + schema1:name "Nicholas Keener" . + + a schema1:Person ; + schema1:name "Sergei Kosakovsky Pond" . + + a schema1:Person ; + schema1:name "Wolfgang Maier" . + + a schema1:Person ; + schema1:name "Anton Nekrutenko" . + + a schema1:Person ; + schema1:name "James Taylor" . + + a schema1:Person ; + schema1:name "Steven Weaver" . + + a schema1:Person ; + schema1:name "Marius van den Beek" . + + a schema1:Person ; + schema1:name "Dave Bouvier" . + + a schema1:Person ; + schema1:name "John Chilton" . + + a schema1:Person ; + schema1:name "Nate Coraor" . + + a schema1:Person ; + schema1:name "Frederik Coppens" . + + a schema1:Person ; + schema1:name "Bert Droesbeke" . + + a schema1:Person ; + schema1:name "Ignacio Eguinoa" . + + a schema1:Person ; + schema1:name "Simon Gladman" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux archit-Alpha-15-B5EEK 6.5.0-35-generic #35~22.04.1-Ubuntu SMP PREEMPT_DYNAMIC Tue May 7 09:00:52 UTC 2 x86_64 x86_64 x86_64 GNU/Linux COMPSS_HOME=/opt/COMPSs/" ; + schema1:endTime "2024-06-06T10:12:50+00:00" ; + schema1:instrument ; + schema1:name "COMPSs monte_carlo_pi.py execution at archit-Alpha-15-B5EEK" ; + schema1:result , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3.rc2404" . + + a schema1:MediaObject ; + schema1:contentSize 480 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 44 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 727 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "RMD" ; + schema1:name "R markdown" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s11r2b54 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=31521170 SLURM_JOB_USER=nct00014 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=9214 SLURM_SUBMIT_DIR=/gpfs/home/nct00/nct00014/lysozyme_in_water SLURM_JOB_NODELIST=s11r2b[54-56,62] SLURM_JOB_GID=16440 SLURM_JOB_CPUS_PER_NODE=48(x4) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=nct_290 SLURM_JOB_NUM_NODES=4 COMPSS_MASTER_NODE=s11r2b54 COMPSS_WORKER_NODES= s11r2b55 s11r2b56 s11r2b62" ; + schema1:endTime "2024-01-25T16:09:26+00:00" ; + schema1:instrument ; + schema1:name "COMPSs lysozyme_in_water.py execution at marenostrum4 with JOB_ID 31521170" ; + schema1:object , + , + , + , + , + , + ; + schema1:result , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 4169 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1584 ; + schema1:description "Auxiliary File" ; + schema1:name "launch.sh" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 8957 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water_full.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 8868 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water_full_no_mpi.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 10194 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water_full_singularity.py" . + + a schema1:MediaObject ; + schema1:contentSize 629 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 600 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Scaffolding-HiC-VGP8/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/cutandrun/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Lucille Delisle" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/rnaseq-sr/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "QCFilteredAnnDataObject" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "1k_cell_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "1k_gene_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "marker_dot_plot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "marker_table" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "processed_anndata_object" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "umap_cluster_plot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "umap_sample_plot" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/141" ; + schema1:name "Tracy Chew" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MaxMTpc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MinCountPerCell" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MinGenesPerCell" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "genecount_qc_plot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mito_qc_plot" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "qc_anndata_object" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "top_genes_plot" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sra-manifest-to-concatenated-fastqs/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Cali Willet" . 
+ + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/141" ; + schema1:name "Tracy Chew" . + + a schema1:ComputerLanguage ; + schema1:name "Shell Script" . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "indices_folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rnaseq_left_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rnaseq_right_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sars_cov_2_reference_genome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "indels" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "snps" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/26" ; + schema1:name "Ambarish Kumar" . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux nvb4 2.6.32-642.6.2.el6.x86_64 #1 SMP Mon Oct 24 10:22:33 EDT 2016 x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs SLURM_JOB_QOS=bsc_cs SLURM_JOB_GPUS=0,1,2,3 SLURM_MEM_PER_CPU=8000 SLURM_JOB_ID=1930260 SLURM_JOB_USER=bsc19959 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=4373 SLURM_SUBMIT_DIR=/gpfs/scratch/bsc19/bsc19959/cache SLURM_JOB_NODELIST=nvb[4-9,12-21] SLURM_JOB_CPUS_PER_NODE=16(x16) SLURM_SUBMIT_HOST=nvblogin1 SLURM_JOB_PARTITION=projects SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=16 COMPSS_MASTER_NODE=nvb4 COMPSS_WORKER_NODES= nvb5 nvb6 nvb7 nvb8 nvb9 nvb12 nvb13 nvb14 nvb15 nvb16 nvb17 nvb18 nvb19 nvb20 nvb21" ; + schema1:endTime "2024-03-22T17:53:30+00:00" ; + schema1:instrument ; + schema1:name "COMPSs cch_kmeans_test.py execution at bsc_nvidia with JOB_ID 1930260" ; + schema1:result , + ; + schema1:startTime "2024-03-22T17:29:38+00:00" ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:ComputerLanguage ; + schema1:alternateName "Dislib" ; + schema1:name "Dislib" ; + schema1:url "https://dislib.readthedocs.io/en/latest/" ; + schema1:version "0.9" . + + a schema1:MediaObject ; + schema1:contentSize 6201 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_bsc_nvidia_SLURM_JOB_ID_1930260" ; + schema1:contentSize 64 ; + schema1:description "COMPSs console standard error log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-1930260.err" . 
+ + a schema1:MediaObject ; + schema1:about "#COMPSs_Workflow_Run_Crate_bsc_nvidia_SLURM_JOB_ID_1930260" ; + schema1:contentSize 2835 ; + schema1:description "COMPSs console standard output log file" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss-1930260.out" . + + a schema1:MediaObject ; + schema1:contentSize 731 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 1009 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s02r2b29 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=31494702 SLURM_JOB_USER=bsc19057 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=2952 SLURM_SUBMIT_DIR=/gpfs/home/bsc19/bsc19057/Tutorial_2024/wordcount SLURM_JOB_NODELIST=s02r2b[29,45] SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48(x2) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=2 COMPSS_MASTER_NODE=s02r2b29 COMPSS_WORKER_NODES= s02r2b45" ; + schema1:endTime "2024-01-22T16:19:50+00:00" ; + schema1:instrument ; + schema1:name "COMPSs wc_reduce.py execution at marenostrum4 with JOB_ID 31494702" ; + schema1:object ; + schema1:result ; + schema1:subjectOf "https://userportal.bsc.es/" . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 972 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 994 ; + schema1:description "Auxiliary File" ; + schema1:name "launch_wordcount.sh" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 2782 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "wc_merge.py" . + + a schema1:MediaObject ; + schema1:contentSize 600 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 854 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/parallel-accession-download/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a schema1:MediaObject ; + schema1:contentSize 177 . + + a schema1:MediaObject ; + schema1:contentSize 167 . + + a schema1:MediaObject ; + schema1:contentSize 274 . + + a schema1:MediaObject ; + schema1:contentSize 994 . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 21291 . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Sample" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "barcodes.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "genes.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "matrix.mtx" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_baredSC-1d-logNorm_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_baredSC-1d-logNorm_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/baredsc/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_baredSC-2d-logNorm_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_baredSC-2d-logNorm_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/baredsc/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/kmer-profiling-hifi-VGP1/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_summary_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_summary_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_molecules_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_molecules_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a schema1:ComputerLanguage ; + schema1:name "Pi" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BridgeDB cache" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Differential gene expression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Differential miRNA expression" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING identifier mapping" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mRNA expression correlation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Correlation limit" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "miRNA mRNA correlation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "miRTarBase data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "miRTarBase limit" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MOGAMUN cores" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MOGAMUN generations" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Maximum subnetwork size" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Subnetwork merge threshold" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Minimum subnetwork size" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MOGAMUN runs" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING filter" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING limit" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Variant Burden" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Full network" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Subnetworks" . + + a schema1:Person ; + schema1:name "Scott Handley" . + + a schema1:Person ; + schema1:name "Rob Edwards" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reverse read length" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Forward primer" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "forward reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Subfragment name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Metadata file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Primers are removed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reference database" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reverse read length" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Reverse primer" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reverse reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Sample name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "number of threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "files_to_folder_fastqc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "files_to_folder_ngtax" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "files_to_folder_phyloseq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "files_to_folder_picrust2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "turtle" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myautodock_vina_run.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myautodock_vina_run.pdbqt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybabel_convert.ent" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybox.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycat_pdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_model_pdbqt.pdbqt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myextract_molecule.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_filter.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_run.json" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_run.zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_select.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myfpocket_select.pqr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myideal_sdf.sdf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mystr_check_add_hydrogens.pdb" . + + a schema1:Person ; + schema1:name "Fernando Cruz (CNAG)" . + + a schema1:Person ; + schema1:name "Francisco Camara (CNAG)" . + + a schema1:Person ; + schema1:name "Tyler Alioto (CNAG)" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "collision_info" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gnps_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hmdb_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mbank_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mzml_files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ppmx" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "python_script" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "r_script" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "candidate_files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "result" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/galaxyproject/iwc/actions/workflows/workflow_test.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Marc Del Pino" . + + a schema1:Person ; + schema1:name "Laia Codo" . + + a schema1:Person ; + schema1:name "Luis Jordá" . + + a schema1:SoftwareApplication ; + schema1:citation ; + schema1:description "AmberTools is a set of programs for biomolecular simulation and analysis" ; + schema1:installUrl "https://anaconda.org/conda-forge/ambertools" ; + schema1:name "AmberTools20" ; + schema1:softwareHelp "http://ambermd.org/doc12/Amber20.pdf" ; + schema1:url "http://ambermd.org/AmberTools.php" ; + schema1:version "20.0" . + + a schema1:Person ; + schema1:name "Frank Kauff" . + + a schema1:Person ; + schema1:name "Servaas Michielssens" . + + a schema1:Dataset ; + schema1:dateModified "2021-09-28T10:02:17.833Z" ; + schema1:description "HPC launchers for PyCOMPSs workflows" ; + schema1:hasPart , + ; + schema1:name "launchers" . + + a schema1:Dataset ; + schema1:dateModified "2021-09-28T10:02:17.833Z" ; + schema1:description "HPC workflows using PyCOMPSs" ; + schema1:hasPart ; + schema1:name "workflows" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "folder where the STAR indices are" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "filter_rrna" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "forward reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gtf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "folder where the kallisto indices are" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "maximum memory usage in megabytes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "prefix_id" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "quantMode" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reverse reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "number of threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STAR output folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "FASTQC" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "FeatureCounts output" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filtered reads folder" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "kallisto output" . + + a schema1:Person ; + schema1:name "ABRomics" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/amr_gene_detection/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_chic-fastq-to-cool-hicup-cooler_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_chic-fastq-to-cool-hicup-cooler_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/hic-hicup-cooler/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_hic-fastq-to-cool-hicup-cooler_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_hic-fastq-to-cool-hicup-cooler_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/hic-hicup-cooler/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_hic-fastq-to-pairs-hicup_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_hic-fastq-to-pairs-hicup_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/hic-hicup-cooler/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_hic-juicermediumtabix-to_cool-cooler_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_hic-juicermediumtabix-to_cool-cooler_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/hic-hicup-cooler/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_36" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_37" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_40" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myamber_to_pdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_image.trr" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_rgyr.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_rms.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.pdb" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mypdb4amber_run.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_mdout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_minout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cpout" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cprst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.mdinfo" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.netcdf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.rst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.trj" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-sr/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Delphine Lariviere" . + + a schema1:Organization ; + schema1:name "VGP" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Plot-Nx-Size/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/galaxyproject/iwc/actions/workflows/workflow_test.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sra-manifest-to-concatenated-fastqs/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "W1_oversample_tr_features.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "W1_oversample_tr_labels.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "W1_te_features.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "W1_te_labels.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "W1_val_labels.tsv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "labels.tsv" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "transdecoder-nucleotides.fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "transdecoder-peptides.fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_baredSC-1d-logNorm_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_baredSC-1d-logNorm_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/baredsc/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_baredSC-2d-logNorm_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_baredSC-2d-logNorm_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/baredsc/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:name "Research Infrastructure RECETOX RI (No LM2018121) financed by the Ministry of Education" . + + a schema1:Person ; + schema1:name "Youth and Sports" . + + a schema1:Person ; + schema1:name "and Operational Programme Research" . + + a schema1:Person ; + schema1:name "Development and Innovation - project CETOCOEN EXCELLENCE (No CZ.02.1.01/0.0/0.0/17_043/0009632)." . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/753" ; + schema1:name "Zargham Ahmad" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "converted_library" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/fluorescence-nuclei-segmentation-and-counting/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bin_widths" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cells_per_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exclude" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "inputReads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "metadata" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "metadataSchema" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "subjects" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "colinear_components" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "indexed_paths" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mergedMetadata" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "odgiGraph" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "odgiPNG" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "odgiRDF" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "readsMergeDedup" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "seqwishGFA" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pedigree" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:Person ; + schema1:name "Lucille Delisle" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/rnaseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/kmer-profiling-hifi-VGP1/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "inputdata_cesm_2_1_3_B1850_f19_g17_tar" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "user_nl_cam_rs" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-consensus-from-variation/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:Person ; + schema1:name "ERGA" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/445" ; + schema1:name "Diego De Panis" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/476" ; + schema1:name "Vasiliki Panagi" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "body" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_host" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_password" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "db_user" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "is_availability" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "result_modifiers" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "results" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_file" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Lucille Delisle" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/rnaseq-sr/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Clinical Bioinformatics Unit" . + + a schema1:Person ; + schema1:name "Pathology Department" . + + a schema1:Person ; + schema1:name "Eramus Medical Center" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "factordata" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "count_data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "limma_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "minerva_table" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_itp_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_str_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_str_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_ndx_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_molecule_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_str_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_ndx_path" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rnaseq_left_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "rnaseq_right_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sample_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "sars_cov_2_reference_genome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "indel" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "snp" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Lucille Delisle" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/rnaseq-sr/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Scaffolding-HiC-VGP8/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpptraj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_crd_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_rst_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_mdinfo_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_dat_path" . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s01r1b31 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=31494609 SLURM_JOB_USER=bsc19057 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=2952 SLURM_SUBMIT_DIR=/gpfs/home/bsc19/bsc19057/Tutorial_2024/cholesky SLURM_JOB_NODELIST=s01r1b[31,33] SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48(x2) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=2 COMPSS_MASTER_NODE=s01r1b31 COMPSS_WORKER_NODES= s01r1b33" ; + schema1:endTime "2024-01-22T15:55:23+00:00" ; + schema1:instrument ; + schema1:name "COMPSs cholesky.py execution at marenostrum4 with JOB_ID 31494609" ; + schema1:result ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 1164 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 1021 ; + schema1:description "Auxiliary File" ; + schema1:name "launch_cholesky.sh" . + + a schema1:MediaObject ; + schema1:contentSize 501 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . 
+ + a schema1:MediaObject ; + schema1:contentSize 765 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s23r2b61 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs COMPSS_PYTHON_VERSION=3.8.2 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=30503085 SLURM_JOB_USER=bsc19959 COMPSS_HOME=/apps/COMPSs/3.3.pr/ SLURM_JOB_UID=4373 SLURM_SUBMIT_DIR=/gpfs/scratch/bsc19/bsc19959/randomsvd SLURM_JOB_NODELIST=s23r2b[61,68] SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48(x2) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=2 COMPSS_MASTER_NODE=s23r2b61 COMPSS_WORKER_NODES= s23r2b68" ; + schema1:endTime "2023-11-03T08:10:10+00:00" ; + schema1:instrument ; + schema1:name "COMPSs random_svd_compss.py execution at marenostrum4 with JOB_ID 30503085" ; + schema1:object ; + schema1:result ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.2.rc2310" . + + a schema1:MediaObject ; + schema1:contentSize 2515 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . 
+ + a schema1:MediaObject ; + schema1:contentSize 502 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 1451 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "WDL" ; + schema1:identifier ; + schema1:name "Workflow Description Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/galaxyproject/iwc/actions/workflows/workflow_test.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s12r2b30 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=31494751 SLURM_JOB_USER=bsc19057 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=2952 SLURM_SUBMIT_DIR=/gpfs/home/bsc19/bsc19057/Tutorial_2024/wordcount SLURM_JOB_NODELIST=s12r2b[30,34] SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48(x2) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=2 COMPSS_MASTER_NODE=s12r2b30 COMPSS_WORKER_NODES= s12r2b34" ; + schema1:endTime "2024-01-22T16:32:29+00:00" ; + schema1:instrument ; + schema1:name "COMPSs wc_merge.py execution at marenostrum4 with JOB_ID 31494751" ; + schema1:object , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:result ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 878 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1004 ; + schema1:description "Auxiliary File" ; + schema1:name "launch_wordcount_merge.sh" . + + a schema1:MediaObject ; + schema1:contentSize 600 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 873 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-sr/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "ABRomics" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/bacterial_genome_annotation/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-atac-cutandrun_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-atac-cutandrun_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-pe_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-pe_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-sr_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-sr_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.gro" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.itp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myacpype_params_gmx.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mybabel_minimize.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myligand.pdb" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_34" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_36" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_37" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_40" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_41" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_42" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_43" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_44" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_45" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_46" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_47" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_48" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_49" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_50" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_51" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_52" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_53" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_54" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_55" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_56" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_57" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_58" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_59" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_60" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_61" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_62" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_63" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_64" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_65" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_66" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_67" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_68" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_randomize_ions.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_randomize_ions.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.parmtop" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.parmtop" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myparmed_hmassrepartition.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_mdout.dat" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_minout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cpout" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cprst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.mdinfo" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.nc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.ncrst" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . 
+ + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Dataset . + + a schema1:MediaObject . 
+ + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-atac-cutandrun_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-atac-cutandrun_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-pe_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-pe_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_consensus-peaks-chip-sr_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_consensus-peaks-chip-sr_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/consensus-peaks/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sra-manifest-to-concatenated-fastqs/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Gareth Price" . + + a schema1:MediaObject ; + schema1:description "Workflow documentation" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:name "Unrecognized Workflow Type" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/gromacs-mmgbsa/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Delphine Lariviere" . + + a schema1:Organization ; + schema1:name "VGP" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . 
+ + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Plot-Nx-Size/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bits_set" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "blacklist" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bwt_algorithm" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "codex_min_lratio" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_exomeDepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "enable_manta" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_bf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "exomeDepth_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastq2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "generate_bwa_indexes" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gridss_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_exome" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_max_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_len" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "manta_min_q" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "min_mapping_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "read_group" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_amb" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_ann" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_bwt" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fai" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_fasta" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_pac" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reference_sa" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_bwa_mem" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastp" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_fastqc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "threads_samtools" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_paired_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastqc_raw_zip" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "html_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "json_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_all" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_bam_filtering" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_codex" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_exomedepth" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gridss" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_manta" . + + a schema1:Person ; + schema1:name "Daniel López-López" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/atacseq/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sra-manifest-to-concatenated-fastqs/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . 
+ + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/VGP-meryldb-creation/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s02r1b59 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=31507350 SLURM_JOB_USER=bsc19057 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=2952 SLURM_SUBMIT_DIR=/gpfs/home/bsc19/bsc19057/Tutorial_2024/lysozyme_in_water SLURM_JOB_NODELIST=s02r1b[59,66] SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48(x2) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=2 COMPSS_MASTER_NODE=s02r1b59 COMPSS_WORKER_NODES= s02r1b66" ; + schema1:endTime "2024-01-24T15:00:23+00:00" ; + schema1:instrument ; + schema1:name "COMPSs lysozyme_in_water.py execution at marenostrum4 with JOB_ID 31507350" ; + schema1:object , + , + , + , + , + , + ; + schema1:result , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 2295 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 1565 ; + schema1:description "Auxiliary File" ; + schema1:name "launch_full.sh" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 8957 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water_full.py" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 8868 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water_full_no_mpi.py" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 10194 ; + schema1:description "Auxiliary File" ; + schema1:encodingFormat "text/plain" ; + schema1:name "lysozyme_in_water_full_singularity.py" . + + a schema1:MediaObject ; + schema1:contentSize 755 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 676 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:Person ; + schema1:name "Peter van Heusden" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/galaxyproject/iwc/actions/workflows/workflow_test.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject ; + schema1:contentSize 177 . + + a schema1:MediaObject ; + schema1:contentSize 329 . + + a schema1:MediaObject ; + schema1:contentSize 724 . + + a schema1:MediaObject ; + schema1:contentSize 1056 . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 6619 . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_1" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_10" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_11" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_12" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_13" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_14" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_15" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_16" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_17" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_18" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_19" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_20" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_21" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_22" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_23" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_24" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_25" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_26" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_27" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_28" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_29" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_30" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_31" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_32" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_33" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_34" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_35" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_36" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_37" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_38" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_39" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_40" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_41" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_42" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_43" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_44" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_45" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_46" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_47" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_48" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_49" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_50" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_51" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_52" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_53" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_54" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_55" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_56" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_57" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_58" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_59" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_60" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_61" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_62" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_63" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_64" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_65" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_66" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_67" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_68" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_7" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_8" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_9" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_randomize_ions.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mycpptraj_randomize_ions.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.parmtop" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_add_ions.pdb" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_gen_top.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.crd" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.parmtop" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myleap_solvate.pdb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myparmed_hmassrepartition.top" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_mdout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "myprocess_minout.dat" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cpout" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.cprst" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.log" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.mdinfo" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.nc" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mysander_mdrun.ncrst" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Darwin MacBook-Pro-Raul-2018.local 23.1.0 Darwin Kernel Version 23.1.0: Mon Oct 9 21:27:27 PDT 2023; root:xnu-10002.41.9~6/RELEASE_X86_64 x86_64 COMPSS_HOME=/Users/rsirvent/opt/COMPSs/" ; + schema1:endTime "2023-10-27T13:17:52+00:00" ; + schema1:instrument ; + schema1:name "COMPSs matmul_objects.py execution at MacBook-Pro-Raul-2018.local" ; + schema1:result . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.2.rc2310" . + + a schema1:MediaObject ; + schema1:contentSize 252 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 1442 ; + schema1:description "Auxiliary File" ; + schema1:name "README" . + + a schema1:MediaObject ; + schema1:contentSize 4553 ; + schema1:description "Auxiliary File" ; + schema1:name "pom.xml" . + + a schema1:MediaObject ; + schema1:contentSize 122 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 2246 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:name "Lucille Delisle" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/rnaseq-sr/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "Docker" ; + schema1:identifier ; + schema1:name "Docker" ; + schema1:url . + + a schema1:Person ; + schema1:name "Krisztian Papp" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bg_confounders" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bg_custom_model" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bg_feature" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bg_measure" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bg_mod" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bg_mod0" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bg_phenotype" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bg_phenotype_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bg_samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "bg_timecourse" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_alpha" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_blind" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_contrast" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_cores" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_denominator" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_design" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_hidden_batch_effects" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_hidden_batch_method" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_hidden_batch_row_means" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_hypothesis" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_lfcThreshold" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_metadata" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_min_sum_of_reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_numerator" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_pAdjustMethod" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_parallelization" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_phenotype" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_reduced" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_reference_level" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_transformation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "deseq2_variables" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastx_first_base_to_keep" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "fastx_last_base_to_keep" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "featureCounts_annotation_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "featureCounts_number_of_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "featureCounts_output_file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "featureCounts_read_meta_feature_overlap" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hisat2_alignments_tailored_trans_assemb" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hisat2_idx_basename" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hisat2_idx_directory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hisat2_known_splicesite_infile" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "hisat2_num_of_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_file_split" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_file_split_fwd_single" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_file_split_rev" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_qc_check" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "input_trimming_check" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "premapping_input_check" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "raw_files_directory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_sort_compression_level" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_sort_memory" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_sort_sort_by_name" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_sort_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_cigar" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_collapsecigar" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_count" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_fastcompression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_isbam" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_iscram" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_randomseed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readsingroup" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readsinlibrary" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readsquality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readswithbits" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readswithoutbits" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_readtagtostrip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_region" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_samheader" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "samtools_view_uncompressed" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stringtie_ballgown_table_files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stringtie_conservative_mode" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stringtie_cpus" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stringtie_expression_estimation_mode" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stringtie_guide_gff" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stringtie_junction_coverage" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stringtie_min_isoform_abundance" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stringtie_min_read_coverage" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stringtie_out_gtf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stringtie_transcript_merge_mode" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "stringtie_verbose" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_compression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_do_not_compress" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_length" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_quality" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_strigency" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "tg_trim_suffix" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_ballgown_de_custom_model" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_ballgown_de_results" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_ballgown_object" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_collect_hisat2_sam_files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_deseq2_dds_object" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_deseq2_de_results" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_deseq2_res_lfcShrink_object" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_deseq2_transformed_object" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_paired_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_paired_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_raw_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_raw_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_single_html" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastqc_single_zip" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastx_trimmer_paired" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_fastx_trimmer_single" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_featureCounts" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_hisat2_for_paired_reads_reports" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_hisat2_for_single_reads_reports" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_sort" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_samtools_view" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_stringtie_expression_gtf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_stringtie_expression_outdir" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_stringtie_merge" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_stringtie_transcript_assembly_gtf" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trim_galore_paired_fq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trim_galore_paired_reports" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trim_galore_single_fq" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "o_trim_galore_single_reports" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s04r2b44 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=wordcount_blocks COMPSS_PYTHON_VERSION=3.9.10 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 COMPSS_BINDINGS_DEBUG=1 SLURM_JOB_ID=31176032 SLURM_JOB_USER=bsc19057 COMPSS_HOME=/apps/COMPSs/3.3/ SLURM_JOB_UID=2952 SLURM_SUBMIT_DIR=/gpfs/home/bsc19/bsc19057/COMPSs-DP/tutorial_apps/python/wordcount_blocks SLURM_JOB_NODELIST=s04r2b44 SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48 COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=sequential SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=1 COMPSS_MASTER_NODE=s04r2b44 COMPSS_WORKER_NODES=" ; + schema1:endTime "2023-12-15T14:53:21+00:00" ; + schema1:instrument ; + schema1:name "COMPSs wordcount_blocks.py execution at marenostrum4 with JOB_ID 31176032" ; + schema1:object ; + schema1:result , + ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.3" . + + a schema1:MediaObject ; + schema1:contentSize 447 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 961 ; + schema1:description "Auxiliary File" ; + schema1:name "README" . + + a schema1:MediaObject ; + schema1:contentSize 4649 ; + schema1:description "Auxiliary File" ; + schema1:name "pom.xml" . 
+ + a schema1:MediaObject ; + schema1:contentSize 331 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 2207 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "contamination reference file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "forward reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "identifier used" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "memory usage (mb)" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "pacbio reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "reverse reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Run GTDB-Tk" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "number of threads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BAM files" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "CheckM" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "FASTQC" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Filtered reads" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "GTDB-Tk" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "MetaBat2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "QUAST" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "SPADES" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "WDL" ; + schema1:identifier ; + schema1:name "Workflow Description Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/sars-cov-2-variation-reporting/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Jupyter" ; + schema1:url . + + a schema1:Person ; + schema1:name "ABRomics" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/quality-and-contamination-control/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "BridgeDB cache" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Differential gene expression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Differential miRNA expression" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING identifier mapping" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "mRNA expression correlation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of correlation edges to use, USE FOR TESTING ONLY" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "miRNA mRNA correlation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "miRTarBase data" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of miRTarBase edges to use, USE FOR TESTING ONLY" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of cores to let MOGAMUN use" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of generation to let the genetic algorithm in MOGAMUN evolve" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The maximum size of subnetworks during postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "the minimum Jaccard Index overlap between two subnetworks to allow them to be merged" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The minimum size of subnetworks during postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of parallel runs to let MOGAMUN do, these parallel runs are combined in postprocessing" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "STRING data" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The minimum score for a STRING edge to be included in the analysis" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "The number of STRING edges to use, USE FOR TESTING ONLY" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Variant Burden" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "full_graph" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "subnetworks" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_4" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_5" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "_anonymous_output_6" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Python" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/Assembly-decontamination-VGP9/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/cutandrun/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Organization ; + schema1:name "Galaxy" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Input file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Config file" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Output file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_ndx_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_str_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_molecule_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . 
+ + a schema1:CreateAction ; + schema1:actionStatus schema1:CompletedActionStatus ; + schema1:agent ; + schema1:description "Linux s23r2b61 4.4.59-92.20-default #1 SMP Wed May 31 14:05:24 UTC 2017 (8cd473d) x86_64 x86_64 x86_64 GNU/Linux SLURM_JOB_NAME=COMPSs COMPSS_PYTHON_VERSION=3.8.2 SLURM_JOB_QOS=debug SLURM_MEM_PER_CPU=1880 SLURM_JOB_ID=30503085 SLURM_JOB_USER=bsc19959 COMPSS_HOME=/apps/COMPSs/3.3.pr/ SLURM_JOB_UID=4373 SLURM_SUBMIT_DIR=/gpfs/scratch/bsc19/bsc19959/randomsvd SLURM_JOB_NODELIST=s23r2b[61,68] SLURM_JOB_GID=2950 SLURM_JOB_CPUS_PER_NODE=48(x2) COMPSS_MPIRUN_TYPE=impi SLURM_SUBMIT_HOST=login3 SLURM_JOB_PARTITION=main SLURM_JOB_ACCOUNT=bsc19 SLURM_JOB_NUM_NODES=2 COMPSS_MASTER_NODE=s23r2b61 COMPSS_WORKER_NODES= s23r2b68" ; + schema1:endTime "2023-11-03T08:10:10+00:00" ; + schema1:instrument ; + schema1:name "COMPSs random_svd_compss.py execution at marenostrum4 with JOB_ID 30503085" ; + schema1:object ; + schema1:result ; + schema1:subjectOf "https://userportal.bsc.es/" . + + a schema1:ComputerLanguage ; + schema1:alternateName "COMPSs" ; + schema1:citation "https://doi.org/10.1007/s10723-013-9272-5" ; + schema1:name "COMPSs Programming Model" ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "3.2.rc2310" . + + a schema1:MediaObject ; + schema1:contentSize 2515 ; + schema1:description "COMPSs application Tasks profile" ; + schema1:encodingFormat , + "application/json" ; + schema1:name "App_Profile.json" . + + a schema1:MediaObject ; + schema1:contentSize 502 ; + schema1:description "COMPSs submission command line (runcompss / enqueue_compss), including flags and parameters passed to the application" ; + schema1:encodingFormat "text/plain" ; + schema1:name "compss_submission_command_line.txt" . + + a schema1:MediaObject ; + schema1:contentSize 1451 ; + schema1:description "COMPSs Workflow Provenance YAML configuration file" ; + schema1:encodingFormat , + "YAML" ; + schema1:name "ro-crate-info.yaml" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Cali Willet" . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/141" ; + schema1:name "Tracy Chew" . + + a schema1:ComputerLanguage ; + schema1:name "Shell Script" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Bam-file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Fragment_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Replace_file" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "chromosome_sizes.tabular" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "gene_annotation" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "anndata" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "anndata_cell_type" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "anndata_gene-matrix_leiden" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "anndata_leiden_clustering" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "anndata_magic" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "umap_cell-type" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "umap_leiden-clusters" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "umap_marker-genes" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:identifier "https://workflowhub.eu/people/370" ; + schema1:name "Peter Menzel" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:Person ; + schema1:name "Wolfgang Maier" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "af_recalculated" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "all_variants_all_samples" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "by_variant_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cleaned_header" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "collapsed_effects" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "combined_variant_report" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "filtered_extracted_variants" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "filtered_variants" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "highest_impact_effects" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "prefiltered_variants" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "processed_variants_collection" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "variants_for_plotting" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test_main_1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test_main_1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/iwc-workflows/chipseq-pe/actions/workflows/wftest.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Person ; + schema1:name "Galaxy" . + + a schema1:Person ; + schema1:name "VGP" . + + a schema1:Person ; + schema1:name "VGP" . + + a schema1:Person ; + schema1:name "Galaxy" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "Lineage" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "assembly.fasta" . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step00_cat_pdb_input_structure2" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step00_cat_pdb_output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_reduce_remove_hydrogens_input_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step0_reduce_remove_hydrogens_output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step100_make_ndx_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step100_make_ndx_output_ndx_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_mdrun_min_output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_mdrun_min_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_mdrun_min_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step10_mdrun_min_output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_grompp_nvt_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step11_grompp_nvt_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_mdrun_nvt_output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_mdrun_nvt_output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_mdrun_nvt_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_mdrun_nvt_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step12_mdrun_nvt_output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_grompp_npt_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step13_grompp_npt_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_mdrun_npt_output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_mdrun_npt_output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_mdrun_npt_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_mdrun_npt_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step14_mdrun_npt_output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_grompp_md_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step15_grompp_md_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_mdrun_md_output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_mdrun_md_output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_mdrun_md_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_mdrun_md_output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step16_mdrun_md_output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_gmx_image1_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step17_gmx_image1_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_gmx_image2_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step18_gmx_image2_output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_gmx_trjconv_str_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step19_gmx_trjconv_str_output_str_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step1_extract_molecule_output_molecule_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_gmx_energy_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step20_gmx_energy_output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_gmx_rgyr_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step21_gmx_rgyr_output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_rmsd_first_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step22_rmsd_first_output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_rmsd_exp_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step23_rmsd_exp_output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_grompp_md_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step24_grompp_md_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step2_fix_side_chain_output_pdb_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_pdb2gmx_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_pdb2gmx_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step4_pdb2gmx_output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_editconf_config" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step5_editconf_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_solvate_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step6_solvate_output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_grompp_genion_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step7_grompp_genion_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_genion_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_genion_output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step8_genion_output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_grompp_min_config" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "step9_grompp_min_output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_structure_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_ndx_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_trr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_edr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_log_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_cpt_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_traj_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_str_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_molecule_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_xvg_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_pdb_path" . 
+ + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_gro_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_top_zip_path" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "output_tpr_path" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a ns2:TestSuite ; + schema1:mainEntity ; + schema1:name "test1" ; + ns2:definition ; + ns2:instance . + + a ns2:TestInstance ; + schema1:name "test1_1" ; + schema1:url "https://api.github.com" ; + ns2:resource "repos/galaxyproject/iwc/actions/workflows/workflow_test.yml" ; + ns2:runsOn ns2:GithubService . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:name "Tim Dudgeon" . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:Person ; + schema1:name "Gianmauro Cuccuru" . + + a schema1:Person ; + schema1:name "Björn Grüning" . + + a schema1:Person ; + schema1:name "Rachael Skyner" . + + a schema1:Person ; + schema1:name "Jack Scantlebury" . + + a schema1:Person ; + schema1:name "Susan Leung" . 
+ + a schema1:Person ; + schema1:name "Frank von Delft" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url . + + a schema1:ComputerLanguage ; + schema1:alternateName "NFL" ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url ; + schema1:version "21.10.3" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "ceamarc_env.csv" . + + a ; + dct:conformsTo "https://bioschemas.org/profiles/FormalParameter/1.0-RELEASE/" ; + schema1:name "cnidaria_filtered.csv" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:MediaObject ; + schema1:contentSize 155 ; + schema1:dateModified "2023-11-03T17:21:39" ; + schema1:name "platforms.yml" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:Dataset . + + a schema1:Dataset . + + a schema1:MediaObject ; + schema1:contentSize 114 ; + schema1:dateModified "2023-11-03T22:41:31" ; + schema1:name "include.yml" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 996 ; + schema1:dateModified "2023-11-03T22:41:31" ; + schema1:name "experiment.yml" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2478 ; + schema1:dateModified "2023-11-03T22:41:31" ; + schema1:name "jobs.yml" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1019 ; + schema1:dateModified "2023-11-03T22:41:31" ; + schema1:name "mhm.yml" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1459 ; + schema1:dateModified "2023-11-03T22:41:31" ; + schema1:name "platforms.yml" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1679 ; + schema1:dateModified "2023-11-03T22:41:31" ; + schema1:name "rocrate.yaml" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" . + + a schema1:ContactPoint ; + schema1:name "biobb forum at ask.bioexcel.eu" ; + schema1:url "https://ask.bioexcel.eu/c/BioExcel-Building-Blocks-library/23" . + + a schema1:SoftwareApplication ; + schema1:citation ; + schema1:installUrl "https://anaconda.org/bioconda/biopython" ; + schema1:license , + ; + schema1:name "BioPython" ; + schema1:url "https://biopython.org/" ; + schema1:version "1.70" . + + a schema1:ScholarlyArticle ; + schema1:author , + , + , + ; + schema1:datePublished "2015-01-01T00:00:00Z" ; + schema1:name "pmx: Automated protein structure and topology" . + + a schema1:ScholarlyArticle ; + schema1:datePublished "1995-09-02T00:00:00.000Z" ; + schema1:name "AMBER, a package of computer programs for applying molecular mechanics, normal mode analysis, molecular dynamics and free energy calculations to simulate the structural and energetic properties of molecules" . + + a schema1:ScholarlyArticle ; + schema1:author , + ; + schema1:datePublished "2010-01-01T00:00:00Z" ; + schema1:name "Protein Thermostability Calculations Using Alchemical Free Energy Simulations" . + + a schema1:ScholarlyArticle ; + schema1:author , + , + , + , + , + , + ; + schema1:name "GROMACS: High performance molecular simulations through multi-level parallelism from laptops to supercomputers" . 
+ + a schema1:ScholarlyArticle ; + schema1:author , + , + , + , + , + , + , + , + , + , + , + ; + schema1:copyrightYear "2019-09-10T00:00:00.000Z" ; + schema1:datePublished "2019-09-10T00:00:00.000Z" ; + schema1:description "In the recent years, the improvement of software and hardware performance has made biomolecular simulations a mature tool for the study of biological processes. Simulation length and the size and complexity of the analyzed systems make simulations both complementary and compatible with other bioinformatics disciplines. However, the characteristics of the software packages used for simulation have prevented the adoption of the technologies accepted in other bioinformatics fields like automated deployment systems, workflow orchestration, or the use of software containers. We present here a comprehensive exercise to bring biomolecular simulations to the “bioinformatics way of working”. The exercise has led to the development of the BioExcel Building Blocks (BioBB) library. BioBB’s are built as Python wrappers to provide an interoperable architecture. BioBB’s have been integrated in a chain of usual software management tools to generate data ontologies, documentation, installation packages, software containers and ways of integration with workflow managers, that make them usable in most computational environments." ; + schema1:license ; + schema1:name "BioExcel Building Blocks, a software library for interoperable biomolecular simulation workflows" . + + a schema1:ScholarlyArticle ; + schema1:author , + , + , + , + , + , + , + , + , + , + ; + schema1:datePublished "2009-03-20T00:00:00.000Z" ; + schema1:name "Biopython: freely available Python tools for computational molecular biology and bioinformatics" . 
+ + a schema1:Dataset, + ; + schema1:distribution ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.120.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_md_setup" ; + schema1:name "Research Object Crate for Protein MD Setup tutorial using BioExcel Building Blocks (biobb)" ; + schema1:url "https://workflowhub.eu/workflows/120/ro_crate?version=2" . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . 
+ + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . 
+ + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . 
+ + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . 
+ + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:CreativeWork ; + schema1:description """Permission to use, copy, modify, and distribute this software and its documentation with or without modifications and for any purpose and without fee is hereby granted, provided that any copyright notices appear in all copies and that both those copyright notices and this permission notice appear in supporting documentation, and that the names of the contributors or copyright holders not be used in advertising or publicity pertaining to distribution of the software without specific prior permission. + + THE CONTRIBUTORS AND COPYRIGHT HOLDERS OF THIS SOFTWARE DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE""" ; + schema1:name "Biopython License Agreement" . + + a schema1:SoftwareSourceCode ; + schema1:abstract """The Autosubmit project. It contains the templates used +by Autosubmit for the scripts used in the workflow, as well as any other +source code used by the scripts (i.e. 
any files sourced, or other source +code compiled or executed in the workflow).""" ; + schema1:codeRepository "https://github.com/kinow/auto-mhm-test-domains.git" ; + schema1:codeSampleType "template" ; + schema1:name "https://github.com/kinow/auto-mhm-test-domains.git" ; + schema1:programmingLanguage "Any" ; + schema1:runtimePlatform "Autosubmit 4.0.98" ; + schema1:sdDatePublished "2023-11-03T22:43:56+00:00" ; + schema1:targetProduct "Autosubmit" ; + schema1:version "985863e5b257fa36c6ede7e73ac4fc9fe1ff4c73" . + + a schema1:Person ; + schema1:name "Daniele Lezzi" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Sebastian Ares de Parga Regalado" . + + a schema1:Person ; + schema1:name "Adam M. Novak" . + + a schema1:Person ; + schema1:name "Mark James Abraham" . + + a schema1:Person ; + schema1:name "Fani Hatjina" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Justin A. Lemkul" . + + a schema1:Person ; + schema1:name "Kamshat Temirbayeva" . + + a schema1:Person ; + schema1:name "Slađan Rašić" . + + a schema1:Person ; + schema1:name "Tim Dudgeon" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-7036-9987" ; + schema1:name "Wudmir Rojas" . + + a schema1:Person ; + schema1:name "Maria Bouga" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-8197-3303" ; + schema1:name "Stevie Pederson" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-8875-7304" ; + schema1:name "Thomas Roetzer-Pejrimovsky" . + + a schema1:Person ; + schema1:name "Andrew Dalke" . + + a schema1:Person ; + schema1:name "Rutger Vos" . + + a schema1:Person ; + schema1:name "Peter J. A. Cock" . + + a schema1:Person ; + schema1:name "Zlatko Puškadija" . + + a schema1:Person ; + schema1:name "M. Alice Pinto" . + + a schema1:Person ; + schema1:name "Andrea Guarracino" . 
+ + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-0320-0214" ; + schema1:name "Matthias Haimel" . + + a schema1:Person ; + schema1:name "Andrzej Oleksa" . + + a schema1:Person ; + schema1:name "Pedro João Rodrigues" . + + a schema1:Person ; + schema1:name "Josep Ll. Gelpi" . + + a schema1:Person ; + schema1:name "Franck Dedeine" . + + a schema1:Person ; + schema1:name "Usman Rashid" . + + a schema1:Person ; + schema1:name "Leonidas Charistos" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-1640-3045" ; + schema1:name "Justin Sonneck" . + + a schema1:Person ; + schema1:name "Iddo Friedberg" . + + a schema1:Person ; + schema1:name "Max Schubach" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-2319-2960" ; + schema1:name "Woosub Shin" . + + a schema1:Person ; + schema1:name "Erik Lindahl" . + + a schema1:Person ; + schema1:name "Irfan Kandemir" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-2937-9259" ; + schema1:name "Anton Korobeynikov" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-2977-5032" ; + schema1:name "Johan Gustafsson" . + + a schema1:Person ; + schema1:name "Jeremy C. Smith" . + + a schema1:Person ; + schema1:name "Brad A. Chapman" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Claudio Satriano" . + + a schema1:Person ; + schema1:name "Fabian Deister" . + + a schema1:Person ; + schema1:name "Aibyn Torekhanov" . + + a schema1:Person ; + schema1:name "Luc Cornet" . + + a schema1:Person ; + schema1:name "Vincent Hervé" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-3668-8711" ; + schema1:name "Davide Gurnari" . + + a schema1:Person ; + schema1:name "Marin Kovačić" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-3811-4527" ; + schema1:name "Rafael Terra" . 
+ + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-3935-2279" ; + schema1:name "Sarah Beecroft" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-4416-5224" ; + schema1:name "Ian Brennan" . + + a schema1:Person ; + schema1:name "Zhanar Sheralieva" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Jose Raul Bravo Martinez" . + + a schema1:Person ; + schema1:name "Eliza Căuia" . + + a schema1:Person ; + schema1:name "Jeffrey T. Chang" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-4594-8423" ; + schema1:name "Willem de Koning" . + + a schema1:Person ; + schema1:name "Cymon J. Cox" . + + a schema1:Person ; + schema1:name "Bonface Munyoki" . + + a schema1:Person ; + schema1:name "Szilvia Kusza" . + + a schema1:Person ; + schema1:name "Anton Nekrutenko" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Natalia Poiata" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-6435-7100" ; + schema1:name "Richard Lupat" . + + a schema1:Person ; + schema1:name "Vytautas Gapsys" . + + a schema1:Person ; + schema1:name "Ulzhan Nuralieva" . + + a schema1:Person ; + schema1:name "Tiago Antao" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-7490-8260" ; + schema1:name "Dominik Lux" . + + a schema1:Person ; + schema1:name "Berk Hess" . + + a schema1:Person ; + schema1:name "Pjotr Prins" . + + a schema1:Person ; + schema1:name "Bartek Wilczynski" . + + a schema1:Person ; + schema1:name "Modesto Orozco" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-8639-8325" ; + schema1:name "Carlos Classen" . + + a schema1:Person ; + schema1:name "Michael Heuer" . + + a schema1:Person ; + schema1:name "Kateřina Storchmannová" . + + a schema1:Person ; + schema1:name "Michiel J. L. de Hoon" . 
+ + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Riccardo Rossi Bernecoli" . + + a schema1:Person ; + schema1:name "Szilárd Páll" . + + a schema1:Person ; + schema1:name "Michael Lloyd" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-1075-1628" ; + schema1:name "David Yuan" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-1157-9030" ; + schema1:name "Camille Juigné" . + + a schema1:Person ; + schema1:name "Carole Goble" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-1302-3909" ; + schema1:name "Felicita Gernhardt" . + + a schema1:Person ; + schema1:name "Leonid Kostrykin" . + + a schema1:Person ; + schema1:name "Adrian Siceanu" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-1488-5148" ; + schema1:name "Michael Roach" . + + a schema1:Person ; + schema1:name "Diego Carvalho" . + + a schema1:Person ; + schema1:name "Roland Schulz" . + + a schema1:Person ; + schema1:name "Nachida Tadrent" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-2470-9519" ; + schema1:name "Tim Booth" . + + a schema1:Person ; + schema1:name "Janez Prešern" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-2739-1315" ; + schema1:name "Pablo Riesgo Ferreiro" . + + a schema1:Person ; + schema1:name "Leandro Liborio" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-2878-4049" ; + schema1:name "Aishwarya Iyer" . + + a schema1:Person ; + schema1:name "Thomas Hamelryck" . + + a schema1:Person ; + schema1:name "Teemu Murtola" . + + a schema1:Person ; + schema1:name "Thomas Liener" . + + a schema1:Person ; + schema1:name "Marina Kennerson" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-3549-9115" ; + schema1:name "Alejandra Escobar" . + + a schema1:Person ; + schema1:name "Peter Amstutz" . 
+ + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-3777-5945" ; + schema1:name "Tazro Inutano" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-3876-6581" ; + schema1:name "Agata Kilar" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-4684-317X" ; + schema1:name "Xiaokang Zhang" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-4771-6113" ; + schema1:name "Sagane Joye-Dind" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0009-0001-3228-105X" ; + schema1:name "Timon Schlegel" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0009-0007-9672-6728" ; + schema1:name "Xiaolong Luo" . + + a schema1:MediaObject . + + a schema1:SoftwareApplication ; + schema1:citation , + ; + schema1:description """pmx is a python library that allows users to setup and analyse molecular dynamics simulations with the Gromacs package. Among its main features are the setup and analysis of alchemical free energy calculations for protein, nucleic acid, and small molecule mutations. +""" ; + schema1:license ; + schema1:name "pmx-biobb" ; + schema1:softwareRequirements , + ; + schema1:url "https://degrootlab.github.io/pmx/" . + + a schema1:SoftwareApplication ; + schema1:license ; + schema1:name "SciPy" ; + schema1:url "https://www.scipy.org/" ; + schema1:version "1.7.1." . + + a schema1:ComputerLanguage ; + schema1:name "Quarto Markdown" ; + schema1:url "https://quarto.org/docs/authoring/markdown-basics.html" . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject ; + schema1:name "sdrf" . + + a schema1:MediaObject ; + schema1:name "fasta" . + + a schema1:MediaObject ; + schema1:name "parameters" . 
+ + a schema1:Organization ; + schema1:alternateName "DANS" ; + schema1:legalName "Nederlands Instituut voor Permanente Toegang tot Digitale Onderzoeksgegevens" ; + schema1:name "Data Archiving and Networked Services" ; + schema1:url "https://dans.knaw.nl/" . + + a schema1:CreativeWork ; + schema1:identifier "CC0-1.0" ; + schema1:name "Creative Commons Zero v1.0 Universal" ; + schema1:url "https://creativecommons.org/publicdomain/zero/1.0/" . + + a schema1:CreativeWork ; + schema1:identifier "LGPL-2.1-or-later" ; + schema1:name "GNU Lesser General Public License v2.1 or later" ; + schema1:url "https://www.gnu.org/licenses/old-licenses/lgpl-2.1-standalone.html" . + + a schema1:CreativeWork ; + schema1:identifier "LGPL-3.0-only" ; + schema1:name "GNU Lesser General Public License v3.0 only" ; + schema1:url "https://www.gnu.org/licenses/lgpl-3.0-standalone.html" . + + a schema1:MediaObject . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Snakemake" ; + schema1:url ; + schema1:version "6.5.0" . + + a schema1:Organization ; + schema1:name "workflow4metabolomics" . + + a schema1:Organization ; + schema1:name "workflow4metabolomics" . + + a schema1:Collection ; + schema1:name "Sanger Tree of Life Assembly method" . + + a schema1:Collection ; + schema1:name "BY-COVID related workflows" . + + a schema1:Person ; + schema1:name "Sarah Beecroft" . + + a schema1:Person ; + schema1:name "Camille Juigné" . + + a schema1:Person ; + schema1:name "Richard Lupat" . + + a schema1:Person ; + schema1:name "Johannes Köster" . + + a schema1:Person ; + schema1:name "Valentin Tilloy" . + + a schema1:Person ; + schema1:name "Michael Roach" . + + a schema1:Person ; + schema1:name "Elida Schneltzer" . + + a schema1:Person ; + schema1:name "Stian Soiland-Reyes" . + + a schema1:Person ; + schema1:name "Luiz Gadelha" . + + a schema1:Person ; + schema1:name "Woosub Shin" . + + a schema1:Person ; + schema1:name "Ryan Patterson-Cross" . 
+ + a schema1:Person ; + schema1:name "Anthony Bretaudeau" . + + a schema1:Person ; + schema1:name "Pasi Korhonen" . + + a schema1:Person ; + schema1:name "Tazro Inutano" . + + a schema1:Person ; + schema1:name "Marlene Rezk" . + + a schema1:Person ; + schema1:name "Tim Booth" . + + a schema1:Person ; + schema1:name "Alejandra Escobar" . + + a schema1:Person ; + schema1:name "Lucas Cruz" . + + a schema1:Person ; + schema1:name "Pablo Riesgo Ferreiro" . + + a schema1:Person ; + schema1:name "Felicita Gernhardt" . + + a schema1:Person ; + schema1:name "Andrey Prjibelski" . + + a schema1:Person ; + schema1:name "Xiaokang Zhang" . + + a schema1:Person ; + schema1:name "Zavolan Lab" . + + a schema1:Person ; + schema1:name "Stevie Pederson" . + + a schema1:Person ; + schema1:name "Anton Korobeynikov" . + + a schema1:Person ; + schema1:name "Willem de Koning" . + + a schema1:Person ; + schema1:name "Elisabetta Spinazzola" . + + a schema1:Person ; + schema1:name "Agata Kilar" . + + a schema1:Person ; + schema1:name "Sagane Joye-Dind" . + + a schema1:Person ; + schema1:name "Akshay Akshay" . + + a schema1:Person ; + schema1:name "Davide Gurnari" . + + a schema1:Person ; + schema1:name "Johan Gustafsson" . + + a schema1:Person ; + schema1:name "David Yuan" . + + a schema1:Person ; + schema1:name "Justin Sonneck" . + + a schema1:Person ; + schema1:name "Damon-Lee Pointon" . + + a schema1:Person ; + schema1:name "William Eagles" . + + a schema1:Person ; + schema1:name "Zafran Hussain Shah" . + + a schema1:Person ; + schema1:name "Nandan Deshpande" . + + a schema1:Person ; + schema1:name "Rafael Terra" . + + a schema1:Person ; + schema1:name "Ian Brennan" . + + a schema1:Person ; + schema1:name "Aishwarya Iyer" . + + a schema1:Person ; + schema1:name "Dominik Lux" . + + a schema1:Person ; + schema1:name "Matthias Haimel" . + + a schema1:Person ; + schema1:name "Andrii Neronov" . + + a schema1:Person ; + schema1:name "Thomas Roetzer-Pejrimovsky" . 
+ + a schema1:Person ; + schema1:name "Xiaolong Luo" . + + a schema1:Person ; + schema1:name "Wudmir Rojas" . + + a schema1:Person ; + schema1:name "Carlos Classen" . + + a schema1:Person ; + schema1:name "Timon Schlegel" . + + a schema1:Person ; + schema1:name "Carlos Oscar Sorzano Sanchez" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Edinburgh Genomics" . + + a schema1:Organization, + schema1:Project ; + schema1:name "KircherLab" . + + a schema1:Organization, + schema1:Project ; + schema1:name "BCCM_ULC" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Computational Science at HZDR" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Big data in biomedicine" . + + a schema1:Organization, + schema1:Project ; + schema1:name "MISTIC" . + + a schema1:Organization, + schema1:Project ; + schema1:name "NMR Workflow" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Guigó lab" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Statistical genetics" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Delineating Regions-of-interest for Mass Spectrometry Imaging by Multimodally Corroborated Spatial Segmentation" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Bioinformatics Unit @ CRG" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Black Ochre Data Labs" . + + a schema1:Organization, + schema1:Project ; + schema1:name "IRRI Bioinformatics Group" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Zavolan Lab" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Bioinformatics workflows for life science" . + + a schema1:Organization, + schema1:Project ; + schema1:name "EOSC-Life WP3 OC Team, cross RI project" . + + a schema1:Organization, + schema1:Project ; + schema1:name "ARA-dev" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Mendel Centre for Plant Genomics and Proteomics" . 
+ + a schema1:Organization, + schema1:Project ; + schema1:name "Metagenomic tools" . + + a schema1:Organization, + schema1:Project ; + schema1:name "ERGA Annotation" . + + a schema1:Organization, + schema1:Project ; + schema1:name "MLme: Machine Learning Made Easy" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Dioscuri TDA" . + + a schema1:Organization, + schema1:Project ; + schema1:name "NIH CFDE Playbook Workflow Partnership" . + + a schema1:Organization, + schema1:Project ; + schema1:name "MMV-Lab" . + + a schema1:Organization, + schema1:Project ; + schema1:name "EMBL-CBA" . + + a schema1:Organization, + schema1:Project ; + schema1:name "EBP-Nor" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Evaluation of Swin Transformer and knowledge transfer for denoising of super-resolution structured illumination microscopy data" . + + a schema1:Organization, + schema1:Project ; + schema1:name "COVID-19 PubSeq: Public SARS-CoV-2 Sequence Resource" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Chemical Data Lab" . + + a schema1:Organization, + schema1:Project ; + schema1:name "HP2NET - Framework for construction of phylogenetic networks on High Performance Computing (HPC) environment" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Building spatio-temporal workflows in Galaxy" . + + a schema1:Organization, + schema1:Project ; + schema1:name "CEMCOF" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Jackson Laboratory NGS-Ops" . + + a schema1:Organization, + schema1:Project ; + schema1:name "BRAIN - Biomedical Research on Adult Intracranial Neoplasms" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Italy-Covid-data-Portal" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Institute for Human Genetics and Genomic Medicine Aachen" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Plant-Food-Research-Open" . 
+ + a schema1:Organization, + schema1:Project ; + schema1:name "WGGC" . + + a schema1:Organization, + schema1:Project ; + schema1:name "EOSC-Life WP3" . + + a schema1:Organization, + schema1:Project ; + schema1:name "MOLGENIS" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Janis" . + + a schema1:Organization, + schema1:Project ; + schema1:name "IAA-CSIC" . + + a schema1:Organization, + schema1:Project ; + schema1:name "FAME" . + + a schema1:Organization, + schema1:Project ; + schema1:name "CHU Limoges - UF9481 Bioinformatique / CNR Herpesvirus" . + + a schema1:Organization, + schema1:Project ; + schema1:name "HecatombDevelopment" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Bioinformatics and Biostatistics (BIO2 ) Core" . + + a schema1:Organization, + schema1:Project ; + schema1:name "VIB Bioinformatics Core" . + + a schema1:Organization, + schema1:Project ; + schema1:name "CINECA" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Gyn Department" . + + a schema1:Organization, + schema1:Project ; + schema1:name "AGRF BIO" . + + a schema1:DataDownload ; + schema1:contentSize "11867" ; + schema1:encodingFormat "application/zip" . + + a schema1:Organization ; + schema1:name "Department of Computer Science" ; + schema1:parentOrganization ; + schema1:url "https://www.cs.manchester.ac.uk/" . + + a schema1:Organization ; + schema1:name "ICREA" ; + schema1:url "https://www.icrea.cat/" . + + a schema1:Organization ; + schema1:name "CECH" ; + schema1:url "https://www.imim.cat/programesrecerca/neurociencies/en_cech.htm" . + + a schema1:CreativeWork ; + schema1:identifier "BSD-3-Clause" ; + schema1:isBasedOn ; + schema1:url "https://www.scipy.org/scipylib/license.html" . + + a schema1:Organization ; + schema1:name "Universitat de Barcelona" ; + schema1:url "https://www.ub.edu/" . 
+ + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "Raul.Sirvent@bsc.es" ; + schema1:identifier "Raul.Sirvent@bsc.es" ; + schema1:url "https://orcid.org/0000-0003-0606-2512" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "Rosa.M.Badia@bsc.es" ; + schema1:identifier "Rosa.M.Badia@bsc.es" ; + schema1:url "https://orcid.org/0000-0003-2941-5499" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "adam.hospital@irbbarcelona.org" ; + schema1:identifier "adam.hospital@irbbarcelona.org" ; + schema1:url "https://orcid.org/0000-0002-8291-8071" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "alessandro.danca@cmcc.it" ; + schema1:identifier "alessandro.danca@cmcc.it" ; + schema1:url "https://orcid.org/0000-0002-0372-2530" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "architdabral1234567890@gmail.com" ; + schema1:identifier "architdabral1234567890@gmail.com" ; + schema1:url "https://orcid.org/0009-0009-6701-3547" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "castro@anamat.cie.uma.es" ; + schema1:identifier "castro@anamat.cie.uma.es" ; + schema1:url "https://orcid.org/0000-0003-3164-7715" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "cristian.tatu@bsc.es" ; + schema1:identifier "cristian.tatu@bsc.es" ; + schema1:url "https://orcid.org/0009-0003-8848-9436" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "csl@uma.es" ; + schema1:identifier "csl@uma.es" ; + schema1:url "https://orcid.org/0000-0002-5493-5982" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "fabrizio.bernardi@ingv.it" ; + schema1:identifier "fabrizio.bernardi@ingv.it" ; + schema1:url "https://orcid.org/0000-0002-0414-8411" . 
+ + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "fabrizio.romano@ingv.it" ; + schema1:identifier "fabrizio.romano@ingv.it" ; + schema1:url "https://orcid.org/0000-0003-2725-3596" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "fernando.vazquez@bsc.es" ; + schema1:identifier "fernando.vazquez@bsc.es" ; + schema1:url "https://orcid.org/0000-0001-5634-509X" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "finn.lovholt@ngi.no" ; + schema1:identifier "finn.lovholt@ngi.no" ; + schema1:url "https://orcid.org/0000-0003-1019-7321" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "francisco.conejero@bsc.es" ; + schema1:identifier "francisco.conejero@bsc.es" ; + schema1:url "https://orcid.org/0000-0001-6401-6229" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "jacopo.selva@unina.it" ; + schema1:identifier "jacopo.selva@unina.it" ; + schema1:url "https://orcid.org/0000-0001-6263-6934" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "jalemkul@vt.edu" ; + schema1:identifier "jalemkul@vt.edu" ; + schema1:url "https://orcid.org/0000-0001-6661-8653" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "javier.conejero@bsc.es" ; + schema1:identifier "javier.conejero@bsc.es" ; + schema1:url "https://orcid.org/0000-0001-6401-6229" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "jorge.ejarque@bsc.es" ; + schema1:identifier "jorge.ejarque@bsc.es" ; + schema1:url "https://orcid.org/0000-0003-4725-5097" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "jose.raul.bravo@upc.edu" ; + schema1:identifier "jose.raul.bravo@upc.edu" ; + schema1:url "https://orcid.org/0000-0002-4465-7536" . 
+ + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "louise.cordrie@ingv.it" ; + schema1:identifier "louise.cordrie@ingv.it" ; + schema1:url "https://orcid.org/0000-0003-2290-8637" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "macias@anamat.cie.uma.es" ; + schema1:identifier "macias@anamat.cie.uma.es" ; + schema1:url "https://orcid.org/0000-0002-3010-8050" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "manuela.volpe@ingv.it" ; + schema1:identifier "manuela.volpe@ingv.it" ; + schema1:url "https://orcid.org/0000-0003-4551-3339" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "marcah@uma.es" ; + schema1:identifier "marcah@uma.es" ; + schema1:url "https://orcid.org/0000-0003-3130-1335" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "raul.sirvent@bsc.es" ; + schema1:identifier "raul.sirvent@bsc.es" ; + schema1:url "https://orcid.org/0000-0003-0606-2512" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "roberto.tonini@ingv.it" ; + schema1:identifier "roberto.tonini@ingv.it" ; + schema1:url "https://orcid.org/0000-0001-7617-7206" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "rosa.m.badia@bsc.es" ; + schema1:identifier "rosa.m.badia@bsc.es" ; + schema1:url "https://orcid.org/0000-0003-2941-5499" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "rrossi@cimne.upc.edu" ; + schema1:identifier "rrossi@cimne.upc.edu" ; + schema1:url "https://orcid.org/0000-0003-0528-7074" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "sebastian.ares@upc.edu" ; + schema1:identifier "sebastian.ares@upc.edu" ; + schema1:url "https://orcid.org/0000-0001-5709-4683" . 
+ + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "sonia.scardigno@cmcc.it" ; + schema1:identifier "sonia.scardigno@cmcc.it" ; + schema1:url "https://orcid.org/0000-0003-2347-3698" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "stefano.lorito@ingv.it" ; + schema1:identifier "stefano.lorito@ingv.it" ; + schema1:url "https://orcid.org/0000-0002-1458-2131" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "steven.gibbons@ngi.no" ; + schema1:identifier "steven.gibbons@ngi.no" ; + schema1:url "https://orcid.org/0000-0002-7822-0244" . + + a schema1:Dataset ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , 
+ , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:name "outdir" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/978?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/demultiplex" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/demultiplex" ; + schema1:sdDatePublished "2024-08-05 10:24:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/978/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8275 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:47Z" ; + schema1:dateModified "2024-06-11T12:54:47Z" ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/978?version=7" ; + schema1:keywords "bases2fastq, bcl2fastq, demultiplexing, elementbiosciences, illumina" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/demultiplex" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/978?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# rquest-omop-worker-workflows\r +\r +Source for workflow definitions for the open source RQuest OMOP Worker tool developed for Hutch/TRE-FX\r +\r +Note: ARM workflows are currently broken. 
x86 ones work.\r +\r +## Inputs\r +\r +### Body\r +Sample input payload:\r +\r +```json\r +{\r + "task_id": "job-2023-01-13-14: 20: 38-",\r + "project": "",\r + "owner": "",\r + "cohort": {\r + "groups": [\r + {\r + "rules": [\r + {\r + "varname": "OMOP",\r + "varcat": "Person",\r + "type": "TEXT",\r + "oper": "=",\r + "value": "8507"\r + }\r + ],\r + "rules_oper": "AND"\r + }\r + ],\r + "groups_oper": "OR"\r + },\r + "collection": "",\r + "protocol_version": "",\r + "char_salt": "",\r + "uuid": ""\r +}\r +```\r +\r +### Database access\r +\r +Currently this workflow requires inputs for connecting to the database it will run queries against.\r +\r +In future this may be moved to environment variables.""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/471?version=3" ; + schema1:isBasedOn "https://github.com/HDRUK/rquest-omop-worker-workflows" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for rquest-omop-worker-workflow" ; + schema1:sdDatePublished "2024-08-05 10:27:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/471/ro_crate?version=3" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 5257 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 607 ; + schema1:creator ; + schema1:dateCreated "2023-10-23T13:58:11Z" ; + schema1:dateModified "2023-10-23T13:58:11Z" ; + schema1:description """# rquest-omop-worker-workflows\r +\r +Source for workflow definitions for the open source RQuest OMOP Worker tool developed for Hutch/TRE-FX\r +\r +Note: ARM workflows are currently broken. 
x86 ones work.\r +\r +## Inputs\r +\r +### Body\r +Sample input payload:\r +\r +```json\r +{\r + "task_id": "job-2023-01-13-14: 20: 38-",\r + "project": "",\r + "owner": "",\r + "cohort": {\r + "groups": [\r + {\r + "rules": [\r + {\r + "varname": "OMOP",\r + "varcat": "Person",\r + "type": "TEXT",\r + "oper": "=",\r + "value": "8507"\r + }\r + ],\r + "rules_oper": "AND"\r + }\r + ],\r + "groups_oper": "OR"\r + },\r + "collection": "",\r + "protocol_version": "",\r + "char_salt": "",\r + "uuid": ""\r +}\r +```\r +\r +### Database access\r +\r +Currently this workflow requires inputs for connecting to the database it will run queries against.\r +\r +In future this may be moved to environment variables.""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/471?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "rquest-omop-worker-workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/471?version=3" ; + schema1:version 3 ; + ns1:input , + , + , + ; + ns1:output . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Workflow for Creating a large disease network from various datasets and databases for IBM, and applying the active subnetwork identification method MOGAMUN." ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.681.7" ; + schema1:isBasedOn "https://github.com/jdwijnbergen/IBM_ASI_workflow.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Inclusion Body Myositis Active Subnetwork Identification Workflow" ; + schema1:sdDatePublished "2024-08-05 10:25:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/681/ro_crate?version=7" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 26610 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8257 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-27T16:19:23Z" ; + schema1:dateModified "2024-02-01T11:27:19Z" ; + schema1:description "Workflow for Creating a large disease network from various datasets and databases for IBM, and applying the active subnetwork identification method MOGAMUN." ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/681?version=6" ; + schema1:keywords "Bioinformatics, CWL, Genomics, Transcriptomics, Protein-Protein Interaction" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Inclusion Body Myositis Active Subnetwork Identification Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/681?version=7" ; + schema1:version 7 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Coprolite Identification" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/974?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/coproid" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/coproid" ; + schema1:sdDatePublished "2024-08-05 10:24:10 +0100" ; + schema1:url "https://workflowhub.eu/workflows/974/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4835 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:45Z" ; + schema1:dateModified "2024-06-11T12:54:45Z" ; + schema1:description "Coprolite Identification" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/974?version=2" ; + schema1:keywords "adna, ancient-dna, coprolite, microbiome" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/coproid" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/974?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/986?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/fetchngs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fetchngs" ; + schema1:sdDatePublished "2024-08-05 10:23:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/986/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6171 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:52Z" ; + schema1:dateModified "2024-06-11T12:54:52Z" ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/986?version=13" ; + schema1:keywords "ddbj, download, ena, FASTQ, geo, sra, synapse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fetchngs" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/986?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """## Summary\r +\r +The PPI information aggregation pipeline starts getting all the datasets in [GEO](https://www.ncbi.nlm.nih.gov/geo/) database whose material was generated using expression profiling by high throughput sequencing. From each database identifiers, it extracts the supplementary files that had the counts table. Once finishing the download step, it identifies those that were normalized or had the raw counts to normalize. It also identify and map the gene ids to uniprot (the ids found usually were from HGNC and Ensembl). For each normalized counts table belonging to some experiment, il filters those which have the proteins (already mapped from HGNC to Uniprot identifiers) in the pairs in evaluation. Then, it calculates the correlation matrix based on Pearson method in the tables and saves the respective pairs correlation value for each table. 
Finally, a repor is made for each pair in descending order of correlation value with the experiment identifiers.\r +\r +## Requirements:\r +* Python packages needed:\r + - os\r + - scipy\r + - pandas\r + - sklearn\r + - Bio python\r + - numpy\r +\r +## Usage Instructions\r +* Preparation:\r + 1. ````git clone https://github.com/YasCoMa/PipeAggregationInfo.git````\r + 2. ````cd PipeAggregationInfo````\r + 3. ````pip3 install -r requirements.txt````\r +\r +### Preprocessing pipeline\r +* Go to the ncbi [GDS database webpage](https://www.ncbi.nlm.nih.gov/gds), use the key words to filter your gds datasets of interest and save the results as file ("Send to" option), and choose "Summary (text)"\r +* Alternatively, we already saved the results concerning protein interactions, you may use them to run preprocessing in order to obtain the necessary files for the main pipeline\r +* Running preprocessing:\r + - ````cd preprocessing````\r + - ````python3 data_preprocessing.py ./workdir_preprocessing filter_files````\r + - ````cd ../````\r + - Copy the generated output folder "data_matrices_count" into the workflow folder: ````cp -R preprocessing/workdir_preprocessing/data_matrices_count .````\r +\r +### Main pipeline\r +\r +* Pipeline parameters:\r + - __-rt__ or __--running_type__
\r + Use to indicate the step you want to execute (it is desirable following the order):
\r + 1 - Make the process of finding the experiments and ranking them by correlation
\r + 2 - Select pairs that were already processed and ranked making a separated folder of interest\r +\r + - __-fo__ or __--folder__
\r + Folder to store the files (use the folder where the other required file can be found)\r + \r + - __-if__ or __--interactome_file__
\r + File with the pairs (two columns with uniprot identifiers in tsv format)
\r + \r + Example of this file: running_example/all_pairs.tsv\r +\r + - __-spf__ or __--selected_pairs_file__
\r + File with PPIs of interest (two columns with uniprot identifiers in tsv format)
\r + \r + Example of this file: running_example/selected_pairs.tsv\r +\r +* Running modes examples:\r + 1. Run step 1:
\r + ````python3 pipeline_expression_pattern.py -rt 1 -fo running_example/ -if all_pairs.tsv ````\r +\r + 2. Run step 2:
\r + ````python3 pipeline_expression_pattern.py -rt 2 -fo running_example/ -spf selected_pairs.tsv ````\r +\r +## Bug Report\r +Please, use the [Issue](https://github.com/YasCoMa/PipeAggregationInfo/issues) tab to report any bug.""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/619?version=1" ; + schema1:isBasedOn "https://github.com/YasCoMa/PipeAggregationInfo" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PipePatExp - Pipeline to aggregate gene expression correlation information for PPI" ; + schema1:sdDatePublished "2024-08-05 10:27:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/619/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 23984 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7332 ; + schema1:creator ; + schema1:dateCreated "2023-10-22T00:02:52Z" ; + schema1:dateModified "2023-10-22T00:02:52Z" ; + schema1:description """## Summary\r +\r +The PPI information aggregation pipeline starts getting all the datasets in [GEO](https://www.ncbi.nlm.nih.gov/geo/) database whose material was generated using expression profiling by high throughput sequencing. From each database identifiers, it extracts the supplementary files that had the counts table. Once finishing the download step, it identifies those that were normalized or had the raw counts to normalize. It also identify and map the gene ids to uniprot (the ids found usually were from HGNC and Ensembl). For each normalized counts table belonging to some experiment, il filters those which have the proteins (already mapped from HGNC to Uniprot identifiers) in the pairs in evaluation. Then, it calculates the correlation matrix based on Pearson method in the tables and saves the respective pairs correlation value for each table. 
Finally, a repor is made for each pair in descending order of correlation value with the experiment identifiers.\r +\r +## Requirements:\r +* Python packages needed:\r + - os\r + - scipy\r + - pandas\r + - sklearn\r + - Bio python\r + - numpy\r +\r +## Usage Instructions\r +* Preparation:\r + 1. ````git clone https://github.com/YasCoMa/PipeAggregationInfo.git````\r + 2. ````cd PipeAggregationInfo````\r + 3. ````pip3 install -r requirements.txt````\r +\r +### Preprocessing pipeline\r +* Go to the ncbi [GDS database webpage](https://www.ncbi.nlm.nih.gov/gds), use the key words to filter your gds datasets of interest and save the results as file ("Send to" option), and choose "Summary (text)"\r +* Alternatively, we already saved the results concerning protein interactions, you may use them to run preprocessing in order to obtain the necessary files for the main pipeline\r +* Running preprocessing:\r + - ````cd preprocessing````\r + - ````python3 data_preprocessing.py ./workdir_preprocessing filter_files````\r + - ````cd ../````\r + - Copy the generated output folder "data_matrices_count" into the workflow folder: ````cp -R preprocessing/workdir_preprocessing/data_matrices_count .````\r +\r +### Main pipeline\r +\r +* Pipeline parameters:\r + - __-rt__ or __--running_type__
\r + Use to indicate the step you want to execute (it is desirable following the order):
\r + 1 - Make the process of finding the experiments and ranking them by correlation
\r + 2 - Select pairs that were already processed and ranked making a separated folder of interest\r +\r + - __-fo__ or __--folder__
\r + Folder to store the files (use the folder where the other required file can be found)\r + \r + - __-if__ or __--interactome_file__
\r + File with the pairs (two columns with uniprot identifiers in tsv format)
\r + \r + Example of this file: running_example/all_pairs.tsv\r +\r + - __-spf__ or __--selected_pairs_file__
\r + File with PPIs of interest (two columns with uniprot identifiers in tsv format)
\r + \r + Example of this file: running_example/selected_pairs.tsv\r +\r +* Running modes examples:\r + 1. Run step 1:
\r + ````python3 pipeline_expression_pattern.py -rt 1 -fo running_example/ -if all_pairs.tsv ````\r +\r + 2. Run step 2:
\r + ````python3 pipeline_expression_pattern.py -rt 2 -fo running_example/ -spf selected_pairs.tsv ````\r +\r +## Bug Report\r +Please, use the [Issue](https://github.com/YasCoMa/PipeAggregationInfo/issues) tab to report any bug.""" ; + schema1:image ; + schema1:keywords "Bioinformatics, gene expression correlation, gene expression data wrangling, geo database mining" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "PipePatExp - Pipeline to aggregate gene expression correlation information for PPI" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/619?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r 
+![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.285.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_abc_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python ABC MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/285/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8001 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T08:44:03Z" ; + schema1:dateModified "2023-04-14T08:45:01Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 
2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/285?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python ABC MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_abc_md_setup/python/workflow.py" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Take an anndata file, and perform basic QC with scanpy. Produces a filtered AnnData object." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/467?version=4" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq_CellQC" ; + schema1:sdDatePublished "2024-08-05 10:24:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/467/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 21434 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-05-30T05:51:13Z" ; + schema1:dateModified "2024-05-30T05:51:13Z" ; + schema1:description "Take an anndata file, and perform basic QC with scanpy. Produces a filtered AnnData object." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/467?version=3" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "scRNAseq_CellQC" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/467?version=4" ; + schema1:version 4 ; + ns1:input , + , + , + , + ; + ns1:output , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Detects SNPs and INDELs using VARSCAN2." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/36?version=1" ; + schema1:isBasedOn "https://usegalaxy.eu/u/ambarishk/w/varscan" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for COVID-19: VARSCAN" ; + schema1:sdDatePublished "2024-08-05 10:33:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/36/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 31968 ; + schema1:dateCreated "2020-06-18T22:57:27Z" ; + schema1:dateModified "2023-02-13T14:06:46Z" ; + schema1:description "Detects SNPs and INDELs using VARSCAN2." ; + schema1:image ; + schema1:keywords "Galaxy, VARSCAN2, SNPs, INDELs" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "COVID-19: VARSCAN" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/36?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 221636 . 
+ + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:datePublished "2024-03-04T20:06:19+00:00" ; + schema1:description "PyCOMPSs implementation of Probabilistic Tsunami Forecast (PTF). PTF explicitly treats data- and forecast-uncertainties, enabling alert level definitions according to any predefined level of conservatism, which is connected to the average balance of missed-vs-false-alarms. Run of the Kos-Bodrum 2017 event test-case with 1000 scenarios, 8h tsunami simulation for each and forecast calculations for partial and full ensembles with focal mechanism and tsunami data updates." ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license 
"https://creativecommons.org/licenses/by-nc-nd/4.0/" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "PyCOMPSs Probabilistic Tsunami Forecast (PTF) - Kos-Bodrum 2017 earthquake and tsunami test-case" ; + schema1:publisher , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 1685175 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 243467 ; + schema1:dateModified "2023-10-19T13:01:10" ; + schema1:name "POIs.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 42945012 ; + schema1:dateModified "2023-10-19T13:01:10" ; + schema1:name "regional_domain.grd" ; + schema1:sdDatePublished "2024-03-04T20:06:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 37218 ; + schema1:dateModified "2023-10-19T13:01:10" ; + schema1:name "regional_domain_POIs_depth.dat" ; + schema1:sdDatePublished "2024-03-04T20:06:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 5895 ; + schema1:dateModified "2023-10-19T13:13:04" ; + schema1:name "Step1_config_template_mod.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2126 ; + schema1:dateModified "2023-10-19T12:59:46" ; + schema1:name "Step2_parfile_tmp.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 231 ; + schema1:dateModified "2024-03-04T14:23:49" ; + schema1:name "parfile_mod.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:19+00:00" . 
+ + a schema1:Dataset ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:name "Run" ; + schema1:sdDatePublished "2024-03-04T20:06:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name ".gitkeep" ; + schema1:sdDatePublished "2024-03-04T20:06:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101755 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "Step1_scenario_list_BS.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2061820 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "ptf_out.hdf5" ; + schema1:sdDatePublished "2024-03-04T20:06:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "Step2_BS_failed.log" ; + schema1:sdDatePublished "2024-03-04T20:06:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2086912 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "ptf_out.hdf5" ; + schema1:sdDatePublished "2024-03-04T20:06:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 6538 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "ptf_main.config" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 5142 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "sim_files.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 40 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulBS_lb_01level_01proc.bin" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS1.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS10.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS100.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS101.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS102.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS103.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS104.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS105.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS106.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS107.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS108.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS109.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS11.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS110.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS111.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS112.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS113.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS114.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS115.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS116.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS117.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS118.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS119.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS12.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS120.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS121.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS122.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS123.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS124.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS125.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS126.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS127.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS128.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS129.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS13.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS130.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS131.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS132.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS133.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS134.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS135.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS136.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS137.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS138.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS139.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS14.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS140.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS141.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS142.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS143.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS144.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS145.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS146.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS147.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS148.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS149.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS15.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS150.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS151.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS152.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS153.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS154.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS155.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS156.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS157.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS158.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS159.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS16.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS160.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS161.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS162.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS163.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS164.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS165.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS166.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS167.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS168.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS169.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS17.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS170.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS171.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS172.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS173.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS174.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS175.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS176.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS177.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS178.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS179.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS18.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS180.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS181.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS182.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS183.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS184.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS185.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS186.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS187.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS188.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS189.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS19.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS190.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS191.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS192.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS193.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS194.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS195.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS196.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS197.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS198.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS199.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS2.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS20.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS200.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS201.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS202.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS203.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS204.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS205.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS206.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS207.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS208.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS209.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS21.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS210.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS211.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS212.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS213.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS214.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS215.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS216.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS217.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS218.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS219.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS22.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS220.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS221.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS222.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS223.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS224.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS225.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS226.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS227.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS228.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS229.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS23.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS230.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS231.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS232.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS233.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS234.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS235.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS236.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS237.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS238.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS239.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS24.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS240.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS241.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS242.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS243.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS244.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS245.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS246.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS247.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS248.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS249.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS25.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS250.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS26.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS27.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS28.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS29.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS3.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS30.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS31.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS32.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS33.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS34.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS35.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS36.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS37.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS38.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS39.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS4.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS40.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS41.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS42.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS43.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS44.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS45.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS46.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS47.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS48.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS49.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS5.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS50.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS51.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS52.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS53.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS54.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS55.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS56.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS57.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS58.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS59.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS6.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS60.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS61.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS62.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS63.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS64.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS65.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS66.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS67.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS68.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS69.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS7.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS70.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS71.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS72.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS73.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS74.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS75.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS76.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS77.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS78.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS79.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS8.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS80.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS81.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS82.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS83.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS84.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS85.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS86.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS87.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS88.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS89.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS9.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS90.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS91.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS92.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS93.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS94.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS95.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS96.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS97.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS98.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-04T20:05:45" ; + schema1:name "simulationsBS99.txt" ; + schema1:sdDatePublished "2024-03-04T20:06:20+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "This workflow extracts 5 different time periods e.g. January- June 2019, 2020 and 2021, July-December 2019 and 2020 over a single selected location. Then statistics (mean, minimum, maximum) are computed. The final products are maximum, minimum and mean." ; + schema1:hasPart , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.251.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Investigation of lockdown effect on air quality between January 2019 to May 2021." ; + schema1:sdDatePublished "2024-08-05 10:32:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/251/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 20663 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 117910 ; + schema1:dateCreated "2021-12-20T09:43:13Z" ; + schema1:dateModified "2023-01-16T13:56:22Z" ; + schema1:description "This workflow extracts 5 different time periods e.g. January- June 2019, 2020 and 2021, July-December 2019 and 2020 over a single selected location. Then statistics (mean, minimum, maximum) are computed. The final products are maximum, minimum and mean." ; + schema1:image ; + schema1:keywords "RELIANCE, copernicus, air-quality" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Investigation of lockdown effect on air quality between January 2019 to May 2021." ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/251?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 61932 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# The Polygenic Score Catalog Calculator (`pgsc_calc`)\r +\r +[![Documentation Status](https://readthedocs.org/projects/pgsc-calc/badge/?version=latest)](https://pgsc-calc.readthedocs.io/en/latest/?badge=latest)\r +[![pgscatalog/pgsc_calc CI](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml/badge.svg)](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.5970794.svg)](https://doi.org/10.5281/zenodo.5970794)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-≥22.10.0-23aa62.svg?labelColor=000000)](https://www.nextflow.io/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +\r +## Introduction\r +\r +`pgsc_calc` is a bioinformatics best-practice analysis pipeline for calculating\r +polygenic [risk] scores on samples with imputed genotypes using existing scoring\r +files from the [Polygenic Score (PGS) Catalog](https://www.pgscatalog.org/)\r +and/or user-defined PGS/PRS.\r +\r +## Pipeline summary\r +\r +

\r + \r +

\r +\r +The workflow performs the following steps:\r +\r +* Downloading scoring files using the PGS Catalog API in a specified genome build (GRCh37 and GRCh38).\r +* Reading custom scoring files (and performing a liftover if genotyping data is in a different build).\r +* Automatically combines and creates scoring files for efficient parallel computation of multiple PGS\r + - Matching variants in the scoring files against variants in the target dataset (in plink bfile/pfile or VCF format)\r +* Calculates PGS for all samples (linear sum of weights and dosages)\r +* Creates a summary report to visualize score distributions and pipeline metadata (variant matching QC)\r +\r +And optionally:\r +\r +- Genetic Ancestry: calculate similarity of target samples to populations in a\r + reference dataset ([1000 Genomes (1000G)](http://www.nature.com/nature/journal/v526/n7571/full/nature15393.html)), using principal components analysis (PCA)\r +- PGS Normalization: Using reference population data and/or PCA projections to report\r + individual-level PGS predictions (e.g. percentiles, z-scores) that account for genetic ancestry\r +\r +See documentation for a list of planned [features under development](https://pgsc-calc.readthedocs.io/en/latest/index.html#Features-under-development).\r +\r +## Quick start\r +\r +1. Install\r +[`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation)\r +(`>=22.10.0`)\r +\r +2. Install [`Docker`](https://docs.docker.com/engine/installation/) or\r +[`Singularity (v3.8.3 minimum)`](https://www.sylabs.io/guides/3.0/user-guide/)\r +(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort)\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile test,\r + ```\r +\r +4. 
Start running your own analysis!\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile --input samplesheet.csv --pgs_id PGS001229\r + ```\r +\r +See [getting\r +started](https://pgsc-calc.readthedocs.io/en/latest/getting-started.html) for more\r +details.\r +\r +## Documentation\r +\r +[Full documentation is available on Read the Docs](https://pgsc-calc.readthedocs.io/)\r +\r +## Credits\r +\r +pgscatalog/pgsc_calc is developed as part of the PGS Catalog project, a\r +collaboration between the University of Cambridge’s Department of Public Health\r +and Primary Care (Michael Inouye, Samuel Lambert) and the European\r +Bioinformatics Institute (Helen Parkinson, Laura Harris).\r +\r +The pipeline seeks to provide a standardized workflow for PGS calculation and\r +ancestry inference implemented in nextflow derived from an existing set of\r +tools/scripts developed by Inouye lab (Rodrigo Canovas, Scott Ritchie, Jingqin\r +Wu) and PGS Catalog teams (Samuel Lambert, Laurent Gil).\r +\r +The adaptation of the codebase, nextflow implementation, and PGS Catalog features\r +are written by Benjamin Wingfield, Samuel Lambert, Laurent Gil with additional input\r +from Aoife McMahon (EBI). Development of new features, testing, and code review\r +is ongoing including Inouye lab members (Rodrigo Canovas, Scott Ritchie) and others. A\r +manuscript describing the tool is *in preparation*. In the meantime if you use the\r +tool we ask you to cite the repo and the paper describing the PGS Catalog\r +resource:\r +\r +- >PGS Catalog Calculator _(in preparation)_. PGS Catalog\r + Team. [https://github.com/PGScatalog/pgsc_calc](https://github.com/PGScatalog/pgsc_calc)\r +- >Lambert _et al._ (2021) The Polygenic Score Catalog as an open database for\r +reproducibility and systematic evaluation. Nature Genetics. 
53:420–425\r +doi:[10.1038/s41588-021-00783-5](https://doi.org/10.1038/s41588-021-00783-5).\r +\r +This pipeline is distrubuted under an [Apache License](LICENSE) amd uses code and \r +infrastructure developed and maintained by the [nf-core](https://nf-co.re) community \r +(Ewels *et al. Nature Biotech* (2020) doi:[10.1038/s41587-020-0439-x](https://doi.org/10.1038/s41587-020-0439-x)), \r +reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +Additional references of open-source tools and data used in this pipeline are described in\r +[`CITATIONS.md`](CITATIONS.md).\r +\r +This work has received funding from EMBL-EBI core funds, the Baker Institute,\r +the University of Cambridge, Health Data Research UK (HDRUK), and the European\r +Union’s Horizon 2020 research and innovation programme under grant agreement No\r +101016775 INTERVENE.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/556?version=2" ; + schema1:isBasedOn "https://github.com/PGScatalog/pgsc_calc" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for The Polygenic Score Catalog Calculator" ; + schema1:sdDatePublished "2024-08-05 10:28:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/556/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1673 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-08-10T09:09:35Z" ; + schema1:dateModified "2023-08-10T09:09:35Z" ; + schema1:description """# The Polygenic Score Catalog Calculator (`pgsc_calc`)\r +\r +[![Documentation Status](https://readthedocs.org/projects/pgsc-calc/badge/?version=latest)](https://pgsc-calc.readthedocs.io/en/latest/?badge=latest)\r +[![pgscatalog/pgsc_calc CI](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml/badge.svg)](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.5970794.svg)](https://doi.org/10.5281/zenodo.5970794)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-≥22.10.0-23aa62.svg?labelColor=000000)](https://www.nextflow.io/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +\r +## Introduction\r +\r +`pgsc_calc` is a bioinformatics best-practice analysis pipeline for calculating\r +polygenic [risk] scores on samples with imputed genotypes using existing scoring\r +files from the [Polygenic Score (PGS) Catalog](https://www.pgscatalog.org/)\r +and/or user-defined PGS/PRS.\r +\r +## Pipeline summary\r +\r +

\r + \r +

\r +\r +The workflow performs the following steps:\r +\r +* Downloading scoring files using the PGS Catalog API in a specified genome build (GRCh37 and GRCh38).\r +* Reading custom scoring files (and performing a liftover if genotyping data is in a different build).\r +* Automatically combines and creates scoring files for efficient parallel computation of multiple PGS\r + - Matching variants in the scoring files against variants in the target dataset (in plink bfile/pfile or VCF format)\r +* Calculates PGS for all samples (linear sum of weights and dosages)\r +* Creates a summary report to visualize score distributions and pipeline metadata (variant matching QC)\r +\r +And optionally:\r +\r +- Genetic Ancestry: calculate similarity of target samples to populations in a\r + reference dataset ([1000 Genomes (1000G)](http://www.nature.com/nature/journal/v526/n7571/full/nature15393.html)), using principal components analysis (PCA)\r +- PGS Normalization: Using reference population data and/or PCA projections to report\r + individual-level PGS predictions (e.g. percentiles, z-scores) that account for genetic ancestry\r +\r +See documentation for a list of planned [features under development](https://pgsc-calc.readthedocs.io/en/latest/index.html#Features-under-development).\r +\r +## Quick start\r +\r +1. Install\r +[`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation)\r +(`>=22.10.0`)\r +\r +2. Install [`Docker`](https://docs.docker.com/engine/installation/) or\r +[`Singularity (v3.8.3 minimum)`](https://www.sylabs.io/guides/3.0/user-guide/)\r +(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort)\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile test,\r + ```\r +\r +4. 
Start running your own analysis!\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile --input samplesheet.csv --pgs_id PGS001229\r + ```\r +\r +See [getting\r +started](https://pgsc-calc.readthedocs.io/en/latest/getting-started.html) for more\r +details.\r +\r +## Documentation\r +\r +[Full documentation is available on Read the Docs](https://pgsc-calc.readthedocs.io/)\r +\r +## Credits\r +\r +pgscatalog/pgsc_calc is developed as part of the PGS Catalog project, a\r +collaboration between the University of Cambridge’s Department of Public Health\r +and Primary Care (Michael Inouye, Samuel Lambert) and the European\r +Bioinformatics Institute (Helen Parkinson, Laura Harris).\r +\r +The pipeline seeks to provide a standardized workflow for PGS calculation and\r +ancestry inference implemented in nextflow derived from an existing set of\r +tools/scripts developed by Inouye lab (Rodrigo Canovas, Scott Ritchie, Jingqin\r +Wu) and PGS Catalog teams (Samuel Lambert, Laurent Gil).\r +\r +The adaptation of the codebase, nextflow implementation, and PGS Catalog features\r +are written by Benjamin Wingfield, Samuel Lambert, Laurent Gil with additional input\r +from Aoife McMahon (EBI). Development of new features, testing, and code review\r +is ongoing including Inouye lab members (Rodrigo Canovas, Scott Ritchie) and others. A\r +manuscript describing the tool is *in preparation*. In the meantime if you use the\r +tool we ask you to cite the repo and the paper describing the PGS Catalog\r +resource:\r +\r +- >PGS Catalog Calculator _(in preparation)_. PGS Catalog\r + Team. [https://github.com/PGScatalog/pgsc_calc](https://github.com/PGScatalog/pgsc_calc)\r +- >Lambert _et al._ (2021) The Polygenic Score Catalog as an open database for\r +reproducibility and systematic evaluation. Nature Genetics. 
53:420–425\r +doi:[10.1038/s41588-021-00783-5](https://doi.org/10.1038/s41588-021-00783-5).\r +\r +This pipeline is distrubuted under an [Apache License](LICENSE) amd uses code and \r +infrastructure developed and maintained by the [nf-core](https://nf-co.re) community \r +(Ewels *et al. Nature Biotech* (2020) doi:[10.1038/s41587-020-0439-x](https://doi.org/10.1038/s41587-020-0439-x)), \r +reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +Additional references of open-source tools and data used in this pipeline are described in\r +[`CITATIONS.md`](CITATIONS.md).\r +\r +This work has received funding from EMBL-EBI core funds, the Baker Institute,\r +the University of Cambridge, Health Data Research UK (HDRUK), and the European\r +Union’s Horizon 2020 research and innovation programme under grant agreement No\r +101016775 INTERVENE.\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/556?version=3" ; + schema1:keywords "Nextflow, Workflows, polygenic risk score, polygenic score, prediction, GWAS, genomic ancestry" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "The Polygenic Score Catalog Calculator" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/556?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-23T06:34:49+00:00" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/AustralianBioCommons/Genome-assessment-post-assembly.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Genome-assessment-post-assembly" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Genome-assessment-post-assembly" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/AustralianBioCommons/Genome-assessment-post-assembly.git" ; + schema1:version "main" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/976?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/cutandrun" ; + schema1:sdDatePublished "2024-08-05 10:24:09 +0100" ; + schema1:url "https://workflowhub.eu/workflows/976/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15191 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:46Z" ; + schema1:dateModified "2024-06-11T12:54:46Z" ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/976?version=7" ; + schema1:keywords "cutandrun, cutandrun-seq, cutandtag, cutandtag-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/cutandrun" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/976?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-23T13:33:07.952232" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-Trio-phasing-VGP5/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/978?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/demultiplex" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/demultiplex" ; + schema1:sdDatePublished "2024-08-05 10:24:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/978/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9518 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:47Z" ; + schema1:dateModified "2024-06-11T12:54:47Z" ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/978?version=7" ; + schema1:keywords "bases2fastq, bcl2fastq, demultiplexing, elementbiosciences, illumina" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/demultiplex" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/978?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=13" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-08-05 10:23:18 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=13" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15644 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:15Z" ; + schema1:dateModified "2024-06-11T12:55:15Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=13" ; + schema1:version 13 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-08-05 10:23:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7062 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:58Z" ; + schema1:dateModified "2024-06-11T12:54:58Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.292.2" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Amber Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-08-05 10:32:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/292/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1722 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-24T14:21:22Z" ; + schema1:dateModified "2023-01-16T13:58:35Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/292?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Amber Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/292?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=12" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-08-05 10:23:18 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=12" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15642 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:15Z" ; + schema1:dateModified "2024-06-11T12:55:15Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=12" ; + schema1:version 12 . + + a schema1:Dataset ; + schema1:datePublished "2021-12-20T17:04:47.611475" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/fragment-based-docking-scoring" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "fragment-based-docking-scoring/main" ; + schema1:sdDatePublished "2021-12-21 03:01:02 +0000" ; + schema1:softwareVersion "v0.1.2" . 
+ + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """## CWL based workflow to assemble haploid/diploid eukaryote genomes of non-model organisms\r +The workflow is designed to use both PacBio long-reads and Illumina short-reads. The workflow first extracts, corrects, trims and decontaminates the long reads. Decontaminated trimmed reads are then used to assemble the genome and raw reads are used to polish it. Next, Illumina reads are cleaned and used to further polish the resultant assembly. Finally, the polished assembly is masked using inferred repeats and haplotypes are eliminated. The workflow uses BioConda and DockerHub to install required software and is therefore fully automated. In addition to final assembly, the workflow produces intermediate assemblies before and after polishing steps. The workflow follows the syntax for CWL v1.0.\r +\r +### Dependencies\r +# Programs\r +The pipeline can be run either using [Cromwell](https://cromwell.readthedocs.io/en/stable) or [cwltool reference](https://github.com/common-workflow-language/cwltool) implementation and docker containers can be run either using [Singularity](https://singularity.lbl.gov) or [udocker](https://singularity.lbl.gov).\r +\r +Cromwell implementation\r +* [cromwell v44](https://github.com/broadinstitute/cromwell/releases/tag/44)\r +* [java-jdk v8.0.112](https://www.java.com/en)\r +\r +Reference implementation\r +* [cwltool v1.0.20181012180214](https://github.com/common-workflow-language/cwltool)\r +* [nodejs v10.4.1 required by cwltool](https://nodejs.org/en)\r +* [Python library galaxy-lib v18.5.7](https://pypi.org/project/galaxy-lib)\r +\r +Singularity software packages have to be installed server-wide by administrator\r +* [Singularity v3.2.1](https://singularity.lbl.gov)\r +* [squashfs-tools 
v4.3.0](https://github.com/plougher/squashfs-tools)\r +\r +Udocker software package can be installed locally\r +* [udocker v1.1.2](https://github.com/indigo-dc/udocker)\r +\r +# Data\r +* [Illumina adapters converted to FASTA format](http://sapac.support.illumina.com/downloads/illumina-adapter-sequences-document-1000000002694.html)\r +* [NCBI nucleotide non-redundant sequences for decontamination with Centrifuge](http://www.ccb.jhu.edu/software/centrifuge)\r +* [RepBase v17.02 file RMRBSeqs.embl](https://www.girinst.org/repbase)\r +\r +### Installation\r +Install miniconda using installation script ```installConda.sh```.\r +To install CWL, use either installation script ```installCromwell.sh``` or ```installCwltool.sh```.\r +To install udocker, use installation script ```installUdocker.sh```.\r +To install singularity, ask your system administrator.\r +\r +```\r +# First confirm that you have the program 'git' installed in your system\r +> cd\r +> git clone -b 'v0.1.3-beta' --single-branch --depth 1 https://github.com/vetscience/Assemblosis\r +> cd Assemblosis\r +> bash installConda.sh\r +> bash installCromwell.sh # or bash installCwltool.sh\r +> bash installUdocker.sh # if singularity cannot be installed or does not run\r +\r +```\r +For data dependencies: download and extract [RepBase database](https://www.girinst.org/repbase), download Centrifuge version of [NCBI nt database](http://www.ccb.jhu.edu/software/centrifuge) and create [Illumina adapter FASTA file](http://sapac.support.illumina.com/downloads/illumina-adapter-sequences-document-1000000002694.html) to your preferred locations. If your reads are clean from adapters, the adapter FASTA file can be empty.\r +Give the location of these data in the configuration (.yml) file (see **Usage**).\r +\r +### Usage\r +You have to create a YAML (.yml) file for each assembly. 
This file defines the required parameters and the location for both PacBio and Illumina raw-reads.\r +```\r +> cd\r +> export PATH=~/miniconda3/bin:$PATH\r +> cd Assemblosis/Run\r +> cp ../Examples/assemblyCele.yml .\r +\r +"Edit assemblyCele.yml to fit your computing environment and to define the location for the read files, databases and Illumina adapters"\r +\r +"Running docker images using Cromwell and singularity:"\r +> java -Dconfig.file=cromwell.udocker.conf -jar cromwell-44.jar run -t CWL -v v1.0 assembly.cwl -i assemblyCele.yml\r +\r +"Running docker images using Cromwell and udocker:"\r +> java -Dconfig.file=cromwell.singularity.conf -jar cromwell-44.jar run -t CWL -v v1.0 assembly.cwl -i assemblyCele.yml\r +\r +"Running docker images using Cwltool and singularity:"\r +> cwltool --tmpdir-prefix /home//Tmp --beta-conda-dependencies --cachedir /home//Cache --singularity --leave-tmpdir assembly.cwl assemblyCele.yml\r +\r +"Running docker images using Cwltool and udocker:"\r +> cwltool --tmpdir-prefix /home//Tmp --beta-conda-dependencies --cachedir /home//Cache --user-space-docker-cmd udocker --leave-tmpdir assembly.cwl assemblyCele.yml\r +```\r +\r +An annotated example of the YAML file for Caenorhabditis elegans assembly.\r +```\r +## Directory, which contains the PacBio raw data\r +# NOTE! 
The software looks for all .h5 file (or bam files if bacBioInBam below is defined true) in given directory\r +pacBioDataDir:\r + class: Directory\r + location: /home//Dna\r +\r +## PacBio files are in bam format as returned from Sequel platform\r +pacBioInBam: true\r +\r +## Prefix for the resultant assembly files\r +prefix: cele\r +\r +## Maximum number of threads used in the pipeline\r +threads: 24\r +\r +## Minimum number of threads per job used in canu assembler\r +minThreads: 4\r +\r +## Number of concurrent jobs in canu assembler (recommended to use threads / minThreads)\r +canuConcurrency: 6\r +\r +### Parameters for the program Canu are described in https://canu.readthedocs.io/en/latest/parameter-reference.html\r +## Expected genome size. This parameter is forwarded to Canu assembler.\r +genomeSize: 100m\r +\r +## Minimum length for the PacBio reads used for the assembly. This parameter is forwarded to Canu assembler.\r +# The maximum resolvable repeat regions becomes 2 x minReadLength\r +minReadLen: 6000\r +\r +## Parameter for Canu assembler to adjust to GC-content. Should be 0.15 for high or low GC content.\r +corMaxEvidenceErate: 0.20\r +\r +### Parameters for the program Trimmomatic are described in http://www.usadellab.org/cms/?page=trimmomatic\r +## Paired-end (PE) reads of Illumina raw data. These files are given to the program Trimmomatic.\r +# NOTE! Data for two paired libraries is given below.\r +readsPe1:\r + - class: File\r + format: edam:format_1930 # fastq\r + path: /home//Dna/SRR2598966_1.fastq.gz\r + - class: File\r + format: edam:format_1930 # fastq\r + path: /home//Dna/SRR2598967_1.fastq.gz\r +readsPe2:\r + - class: File\r + format: edam:format_1930 # fastq\r + path: /home//Dna/SRR2598966_2.fastq.gz\r + - class: File\r + format: edam:format_1930 # fastq\r + path: /home//Dna/SRR2598967_2.fastq.gz\r +\r +## Phred coding of Illumina data. This parameter is forwarded to Trimmomatic.\r +# NOTE! 
Each read-pair needs one phred value.\r +phredsPe: ['33','33']\r +\r +## Sliding window and illuminaClip parameters for Trimmomatic\r +slidingWindow:\r + windowSize: 4\r + requiredQuality: 25\r +illuminaClip:\r + adapters:\r + class: File\r + path: \r + seedMismatches: 2\r + palindromeClipThreshold: 30\r + simpleClipThreshold: 10\r + minAdapterLength: 20\r + keepBothReads: true\r +## Further parameters for Trimmomatic\r +# Required phred-quality for leading 5 nucleotides\r +leading: 25\r +# Required phred-quality for trailing 5 nucleotides\r +trailing: 25\r +# Minimum accepted read-length to keep the read after trimming\r +minlen: 40\r +\r +### Parameters for the program bowtie2 are described in http://bowtie-bio.sourceforge.net/bowtie2/manual.shtml\r +## Illumina PE fragment length. Program bowtie2 parameter -X.\r +# NOTE! Each read-pair needs one phred value.\r +maxFragmentLens: [500, 600]\r +# Orientation of pair-end reads e.g. 'fr', 'rf', 'ff': Program bowtie2 parameters --fr, --rf or --ff\r +orientation: 'fr'\r +\r +### Parameters for the program Pilon are described in https://github.com/broadinstitute/pilon/wiki/Requirements-&-Usage\r +# Prefix for the resultant pilon polished assembly. Pilon parameter --output\r +polishedAssembly: celePilon\r +# This is set 'true' for an organism with diploid genome: Pilon parameter --diploid\r +diploidOrganism: true\r +# Value 'bases' fixes snps and indels: Pilon parameter --fix\r +fix: bases\r +\r +### Parameters for the program centrifuge are described in http://www.ccb.jhu.edu/software/centrifuge/manual.shtml\r +# Path to the directory, that contains NCBI nt database in nt.?.cf files. Centrifuge parameter -x\r +database:\r + class: Directory\r + path: /home//ntDatabase\r +# Lenght of the identical match in nucleotides required to infer a read as contaminant. Centrifuge parameter --min-hitlen\r +partialMatch: 100\r +# NCBI taxon root identifers for the species considered contaminants: e.g. 
bacteria (=2), viruses (=10239), fungi (=4751), mammals (=40674), artificial seqs (=81077). Pipeline specific parameter.\r +taxons: [2,10239,4751,40674,81077]\r +\r +## Parameters for the RepeatModeler and RepeatMasker are described in http://www.repeatmasker.org\r +repBaseLibrary:\r + class: File\r + # This is the RepBase file from https://www.girinst.org/repbase. RepeatMasker parameter -lib\r + path: /home//RepBaseLibrary/RMRBSeqs.embl\r +# Constant true and false values for repeat masker\r +trueValue: true\r +falseValue: false\r +\r +```\r +### Runtimes and hardware requirements\r +The workflow was tested in Linux environment (CentOS Linux release 7.2.1511) in a server with 24 physical CPUs (48 hyperthreaded CPUs) and 512 GB RAM.\r +\r +| Assembly | Runtime in CPU hours | RAM usage (GB) |\r +| --- | --- | --- |\r +| *Caenorhabditis elegans* | 1537 | 134.1 |\r +| *Drosophila melanogaster* | 6501 | 134.1 |\r +| *Plasmodium falciparum* | 424 | 134.1 |\r +\r +Maximum memory usage of 134.1 GB was claimed by the program Centrifuge for each assembly.\r +\r +### Software tools used in this pipeline\r +* [Dextractor v1.0](https://github.com/thegenemyers/DEXTRACTOR)\r +* [Trimmomatic v0.36](http://www.usadellab.org/cms/?page=trimmomatic)\r +* [Centrifuge v1.0.3](http://www.ccb.jhu.edu/software/centrifuge)\r +* [Canu v1.8](http://canu.readthedocs.io/en/latest/index.html)\r +* [Arrow in SmrtLink v7.0.1](https://www.pacb.com/support/software-downloads)\r +* [Bowtie 2 v2.2.8](http://bowtie-bio.sourceforge.net/bowtie2/index.shtml)\r +* [SAMtools v1.6](http://samtools.sourceforge.net)\r +* [Pilon v1.22](https://github.com/broadinstitute/pilon)\r +* [RepeatMasker v4.0.6](http://www.repeatmasker.org)\r +* [RepeatModeler v1.0.11](http://www.repeatmasker.org)\r +* [RepBase v17.02](https://www.girinst.org/repbase)\r +* [HaploMerger2 build_20160512](https://github.com/mapleforest/HaploMerger2)\r +\r +### Cite\r +If you use the pipeline, please cite:\r +Korhonen, Pasi K., Ross S. 
Hall, Neil D. Young, and Robin B. Gasser. "Common Workflow Language (CWL)-based software pipeline for de novo genome assembly from long-and short-read data." GigaScience 8, no. 4 (2019): giz014.\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/334?version=1" ; + schema1:isBasedOn "https://github.com/vetscience/Assemblosis.git" ; + schema1:license "BSD-3-Clause" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Assemblosis" ; + schema1:sdDatePublished "2024-08-05 10:32:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/334/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8166 ; + schema1:creator ; + schema1:dateCreated "2022-04-20T23:10:26Z" ; + schema1:dateModified "2023-01-16T13:59:43Z" ; + schema1:description """## CWL based workflow to assemble haploid/diploid eukaryote genomes of non-model organisms\r +The workflow is designed to use both PacBio long-reads and Illumina short-reads. The workflow first extracts, corrects, trims and decontaminates the long reads. Decontaminated trimmed reads are then used to assemble the genome and raw reads are used to polish it. Next, Illumina reads are cleaned and used to further polish the resultant assembly. Finally, the polished assembly is masked using inferred repeats and haplotypes are eliminated. The workflow uses BioConda and DockerHub to install required software and is therefore fully automated. In addition to final assembly, the workflow produces intermediate assemblies before and after polishing steps. 
The workflow follows the syntax for CWL v1.0.\r +\r +### Dependencies\r +# Programs\r +The pipeline can be run either using [Cromwell](https://cromwell.readthedocs.io/en/stable) or [cwltool reference](https://github.com/common-workflow-language/cwltool) implementation and docker containers can be run either using [Singularity](https://singularity.lbl.gov) or [udocker](https://singularity.lbl.gov).\r +\r +Cromwell implementation\r +* [cromwell v44](https://github.com/broadinstitute/cromwell/releases/tag/44)\r +* [java-jdk v8.0.112](https://www.java.com/en)\r +\r +Reference implementation\r +* [cwltool v1.0.20181012180214](https://github.com/common-workflow-language/cwltool)\r +* [nodejs v10.4.1 required by cwltool](https://nodejs.org/en)\r +* [Python library galaxy-lib v18.5.7](https://pypi.org/project/galaxy-lib)\r +\r +Singularity software packages have to be installed server-wide by administrator\r +* [Singularity v3.2.1](https://singularity.lbl.gov)\r +* [squashfs-tools v4.3.0](https://github.com/plougher/squashfs-tools)\r +\r +Udocker software package can be installed locally\r +* [udocker v1.1.2](https://github.com/indigo-dc/udocker)\r +\r +# Data\r +* [Illumina adapters converted to FASTA format](http://sapac.support.illumina.com/downloads/illumina-adapter-sequences-document-1000000002694.html)\r +* [NCBI nucleotide non-redundant sequences for decontamination with Centrifuge](http://www.ccb.jhu.edu/software/centrifuge)\r +* [RepBase v17.02 file RMRBSeqs.embl](https://www.girinst.org/repbase)\r +\r +### Installation\r +Install miniconda using installation script ```installConda.sh```.\r +To install CWL, use either installation script ```installCromwell.sh``` or ```installCwltool.sh```.\r +To install udocker, use installation script ```installUdocker.sh```.\r +To install singularity, ask your system administrator.\r +\r +```\r +# First confirm that you have the program 'git' installed in your system\r +> cd\r +> git clone -b 'v0.1.3-beta' --single-branch 
--depth 1 https://github.com/vetscience/Assemblosis\r +> cd Assemblosis\r +> bash installConda.sh\r +> bash installCromwell.sh # or bash installCwltool.sh\r +> bash installUdocker.sh # if singularity cannot be installed or does not run\r +\r +```\r +For data dependencies: download and extract [RepBase database](https://www.girinst.org/repbase), download Centrifuge version of [NCBI nt database](http://www.ccb.jhu.edu/software/centrifuge) and create [Illumina adapter FASTA file](http://sapac.support.illumina.com/downloads/illumina-adapter-sequences-document-1000000002694.html) to your preferred locations. If your reads are clean from adapters, the adapter FASTA file can be empty.\r +Give the location of these data in the configuration (.yml) file (see **Usage**).\r +\r +### Usage\r +You have to create a YAML (.yml) file for each assembly. This file defines the required parameters and the location for both PacBio and Illumina raw-reads.\r +```\r +> cd\r +> export PATH=~/miniconda3/bin:$PATH\r +> cd Assemblosis/Run\r +> cp ../Examples/assemblyCele.yml .\r +\r +"Edit assemblyCele.yml to fit your computing environment and to define the location for the read files, databases and Illumina adapters"\r +\r +"Running docker images using Cromwell and singularity:"\r +> java -Dconfig.file=cromwell.udocker.conf -jar cromwell-44.jar run -t CWL -v v1.0 assembly.cwl -i assemblyCele.yml\r +\r +"Running docker images using Cromwell and udocker:"\r +> java -Dconfig.file=cromwell.singularity.conf -jar cromwell-44.jar run -t CWL -v v1.0 assembly.cwl -i assemblyCele.yml\r +\r +"Running docker images using Cwltool and singularity:"\r +> cwltool --tmpdir-prefix /home//Tmp --beta-conda-dependencies --cachedir /home//Cache --singularity --leave-tmpdir assembly.cwl assemblyCele.yml\r +\r +"Running docker images using Cwltool and udocker:"\r +> cwltool --tmpdir-prefix /home//Tmp --beta-conda-dependencies --cachedir /home//Cache --user-space-docker-cmd udocker --leave-tmpdir assembly.cwl 
assemblyCele.yml\r +```\r +\r +An annotated example of the YAML file for Caenorhabditis elegans assembly.\r +```\r +## Directory, which contains the PacBio raw data\r +# NOTE! The software looks for all .h5 file (or bam files if bacBioInBam below is defined true) in given directory\r +pacBioDataDir:\r + class: Directory\r + location: /home//Dna\r +\r +## PacBio files are in bam format as returned from Sequel platform\r +pacBioInBam: true\r +\r +## Prefix for the resultant assembly files\r +prefix: cele\r +\r +## Maximum number of threads used in the pipeline\r +threads: 24\r +\r +## Minimum number of threads per job used in canu assembler\r +minThreads: 4\r +\r +## Number of concurrent jobs in canu assembler (recommended to use threads / minThreads)\r +canuConcurrency: 6\r +\r +### Parameters for the program Canu are described in https://canu.readthedocs.io/en/latest/parameter-reference.html\r +## Expected genome size. This parameter is forwarded to Canu assembler.\r +genomeSize: 100m\r +\r +## Minimum length for the PacBio reads used for the assembly. This parameter is forwarded to Canu assembler.\r +# The maximum resolvable repeat regions becomes 2 x minReadLength\r +minReadLen: 6000\r +\r +## Parameter for Canu assembler to adjust to GC-content. Should be 0.15 for high or low GC content.\r +corMaxEvidenceErate: 0.20\r +\r +### Parameters for the program Trimmomatic are described in http://www.usadellab.org/cms/?page=trimmomatic\r +## Paired-end (PE) reads of Illumina raw data. These files are given to the program Trimmomatic.\r +# NOTE! 
Data for two paired libraries is given below.\r +readsPe1:\r + - class: File\r + format: edam:format_1930 # fastq\r + path: /home//Dna/SRR2598966_1.fastq.gz\r + - class: File\r + format: edam:format_1930 # fastq\r + path: /home//Dna/SRR2598967_1.fastq.gz\r +readsPe2:\r + - class: File\r + format: edam:format_1930 # fastq\r + path: /home//Dna/SRR2598966_2.fastq.gz\r + - class: File\r + format: edam:format_1930 # fastq\r + path: /home//Dna/SRR2598967_2.fastq.gz\r +\r +## Phred coding of Illumina data. This parameter is forwarded to Trimmomatic.\r +# NOTE! Each read-pair needs one phred value.\r +phredsPe: ['33','33']\r +\r +## Sliding window and illuminaClip parameters for Trimmomatic\r +slidingWindow:\r + windowSize: 4\r + requiredQuality: 25\r +illuminaClip:\r + adapters:\r + class: File\r + path: \r + seedMismatches: 2\r + palindromeClipThreshold: 30\r + simpleClipThreshold: 10\r + minAdapterLength: 20\r + keepBothReads: true\r +## Further parameters for Trimmomatic\r +# Required phred-quality for leading 5 nucleotides\r +leading: 25\r +# Required phred-quality for trailing 5 nucleotides\r +trailing: 25\r +# Minimum accepted read-length to keep the read after trimming\r +minlen: 40\r +\r +### Parameters for the program bowtie2 are described in http://bowtie-bio.sourceforge.net/bowtie2/manual.shtml\r +## Illumina PE fragment length. Program bowtie2 parameter -X.\r +# NOTE! Each read-pair needs one phred value.\r +maxFragmentLens: [500, 600]\r +# Orientation of pair-end reads e.g. 'fr', 'rf', 'ff': Program bowtie2 parameters --fr, --rf or --ff\r +orientation: 'fr'\r +\r +### Parameters for the program Pilon are described in https://github.com/broadinstitute/pilon/wiki/Requirements-&-Usage\r +# Prefix for the resultant pilon polished assembly. 
Pilon parameter --output\r +polishedAssembly: celePilon\r +# This is set 'true' for an organism with diploid genome: Pilon parameter --diploid\r +diploidOrganism: true\r +# Value 'bases' fixes snps and indels: Pilon parameter --fix\r +fix: bases\r +\r +### Parameters for the program centrifuge are described in http://www.ccb.jhu.edu/software/centrifuge/manual.shtml\r +# Path to the directory, that contains NCBI nt database in nt.?.cf files. Centrifuge parameter -x\r +database:\r + class: Directory\r + path: /home//ntDatabase\r +# Lenght of the identical match in nucleotides required to infer a read as contaminant. Centrifuge parameter --min-hitlen\r +partialMatch: 100\r +# NCBI taxon root identifers for the species considered contaminants: e.g. bacteria (=2), viruses (=10239), fungi (=4751), mammals (=40674), artificial seqs (=81077). Pipeline specific parameter.\r +taxons: [2,10239,4751,40674,81077]\r +\r +## Parameters for the RepeatModeler and RepeatMasker are described in http://www.repeatmasker.org\r +repBaseLibrary:\r + class: File\r + # This is the RepBase file from https://www.girinst.org/repbase. 
RepeatMasker parameter -lib\r + path: /home//RepBaseLibrary/RMRBSeqs.embl\r +# Constant true and false values for repeat masker\r +trueValue: true\r +falseValue: false\r +\r +```\r +### Runtimes and hardware requirements\r +The workflow was tested in Linux environment (CentOS Linux release 7.2.1511) in a server with 24 physical CPUs (48 hyperthreaded CPUs) and 512 GB RAM.\r +\r +| Assembly | Runtime in CPU hours | RAM usage (GB) |\r +| --- | --- | --- |\r +| *Caenorhabditis elegans* | 1537 | 134.1 |\r +| *Drosophila melanogaster* | 6501 | 134.1 |\r +| *Plasmodium falciparum* | 424 | 134.1 |\r +\r +Maximum memory usage of 134.1 GB was claimed by the program Centrifuge for each assembly.\r +\r +### Software tools used in this pipeline\r +* [Dextractor v1.0](https://github.com/thegenemyers/DEXTRACTOR)\r +* [Trimmomatic v0.36](http://www.usadellab.org/cms/?page=trimmomatic)\r +* [Centrifuge v1.0.3](http://www.ccb.jhu.edu/software/centrifuge)\r +* [Canu v1.8](http://canu.readthedocs.io/en/latest/index.html)\r +* [Arrow in SmrtLink v7.0.1](https://www.pacb.com/support/software-downloads)\r +* [Bowtie 2 v2.2.8](http://bowtie-bio.sourceforge.net/bowtie2/index.shtml)\r +* [SAMtools v1.6](http://samtools.sourceforge.net)\r +* [Pilon v1.22](https://github.com/broadinstitute/pilon)\r +* [RepeatMasker v4.0.6](http://www.repeatmasker.org)\r +* [RepeatModeler v1.0.11](http://www.repeatmasker.org)\r +* [RepBase v17.02](https://www.girinst.org/repbase)\r +* [HaploMerger2 build_20160512](https://github.com/mapleforest/HaploMerger2)\r +\r +### Cite\r +If you use the pipeline, please cite:\r +Korhonen, Pasi K., Ross S. Hall, Neil D. Young, and Robin B. Gasser. "Common Workflow Language (CWL)-based software pipeline for de novo genome assembly from long-and short-read data." GigaScience 8, no. 
4 (2019): giz014.\r +\r +""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/BSD-3-Clause" ; + schema1:name "Assemblosis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/334?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 98508 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Alignment, assembly and annotation of RNQSEQ reads using TOPHAT (without filtering out host reads)." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/37?version=1" ; + schema1:isBasedOn "https://usegalaxy.eu/u/ambarishk/w/covid-19-assembly-using-tophat2-and-annotation-alternate" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Assembly using Tophat2 and annotation (alternate)" ; + schema1:sdDatePublished "2024-08-05 10:33:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/37/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 35059 ; + schema1:dateCreated "2020-06-18T23:03:09Z" ; + schema1:dateModified "2023-02-13T14:06:46Z" ; + schema1:description "Alignment, assembly and annotation of RNQSEQ reads using TOPHAT (without filtering out host reads)." 
; + schema1:image ; + schema1:keywords "Galaxy, Tophat2, Assembly, Alignment, RNASEQ, covid-19" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Assembly using Tophat2 and annotation (alternate)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/37?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 207755 . + + a schema1:Dataset ; + schema1:datePublished "2024-07-23T20:31:18.780636" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-only-VGP3/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """This Galaxy workflow takes a list of tumor/normal sample pair variants in VCF format and\r +1. annotates them using the ENSEMBL Variant Effect Predictor and custom annotation data\r +2. turns the annotated VCF into a MAF file for import into cBioPortal\r +3. 
generates human-readable variant- and gene-centric reports\r +\r +The input VCF is expected to encode somatic status, somatic p-value and germline p-value of each variant in varscan somatic format, i.e., via SS, SPV and GPV INFO keys, respectively.""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.607.1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Cancer variant annotation (hg38 VEP-based)" ; + schema1:sdDatePublished "2024-08-05 10:27:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/607/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 164505 ; + schema1:creator ; + schema1:dateCreated "2023-10-10T16:08:59Z" ; + schema1:dateModified "2023-10-27T13:42:31Z" ; + schema1:description """This Galaxy workflow takes a list of tumor/normal sample pair variants in VCF format and\r +1. annotates them using the ENSEMBL Variant Effect Predictor and custom annotation data\r +2. turns the annotated VCF into a MAF file for import into cBioPortal\r +3. generates human-readable variant- and gene-centric reports\r +\r +The input VCF is expected to encode somatic status, somatic p-value and germline p-value of each variant in varscan somatic format, i.e., via SS, SPV and GPV INFO keys, respectively.""" ; + schema1:keywords "EOSC4Cancer" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Cancer variant annotation (hg38 VEP-based)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://usegalaxy.eu/api/workflows/2424282f793f0f1b/download?format=json-download" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """Parabricks-Genomics-nf is a GPU-enabled pipeline for alignment and germline short variant calling for short read sequencing data. The pipeline utilises [NVIDIA's Clara Parabricks](https://docs.nvidia.com/clara/parabricks/4.2.0/index.html) toolkit to dramatically speed up the execution of best practice bioinformatics tools. Currently, this pipeline is **configured specifically for [NCI's Gadi HPC](https://nci.org.au/our-systems/hpc-systems)**. \r +\r +NVIDIA's Clara Parabricks can deliver a significant speed improvement over traditional CPU-based methods, and is designed to be used only with NVIDIA GPUs. This pipeline is suitable for population screening projects as it executes Parabrick's implementations of BWA mem for short read alignment and Google's DeepVariant for short variant calling. Additionally, it uses standard CPU implementations of data quality evaluation tools [FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/) and [MultiQC](https://multiqc.info/) and [DNAnexus' GLnexus](https://academic.oup.com/bioinformatics/article/36/24/5582/6064144) for scalable gVCF merging and joint variant calling. Optionally, [Variant Effect Predictor (VEP)](https://genomebiology.biomedcentral.com/articles/10.1186/s13059-016-0974-4) can be run for variant annotation. \r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.836.1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/Parabricks-Genomics-nf.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Parabricks-Genomics-nf" ; + schema1:sdDatePublished "2024-08-05 10:24:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/836/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6413 ; + schema1:creator ; + schema1:dateCreated "2024-04-25T23:19:53Z" ; + schema1:dateModified "2024-04-25T23:20:08Z" ; + schema1:description """Parabricks-Genomics-nf is a GPU-enabled pipeline for alignment and germline short variant calling for short read sequencing data. The pipeline utilises [NVIDIA's Clara Parabricks](https://docs.nvidia.com/clara/parabricks/4.2.0/index.html) toolkit to dramatically speed up the execution of best practice bioinformatics tools. Currently, this pipeline is **configured specifically for [NCI's Gadi HPC](https://nci.org.au/our-systems/hpc-systems)**. \r +\r +NVIDIA's Clara Parabricks can deliver a significant speed improvement over traditional CPU-based methods, and is designed to be used only with NVIDIA GPUs. This pipeline is suitable for population screening projects as it executes Parabrick's implementations of BWA mem for short read alignment and Google's DeepVariant for short variant calling. Additionally, it uses standard CPU implementations of data quality evaluation tools [FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/) and [MultiQC](https://multiqc.info/) and [DNAnexus' GLnexus](https://academic.oup.com/bioinformatics/article/36/24/5582/6064144) for scalable gVCF merging and joint variant calling. Optionally, [Variant Effect Predictor (VEP)](https://genomebiology.biomedcentral.com/articles/10.1186/s13059-016-0974-4) can be run for variant annotation. 
\r +""" ; + schema1:keywords "Bioinformatics, INDELs, SNPs, variant calling, Genomics, whole genome sequencing, gpu, Annotation, mapping" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Parabricks-Genomics-nf" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/836?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1027?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/viralrecon" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/viralrecon" ; + schema1:sdDatePublished "2024-08-05 10:23:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1027/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10538 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:20Z" ; + schema1:dateModified "2024-06-11T12:55:20Z" ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1027?version=10" ; + schema1:keywords "Amplicon, ARTIC, Assembly, covid-19, covid19, illumina, long-read-sequencing, Metagenomics, nanopore, ONT, oxford-nanopore, SARS-CoV-2, variant-calling, viral, Virus" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/viralrecon" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1027?version=3" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. 
With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.55.5" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_pmx_tutorial" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)" ; + schema1:sdDatePublished "2024-08-05 10:25:30 +0100" ; + schema1:url "https://workflowhub.eu/workflows/55/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 49135 ; + schema1:creator , + ; + schema1:dateCreated "2023-07-26T09:23:40Z" ; + schema1:dateModified "2023-07-26T09:26:49Z" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. 
With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/55?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/biobb_wf_pmx_tutorial.ipynb" ; + schema1:version 5 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A mapping-based pipeline for creating a phylogeny from bacterial whole genome sequences" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/967?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/bactmap" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bactmap" ; + schema1:sdDatePublished "2024-08-05 10:24:16 +0100" ; + schema1:url "https://workflowhub.eu/workflows/967/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5953 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:43Z" ; + schema1:dateModified "2024-06-11T12:54:43Z" ; + schema1:description "A mapping-based pipeline for creating a phylogeny from bacterial whole genome sequences" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/967?version=2" ; + schema1:keywords "bacteria, bacterial, bacterial-genome-analysis, Genomics, mapping, phylogeny, tree" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bactmap" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/967?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for the analysis of CRISPR data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/975?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/crisprseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/crisprseq" ; + schema1:sdDatePublished "2024-08-05 10:22:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/975/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12235 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-21T03:02:41Z" ; + schema1:dateModified "2024-06-21T03:02:41Z" ; + schema1:description "Pipeline for the analysis of CRISPR data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/975?version=5" ; + schema1:keywords "crispr, crispr-analysis, crispr-cas, NGS" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/crisprseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/975?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """The image is referenced in the paper "NesSys: a novel method for accurate nuclear segmentation in 3D" published August 2019 in PLOS Biology: https://doi.org/10.1371/journal.pbio.3000388 and can be viewed online in the [Image Data Resource](https://idr.openmicroscopy.org/webclient/?show=image-6001247).\r +\r +This original image was converted into the Zarr format. The analysis results produced by the authors of the paper were converted into labels and linked to the Zarr file which was placed into a public S3 repository.\r +\r +In this notebook, the Zarr file is then loaded together with the labels from the S3 storage and analyzed using [StarDist](https://github.com/stardist/stardist). 
The StarDist analysis produces a segmentation, which is then viewed side-by-side with the original segmentations produced by the authors of the paper obtained via the loaded labels.\r +\r +## Launch\r +This notebook uses the [environment_stardist.yml](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_5/environment_stardist.yml) file.\r +\r +See [Setup](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_5/setup.md).""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.496.1" ; + schema1:license "BSD-2-Clause" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Load ome.zarr Image with labels from a public S3 repository, analyze using StarDist and compare results" ; + schema1:sdDatePublished "2024-08-05 10:30:13 +0100" ; + schema1:url "https://workflowhub.eu/workflows/496/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 689789 ; + schema1:url "https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_5/includes/StarDistNgff.png" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 27375 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-01T10:49:40Z" ; + schema1:dateModified "2023-06-01T10:50:42Z" ; + schema1:description """The image is referenced in the paper "NesSys: a novel method for accurate nuclear segmentation in 3D" published August 2019 in PLOS Biology: https://doi.org/10.1371/journal.pbio.3000388 and can be viewed online in the [Image Data Resource](https://idr.openmicroscopy.org/webclient/?show=image-6001247).\r +\r +This original image was converted into the Zarr format. 
The analysis results produced by the authors of the paper were converted into labels and linked to the Zarr file which was placed into a public S3 repository.\r +\r +In this notebook, the Zarr file is then loaded together with the labels from the S3 storage and analyzed using [StarDist](https://github.com/stardist/stardist). The StarDist analysis produces a segmentation, which is then viewed side-by-side with the original segmentations produced by the authors of the paper obtained via the loaded labels.\r +\r +## Launch\r +This notebook uses the [environment_stardist.yml](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_5/environment_stardist.yml) file.\r +\r +See [Setup](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_5/setup.md).""" ; + schema1:image ; + schema1:keywords "OME-Zarr, Python, Machine Learning, imaging, S3" ; + schema1:license "https://spdx.org/licenses/BSD-2-Clause" ; + schema1:name "Load ome.zarr Image with labels from a public S3 repository, analyze using StarDist and compare results" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_5/stardist.ipynb" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, related to the Galaxy training tutorial "[Metabarcoding/eDNA through Obitools](https://training.galaxyproject.org/training-material/topics/ecology/tutorials/Obitools-metabarcoding/tutorial.html)" .\r +\r +This workflow allows to analyze DNA metabarcoding / eDNA data produced on Illumina sequencers using the OBITools.""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/655?version=1" ; + schema1:isBasedOn "https://ecology.usegalaxy.eu/u/ylebras/w/obitools-edna-metabarcoding" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Obitools eDNA metabarcoding" ; + schema1:sdDatePublished "2024-08-05 10:27:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/655/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 38160 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-09T12:41:17Z" ; + schema1:dateModified "2023-11-09T21:02:58Z" ; + schema1:description """Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, related to the Galaxy training tutorial "[Metabarcoding/eDNA through Obitools](https://training.galaxyproject.org/training-material/topics/ecology/tutorials/Obitools-metabarcoding/tutorial.html)" .\r +\r +This workflow allows to analyze DNA metabarcoding / eDNA data produced on Illumina sequencers using the OBITools.""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Obitools eDNA metabarcoding" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/655?version=1" ; + schema1:version 1 ; + 
ns1:input . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description "Purges any retained haplotypes using minimap2 to make the input assembly to itself and the reads to the assembly followed by purge-dups to identify and remove any error, repeat or retained haplotype contigs." ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/506?version=1" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/snakemake/3.Purging/purge-dups" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Purge retained haplotypes using Purge-Dups" ; + schema1:sdDatePublished "2024-08-05 10:25:08 +0100" ; + schema1:url "https://workflowhub.eu/workflows/506/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1706 ; + schema1:creator ; + schema1:dateCreated "2023-06-16T13:56:33Z" ; + schema1:dateModified "2024-03-16T07:44:46Z" ; + schema1:description "Purges any retained haplotypes using minimap2 to make the input assembly to itself and the reads to the assembly followed by purge-dups to identify and remove any error, repeat or retained haplotype contigs." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/506?version=1" ; + schema1:isPartOf ; + schema1:keywords "Assembly, Genomics, Snakemake, Bioinformatics, Genome assembly" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Purge retained haplotypes using Purge-Dups" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/snakemake/3.Purging/purge-dups/Snakefile" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Take a scRNAseq counts matrix from a single sample, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. Produces a processed AnnData \r +object.""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/514?version=1" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "other-closed" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq Single Sample Processing (counts matrix)" ; + schema1:sdDatePublished "2024-08-05 10:24:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/514/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 105638 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-06-22T06:50:32Z" ; + schema1:dateModified "2023-06-23T06:47:26Z" ; + schema1:description """Take a scRNAseq counts matrix from a single sample, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. Produces a processed AnnData \r +object.""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/514?version=2" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "" ; + schema1:name "scRNAseq Single Sample Processing (counts matrix)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/514?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for analysis of Molecular Pixelation assays" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1010?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/pixelator" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/pixelator" ; + schema1:sdDatePublished "2024-08-05 10:23:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1010/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10413 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:07Z" ; + schema1:dateModified "2024-06-11T12:55:07Z" ; + schema1:description "Pipeline for analysis of Molecular Pixelation assays" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1010?version=7" ; + schema1:keywords "molecular-pixelation, pixelator, pixelgen-technologies, proteins, single-cell, single-cell-omics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/pixelator" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1010?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.11068736.svg)](https://doi.org/10.5281/zenodo.11068736)\r +\r +# JAX NGS Operations Nextflow DSL2 Pipelines\r +\r +This repository contains production bioinformatic analysis pipelines for a variety of bulk 'omics data analysis. 
Please see the [Wiki documentation](https://github.com/TheJacksonLaboratory/cs-nf-pipelines/wiki) associated with this repository for all documentation and available analysis workflows.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.874.1" ; + schema1:isBasedOn "https://github.com/TheJacksonLaboratory/cs-nf-pipelines.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for JAX NGS Operations Nextflow DSL2 Pipelines" ; + schema1:sdDatePublished "2024-08-05 10:24:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/874/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3246 ; + schema1:creator , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-05-03T12:55:48Z" ; + schema1:dateModified "2024-05-03T13:42:53Z" ; + schema1:description """[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.11068736.svg)](https://doi.org/10.5281/zenodo.11068736)\r +\r +# JAX NGS Operations Nextflow DSL2 Pipelines\r +\r +This repository contains production bioinformatic analysis pipelines for a variety of bulk 'omics data analysis. Please see the [Wiki documentation](https://github.com/TheJacksonLaboratory/cs-nf-pipelines/wiki) associated with this repository for all documentation and available analysis workflows.\r +""" ; + schema1:keywords "Bioinformatics, Nextflow, Workflows" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "JAX NGS Operations Nextflow DSL2 Pipelines" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/874?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-protein-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.277.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:38 +0100" ; + schema1:url 
"https://workflowhub.eu/workflows/277/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 59732 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-22T09:46:27Z" ; + schema1:dateModified "2023-01-16T13:57:43Z" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-protein-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/277?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Protein MD Setup 
tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_md_setup/galaxy/biobb_wf_md_setup.ga" ; + schema1:version 2 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1021?version=14" ; + schema1:isBasedOn "https://github.com/nf-core/scrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/scrnaseq" ; + schema1:sdDatePublished "2024-08-05 10:23:15 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1021/ro_crate?version=14" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12056 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-26T03:02:46Z" ; + schema1:dateModified "2024-06-26T03:02:46Z" ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:keywords "10x-genomics, 10xgenomics, alevin, bustools, Cellranger, kallisto, rna-seq, single-cell, star-solo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/scrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1021?version=14" ; + schema1:version 14 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4811 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:11Z" ; + schema1:dateModified "2024-06-11T12:55:11Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.128.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (PDBe REST API)" ; + schema1:sdDatePublished "2024-08-05 
10:30:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/128/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 36987 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-04-14T08:15:11Z" ; + schema1:dateModified "2023-04-14T08:17:24Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in 
Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/128?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (PDBe REST API)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_virtual-screening/blob/master/biobb_wf_virtual-screening/notebooks/ebi_api/wf_vs_ebi_api.ipynb" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:datePublished "2024-03-01T16:01:02+00:00" ; + schema1:description "PyCOMPSs implementation of Probabilistic Tsunami Forecast (PTF). PTF explicitly treats data- and forecast-uncertainties, enabling alert level definitions according to any predefined level of conservatism, which is connected to the average balance of missed-vs-false-alarms. Run of the Kos-Bodrum 2017 event test-case with 1000 scenarios, 8h tsunami simulation for each and forecast calculations for partial and full ensembles with focal mechanism and tsunami data updates." 
; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "https://creativecommons.org/licenses/by-nc-nd/4.0/" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "PyCOMPSs Probabilistic Tsunami Forecast (PTF) - Kos-Bodrum 2017 earthquake and tsunami test-case" ; + schema1:publisher , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 1685175 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . 
+ + a schema1:MediaObject ; + schema1:contentSize 243467 ; + schema1:dateModified "2023-10-19T13:01:10" ; + schema1:name "POIs.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 42945012 ; + schema1:dateModified "2023-10-19T13:01:10" ; + schema1:name "regional_domain.grd" ; + schema1:sdDatePublished "2024-03-01T16:01:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 37218 ; + schema1:dateModified "2023-10-19T13:01:10" ; + schema1:name "regional_domain_POIs_depth.dat" ; + schema1:sdDatePublished "2024-03-01T16:01:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 5895 ; + schema1:dateModified "2023-10-19T13:13:04" ; + schema1:name "Step1_config_template_mod.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2126 ; + schema1:dateModified "2023-10-19T12:59:46" ; + schema1:name "Step2_parfile_tmp.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 233 ; + schema1:dateModified "2024-02-29T09:18:35" ; + schema1:name "parfile_mod.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:02+00:00" . 
+ + a schema1:Dataset ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:name "Run" ; + schema1:sdDatePublished "2024-03-01T16:01:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name ".gitkeep" ; + schema1:sdDatePublished "2024-03-01T16:01:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103564 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "Step1_scenario_list_BS.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2061724 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "ptf_out.hdf5" ; + schema1:sdDatePublished "2024-03-01T16:01:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "Step2_BS_failed.log" ; + schema1:sdDatePublished "2024-03-01T16:01:14+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2086816 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "ptf_out.hdf5" ; + schema1:sdDatePublished "2024-03-01T16:01:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 6539 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "ptf_main.config" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 5142 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "sim_files.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 40 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulBS_lb_01level_01proc.bin" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS1.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS10.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS100.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS101.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS102.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS103.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS104.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS105.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS106.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS107.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS108.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS109.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS11.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS110.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS111.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS112.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS113.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS114.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS115.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS116.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS117.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS118.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS119.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS12.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS120.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS121.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS122.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS123.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS124.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS125.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS126.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS127.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS128.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS129.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS13.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS130.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS131.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS132.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS133.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS134.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS135.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS136.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS137.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS138.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS139.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS14.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS140.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS141.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS142.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS143.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS144.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS145.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS146.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS147.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS148.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS149.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS15.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS150.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS151.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS152.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS153.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS154.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS155.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS156.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS157.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS158.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS159.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS16.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS160.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS161.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS162.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS163.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS164.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS165.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS166.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS167.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS168.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS169.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS17.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS170.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS171.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS172.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS173.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS174.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS175.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS176.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS177.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS178.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS179.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS18.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS180.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS181.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS182.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS183.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS184.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS185.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS186.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS187.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS188.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS189.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS19.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS190.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS191.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS192.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS193.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS194.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS195.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS196.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS197.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS198.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS199.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS2.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS20.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS200.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS201.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS202.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS203.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS204.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS205.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS206.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS207.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS208.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS209.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS21.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS210.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS211.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS212.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS213.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS214.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS215.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS216.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS217.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS218.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS219.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS22.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS220.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS221.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS222.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS223.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS224.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS225.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS226.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS227.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS228.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS229.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS23.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS230.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS231.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS232.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS233.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS234.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS235.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS236.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS237.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS238.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS239.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS24.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS240.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS241.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS242.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS243.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS244.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS245.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS246.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS247.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS248.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS249.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS25.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS250.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS26.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS27.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS28.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS29.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS3.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS30.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS31.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS32.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS33.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS34.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS35.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS36.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS37.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS38.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS39.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS4.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS40.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS41.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS42.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS43.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS44.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS45.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS46.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS47.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS48.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS49.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS5.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS50.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS51.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS52.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS53.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS54.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS55.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS56.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS57.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS58.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS59.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS6.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS60.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS61.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS62.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS63.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS64.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS65.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS66.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS67.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS68.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS69.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS7.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS70.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS71.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS72.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS73.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS74.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS75.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS76.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS77.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS78.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS79.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS8.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS80.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS81.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS82.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS83.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS84.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS85.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS86.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS87.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS88.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS89.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS9.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS90.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS91.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS92.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS93.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS94.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS95.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS96.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS97.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS98.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 152 ; + schema1:dateModified "2024-03-01T16:00:42" ; + schema1:name "simulationsBS99.txt" ; + schema1:sdDatePublished "2024-03-01T16:01:03+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-08-05 10:22:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3877 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:01Z" ; + schema1:dateModified "2024-06-11T12:55:01Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/245?version=1" ; + schema1:isBasedOn "https://sdr.nhm.ac.uk/" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for De novo digitisation" ; + schema1:sdDatePublished "2024-08-05 10:32:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/245/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 18313 ; + schema1:dateCreated "2021-11-26T14:46:05Z" ; + schema1:dateModified "2023-01-16T13:55:15Z" ; + schema1:description "" ; + schema1:keywords "Segmentation" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "De novo digitisation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/245?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 129436 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . 
+ + a schema1:Dataset ; + schema1:dateModified "2023-12-04T08:32:38" ; + schema1:hasPart , + ; + schema1:name "data-set" ; + schema1:sdDatePublished "2023-12-04T14:19:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777398 ; + schema1:dateModified "2020-09-21T10:34:27" ; + schema1:name "file_long.txt" ; + schema1:sdDatePublished "2023-12-04T14:19:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4872 ; + schema1:dateModified "2020-09-21T10:34:27" ; + schema1:name "file_small.txt" ; + schema1:sdDatePublished "2023-12-04T14:19:45+00:00" . + + a schema1:Dataset ; + schema1:datePublished "2023-10-17T15:35:20.967645" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.12" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/991?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/hlatyping" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hlatyping" ; + schema1:sdDatePublished "2024-08-05 10:22:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/991/ro_crate?version=10" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8220 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:57Z" ; + schema1:dateModified "2024-06-11T12:54:57Z" ; + schema1:description "Precision HLA typing from next-generation sequencing data." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/991?version=9" ; + schema1:keywords "DNA, hla, hla-typing, immunology, optitype, personalized-medicine, rna" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hlatyping" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/991?version=10" ; + schema1:version 10 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/972?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/circdna" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/circdna" ; + schema1:sdDatePublished "2024-08-05 10:24:12 +0100" ; + schema1:url "https://workflowhub.eu/workflows/972/ro_crate?version=8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11216 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:45Z" ; + schema1:dateModified "2024-06-11T12:54:45Z" ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/972?version=7" ; + schema1:keywords "ampliconarchitect, ampliconsuite, circle-seq, circular, DNA, eccdna, ecdna, extrachromosomal-circular-dna, Genomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/circdna" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/972?version=8" ; + schema1:version 8 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + 
schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.127.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (Cluster90)" ; + schema1:sdDatePublished "2024-08-05 10:30:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/127/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 39576 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-09-15T11:15:47Z" ; + schema1:dateModified "2023-01-16T13:50:18Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) 
for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/127?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (Cluster90)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_virtual-screening/master/biobb_wf_virtual-screening/notebooks/clusterBindingSite/wf_vs_clusterBindingSite.ipynb" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/989?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/hic" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hic" ; + schema1:sdDatePublished "2024-08-05 10:23:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/989/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7211 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:54Z" ; + schema1:dateModified "2024-06-11T12:54:54Z" ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/989?version=7" ; + schema1:keywords "chromosome-conformation-capture, Hi-C" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hic" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/989?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "BioTranslator performs sequentially pathway analysis and gene prioritization: A specific operator is executed for each task to translate the input gene set into semantic terms and pinpoint the pivotal-role genes on the derived semantic network. The output consists of the set of statistically significant semantic terms and the associated hub genes (the gene signature), prioritized according to their involvement in the underlying semantic topology." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/189?version=1" ; + schema1:isBasedOn "http://www.biotranslator.gr:8080/u/thodk/w/biotranslator-workflow" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for BioTranslator Workflow" ; + schema1:sdDatePublished "2024-08-05 10:33:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/189/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3955 ; + schema1:dateCreated "2021-09-15T17:27:55Z" ; + schema1:dateModified "2021-09-15T17:28:33Z" ; + schema1:description "BioTranslator performs sequentially pathway analysis and gene prioritization: A specific operator is executed for each task to translate the input gene set into semantic terms and pinpoint the pivotal-role genes on the derived semantic network. The output consists of the set of statistically significant semantic terms and the associated hub genes (the gene signature), prioritized according to their involvement in the underlying semantic topology." ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/189?version=2" ; + schema1:keywords "Semantic Network Analysis, Gene Prioritization, Pathway Analysis, Biomedical Ontologies, Semantic Interpretation" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "BioTranslator Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/189?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 44291 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description "Refining Genome Annotations with Apollo" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/749?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Refining Genome Annotations with Apollo (prokaryotes)" ; + schema1:sdDatePublished "2024-08-05 10:25:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/749/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 18150 ; + schema1:creator ; + schema1:dateCreated "2024-02-14T15:59:09Z" ; + schema1:dateModified "2024-02-15T13:44:41Z" ; + schema1:description "Refining Genome Annotations with Apollo" ; + schema1:isPartOf ; + schema1:keywords "genome-annotation" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Refining Genome Annotations with Apollo (prokaryotes)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/749?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + ; + ns1:output , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Workflow converts one or multiple bam/cram files to fastq format" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/968?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/bamtofastq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bamtofastq" ; + schema1:sdDatePublished "2024-08-05 10:24:15 +0100" ; + schema1:url "https://workflowhub.eu/workflows/968/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10293 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:44Z" ; + schema1:dateModified "2024-06-11T12:54:44Z" ; + schema1:description "Workflow converts one or multiple bam/cram files to fastq format" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/968?version=5" ; + schema1:keywords "bamtofastq, Conversion, cramtofastq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bamtofastq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/968?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """## Description\r +\r +The workflow takes an input file with Cancer Driver Genes predictions (i.e. the results provided by a participant), computes a set of metrics, and compares them against the data currently stored in OpenEBench within the TCGA community. Two assessment metrics are provided for that predicitions. Also, some plots (which are optional) that allow to visualize the performance of the tool are generated. The workflow consists in three standard steps, defined by OpenEBench. The tools needed to run these steps must be in one or more Docker images, generated from [Docker containers](https://github.com/inab/TCGA_benchmarking_dockers ). Separated instances are spawned from these images for each step:\r +1. **Validation**: the input file format is checked and, if required, the content of the file is validated (e.g check whether the submitted gene IDs exist)\r +2. 
**Metrics Computation**: the predictions are compared with the 'Gold Standards' provided by the community, which results in two performance metrics - precision (Positive Predictive Value) and recall(True Positive Rate).\r +3. **Results Consolidation**: the benchmark itself is performed by merging the tool metrics with the rest of TCGA data. The results are provided in JSON format and SVG format (scatter plot).\r +\r +![alt text](https://raw.githubusercontent.com/inab/TCGA_benchmarking_workflow/1.0.6/workflow.jpg)\r +\r +\r +## Data\r +\r +* [TCGA_sample_data](./TCGA_sample_data) folder contains all the reference data required by the steps. It is derived from the manuscript:\r +[Comprehensive Characterization of Cancer Driver Genes and Mutations](https://www.cell.com/cell/fulltext/S0092-8674%2818%2930237-X?code=cell-site), Bailey et al, 2018, Cell [![doi:10.1016/j.cell.2018.02.060](https://img.shields.io/badge/doi-10.1016%2Fj.cell.2018.02.060-green.svg)](https://doi.org/10.1016/j.cell.2018.02.060) \r +* [TCGA_sample_out](./TCGA_sample_out) folder contains an example output for a worklow run, with two cancer types / challenges selected (ACC, BRCA). Results obtained from the default execution should be similar to those ones available in this directory. Results found in [TCGA_sample_out/results](./TCGA_sample_out/results) can be visualized in the browser using [`benchmarking_workflows_results_visualizer` javascript library](https://github.com/inab/benchmarking_workflows_results_visualizer).\r +\r +\r +## Usage\r +In order to use the workflow you need to:\r +* Install [Nextflow](https://www.nextflow.io/), which depends on Java virtual machine (>=8 , <15 ). 
You can automate their installation for local testing using [run_local_nextflow.bash](run_local_nextflow.bash).\r +* Clone [TCGA benchmarking Docker definitions repository](https://github.com/inab/TCGA_benchmarking_dockers) from tag 1.0.3 in a separate directory, and build locally the three Docker images found in it, running the `build.sh 1.0.3` script within that repo.\r +* Run it just using either *`nextflow run main.nf -profile docker`* or *`./run_local_nextflow.bash run main.nf -profile docker`*. Arguments specifications:\r +\r +```\r +Usage:\r +Run the pipeline with default parameters:\r +nextflow run main.nf -profile docker\r +\r +Run with user parameters:\r +nextflow run main.nf -profile docker --predictionsFile {driver.genes.file} --public_ref_dir {validation.reference.file} --participant_name {tool.name} --metrics_ref_dir {gold.standards.dir} --cancer_types {analyzed.cancer.types} --assess_dir {benchmark.data.dir} --results_dir {output.dir}\r +\r +Mandatory arguments:\r + --input List of cancer genes prediction\r + --community_id Name or OEB permanent ID for the benchmarking community\r + --public_ref_dir Directory with list of cancer genes used to validate the predictions\r + --participant_id Name of the tool used for prediction\r + --goldstandard_dir Dir that contains metrics reference datasets for all cancer types\r + --event_id List of types of cancer selected by the user, separated by spaces\r + --assess_dir Dir where the data for the benchmark are stored\r +\r +Other options:\r + --validation_result The output file where the results from validation step will be saved\r + --assessment_results The output file where the results from the computed metrics step will be saved\r + --aggregation_results The output file where the consolidation of the benchmark will be saved\r + --statistics_results The output directory with nextflow statistics\r + --outdir The output directory where the consolidation of the benchmark will be saved\r + --statsdir The output 
directory with nextflow statistics\r + --data_model_export_dir The output dir where json file with benchmarking data model contents will be saved\r + --otherdir The output directory where custom results will be saved (no directory inside)\r +Flags:\r + --help Display this message\r +```\r +\r +Default input parameters and Docker images to use for each step can be specified in the [config](./nextflow.config) file\r +**NOTE: In order to make your workflow compatible with the [OpenEBench VRE Nextflow Executor](https://github.com/inab/vre-process_nextflow-executor), please make sure to use the same parameter names in your workflow.**""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/244?version=2" ; + schema1:isBasedOn "https://github.com/inab/TCGA_benchmarking_workflow/tree/1.0.8" ; + schema1:license "LGPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for OpenEBench assessment workflow" ; + schema1:sdDatePublished "2024-08-05 10:32:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/244/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5597 ; + schema1:creator , + , + ; + schema1:dateCreated "2021-11-24T14:03:22Z" ; + schema1:dateModified "2021-11-24T21:57:06Z" ; + schema1:description """## Description\r +\r +The workflow takes an input file with Cancer Driver Genes predictions (i.e. the results provided by a participant), computes a set of metrics, and compares them against the data currently stored in OpenEBench within the TCGA community. Two assessment metrics are provided for that predicitions. Also, some plots (which are optional) that allow to visualize the performance of the tool are generated. The workflow consists in three standard steps, defined by OpenEBench. 
The tools needed to run these steps must be in one or more Docker images, generated from [Docker containers](https://github.com/inab/TCGA_benchmarking_dockers ). Separated instances are spawned from these images for each step:\r +1. **Validation**: the input file format is checked and, if required, the content of the file is validated (e.g check whether the submitted gene IDs exist)\r +2. **Metrics Computation**: the predictions are compared with the 'Gold Standards' provided by the community, which results in two performance metrics - precision (Positive Predictive Value) and recall(True Positive Rate).\r +3. **Results Consolidation**: the benchmark itself is performed by merging the tool metrics with the rest of TCGA data. The results are provided in JSON format and SVG format (scatter plot).\r +\r +![alt text](https://raw.githubusercontent.com/inab/TCGA_benchmarking_workflow/1.0.6/workflow.jpg)\r +\r +\r +## Data\r +\r +* [TCGA_sample_data](./TCGA_sample_data) folder contains all the reference data required by the steps. It is derived from the manuscript:\r +[Comprehensive Characterization of Cancer Driver Genes and Mutations](https://www.cell.com/cell/fulltext/S0092-8674%2818%2930237-X?code=cell-site), Bailey et al, 2018, Cell [![doi:10.1016/j.cell.2018.02.060](https://img.shields.io/badge/doi-10.1016%2Fj.cell.2018.02.060-green.svg)](https://doi.org/10.1016/j.cell.2018.02.060) \r +* [TCGA_sample_out](./TCGA_sample_out) folder contains an example output for a worklow run, with two cancer types / challenges selected (ACC, BRCA). Results obtained from the default execution should be similar to those ones available in this directory. 
Results found in [TCGA_sample_out/results](./TCGA_sample_out/results) can be visualized in the browser using [`benchmarking_workflows_results_visualizer` javascript library](https://github.com/inab/benchmarking_workflows_results_visualizer).\r +\r +\r +## Usage\r +In order to use the workflow you need to:\r +* Install [Nextflow](https://www.nextflow.io/), which depends on Java virtual machine (>=8 , <15 ). You can automate their installation for local testing using [run_local_nextflow.bash](run_local_nextflow.bash).\r +* Clone [TCGA benchmarking Docker definitions repository](https://github.com/inab/TCGA_benchmarking_dockers) from tag 1.0.3 in a separate directory, and build locally the three Docker images found in it, running the `build.sh 1.0.3` script within that repo.\r +* Run it just using either *`nextflow run main.nf -profile docker`* or *`./run_local_nextflow.bash run main.nf -profile docker`*. Arguments specifications:\r +\r +```\r +Usage:\r +Run the pipeline with default parameters:\r +nextflow run main.nf -profile docker\r +\r +Run with user parameters:\r +nextflow run main.nf -profile docker --predictionsFile {driver.genes.file} --public_ref_dir {validation.reference.file} --participant_name {tool.name} --metrics_ref_dir {gold.standards.dir} --cancer_types {analyzed.cancer.types} --assess_dir {benchmark.data.dir} --results_dir {output.dir}\r +\r +Mandatory arguments:\r + --input List of cancer genes prediction\r + --community_id Name or OEB permanent ID for the benchmarking community\r + --public_ref_dir Directory with list of cancer genes used to validate the predictions\r + --participant_id Name of the tool used for prediction\r + --goldstandard_dir Dir that contains metrics reference datasets for all cancer types\r + --event_id List of types of cancer selected by the user, separated by spaces\r + --assess_dir Dir where the data for the benchmark are stored\r +\r +Other options:\r + --validation_result The output file where the results from validation 
step will be saved\r + --assessment_results The output file where the results from the computed metrics step will be saved\r + --aggregation_results The output file where the consolidation of the benchmark will be saved\r + --statistics_results The output directory with nextflow statistics\r + --outdir The output directory where the consolidation of the benchmark will be saved\r + --statsdir The output directory with nextflow statistics\r + --data_model_export_dir The output dir where json file with benchmarking data model contents will be saved\r + --otherdir The output directory where custom results will be saved (no directory inside)\r +Flags:\r + --help Display this message\r +```\r +\r +Default input parameters and Docker images to use for each step can be specified in the [config](./nextflow.config) file\r +**NOTE: In order to make your workflow compatible with the [OpenEBench VRE Nextflow Executor](https://github.com/inab/vre-process_nextflow-executor), please make sure to use the same parameter names in your workflow.**""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/244?version=3" ; + schema1:keywords "tcga, openebench, benchmarking" ; + schema1:license "https://spdx.org/licenses/LGPL-3.0" ; + schema1:name "OpenEBench assessment workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/244?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1027?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/viralrecon" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/viralrecon" ; + schema1:sdDatePublished "2024-08-05 10:23:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1027/ro_crate?version=9" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10179 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:21Z" ; + schema1:dateModified "2024-06-11T12:55:21Z" ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1027?version=10" ; + schema1:keywords "Amplicon, ARTIC, Assembly, covid-19, covid19, illumina, long-read-sequencing, Metagenomics, nanopore, ONT, oxford-nanopore, SARS-CoV-2, variant-calling, viral, Virus" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/viralrecon" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1027?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-23T13:33:07.961947" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Scaffolding-HiC-VGP8/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-08-05 10:22:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9644 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:10Z" ; + schema1:dateModified "2024-06-11T12:55:10Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=15" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-08-05 10:23:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=15" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8964 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:04Z" ; + schema1:dateModified "2024-06-11T12:55:04Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=15" ; + schema1:version 15 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """This is a genomics pipeline to do a single germline sample variant-calling, adapted from GATK Best Practice Workflow.\r +\r +This workflow is a reference pipeline for using the Janis Python framework (pipelines assistant).\r +- Alignment: bwa-mem\r +- Variant-Calling: GATK HaplotypeCaller\r +- Outputs the final variants in the VCF format.\r +\r +**Resources**\r +\r +This pipeline has been tested using the HG38 reference set, available on Google Cloud Storage through:\r +\r +- https://console.cloud.google.com/storage/browser/genomics-public-data/references/hg38/v0/\r +\r +This pipeline expects the assembly references to be as they appear in that storage (".fai", ".amb", ".ann", ".bwt", ".pac", ".sa", "^.dict").\r +The known sites (snps_dbsnp, snps_1000gp, known_indels, mills_indels) should be gzipped and tabix indexed.\r +\r +\r +Infrastructure_deployment_metadata: Spartan (Unimelb)""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/236?version=1" ; + schema1:isBasedOn 
"https://github.com/PMCC-BioinformaticsCore/janis-pipelines" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Janis Germline Variant-Calling Workflow (GATK)" ; + schema1:sdDatePublished "2024-08-05 10:32:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/236/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9869 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7806 ; + schema1:creator , + ; + schema1:dateCreated "2021-11-12T02:30:06Z" ; + schema1:dateModified "2023-01-16T13:54:51Z" ; + schema1:description """This is a genomics pipeline to do a single germline sample variant-calling, adapted from GATK Best Practice Workflow.\r +\r +This workflow is a reference pipeline for using the Janis Python framework (pipelines assistant).\r +- Alignment: bwa-mem\r +- Variant-Calling: GATK HaplotypeCaller\r +- Outputs the final variants in the VCF format.\r +\r +**Resources**\r +\r +This pipeline has been tested using the HG38 reference set, available on Google Cloud Storage through:\r +\r +- https://console.cloud.google.com/storage/browser/genomics-public-data/references/hg38/v0/\r +\r +This pipeline expects the assembly references to be as they appear in that storage (".fai", ".amb", ".ann", ".bwt", ".pac", ".sa", "^.dict").\r +The known sites (snps_dbsnp, snps_1000gp, known_indels, mills_indels) should be gzipped and tabix indexed.\r +\r +\r +Infrastructure_deployment_metadata: Spartan (Unimelb)""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Janis Germline Variant-Calling Workflow (GATK)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url 
"https://workflowhub.eu/workflows/236?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 37814 . + + a schema1:Dataset ; + schema1:datePublished "2023-09-15T15:02:21.110980" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "rnaseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.11" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:description "This workflow uses Illumina and Oxford Nanopore reads that were pre-processed to remove human-derived sequences. Two assembly tools are used: spades and unicycler. In addition to assemblies (actual sequences) the two tools produce assembly graphs that can be used for visualization of assembly with bandage. More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/5?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genomics - Assembly of the genome sequence" ; + schema1:sdDatePublished "2024-08-05 10:33:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/5/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 4086 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15846 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T12:32:21Z" ; + schema1:dateModified "2023-01-16T13:39:53Z" ; + schema1:description "This workflow uses Illumina and Oxford Nanopore reads that were pre-processed to remove human-derived sequences. Two assembly tools are used: spades and unicycler. In addition to assemblies (actual sequences) the two tools produce assembly graphs that can be used for visualization of assembly with bandage. More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Genomics - Assembly of the genome sequence" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/5?version=1" ; + schema1:version 1 ; + ns1:input , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 7739 . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-11-03T22:43:56+00:00" ; + schema1:description "Autosubmit mHM test domains" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:publisher . + + a ; + schema1:additionalType "Text" ; + schema1:name "CONFIG.AUTOSUBMIT_VERSION" ; + schema1:valueRequired "True" . 
+ + a ; + schema1:additionalType "Integer" ; + schema1:name "CONFIG.MAXWAITINGJOBS" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Integer" ; + schema1:name "CONFIG.TOTALJOBS" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Text" ; + schema1:name "DEFAULT.CUSTOM_CONFIG" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Text" ; + schema1:name "DEFAULT.EXPID" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Text" ; + schema1:name "DEFAULT.HPCARCH" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Text" ; + schema1:name "EXPERIMENT.CALENDAR" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Integer" ; + schema1:name "EXPERIMENT.CHUNKSIZE" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Text" ; + schema1:name "EXPERIMENT.CHUNKSIZEUNIT" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Integer" ; + schema1:name "EXPERIMENT.DATELIST" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Text" ; + schema1:name "EXPERIMENT.MEMBERS" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Integer" ; + schema1:name "EXPERIMENT.NUMCHUNKS" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Boolean" ; + schema1:name "GIT.FETCH_SINGLE_BRANCH" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Text" ; + schema1:name "GIT.PROJECT_BRANCH" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Text" ; + schema1:name "GIT.PROJECT_COMMIT" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Text" ; + schema1:name "GIT.PROJECT_ORIGIN" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Text" ; + schema1:name "GIT.PROJECT_SUBMODULES" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Text" ; + schema1:name "MHM.BRANCH_NAME" ; + schema1:valueRequired "True" . 
+ + a ; + schema1:additionalType "Integer" ; + schema1:name "MHM.DOMAIN" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Integer" ; + schema1:name "MHM.EVAL_PERIOD_DURATION_YEARS" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Text" ; + schema1:name "MHM.SINGULARITY_CONTAINER" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Text" ; + schema1:name "PROJECT.PROJECT_DESTINATION" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "Text" ; + schema1:name "PROJECT.PROJECT_TYPE" ; + schema1:valueRequired "True" . + + a ; + schema1:additionalType "File" ; + schema1:name "plot.gif" ; + schema1:valueRequired "True" . + + a schema1:MediaObject ; + schema1:contentSize 1579151 ; + schema1:dateModified "2023-11-03T22:41:31" ; + schema1:encodingFormat "image/gif" ; + schema1:exampleOfWork ; + schema1:name "plot.gif" ; + schema1:sdDatePublished "2023-11-03T22:43:57+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Basic processing of a QC-filtered Anndata Object. UMAP, clustering e.t.c " ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/468?version=1" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq_QCtoBasicProcessing" ; + schema1:sdDatePublished "2024-08-05 10:24:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/468/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 33517 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-05-05T06:50:24Z" ; + schema1:dateModified "2023-06-22T06:20:10Z" ; + schema1:description "Basic processing of a QC-filtered Anndata Object. 
UMAP, clustering e.t.c " ; + schema1:isBasedOn "https://workflowhub.eu/workflows/468?version=2" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "scRNAseq_QCtoBasicProcessing" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/468?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=24" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-08-05 10:24:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=24" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13434 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:51Z" ; + schema1:dateModified "2024-06-11T12:54:51Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=24" ; + schema1:version 24 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-08-05 10:23:17 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9465 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:15Z" ; + schema1:dateModified "2024-06-11T12:55:15Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Takes fastqs and reference data, to produce a single cell counts matrix into and save in annData format - adding a column called sample with the sample name. 
" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/513?version=1" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "other-closed" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq: Count and Load with starSOLO" ; + schema1:sdDatePublished "2024-08-05 10:24:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/513/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 27880 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-06-22T06:47:36Z" ; + schema1:dateModified "2023-11-09T03:49:56Z" ; + schema1:description "Takes fastqs and reference data, to produce a single cell counts matrix into and save in annData format - adding a column called sample with the sample name. " ; + schema1:isBasedOn "https://workflowhub.eu/workflows/513?version=2" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "" ; + schema1:name "scRNAseq: Count and Load with starSOLO" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/513?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling" ; + schema1:mainEntity ; + schema1:name "COVID-19-PE-ARTIC-ILLUMINA (v0.2)" ; + schema1:sdDatePublished "2021-04-09 03:00:39 +0100" ; + schema1:softwareVersion "v0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 59379 ; + schema1:name "COVID-19-PE-ARTIC-ILLUMINA" ; + schema1:programmingLanguage . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-07-09T15:26:27.773435" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-only-VGP3/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/986?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/fetchngs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fetchngs" ; + schema1:sdDatePublished "2024-08-05 10:23:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/986/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6338 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:52Z" ; + schema1:dateModified "2024-06-11T12:54:52Z" ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/986?version=13" ; + schema1:keywords "ddbj, download, ena, FASTQ, geo, sra, synapse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fetchngs" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/986?version=3" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. 
Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. 
\r +- At the **biological level**, it is important to link observed **conformational ensembles**, to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDKe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.822.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_flexdyn/docker" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Docker Protein conformational ensembles generation" ; + schema1:sdDatePublished "2024-08-05 10:24:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/822/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 746 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-22T10:40:29Z" ; + schema1:dateModified "2024-05-22T13:46:38Z" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. 
Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. 
\r +- At the **biological level**, it is important to link observed **conformational ensembles** to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDBe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Docker Protein conformational ensembles generation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_flexdyn/docker/Dockerfile" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """The notebook shows how to load an IDR image with labels.\r +\r +The image is referenced in the paper "NesSys: a novel method for accurate nuclear segmentation in 3D" published August 2019 in PLOS Biology: https://doi.org/10.1371/journal.pbio.3000388 and can be viewed online in the Image Data Resource.\r +\r +In this notebook, the image is loaded together with the labels and analyzed using StarDist. 
The StarDist analysis produces a segmentation, which is then viewed side-by-side with the original segmentations produced by the authors of the paper obtained via the loaded labels.\r +\r +## Launch\r +This notebook uses the [environment_stardist.yml](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/environment_stardist.yml) file.\r +\r +See [Setup](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/setup.md).""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.493.1" ; + schema1:isBasedOn "https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/stardist.ipynb" ; + schema1:license "BSD-2-Clause" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Validate a tool against IDR data: Load Image with labels from IDR, re-analyze using StarDist" ; + schema1:sdDatePublished "2024-08-05 10:30:16 +0100" ; + schema1:url "https://workflowhub.eu/workflows/493/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 683190 ; + schema1:url "https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/includes/StarDistIDR.png" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 30978 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-01T10:05:54Z" ; + schema1:dateModified "2023-06-01T10:07:09Z" ; + schema1:description """The notebook shows how to load an IDR image with labels.\r +\r +The image is referenced in the paper "NesSys: a novel method for accurate nuclear segmentation in 3D" published August 2019 in PLOS Biology: https://doi.org/10.1371/journal.pbio.3000388 and can be viewed online in the Image Data Resource.\r +\r +In this notebook, the image is loaded together with the labels and analyzed using StarDist. 
The StarDist analysis produces a segmentation, which is then viewed side-by-side with the original segmentations produced by the authors of the paper obtained via the loaded labels.\r +\r +## Launch\r +This notebook uses the [environment_stardist.yml](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/environment_stardist.yml) file.\r +\r +See [Setup](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/setup.md).""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/BSD-2-Clause" ; + schema1:name "Validate a tool against IDR data: Load Image with labels from IDR, re-analyze using StarDist" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/stardist.ipynb" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """The workflows in this collection are from the '16S Microbial Analysis with mothur' tutorial for analysis of 16S data (Saskia Hiltemann, Bérénice Batut, Dave Clements), adapted for pipeline use on galaxy australia (Ahmed Mehdi). The workflows developed in galaxy use mothur software package developed by Schloss et al https://pubmed.ncbi.nlm.nih.gov/19801464/. 
\r +\r +Please also refer to the 16S tutorials available at Galaxy https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html and [https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop/tutorial.html\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/652?version=1" ; + schema1:isBasedOn "https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Workflow 6: Alpha Diversity [16S Microbial Analysis With Mothur]" ; + schema1:sdDatePublished "2024-08-05 10:27:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/652/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8494 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2023-11-09T05:24:50Z" ; + schema1:dateModified "2023-11-09T05:24:50Z" ; + schema1:description """The workflows in this collection are from the '16S Microbial Analysis with mothur' tutorial for analysis of 16S data (Saskia Hiltemann, Bérénice Batut, Dave Clements), adapted for pipeline use on galaxy australia (Ahmed Mehdi). The workflows developed in galaxy use mothur software package developed by Schloss et al https://pubmed.ncbi.nlm.nih.gov/19801464/. 
\r +\r +Please also refer to the 16S tutorials available at Galaxy https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html and [https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop/tutorial.html\r +""" ; + schema1:isPartOf ; + schema1:keywords "Metagenomics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Workflow 6: Alpha Diversity [16S Microbial Analysis With Mothur]" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/652?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Summary\r +\r +This tutorial aims to illustrate the process of setting up a simulation system containing a protein in complex with a ligand, step by step, using the BioExcel Building Blocks library (biobb). The particular example used is the T4 lysozyme L99A/M102Q protein (PDB code 3HTB), in complex with the 2-propylphenol small molecule (3-letter Code JZ4).\r +\r +Workflow engine is a jupyter notebook. It can be run in binder, following the link given, or locally. Auxiliar libraries used are: nb\\_conda_kernels, nglview, ipywidgets, os, plotly, and simpletraj. Environment can be setup using the included environment.yml file.\r +\r +\r +# Parameters\r +\r +## Inputs\r +\r +Parameters needed to configure the workflow:\r +\r +* **pdbCode**: PDB code of the protein-ligand complex structure (e.g. 3HTB)\r +\r +* **ligandCode**: Small molecule 3-letter code for the ligand structure (e.g. 
JZ4)\r +\r +* **mol\\_charge**: Charge of the small molecule, needed to add hydrogen atoms.\r +\r +## Outputs\r +\r +Output files generated (named according to the input parameters given above):\r +\r +* **output\\_md\\_gro**: final structure of the MD setup protocol\r +\r +* **output\\_md\\_trr**: final trajectory of the MD setup protocol\r +\r +* **output\\_md\\_cpt**: final checkpoint file\r +\r +* **output\\_gppmd\\_tpr**: final tpr file\r +\r +* **output\\_genion\\_top\\_zip**: final topology of the MD system""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.56.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_protein-complex_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb) (jupyter notebook)" ; + schema1:sdDatePublished "2024-08-05 10:31:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/56/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 272452 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 85281 ; + schema1:creator , + ; + schema1:dateCreated "2020-09-22T15:30:25Z" ; + schema1:dateModified "2021-05-07T13:28:09Z" ; + schema1:description """# Summary\r +\r +This tutorial aims to illustrate the process of setting up a simulation system containing a protein in complex with a ligand, step by step, using the BioExcel Building Blocks library (biobb). The particular example used is the T4 lysozyme L99A/M102Q protein (PDB code 3HTB), in complex with the 2-propylphenol small molecule (3-letter Code JZ4).\r +\r +Workflow engine is a jupyter notebook. It can be run in binder, following the link given, or locally. 
Auxiliar libraries used are: nb\\_conda_kernels, nglview, ipywidgets, os, plotly, and simpletraj. Environment can be setup using the included environment.yml file.\r +\r +\r +# Parameters\r +\r +## Inputs\r +\r +Parameters needed to configure the workflow:\r +\r +* **pdbCode**: PDB code of the protein-ligand complex structure (e.g. 3HTB)\r +\r +* **ligandCode**: Small molecule 3-letter code for the ligand structure (e.g. JZ4)\r +\r +* **mol\\_charge**: Charge of the small molecule, needed to add hydrogen atoms.\r +\r +## Outputs\r +\r +Output files generated (named according to the input parameters given above):\r +\r +* **output\\_md\\_gro**: final structure of the MD setup protocol\r +\r +* **output\\_md\\_trr**: final trajectory of the MD setup protocol\r +\r +* **output\\_md\\_cpt**: final checkpoint file\r +\r +* **output\\_gppmd\\_tpr**: final tpr file\r +\r +* **output\\_genion\\_top\\_zip**: final topology of the MD system""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/56?version=6" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb) (jupyter notebook)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/56?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2022-06-13T14:36:29.894406" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/VGP-meryldb-creation-trio" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "VGP-meryldb-creation-trio/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.11" . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-05-28T11:44:29.989985" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/dada2" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "dada2/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Common Workflow Language example that illustrate the process of setting up a simulation system containing a protein, step by step, using the [BioExcel Building Blocks](/projects/11) library (biobb). The particular example used is the Lysozyme protein (PDB code 1AKI). This workflow returns a resulting protein structure and simulated 3D trajectories." ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.29.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb-wf-md-setup-protein-cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Protein MD Setup tutorial using BioExcel Building Blocks (biobb) in CWL" ; + schema1:sdDatePublished "2024-08-05 10:32:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/29/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11271 ; + schema1:creator ; + schema1:dateCreated "2021-05-10T09:00:56Z" ; + schema1:dateModified "2023-01-16T13:42:00Z" ; + schema1:description "Common Workflow Language example that illustrate the process of setting up a simulation system containing a protein, step by step, using the [BioExcel Building Blocks](/projects/11) library (biobb). The particular example used is the Lysozyme protein (PDB code 1AKI). 
This workflow returns a resulting protein structure and simulated 3D trajectories." ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/29?version=2" ; + schema1:isPartOf , + ; + schema1:keywords "molecular dynamics, trajectories, protein" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Protein MD Setup tutorial using BioExcel Building Blocks (biobb) in CWL" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/29?version=3" ; + schema1:version 3 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 60267 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# MGnify genomes catalogue pipeline\r +\r +[MGnify](https://www.ebi.ac.uk/metagenomics/) A pipeline to perform taxonomic and functional annotation and to generate a catalogue from a set of isolate and/or metagenome-assembled genomes (MAGs) using the workflow described in the following publication:\r +\r +Gurbich TA, Almeida A, Beracochea M, Burdett T, Burgin J, Cochrane G, Raj S, Richardson L, Rogers AB, Sakharova E, Salazar GA and Finn RD. (2023) [MGnify Genomes: A Resource for Biome-specific Microbial Genome Catalogues.](https://www.sciencedirect.com/science/article/pii/S0022283623000724) J Mol Biol. 
doi: https://doi.org/10.1016/j.jmb.2023.168016\r +\r +Detailed information about existing MGnify catalogues: https://docs.mgnify.org/src/docs/genome-viewer.html\r +\r +### Tools used in the pipeline\r +| Tool/Database | Version | Purpose |\r +|---------------------------------------------------------|-------------------|----------- |\r +| CheckM2 | 1.0.1 | Determining genome quality |\r +| dRep | 3.2.2 | Genome clustering |\r +| Mash | 2.3 | Sketch for the catalogue; placement of genomes into clusters (update only); strain tree |\r +| GUNC | 1.0.3 | Quality control |\r +| GUNC DB | 2.0.4 | Database for GUNC |\r +| GTDB-Tk | 2.3.0 | Assigning taxonomy; generating alignments |\r +| GTDB | r214 | Database for GTDB-Tk |\r +| Prokka | 1.14.6 | Protein annotation |\r +| IQ-TREE 2 | 2.2.0.3 | Generating a phylogenetic tree |\r +| Kraken 2 | 2.1.2 | Generating a kraken database |\r +| Bracken | 2.6.2 | Generating a bracken database |\r +| MMseqs2 | 13.45111 | Generating a protein catalogue |\r +| eggNOG-mapper | 2.1.11 | Protein annotation (eggNOG, KEGG, COG, CAZy) |\r +| eggNOG DB | 5.0.2 | Database for eggNOG-mapper |\r +| Diamond | 2.0.11 | Protein annotation (eggNOG) |\r +| InterProScan | 5.62-94.0 | Protein annotation (InterPro, Pfam) |\r +| CRISPRCasFinder | 4.3.2 | Annotation of CRISPR arrays |\r +| AMRFinderPlus | 3.11.4 | Antimicrobial resistance gene annotation; virulence factors, biocide, heat, acid, and metal resistance gene annotation |\r +| AMRFinderPlus DB | 3.11 2023-02-23.1 | Database for AMRFinderPlus |\r +| SanntiS | 0.9.3.2 | Biosynthetic gene cluster annotation |\r +| Infernal | 1.1.4 | RNA predictions |\r +| tRNAscan-SE | 2.0.9 | tRNA predictions |\r +| Rfam | 14.9 | Identification of SSU/LSU rRNA and other ncRNAs |\r +| Panaroo | 1.3.2 | Pan-genome computation |\r +| Seqtk | 1.3 | Generating a gene catalogue |\r +| VIRify | 2.0.1 | Viral sequence annotation |\r +| [Mobilome annotation 
pipeline](https://github.com/EBI-Metagenomics/mobilome-annotation-pipeline) | 2.0.1 | Mobilome annotation |\r +| samtools | 1.15 | FASTA indexing |\r +\r +## Setup\r +\r +### Environment\r +\r +The pipeline is implemented in [Nextflow](https://www.nextflow.io/).\r +\r +Requirements:\r +- [singulairty](https://sylabs.io/docs/) or [docker](https://www.docker.com/)\r +\r +#### Reference databases\r +\r +The pipeline needs the following reference databases and configuration files (roughtly ~150G):\r +\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/gunc_db_2.0.4.dmnd.gz\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/eggnog_db_5.0.2.tgz\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/rfam_14.9/\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/kegg_classes.tsv\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/continent_countries.csv\r +- https://data.ace.uq.edu.au/public/gtdb/data/releases/release214/214.0/auxillary_files/gtdbtk_r214_data.tar.gz\r +- ftp://ftp.ncbi.nlm.nih.gov/pathogen/Antimicrobial_resistance/AMRFinderPlus/database/3.11/2023-02-23.1\r +- https://zenodo.org/records/4626519/files/uniref100.KO.v1.dmnd.gz\r +\r +### Containers\r +\r +This pipeline requires [singularity](https://sylabs.io/docs/) or [docker](https://www.docker.com/) as the container engine to run pipeline.\r +\r +The containers are hosted in [biocontainers](https://biocontainers.pro/) and [quay.io/microbiome-informatics](https://quay.io/organization/microbiome-informatics) repository.\r +\r +It's possible to build the containers from scratch using the following script:\r +\r +```bash\r +cd containers && bash build.sh\r +```\r +\r +## Running the pipeline\r +\r +## Data preparation\r +\r +1. You need to pre-download your data to directories and make sure that genomes are uncompressed. 
Scripts to fetch genomes from ENA ([fetch_ena.py](https://github.com/EBI-Metagenomics/genomes-pipeline/blob/master/bin/fetch_ena.py)) and NCBI ([fetch_ncbi.py](https://github.com/EBI-Metagenomics/genomes-pipeline/blob/master/bin/fetch_ncbi.py)) are provided and need to be executed separately from the pipeline. If you have downloaded genomes from both ENA and NCBI, put them into separate folders.\r +\r +2. When genomes are fetched from ENA using the `fetch_ena.py` script, a CSV file with contamination and completeness statistics is also created in the same directory where genomes are saved to. If you are downloading genomes using a different approach, a CSV file needs to be created manually (each line should be genome accession, % completeness, % contamination). The ENA fetching script also pre-filters genomes to satisfy the QS50 cut-off (QS = % completeness - 5 * % contamination).\r +\r +3. You will need the following information to run the pipeline:\r + - catalogue name (for example, zebrafish-faecal)\r + - catalogue version (for example, 1.0)\r + - catalogue biome (for example, root:Host-associated:Human:Digestive system:Large intestine:Fecal)\r + - min and max accession number to be assigned to the genomes (only MGnify specific). 
Max - Min = #total number of genomes (NCBI+ENA)\r +\r +### Execution\r +\r +The pipeline is built in [Nextflow](https://www.nextflow.io), and utilized containers to run the software (we don't support conda ATM).\r +In order to run the pipeline it's required that the user creates a profile that suits their needs, there is an `ebi` profile in `nexflow.config` that can be used as template.\r +\r +After downloading the databases and adjusting the config file:\r +\r +```bash\r +nextflow run EBI-Metagenomics/genomes-pipeline -c -profile \\\r +--genome-prefix=MGYG \\\r +--biome="root:Host-associated:Fish:Digestive system" \\\r +--ena_genomes= \\\r +--ena_genomes_checkm= \\\r +--mgyg_start=0 \\\r +--mgyg_end=10 \\\r +--preassigned_accessions=\r +--catalogue_name=zebrafish-faecal \\\r +--catalogue_version="1.0" \\\r +--ftp_name="zebrafish-faecal" \\\r +--ftp_version="v1.0" \\\r +--outdir=""\r +```\r +\r +### Development\r +\r +Install development tools (including pre-commit hooks to run Black code formatting).\r +\r +```bash\r +pip install -r requirements-dev.txt\r +pre-commit install\r +```\r +\r +#### Code style\r +\r +Use Black, this tool is configured if you install the pre-commit tools as above.\r +\r +To manually run them: black .\r +\r +### Testing\r +\r +This repo has 2 set of tests, python unit tests for some of the most critical python scripts and [nf-test](https://github.com/askimed/nf-test) scripts for the nextflow code.\r +\r +To run the python tests\r +\r +```bash\r +pip install -r requirements-test.txt\r +pytest\r +```\r +\r +To run the nextflow ones the databases have to downloaded manually, we are working to improve this.\r +\r +```bash\r +nf-test test tests/*\r +```\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/462?version=2" ; + schema1:isBasedOn "https://github.com/EBI-Metagenomics/genomes-pipeline.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for MGnify genomes 
catalogue pipeline" ; + schema1:sdDatePublished "2024-08-05 10:24:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/462/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 129 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-05-23T11:21:45Z" ; + schema1:dateModified "2024-05-23T11:23:44Z" ; + schema1:description """# MGnify genomes catalogue pipeline\r +\r +[MGnify](https://www.ebi.ac.uk/metagenomics/) A pipeline to perform taxonomic and functional annotation and to generate a catalogue from a set of isolate and/or metagenome-assembled genomes (MAGs) using the workflow described in the following publication:\r +\r +Gurbich TA, Almeida A, Beracochea M, Burdett T, Burgin J, Cochrane G, Raj S, Richardson L, Rogers AB, Sakharova E, Salazar GA and Finn RD. (2023) [MGnify Genomes: A Resource for Biome-specific Microbial Genome Catalogues.](https://www.sciencedirect.com/science/article/pii/S0022283623000724) J Mol Biol. 
doi: https://doi.org/10.1016/j.jmb.2023.168016\r +\r +Detailed information about existing MGnify catalogues: https://docs.mgnify.org/src/docs/genome-viewer.html\r +\r +### Tools used in the pipeline\r +| Tool/Database | Version | Purpose |\r +|---------------------------------------------------------|-------------------|----------- |\r +| CheckM2 | 1.0.1 | Determining genome quality |\r +| dRep | 3.2.2 | Genome clustering |\r +| Mash | 2.3 | Sketch for the catalogue; placement of genomes into clusters (update only); strain tree |\r +| GUNC | 1.0.3 | Quality control |\r +| GUNC DB | 2.0.4 | Database for GUNC |\r +| GTDB-Tk | 2.3.0 | Assigning taxonomy; generating alignments |\r +| GTDB | r214 | Database for GTDB-Tk |\r +| Prokka | 1.14.6 | Protein annotation |\r +| IQ-TREE 2 | 2.2.0.3 | Generating a phylogenetic tree |\r +| Kraken 2 | 2.1.2 | Generating a kraken database |\r +| Bracken | 2.6.2 | Generating a bracken database |\r +| MMseqs2 | 13.45111 | Generating a protein catalogue |\r +| eggNOG-mapper | 2.1.11 | Protein annotation (eggNOG, KEGG, COG, CAZy) |\r +| eggNOG DB | 5.0.2 | Database for eggNOG-mapper |\r +| Diamond | 2.0.11 | Protein annotation (eggNOG) |\r +| InterProScan | 5.62-94.0 | Protein annotation (InterPro, Pfam) |\r +| CRISPRCasFinder | 4.3.2 | Annotation of CRISPR arrays |\r +| AMRFinderPlus | 3.11.4 | Antimicrobial resistance gene annotation; virulence factors, biocide, heat, acid, and metal resistance gene annotation |\r +| AMRFinderPlus DB | 3.11 2023-02-23.1 | Database for AMRFinderPlus |\r +| SanntiS | 0.9.3.2 | Biosynthetic gene cluster annotation |\r +| Infernal | 1.1.4 | RNA predictions |\r +| tRNAscan-SE | 2.0.9 | tRNA predictions |\r +| Rfam | 14.9 | Identification of SSU/LSU rRNA and other ncRNAs |\r +| Panaroo | 1.3.2 | Pan-genome computation |\r +| Seqtk | 1.3 | Generating a gene catalogue |\r +| VIRify | 2.0.1 | Viral sequence annotation |\r +| [Mobilome annotation 
pipeline](https://github.com/EBI-Metagenomics/mobilome-annotation-pipeline) | 2.0.1 | Mobilome annotation |\r +| samtools | 1.15 | FASTA indexing |\r +\r +## Setup\r +\r +### Environment\r +\r +The pipeline is implemented in [Nextflow](https://www.nextflow.io/).\r +\r +Requirements:\r +- [singulairty](https://sylabs.io/docs/) or [docker](https://www.docker.com/)\r +\r +#### Reference databases\r +\r +The pipeline needs the following reference databases and configuration files (roughtly ~150G):\r +\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/gunc_db_2.0.4.dmnd.gz\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/eggnog_db_5.0.2.tgz\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/rfam_14.9/\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/kegg_classes.tsv\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/continent_countries.csv\r +- https://data.ace.uq.edu.au/public/gtdb/data/releases/release214/214.0/auxillary_files/gtdbtk_r214_data.tar.gz\r +- ftp://ftp.ncbi.nlm.nih.gov/pathogen/Antimicrobial_resistance/AMRFinderPlus/database/3.11/2023-02-23.1\r +- https://zenodo.org/records/4626519/files/uniref100.KO.v1.dmnd.gz\r +\r +### Containers\r +\r +This pipeline requires [singularity](https://sylabs.io/docs/) or [docker](https://www.docker.com/) as the container engine to run pipeline.\r +\r +The containers are hosted in [biocontainers](https://biocontainers.pro/) and [quay.io/microbiome-informatics](https://quay.io/organization/microbiome-informatics) repository.\r +\r +It's possible to build the containers from scratch using the following script:\r +\r +```bash\r +cd containers && bash build.sh\r +```\r +\r +## Running the pipeline\r +\r +## Data preparation\r +\r +1. You need to pre-download your data to directories and make sure that genomes are uncompressed. 
Scripts to fetch genomes from ENA ([fetch_ena.py](https://github.com/EBI-Metagenomics/genomes-pipeline/blob/master/bin/fetch_ena.py)) and NCBI ([fetch_ncbi.py](https://github.com/EBI-Metagenomics/genomes-pipeline/blob/master/bin/fetch_ncbi.py)) are provided and need to be executed separately from the pipeline. If you have downloaded genomes from both ENA and NCBI, put them into separate folders.\r +\r +2. When genomes are fetched from ENA using the `fetch_ena.py` script, a CSV file with contamination and completeness statistics is also created in the same directory where genomes are saved to. If you are downloading genomes using a different approach, a CSV file needs to be created manually (each line should be genome accession, % completeness, % contamination). The ENA fetching script also pre-filters genomes to satisfy the QS50 cut-off (QS = % completeness - 5 * % contamination).\r +\r +3. You will need the following information to run the pipeline:\r + - catalogue name (for example, zebrafish-faecal)\r + - catalogue version (for example, 1.0)\r + - catalogue biome (for example, root:Host-associated:Human:Digestive system:Large intestine:Fecal)\r + - min and max accession number to be assigned to the genomes (only MGnify specific). 
Max - Min = #total number of genomes (NCBI+ENA)\r +\r +### Execution\r +\r +The pipeline is built in [Nextflow](https://www.nextflow.io), and utilized containers to run the software (we don't support conda ATM).\r +In order to run the pipeline it's required that the user creates a profile that suits their needs, there is an `ebi` profile in `nexflow.config` that can be used as template.\r +\r +After downloading the databases and adjusting the config file:\r +\r +```bash\r +nextflow run EBI-Metagenomics/genomes-pipeline -c -profile \\\r +--genome-prefix=MGYG \\\r +--biome="root:Host-associated:Fish:Digestive system" \\\r +--ena_genomes= \\\r +--ena_genomes_checkm= \\\r +--mgyg_start=0 \\\r +--mgyg_end=10 \\\r +--preassigned_accessions=\r +--catalogue_name=zebrafish-faecal \\\r +--catalogue_version="1.0" \\\r +--ftp_name="zebrafish-faecal" \\\r +--ftp_version="v1.0" \\\r +--outdir=""\r +```\r +\r +### Development\r +\r +Install development tools (including pre-commit hooks to run Black code formatting).\r +\r +```bash\r +pip install -r requirements-dev.txt\r +pre-commit install\r +```\r +\r +#### Code style\r +\r +Use Black, this tool is configured if you install the pre-commit tools as above.\r +\r +To manually run them: black .\r +\r +### Testing\r +\r +This repo has 2 set of tests, python unit tests for some of the most critical python scripts and [nf-test](https://github.com/askimed/nf-test) scripts for the nextflow code.\r +\r +To run the python tests\r +\r +```bash\r +pip install -r requirements-test.txt\r +pytest\r +```\r +\r +To run the nextflow ones the databases have to downloaded manually, we are working to improve this.\r +\r +```bash\r +nf-test test tests/*\r +```\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/462?version=1" ; + schema1:keywords "Metagenomics, Nextflow, Bioinformatics" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "MGnify genomes catalogue pipeline" ; + schema1:producer , + ; + 
schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/462?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """## Summary\r +PredPrIn is a scientific workflow to predict Protein-Protein Interactions (PPIs) using machine learning to combine multiple PPI detection methods of proteins according to three categories: structural, based on primary aminoacid sequence and functional annotations.
\r +\r +PredPrIn contains three main steps: (i) acquirement and treatment of protein information, (ii) feature generation, and (iii) classification and analysis.\r +\r +(i) The first step builds a knowledge base with the available annotations of proteins and reuses this base for other prediction experiments, saving time and becoming more efficient. \r +\r +(ii) The feature generation step involves several evidence from different classes, such as: Gene Ontology (GO) information, domain interaction, metabolic pathway participation and sequence-based interaction. For the GO branches, we made a study to evaluate the best method to calculate semantic similarity to enhance the workflow performance. This step can be easily modified by adding new metrics, making PredPrIn flexible for future improvements. \r +\r +Finally, (iii) in the third step, the adaboost classifier is responsible for predicting the final scores from the numerical features dataset, exporting results of performance evaluation metrics.\r +\r +## Requirements:\r +* Python packages needed:\r + - pip3 install luigi\r + - pip3 install sqlalchemy\r + - pip3 install rdflib\r + - pip3 install sklearn\r + - pip3 install matplotlib\r + - pip3 install numpy\r +\r +* Other instalation:\r + - sqlite (to be able to see the documentation generated by luigi about the tasks after execution)\r +\r +## Usage Instructions\r +The steps below consider the creation of a sqlite database file with all he tasks events which can be used after to retrieve the execution time taken by the tasks. It is possible run locally too (see luigi's documentation to change the running command).

\r +* Preparation:\r + 1. ````git clone https://github.com/YasCoMa/predprin.git````\r + 2. ````cd PredPrIn````\r + 3. `pip3 install -r requirements.txt`\r + 4. Download annotation_data.zip (https://drive.google.com/file/d/1bWPSyULaooj7GTrDf6QBY3ZyeyH5MRpm/view?usp=share_link)\r + 5. Download rdf_data.zip (https://drive.google.com/file/d/1Cp511ioXiw2PiOHdkxa4XsZnxOeM3Pan/view?usp=share_link)\r + 6. Download sequence_data.zip (https://drive.google.com/file/d/1uEKh5EF9X_6fgZ9cTTp0jW3XaL48stxA/view?usp=share_link)\r + 7. Unzip annotation_data.zip\r + 8. Unzip rdf_data.zip\r + 9. Unzip sequence_data.zip\r + 10. Download SPRINT pre-computed similarities in https://www.csd.uwo.ca/~ilie/SPRINT/precomputed_similarities.zip and unzip it inside core/sprint/HSP/\r + 11. Certify that there is a file named client.cfg (to configure the history log and feed the sqlite database). It must have the following data:\r + ````\r + [core]\r + default-scheduler-host=localhost\r + default-scheduler-port=8082\r + rpc-connect-timeout=60.0 \r + rpc-retry-attempts=10 \r + rpc-retry-wait=60 \r +\r + [scheduler]\r + record_task_history = True\r +\r + [task_history]\r + db_connection = sqlite:///luigi-task-hist.db\r + ````\r +* Parameters:\r + 1. parameters-file -> json file with all the information to process the prediction experiment (example: params.json)\r + 2. mode -> it can have two values: train (executes cross validation and save the model as a .joblib file) or test (uses a model obtained in train mode to test in some dataset listed in the parameters file)\r + 3. model -> it is the model file full path saved in train mode as .joblib\r + \r +* Running:\r + 1. ````mkdir luigi_log```` (or other name for the log folder of your choice)\r + 2. ````luigid --background --logdir luigi_log```` (start luigi server)\r + 3. ````nohup python3.5 -m luigi --module main RunPPIExperiment --parameters-file params.json --mode 'train' --model none.joblib --workers 3 &````
\r + ````nohup python3.5 -m luigi --module main RunPPIExperiment --parameters-file params.json --mode 'test' --model model.joblib --workers 3 &````
\r + - Replace python3.5 by the command python of your environment
\r + - Replace the data given as example in params.json using your own data
\r + - Adapt the number of workers to use as you need and the capacity of your computational resource available\r +\r + You can monitor the prediction experiment execution in localhost:8082\r +\r +## Reference\r +Martins YC, Ziviani A, Nicolás MF, de Vasconcelos AT. Large-Scale Protein Interactions Prediction by Multiple Evidence Analysis Associated With an In-Silico Curation Strategy. Frontiers in Bioinformatics. 2021:38.\r +https://www.frontiersin.org/articles/10.3389/fbinf.2021.731345/full\r +\r +## Bug Report\r +Please, use the [Issues](https://github.com/YasCoMa/PredPrIn/issues) tab to report any bug.""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/616?version=1" ; + schema1:isBasedOn "https://github.com/YasCoMa/predprin" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PredPrIn - Scientific workflow to predict protein-protein interactions based in a combined analysis of multiple protein characteristics." ; + schema1:sdDatePublished "2024-08-05 10:27:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/616/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4098 ; + schema1:creator ; + schema1:dateCreated "2023-10-21T23:35:16Z" ; + schema1:dateModified "2023-10-21T23:37:59Z" ; + schema1:description """## Summary\r +PredPrIn is a scientific workflow to predict Protein-Protein Interactions (PPIs) using machine learning to combine multiple PPI detection methods of proteins according to three categories: structural, based on primary aminoacid sequence and functional annotations.
\r +\r +PredPrIn contains three main steps: (i) acquirement and treatment of protein information, (ii) feature generation, and (iii) classification and analysis.\r +\r +(i) The first step builds a knowledge base with the available annotations of proteins and reuses this base for other prediction experiments, saving time and becoming more efficient. \r +\r +(ii) The feature generation step involves several evidence from different classes, such as: Gene Ontology (GO) information, domain interaction, metabolic pathway participation and sequence-based interaction. For the GO branches, we made a study to evaluate the best method to calculate semantic similarity to enhance the workflow performance. This step can be easily modified by adding new metrics, making PredPrIn flexible for future improvements. \r +\r +Finally, (iii) in the third step, the adaboost classifier is responsible for predicting the final scores from the numerical features dataset, exporting results of performance evaluation metrics.\r +\r +## Requirements:\r +* Python packages needed:\r + - pip3 install luigi\r + - pip3 install sqlalchemy\r + - pip3 install rdflib\r + - pip3 install sklearn\r + - pip3 install matplotlib\r + - pip3 install numpy\r +\r +* Other instalation:\r + - sqlite (to be able to see the documentation generated by luigi about the tasks after execution)\r +\r +## Usage Instructions\r +The steps below consider the creation of a sqlite database file with all he tasks events which can be used after to retrieve the execution time taken by the tasks. It is possible run locally too (see luigi's documentation to change the running command).

\r +* Preparation:\r + 1. ````git clone https://github.com/YasCoMa/predprin.git````\r + 2. ````cd PredPrIn````\r + 3. `pip3 install -r requirements.txt`\r + 4. Download annotation_data.zip (https://drive.google.com/file/d/1bWPSyULaooj7GTrDf6QBY3ZyeyH5MRpm/view?usp=share_link)\r + 5. Download rdf_data.zip (https://drive.google.com/file/d/1Cp511ioXiw2PiOHdkxa4XsZnxOeM3Pan/view?usp=share_link)\r + 6. Download sequence_data.zip (https://drive.google.com/file/d/1uEKh5EF9X_6fgZ9cTTp0jW3XaL48stxA/view?usp=share_link)\r + 7. Unzip annotation_data.zip\r + 8. Unzip rdf_data.zip\r + 9. Unzip sequence_data.zip\r + 10. Download SPRINT pre-computed similarities in https://www.csd.uwo.ca/~ilie/SPRINT/precomputed_similarities.zip and unzip it inside core/sprint/HSP/\r + 11. Certify that there is a file named client.cfg (to configure the history log and feed the sqlite database). It must have the following data:\r + ````\r + [core]\r + default-scheduler-host=localhost\r + default-scheduler-port=8082\r + rpc-connect-timeout=60.0 \r + rpc-retry-attempts=10 \r + rpc-retry-wait=60 \r +\r + [scheduler]\r + record_task_history = True\r +\r + [task_history]\r + db_connection = sqlite:///luigi-task-hist.db\r + ````\r +* Parameters:\r + 1. parameters-file -> json file with all the information to process the prediction experiment (example: params.json)\r + 2. mode -> it can have two values: train (executes cross validation and save the model as a .joblib file) or test (uses a model obtained in train mode to test in some dataset listed in the parameters file)\r + 3. model -> it is the model file full path saved in train mode as .joblib\r + \r +* Running:\r + 1. ````mkdir luigi_log```` (or other name for the log folder of your choice)\r + 2. ````luigid --background --logdir luigi_log```` (start luigi server)\r + 3. ````nohup python3.5 -m luigi --module main RunPPIExperiment --parameters-file params.json --mode 'train' --model none.joblib --workers 3 &````
\r + ````nohup python3.5 -m luigi --module main RunPPIExperiment --parameters-file params.json --mode 'test' --model model.joblib --workers 3 &````
\r + - Replace python3.5 by the command python of your environment
\r + - Replace the data given as example in params.json using your own data
\r + - Adapt the number of workers to use as you need and the capacity of your computational resource available\r +\r + You can monitor the prediction experiment execution in localhost:8082\r +\r +## Reference\r +Martins YC, Ziviani A, Nicolás MF, de Vasconcelos AT. Large-Scale Protein Interactions Prediction by Multiple Evidence Analysis Associated With an In-Silico Curation Strategy. Frontiers in Bioinformatics. 2021:38.\r +https://www.frontiersin.org/articles/10.3389/fbinf.2021.731345/full\r +\r +## Bug Report\r +Please, use the [Issues](https://github.com/YasCoMa/PredPrIn/issues) tab to report any bug.""" ; + schema1:image ; + schema1:keywords "Bioinformatics, Luigi & Rufus workflow, Pathway co-occurrence, Gene ontology term sets similarity, Domain-Domain interaction" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "PredPrIn - Scientific workflow to predict protein-protein interactions based in a combined analysis of multiple protein characteristics." ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/616?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 98305 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.54.6" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_ligand_parameterization" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook GMX Notebook Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-08-05 10:25:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/54/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14394 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-03-04T14:20:06Z" ; + schema1:dateModified "2024-03-05T09:40:24Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/54?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook GMX Notebook Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_ligand_parameterization/blob/main/biobb_wf_ligand_parameterization/notebooks/biobb_ligand_CNS_parameterization_tutorial.ipynb" ; + schema1:version 6 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """The second-level complexity workflow is one among a collection of workflows designed to address tasks up to CTF estimation. In addition to the functionalities provided by the layer 0 workflow, this workflow aims to enhance the quality of acquisition images using quality protocols.\r +\r +**Quality control protocols**\r +\r +* **Movie max shift**: automatic reject those movies whose frames move more than a given threshold. \r +\r +* **Tilt analysis**: quality score based in the Power Spectrum Density (astigmatism and tilt) \r +\r +* **CTF consensus**: acts as a filter discarding micrographs based on their CTF (limit resolution, defocus, astigmatism, etc.).\r +\r +**Advantages:** \r +\r +* More control of the acquisition quality\r +\r +* Reduce unnecessary processing time and storage""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/599?version=2" ; + schema1:license "CC-BY-4.0" ; + schema1:name "Research Object Crate for CEITEC layer 1 workflow" ; + schema1:sdDatePublished "2024-08-05 10:22:30 +0100" ; + schema1:url "https://workflowhub.eu/workflows/599/ro_crate?version=2" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "GATK4 RNA variant calling pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1019?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/rnavar" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnavar" ; + schema1:sdDatePublished "2024-08-05 10:22:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1019/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10407 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:14Z" ; + schema1:dateModified "2024-06-11T12:55:14Z" ; + schema1:description "GATK4 RNA variant calling pipeline" ; + schema1:keywords "GATK4, rna, RNASEQ, variant-calling, worflow" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnavar" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1019?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A mapping-based pipeline for creating a phylogeny from bacterial whole genome sequences" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/967?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/bactmap" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bactmap" ; + schema1:sdDatePublished "2024-08-05 10:24:16 +0100" ; + schema1:url "https://workflowhub.eu/workflows/967/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7268 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:43Z" ; + schema1:dateModified "2024-06-11T12:54:43Z" ; + schema1:description "A mapping-based pipeline for creating a phylogeny from bacterial whole genome sequences" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/967?version=2" ; + schema1:keywords "bacteria, bacterial, bacterial-genome-analysis, Genomics, mapping, phylogeny, tree" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bactmap" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/967?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.195.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_dna_helparms" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Structural DNA helical parameters tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/195/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 72437 ; + schema1:creator , + ; + schema1:dateCreated "2022-09-15T12:36:54Z" ; + schema1:dateModified "2023-01-16T13:53:15Z" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/195?version=4" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Structural DNA helical parameters tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_dna_helparms/master/biobb_wf_dna_helparms/notebooks/biobb_dna_helparms_tutorial.ipynb" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An analysis pipeline for Molecular Cartography data from Resolve Biosciences." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1001?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/molkart" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/molkart" ; + schema1:sdDatePublished "2024-08-05 10:22:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1001/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10586 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:05Z" ; + schema1:dateModified "2024-06-11T12:55:05Z" ; + schema1:description "An analysis pipeline for Molecular Cartography data from Resolve Biosciences." ; + schema1:keywords "fish, image-processing, imaging, molecularcartography, Segmentation, single-cell, spatial, Transcriptomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/molkart" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1001?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and modern ancient DNA pipeline in Nextflow and with cloud support." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-08-05 10:23:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3707 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:49Z" ; + schema1:dateModified "2024-06-11T12:54:49Z" ; + schema1:description "A fully reproducible and modern ancient DNA pipeline in Nextflow and with cloud support." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for analysis of Molecular Pixelation assays" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1010?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/pixelator" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/pixelator" ; + schema1:sdDatePublished "2024-08-05 10:23:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1010/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10309 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:07Z" ; + schema1:dateModified "2024-06-11T12:55:07Z" ; + schema1:description "Pipeline for analysis of Molecular Pixelation assays" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1010?version=7" ; + schema1:keywords "molecular-pixelation, pixelator, pixelgen-technologies, proteins, single-cell, single-cell-omics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/pixelator" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1010?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + schema1:datePublished "2024-06-25T09:57:05.224898" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/allele-based-pathogen-identification" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "allele-based-pathogen-identification/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=15" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=15" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9371 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:12Z" ; + schema1:dateModified "2024-06-11T12:55:12Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=15" ; + schema1:version 15 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A pipeline to identify (and remove) certain sequences from raw genomic data. Default taxa to identify (and remove) are Homo and Homo sapiens. Removal is optional." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/979?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/detaxizer" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/detaxizer" ; + schema1:sdDatePublished "2024-08-05 10:24:04 +0100" ; + schema1:url "https://workflowhub.eu/workflows/979/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11353 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "A pipeline to identify (and remove) certain sequences from raw genomic data. Default taxa to identify (and remove) are Homo and Homo sapiens. Removal is optional." 
; + schema1:keywords "de-identification, decontamination, eDNA, FASTQ, filter, long-reads, Metabarcoding, Metagenomics, microbiome, nanopore, short-reads, shotgun, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/detaxizer" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/979?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-03-06T17:17:46.226835" ; + schema1:hasPart , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/baredsc" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + ; + schema1:name "baredsc/baredSC-1d-logNorm" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "baredsc/baredSC-2d-logNorm" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/baredsc" ; + schema1:version "0.3" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# Shotgun Metagenomics Analysis\r +Analysis of metagenomic shotgun sequences including assembly, speciation, ARG discovery and more\r +\r +## Description\r +The input for this analysis is paired end next generation sequencing data from metagenomic samples. The workflow is designed to be modular, so that individual modules can be run depending on the nature of the metagenomics project at hand. 
More modules will be added as we develop them - this repo is a work in progress!\r +\r +These scripts have been written specifically for NCI Gadi HPC, wich runs PBS Pro, however feel free to use and modify for anothre system if you are not a Gadi user. \r +\r +### Part 1. Setup and QC\r +Download the repo. You will see directories for `Fastq`, `Inputs`, `Reference` and `Logs`. You will need to copy or symlink your fastq to `Fastq`, sample configuration file (see below) to `Inputs` and the reference genome sequence of your host species (if applicable) to `Reference` for host contamination removal.\r + \r +\r +#### Fastq inputs\r +The scripts assume all fastq files are paired, gzipped, and all in the one directory named 'Fastq'. If your fastq are within a convoluted directory structure (eg per-sample directories) or you would simply like to link them from an alternate location, please use the script `setup_fastq.sh`.\r +\r +To use this script, parse the path name of your fastq as first argument on the command line, and run the script from the base working directory (/Shotgun-Metagenomics-Analysis) which will from here on be referred to as `workdir`. Note that this script looks for `f*q.gz` files (ie fastq.gz or fq.gz) - if yours differ in suffix, please adjust the script accordingly.\r +\r +```\r +bash ./Scripts/setup_fastq.sh \r +```\r +\r +#### Configuration/sample info\r +The only required input configuration file should be named .config, where is the name of the current batch of samples you are processing, or some other meaningful name to your project; it will be used to name output files. The config file should be placed inside the $workdir/Inputs directory, and include the following columns, in this order:\r +\r +```\r +1. Sample ID - used to identify the sample, eg if you have 3 lanes of sequencing per sample, erach of those 6 fastq files should contain this ID that si in column 1\r +2. 
Lab Sample ID - can be the same as column 1, or different if you have reason to change the IDs eg if the seq centre applies an in-house ID. Please make sure IDs are unique within column 1 and unique within column 2\r +3. Group - eg different time points or treatment groups. If no specific group structure is relevant, please set this to 1 (do not leave blank!) \r +3. Platform - should be Illumina; other sequencing platforms are not tested on this workflow\r +4. Sequencing centre name\r +5. Library - eg if you have 2 sequencing libraries for the same sample. Can be left blank, or assigned to 1. Blank will be assigned libray ID of 1 during processing.\r +```\r +\r +Please do not have spaces in any of the values for the config file. \r +\r +\r +#### General setup\r +\r +All scripts will need to be edited to reflect your NCI project code at the `-P ` and `-l directive. Please run the script create_project.sh and follow the prompts to complete some of the setup for you. \r +\r +Note that you will need to manually edit the PDS resource requests for each PBS script; guidelines/example resources will be given at each step to help you do this. As the 'sed' commands within this script operate on .sh and .pbs files, this setup script has been intentionally named .bash (easiest solution).\r +\r +Remember to submit all scripts from your `workdir`. \r +\r +`bash ./Scripts/create_project.sh`\r +\r +For jobs that execute in parallel, there are 3 scripts: one to make the 'inputs' file listing hte details of each parallel task, one job execution shell script that is run over each task in parallel, and one PBS launcher script. The process is to submit the make input script, check it to make sure your job details are correct, edit the resources directives depending on the number and size of your parallel tasks, then submit the PBS launcher script with `qsub`. \r +\r +#### QC\r +\r +Run fastQC over each fastq file in parallel. Adjust the resources as per your project. 
To run all files in parallel, set the number of NCPUS requested equal to the number of fastq files (remember that Gadi can only request <1 node or multiples of whole nodes). The make input script sorts the fastq files largest to smallest, so if you have a discrpeancy in file size, optimal efficiency can be achieved by requested less nodes than the total required to run all your fastq in parallel.\r +\r +FastQC does not multithread on a single file, so CPUs per parallel task is set to 1. Example walltimes on Gadi 'normal' queue: one 1.8 GB fastq = 4 minutes; one 52 GB fastq file = 69.5 minutes.\r +\r +Make the fastqc parallel inputs file by running (from `workdir`):\r +`bash ./Scripts/fastqc_make_inputs.sh`\r +\r +Edit the resource requests in `fastqc_run_parallel.pbs` according to your number of fastq files and their size, then submit:\r +`qsub fastqc_run_parallel.pbs`\r +\r +To ease manual inspection of the fastQC output, running `multiqc` is recommended. This will collate the individual fastQC reports into one report. This can be done on the login node for small sample numbers, or using the below script for larger cohorts. Edit the PBS directives, then run:\r +\r +`qsub multiqc.pbs`\r +\r +Save a copy of ./MultiQC/multiqc_report.html to your local disk then open in a web browser to inspect the results. \r +\r +#### Quality filtering and trimming\r +\r +Will be added at a later date. This is highly dependent on the quality of your data and your individual project needs so will be a guide only. \r +\r +### Part 2. Removal of host contamination. \r +\r +If you have metagenomic data extracted from a host, you will need a copy of the host reference genome sequence in order to remove any DNA sequences belonging to the host. Even if your wetlab protocol included a host removal step, it is still important to run bioinformatic host removal.\r +\r +\r +#### Prepare the reference\r +Ensure you have a copy of the reference genome (or symlink) in ./Fasta. 
This workflow requires BBtools(tested with version 37.98). As of writing, BBtools is not available as a global app on Gadi. Please install locally and make "module loadable", or else edit the scripts to point directly to your local BBtools installation.\r +\r +BBtools repeat masking will use all available threads on machine and 85% of available mem by default. For a mammalian genome, 2 hours on one Gadi 'normal' node is sufficient for repeat masking. \r +\r +Update the name of your reference fastq in the `bbmap_prep.pbs` script (and BBtools, see note above), then run:\r +`qsub ./Scripts/bbmap_prep.pbs`\r +\r +#### Host contamination removal\r +\r +TBC 1/4/22... \r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.327.1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/Shotgun-Metagenomics-Analysis.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Shotgun-Metagenomics-Analysis" ; + schema1:sdDatePublished "2024-08-05 10:32:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/327/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6783 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2022-04-07T00:45:10Z" ; + schema1:dateModified "2023-01-16T13:59:41Z" ; + schema1:description """# Shotgun Metagenomics Analysis\r +Analysis of metagenomic shotgun sequences including assembly, speciation, ARG discovery and more\r +\r +## Description\r +The input for this analysis is paired end next generation sequencing data from metagenomic samples. The workflow is designed to be modular, so that individual modules can be run depending on the nature of the metagenomics project at hand. 
More modules will be added as we develop them - this repo is a work in progress!\r +\r +These scripts have been written specifically for NCI Gadi HPC, wich runs PBS Pro, however feel free to use and modify for anothre system if you are not a Gadi user. \r +\r +### Part 1. Setup and QC\r +Download the repo. You will see directories for `Fastq`, `Inputs`, `Reference` and `Logs`. You will need to copy or symlink your fastq to `Fastq`, sample configuration file (see below) to `Inputs` and the reference genome sequence of your host species (if applicable) to `Reference` for host contamination removal.\r + \r +\r +#### Fastq inputs\r +The scripts assume all fastq files are paired, gzipped, and all in the one directory named 'Fastq'. If your fastq are within a convoluted directory structure (eg per-sample directories) or you would simply like to link them from an alternate location, please use the script `setup_fastq.sh`.\r +\r +To use this script, parse the path name of your fastq as first argument on the command line, and run the script from the base working directory (/Shotgun-Metagenomics-Analysis) which will from here on be referred to as `workdir`. Note that this script looks for `f*q.gz` files (ie fastq.gz or fq.gz) - if yours differ in suffix, please adjust the script accordingly.\r +\r +```\r +bash ./Scripts/setup_fastq.sh \r +```\r +\r +#### Configuration/sample info\r +The only required input configuration file should be named .config, where is the name of the current batch of samples you are processing, or some other meaningful name to your project; it will be used to name output files. The config file should be placed inside the $workdir/Inputs directory, and include the following columns, in this order:\r +\r +```\r +1. Sample ID - used to identify the sample, eg if you have 3 lanes of sequencing per sample, erach of those 6 fastq files should contain this ID that si in column 1\r +2. 
Lab Sample ID - can be the same as column 1, or different if you have reason to change the IDs eg if the seq centre applies an in-house ID. Please make sure IDs are unique within column 1 and unique within column 2\r +3. Group - eg different time points or treatment groups. If no specific group structure is relevant, please set this to 1 (do not leave blank!) \r +3. Platform - should be Illumina; other sequencing platforms are not tested on this workflow\r +4. Sequencing centre name\r +5. Library - eg if you have 2 sequencing libraries for the same sample. Can be left blank, or assigned to 1. Blank will be assigned libray ID of 1 during processing.\r +```\r +\r +Please do not have spaces in any of the values for the config file. \r +\r +\r +#### General setup\r +\r +All scripts will need to be edited to reflect your NCI project code at the `-P ` and `-l directive. Please run the script create_project.sh and follow the prompts to complete some of the setup for you. \r +\r +Note that you will need to manually edit the PDS resource requests for each PBS script; guidelines/example resources will be given at each step to help you do this. As the 'sed' commands within this script operate on .sh and .pbs files, this setup script has been intentionally named .bash (easiest solution).\r +\r +Remember to submit all scripts from your `workdir`. \r +\r +`bash ./Scripts/create_project.sh`\r +\r +For jobs that execute in parallel, there are 3 scripts: one to make the 'inputs' file listing hte details of each parallel task, one job execution shell script that is run over each task in parallel, and one PBS launcher script. The process is to submit the make input script, check it to make sure your job details are correct, edit the resources directives depending on the number and size of your parallel tasks, then submit the PBS launcher script with `qsub`. \r +\r +#### QC\r +\r +Run fastQC over each fastq file in parallel. Adjust the resources as per your project. 
To run all files in parallel, set the number of NCPUS requested equal to the number of fastq files (remember that Gadi can only request <1 node or multiples of whole nodes). The make input script sorts the fastq files largest to smallest, so if you have a discrpeancy in file size, optimal efficiency can be achieved by requested less nodes than the total required to run all your fastq in parallel.\r +\r +FastQC does not multithread on a single file, so CPUs per parallel task is set to 1. Example walltimes on Gadi 'normal' queue: one 1.8 GB fastq = 4 minutes; one 52 GB fastq file = 69.5 minutes.\r +\r +Make the fastqc parallel inputs file by running (from `workdir`):\r +`bash ./Scripts/fastqc_make_inputs.sh`\r +\r +Edit the resource requests in `fastqc_run_parallel.pbs` according to your number of fastq files and their size, then submit:\r +`qsub fastqc_run_parallel.pbs`\r +\r +To ease manual inspection of the fastQC output, running `multiqc` is recommended. This will collate the individual fastQC reports into one report. This can be done on the login node for small sample numbers, or using the below script for larger cohorts. Edit the PBS directives, then run:\r +\r +`qsub multiqc.pbs`\r +\r +Save a copy of ./MultiQC/multiqc_report.html to your local disk then open in a web browser to inspect the results. \r +\r +#### Quality filtering and trimming\r +\r +Will be added at a later date. This is highly dependent on the quality of your data and your individual project needs so will be a guide only. \r +\r +### Part 2. Removal of host contamination. \r +\r +If you have metagenomic data extracted from a host, you will need a copy of the host reference genome sequence in order to remove any DNA sequences belonging to the host. Even if your wetlab protocol included a host removal step, it is still important to run bioinformatic host removal.\r +\r +\r +#### Prepare the reference\r +Ensure you have a copy of the reference genome (or symlink) in ./Fasta. 
This workflow requires BBtools(tested with version 37.98). As of writing, BBtools is not available as a global app on Gadi. Please install locally and make "module loadable", or else edit the scripts to point directly to your local BBtools installation.\r +\r +BBtools repeat masking will use all available threads on machine and 85% of available mem by default. For a mammalian genome, 2 hours on one Gadi 'normal' node is sufficient for repeat masking. \r +\r +Update the name of your reference fastq in the `bbmap_prep.pbs` script (and BBtools, see note above), then run:\r +`qsub ./Scripts/bbmap_prep.pbs`\r +\r +#### Host contamination removal\r +\r +TBC 1/4/22... \r +""" ; + schema1:isPartOf ; + schema1:keywords "Metagenomics, shotgun, antimicrobial resistance, humann2, bbmap, whole genome sequencing, Assembly, prokka, abricate, DIAMOND, kraken, braken" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Shotgun-Metagenomics-Analysis" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/327?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-20T11:44:03.098812" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-decontamination-VGP9" ; + schema1:license "BSD-3-Clause" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-decontamination-VGP9/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Molecular Structure Checking using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **checking** a **molecular structure** before using it as an input for a **Molecular Dynamics** simulation. 
The workflow uses the **BioExcel Building Blocks library (biobb)**. The particular structure used is the crystal structure of **human Adenylate Kinase 1A (AK1A)**, in complex with the **AP5A inhibitor** (PDB code [1Z83](https://www.rcsb.org/structure/1z83)). \r +\r +**Structure checking** is a key step before setting up a protein system for **simulations**. A number of **common issues** found in structures at **Protein Data Bank** may compromise the success of the **simulation**, or may suggest that longer **equilibration** procedures are necessary.\r +\r +The **workflow** shows how to:\r +\r +- Run **basic manipulations on structures** (selection of models, chains, alternative locations\r +- Detect and fix **amide assignments** and **wrong chiralities**\r +- Detect and fix **protein backbone** issues (missing fragments, and atoms, capping)\r +- Detect and fix **missing side-chain atoms**\r +- **Add hydrogen atoms** according to several criteria\r +- Detect and classify **atomic clashes**\r +- Detect possible **disulfide bonds (SS)**\r +\r +An implementation of this workflow in a **web-based Graphical User Interface (GUI)** can be found in the [https://mmb.irbbarcelona.org/biobb-wfs/](https://mmb.irbbarcelona.org/biobb-wfs/) server (see [https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check](https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check)).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r 
+[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.778.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_structure_checking/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Molecular Structure Checking" ; + schema1:sdDatePublished "2024-08-05 10:25:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/778/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 35053 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-05T08:58:01Z" ; + schema1:dateModified "2024-03-05T08:59:51Z" ; + schema1:description """# Molecular Structure Checking using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **checking** a **molecular structure** before using it as an input for a **Molecular Dynamics** simulation. The workflow uses the **BioExcel Building Blocks library (biobb)**. The particular structure used is the crystal structure of **human Adenylate Kinase 1A (AK1A)**, in complex with the **AP5A inhibitor** (PDB code [1Z83](https://www.rcsb.org/structure/1z83)). \r +\r +**Structure checking** is a key step before setting up a protein system for **simulations**. 
A number of **common issues** found in structures at **Protein Data Bank** may compromise the success of the **simulation**, or may suggest that longer **equilibration** procedures are necessary.\r +\r +The **workflow** shows how to:\r +\r +- Run **basic manipulations on structures** (selection of models, chains, alternative locations\r +- Detect and fix **amide assignments** and **wrong chiralities**\r +- Detect and fix **protein backbone** issues (missing fragments, and atoms, capping)\r +- Detect and fix **missing side-chain atoms**\r +- **Add hydrogen atoms** according to several criteria\r +- Detect and classify **atomic clashes**\r +- Detect possible **disulfide bonds (SS)**\r +\r +An implementation of this workflow in a **web-based Graphical User Interface (GUI)** can be found in the [https://mmb.irbbarcelona.org/biobb-wfs/](https://mmb.irbbarcelona.org/biobb-wfs/) server (see [https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check](https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check)).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Molecular Structure Checking" ; + 
schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_structure_checking/galaxy/biobb_wf_structure_checking.ga" ; + schema1:version 1 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Workflow to build different indices for different tools from a genome and transcriptome. \r +\r +This workflow expects an (annotated) genome in GBOL ttl format.\r +\r +Steps:\r + - SAPP: rdf2gtf (genome fasta)\r + - SAPP: rdf2fasta (transcripts fasta)\r + - STAR index (Optional for Eukaryotic origin)\r + - bowtie2 index\r + - kallisto index\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/75?version=1" ; + schema1:isBasedOn "https://gitlab.com/m-unlock/cwl/" ; + schema1:license "CC0-1.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Indices builder from GBOL RDF (TTL)" ; + schema1:sdDatePublished "2024-08-05 10:33:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/75/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 12388 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4445 ; + schema1:creator ; + schema1:dateCreated "2020-11-23T16:22:43Z" ; + schema1:dateModified "2023-01-16T13:46:15Z" ; + schema1:description """Workflow to build different indices for different tools from a genome and transcriptome. 
\r +\r +This workflow expects an (annotated) genome in GBOL ttl format.\r +\r +Steps:\r + - SAPP: rdf2gtf (genome fasta)\r + - SAPP: rdf2fasta (transcripts fasta)\r + - STAR index (Optional for Eukaryotic origin)\r + - bowtie2 index\r + - kallisto index\r +""" ; + schema1:image ; + schema1:keywords "Alignment" ; + schema1:license "https://spdx.org/licenses/CC0-1.0" ; + schema1:name "Indices builder from GBOL RDF (TTL)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/75?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + ; + ns1:output , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# workflow-partial-bwa-mem\r +\r +These workflows are part of a set designed to work for RAD-seq data on the Galaxy platform, using the tools from the Stacks program. \r +\r +Galaxy Australia: https://usegalaxy.org.au/\r +\r +Stacks: http://catchenlab.life.illinois.edu/stacks/\r +\r +This workflow is part of the reference-guided stacks workflow, https://workflowhub.eu/workflows/347\r +\r +Inputs\r +* demultiplexed reads in fastq format, may be output from the QC workflow. Files are in a collection. \r +* reference genome in fasta format\r +\r +Outputs\r +* A set of filtered bam files, ready for the next part of the stacks workflow (e.g. gstacks). \r +* Statistics on the bam files. \r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/351?version=1" ; + schema1:isBasedOn "https://github.com/AnnaSyme/workflow-partial-bwa-mem.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Partial ref-guided workflow - bwa mem only" ; + schema1:sdDatePublished "2024-08-05 10:32:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/351/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 20262 ; + schema1:creator ; + schema1:dateCreated "2022-05-31T08:05:01Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """# workflow-partial-bwa-mem\r +\r +These workflows are part of a set designed to work for RAD-seq data on the Galaxy platform, using the tools from the Stacks program. \r +\r +Galaxy Australia: https://usegalaxy.org.au/\r +\r +Stacks: http://catchenlab.life.illinois.edu/stacks/\r +\r +This workflow is part of the reference-guided stacks workflow, https://workflowhub.eu/workflows/347\r +\r +Inputs\r +* demultiplexed reads in fastq format, may be output from the QC workflow. Files are in a collection. \r +* reference genome in fasta format\r +\r +Outputs\r +* A set of filtered bam files, ready for the next part of the stacks workflow (e.g. gstacks). \r +* Statistics on the bam files. \r +\r +""" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Partial ref-guided workflow - bwa mem only" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/351?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# Local Cromwell implementation of GATK4 germline variant calling pipeline\r +See the [GATK](https://gatk.broadinstitute.org/hc/en-us) website for more information on this toolset \r +## Assumptions\r +- Using hg38 human reference genome build\r +- Running 'locally' i.e. not using HPC/SLURM scheduling, or containers. This repo was specifically tested on Pawsey Nimbus 16 CPU, 64GB RAM virtual machine, primarily running in the `/data` volume storage partition. 
\r +- Starting from short-read Illumina paired-end fastq files as input\r +\r +### Dependencies\r +The following versions have been tested and work, but GATK and Cromwell are regularly updated and so one must consider whether they would like to use newer versions of these tools. \r +- BWA/0.7.15\r +- GATK v4.0.6.0\r +- SAMtools/1.5\r +- picard/2.9\r +- Python/2.7\r +- Cromwell v61\r +\r +## Quick start guide\r +### Installing and preparing environment for GATK4 with Cromwell\r +\r +1. Clone repository\r +```\r +git clone https://github.com/SarahBeecroft/cromwellGATK4.git\r +cd cromwellGATK4\r +chmod 777 *.sh\r +```\r +\r +2. Install [Miniconda](https://docs.conda.io/en/latest/miniconda.html) if you haven’t already. Create Conda environment using the supplied conda environment file\r +\r +```\r +conda env create --file gatk4_pipeline.yml\r +```\r +\r +3. Download the necessary .jar files\r + - The Cromwell workfow orchestration engine can be downloaded from https://github.com/broadinstitute/cromwell/releases/ \r + - GATK can be downloaded from https://github.com/broadinstitute/gatk/releases. Unzip the file with `unzip` \r + - Picard can be downloaded from https://github.com/broadinstitute/picard/releases/\r +\r +\r +4. Upload the resource bundle file from IRDS using rclone or filezilla and unpack it with `tar xzvf resource.tar.gz`. Note that the `hg38_wgs_scattered_calling_intervals.txt` will need to be to generated using the following:\r +\r +```\r +cd \r +find `pwd` -name "scattered.interval_list" -print | sort > hg38_wgs_scattered_calling_intervals.txt\r +```\r +\r +5. Set up the config files. Files that you need to edit with the correct paths to your data/jar files or other specific configurations are:\r + - `Multisample_Fastq_to_Gvcf_GATK4_inputs_hg38.json`\r + - `Multisample_jointgt_GATK4_inputs_hg38.json`\r + - both json files will need the correct paths to your reference file locations, and the file specifying your inputs i.e. 
`samples.txt` or `gvcfs.txt`\r + - `samples.txt`\r + - `gvcfs.txt`\r + - These are the sample input files (tab seperated)\r + - The format for samples.txt is sampleID, sampleID_readgroup, path_to_fastq_R1_file, path_to_fastq_R2_file,\r + - The format for gvcfs.txt is sample ID, gvcf, gvcf .tbi index file\r + - Examples are included in this repo\r + - NOTE: Having tabs, not spaces, is vital for parsing the file. Visual studio code tends to introduce spaces, so if you are having issues, check the file with another text editor such as sublime. \r + - `launch_cromwell.sh`\r + - `launch_jointgt.sh`\r + - These are the scripts which launch the pipeline. \r + - `launch_cromwell.sh` launches the fastq to gvcf stage\r + - `launch_jointgt.sh` launched the gvcf joint genotyping to cohort vcf step. This is perfomed when you have run all samples through the fastq to gvcf stage.\r + - Check the paths and parameters make sense for your machine\r + - `local.conf`\r + - the main tuneable parameters here are:\r + - `concurrent-job-limit = 5` this is the max number of concurrent jobs that can be spawned by cromwell. This depends on the computational resources available to you. 5 was determined to work reasonably well on a 16 CPU, 64GB RAM Nimbus VM (Pawsey). \r + - `call-caching enabled = true`. Setting this parameter to `false` will disable call caching (i.e. being able to resume if the job fails before completion). By default, call caching is enabled. \r + - `cromwell.options`\r + - `cromwell.options` requires editing to provide the directory where you would like the final workflow outputs to be written\r + - `Multisample_Fastq_to_Gvcf_GATK4.wdl`\r + - `ruddle_fastq_to_gvcf_single_sample_gatk4.wdl`\r + - The paths to your jar files will need to be updated\r + - The path to your conda `activate` binary will need to be updated (e.g. `/data/miniconda/bin/activate`)\r +\r +6. Launch the job within a `screen` or `tmux` session, using `./launch_cromwell.sh`. 
When that has completed successfully, you can launch the second stage of the pipeline (joint calling) with `./launch_jointgt.sh`. Ensure you pipe the stdout and stderr to a log file using (for example) `./launch_cromwell.sh &> cromwell.log`\r +\r +### Overview of the steps in `Multisample_Fastq_to_Gvcf_GATK4.wdl`\r +This part of the pipeline takes short-read, Illumina paired-end fastq files as the input. The outputs generated are sorted, duplicate marked bam files and their indices, duplicate metric information, and a GVCF file for each sample. The GVCF files are used as input for the second part of the pipeline (joint genotyping).\r +\r +```\r +FastqToUbam\r +GetBwaVersion\r +SamToFastqAndBwaMem\r +MergeBamAlignment\r +SortAndFixTags\r +MarkDuplicates\r +CreateSequenceGroupingTSV\r +BaseRecalibrator\r +GatherBqsrReports\r +ApplyBQSR\r +GatherBamFiles\r +HaplotypeCaller\r +MergeGVCFs\r +```\r +\r +### Overview of the steps in `Multisample_jointgt_GATK4.wdl`\r +This part of the pipeline takes GVCF files (one per sample), and performs joint genotyping across all of the provided samples. This means that old previously generated GVCFs can be joint-called with new GVCFs whenever you need to add new samples. The key output from this is a joint-genotyped, cohort-wide VCF file. This file can be used for a GEMINI database after normalisation with VT and annotation with a tool such as VEP or SNPEFF. \r +\r +The file `hg38.custom_100Mb.intervals` is required for this step of the pipeline to run. This is included in the git repo for convenience, but should be moved to your resource directory with all the other resource files. 
\r +\r +```\r +GetNumberOfSamples\r +ImportGVCFs\r +GenotypeGVCFs\r +HardFilterAndMakeSitesOnlyVcf\r +IndelsVariantRecalibrator\r +SNPsVariantRecalibratorCreateModel\r +SNPsVariantRecalibrator\r +GatherTranches\r +ApplyRecalibration\r +GatherVcfs\r +CollectVariantCallingMetrics\r +GatherMetrics\r +DynamicallyCombineIntervals\r +```\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/147?version=1" ; + schema1:isBasedOn "https://github.com/SarahBeecroft/cromwellGATK4" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for GATK4 Fastq to joint-called cohort VCF with Cromwell on local cluster (no job scheduler)" ; + schema1:sdDatePublished "2024-08-05 10:33:10 +0100" ; + schema1:url "https://workflowhub.eu/workflows/147/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6212 ; + schema1:dateCreated "2021-08-17T04:47:53Z" ; + schema1:dateModified "2023-01-16T13:51:45Z" ; + schema1:description """# Local Cromwell implementation of GATK4 germline variant calling pipeline\r +See the [GATK](https://gatk.broadinstitute.org/hc/en-us) website for more information on this toolset \r +## Assumptions\r +- Using hg38 human reference genome build\r +- Running 'locally' i.e. not using HPC/SLURM scheduling, or containers. This repo was specifically tested on Pawsey Nimbus 16 CPU, 64GB RAM virtual machine, primarily running in the `/data` volume storage partition. \r +- Starting from short-read Illumina paired-end fastq files as input\r +\r +### Dependencies\r +The following versions have been tested and work, but GATK and Cromwell are regularly updated and so one must consider whether they would like to use newer versions of these tools. 
\r +- BWA/0.7.15\r +- GATK v4.0.6.0\r +- SAMtools/1.5\r +- picard/2.9\r +- Python/2.7\r +- Cromwell v61\r +\r +## Quick start guide\r +### Installing and preparing environment for GATK4 with Cromwell\r +\r +1. Clone repository\r +```\r +git clone https://github.com/SarahBeecroft/cromwellGATK4.git\r +cd cromwellGATK4\r +chmod 777 *.sh\r +```\r +\r +2. Install [Miniconda](https://docs.conda.io/en/latest/miniconda.html) if you haven’t already. Create Conda environment using the supplied conda environment file\r +\r +```\r +conda env create --file gatk4_pipeline.yml\r +```\r +\r +3. Download the necessary .jar files\r + - The Cromwell workfow orchestration engine can be downloaded from https://github.com/broadinstitute/cromwell/releases/ \r + - GATK can be downloaded from https://github.com/broadinstitute/gatk/releases. Unzip the file with `unzip` \r + - Picard can be downloaded from https://github.com/broadinstitute/picard/releases/\r +\r +\r +4. Upload the resource bundle file from IRDS using rclone or filezilla and unpack it with `tar xzvf resource.tar.gz`. Note that the `hg38_wgs_scattered_calling_intervals.txt` will need to be to generated using the following:\r +\r +```\r +cd \r +find `pwd` -name "scattered.interval_list" -print | sort > hg38_wgs_scattered_calling_intervals.txt\r +```\r +\r +5. Set up the config files. Files that you need to edit with the correct paths to your data/jar files or other specific configurations are:\r + - `Multisample_Fastq_to_Gvcf_GATK4_inputs_hg38.json`\r + - `Multisample_jointgt_GATK4_inputs_hg38.json`\r + - both json files will need the correct paths to your reference file locations, and the file specifying your inputs i.e. 
`samples.txt` or `gvcfs.txt`\r + - `samples.txt`\r + - `gvcfs.txt`\r + - These are the sample input files (tab seperated)\r + - The format for samples.txt is sampleID, sampleID_readgroup, path_to_fastq_R1_file, path_to_fastq_R2_file,\r + - The format for gvcfs.txt is sample ID, gvcf, gvcf .tbi index file\r + - Examples are included in this repo\r + - NOTE: Having tabs, not spaces, is vital for parsing the file. Visual studio code tends to introduce spaces, so if you are having issues, check the file with another text editor such as sublime. \r + - `launch_cromwell.sh`\r + - `launch_jointgt.sh`\r + - These are the scripts which launch the pipeline. \r + - `launch_cromwell.sh` launches the fastq to gvcf stage\r + - `launch_jointgt.sh` launched the gvcf joint genotyping to cohort vcf step. This is perfomed when you have run all samples through the fastq to gvcf stage.\r + - Check the paths and parameters make sense for your machine\r + - `local.conf`\r + - the main tuneable parameters here are:\r + - `concurrent-job-limit = 5` this is the max number of concurrent jobs that can be spawned by cromwell. This depends on the computational resources available to you. 5 was determined to work reasonably well on a 16 CPU, 64GB RAM Nimbus VM (Pawsey). \r + - `call-caching enabled = true`. Setting this parameter to `false` will disable call caching (i.e. being able to resume if the job fails before completion). By default, call caching is enabled. \r + - `cromwell.options`\r + - `cromwell.options` requires editing to provide the directory where you would like the final workflow outputs to be written\r + - `Multisample_Fastq_to_Gvcf_GATK4.wdl`\r + - `ruddle_fastq_to_gvcf_single_sample_gatk4.wdl`\r + - The paths to your jar files will need to be updated\r + - The path to your conda `activate` binary will need to be updated (e.g. `/data/miniconda/bin/activate`)\r +\r +6. Launch the job within a `screen` or `tmux` session, using `./launch_cromwell.sh`. 
When that has completed successfully, you can launch the second stage of the pipeline (joint calling) with `./launch_jointgt.sh`. Ensure you pipe the stdout and stderr to a log file using (for example) `./launch_cromwell.sh &> cromwell.log`\r +\r +### Overview of the steps in `Multisample_Fastq_to_Gvcf_GATK4.wdl`\r +This part of the pipeline takes short-read, Illumina paired-end fastq files as the input. The outputs generated are sorted, duplicate marked bam files and their indices, duplicate metric information, and a GVCF file for each sample. The GVCF files are used as input for the second part of the pipeline (joint genotyping).\r +\r +```\r +FastqToUbam\r +GetBwaVersion\r +SamToFastqAndBwaMem\r +MergeBamAlignment\r +SortAndFixTags\r +MarkDuplicates\r +CreateSequenceGroupingTSV\r +BaseRecalibrator\r +GatherBqsrReports\r +ApplyBQSR\r +GatherBamFiles\r +HaplotypeCaller\r +MergeGVCFs\r +```\r +\r +### Overview of the steps in `Multisample_jointgt_GATK4.wdl`\r +This part of the pipeline takes GVCF files (one per sample), and performs joint genotyping across all of the provided samples. This means that old previously generated GVCFs can be joint-called with new GVCFs whenever you need to add new samples. The key output from this is a joint-genotyped, cohort-wide VCF file. This file can be used for a GEMINI database after normalisation with VT and annotation with a tool such as VEP or SNPEFF. \r +\r +The file `hg38.custom_100Mb.intervals` is required for this step of the pipeline to run. This is included in the git repo for convenience, but should be moved to your resource directory with all the other resource files. 
\r +\r +```\r +GetNumberOfSamples\r +ImportGVCFs\r +GenotypeGVCFs\r +HardFilterAndMakeSitesOnlyVcf\r +IndelsVariantRecalibrator\r +SNPsVariantRecalibratorCreateModel\r +SNPsVariantRecalibrator\r +GatherTranches\r +ApplyRecalibration\r +GatherVcfs\r +CollectVariantCallingMetrics\r +GatherMetrics\r +DynamicallyCombineIntervals\r +```\r +""" ; + schema1:isPartOf ; + schema1:keywords "Alignment, GATK4, Genomics, variant_calling, SNPs, INDELs, workflow" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "GATK4 Fastq to joint-called cohort VCF with Cromwell on local cluster (no job scheduler)" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/147?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 
2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.131.4" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/131/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 77840 ; + schema1:creator , + ; + schema1:dateCreated "2023-07-26T09:37:34Z" ; + schema1:dateModified "2023-07-26T09:38:01Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed 
under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/131?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_amber_md_setup/master/biobb_wf_amber_md_setup/notebooks/mdsetup_lig/biobb_amber_complex_setup_notebook.ipynb" ; + schema1:version 4 . + + a schema1:Dataset ; + schema1:datePublished "2021-12-06T17:16:32.864578" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/protein-ligand-complex-parameterization" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "protein-ligand-complex-parameterization/main" ; + schema1:sdDatePublished "2021-12-07 03:00:57 +0000" ; + schema1:softwareVersion "v0.1.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + schema1:datePublished "2024-04-09T08:44:23.480130" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + , + ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-atac-cutandrun" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.6" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-pe" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.6" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=9" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7151 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:12Z" ; + schema1:dateModified "2024-06-11T12:55:12Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """The ultimate-level complexity workflow is one among a collection of workflows designed to address tasks up to CTF estimation. In addition to the functionalities provided by layer 0 and 1 workflows, this workflow aims to enhance the quality of both **acquisition images** and **processing**.\r +\r +**Quality control protocols**\r +\r +…\r +\r +**Combination of methods**\r +* **CTF consensus**\r + * New methods to compare ctf estimations\r + * CTF xmipp criteria (richer parameters i.e. ice detection)\r +\r +**Advantages**: \r +* Control of the acquisition quality\r +* Robust estimations to continue with the processing""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/600?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CEITEC layer 2 workflow" ; + schema1:sdDatePublished "2024-08-05 10:27:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/600/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 79882 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11315 ; + schema1:dateCreated "2023-10-04T13:11:55Z" ; + schema1:dateModified "2024-07-10T14:00:07Z" ; + schema1:description """The ultimate-level complexity workflow is one among a collection of workflows designed to address tasks up to CTF estimation. 
In addition to the functionalities provided by layer 0 and 1 workflows, this workflow aims to enhance the quality of both **acquisition images** and **processing**.\r +\r +**Quality control protocols**\r +\r +…\r +\r +**Combination of methods**\r +* **CTF consensus**\r + * New methods to compare ctf estimations\r + * CTF xmipp criteria (richer parameters i.e. ice detection)\r +\r +**Advantages**: \r +* Control of the acquisition quality\r +* Robust estimations to continue with the processing""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "image processing, cryoem, spa, scipion" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "CEITEC layer 2 workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/600?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:description """Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, to analyze crowdsourcing results of the SPIPOLL hoverflies GAPARS European project activity on MMOS server.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/660?version=1" ; + schema1:isBasedOn "https://ecology.usegalaxy.eu/u/ylebras/w/workflow-constructed-from-history-spipoll-mmos-gapars-results-2020---30-05-2023" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for SPIPOLL MMOS GAPARS crowdsourcing results" ; + schema1:sdDatePublished "2024-08-05 10:27:16 +0100" ; + schema1:url "https://workflowhub.eu/workflows/660/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 20142 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-10T08:41:20Z" ; + schema1:dateModified "2023-11-10T08:41:20Z" ; + schema1:description """Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, to analyze crowdsourcing results of the SPIPOLL hoverflies GAPARS European project activity on MMOS server.\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "SPIPOLL MMOS GAPARS crowdsourcing results" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/660?version=1" ; + schema1:version 1 ; + ns1:input , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1021?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/scrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/scrnaseq" ; + schema1:sdDatePublished "2024-08-05 10:23:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1021/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8791 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:17Z" ; + schema1:dateModified "2024-06-11T12:55:17Z" ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:keywords "10x-genomics, 10xgenomics, alevin, bustools, Cellranger, kallisto, rna-seq, single-cell, star-solo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/scrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1021?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Bootstrapping-for-BQSR @ NCI-Gadi is a pipeline for bootstrapping a variant resource to enable GATK base quality score recalibration (BQSR) for non-model organisms that lack a publicly available variant resource. This implementation is optimised for the National Compute Infrastucture's Gadi HPC. Multiple rounds of bootstrapping can be performed. Users can use [Fastq-to-bam @ NCI-Gadi](https://workflowhub.eu/workflows/146) and [Germline-ShortV @ NCI-Gadi](https://workflowhub.eu/workflows/143) to produce required input files for Bootstrapping-for-BQSR @ NCI-Gadi. 
\r +\r +Infrastructure\\_deployment\\_metadata: Gadi (NCI)\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.153.1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/Bootstrapping-for-BQSR" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Bootstrapping-for-BQSR @ NCI-Gadi" ; + schema1:sdDatePublished "2024-08-05 10:33:08 +0100" ; + schema1:url "https://workflowhub.eu/workflows/153/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 31062 ; + schema1:creator , + , + ; + schema1:dateCreated "2021-08-18T23:26:00Z" ; + schema1:dateModified "2023-01-16T13:51:46Z" ; + schema1:description """Bootstrapping-for-BQSR @ NCI-Gadi is a pipeline for bootstrapping a variant resource to enable GATK base quality score recalibration (BQSR) for non-model organisms that lack a publicly available variant resource. This implementation is optimised for the National Compute Infrastucture's Gadi HPC. Multiple rounds of bootstrapping can be performed. Users can use [Fastq-to-bam @ NCI-Gadi](https://workflowhub.eu/workflows/146) and [Germline-ShortV @ NCI-Gadi](https://workflowhub.eu/workflows/143) to produce required input files for Bootstrapping-for-BQSR @ NCI-Gadi. \r +\r +Infrastructure\\_deployment\\_metadata: Gadi (NCI)\r +\r +""" ; + schema1:isPartOf ; + schema1:keywords "GATK4, Bootstrapping, BQSR, SNPs, INDELs, illumina, WGS, Genomics, Alignment, variant calling, model, non-model, scalable, DNA, NCI, NCI-Gadi, PBS" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Bootstrapping-for-BQSR @ NCI-Gadi" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/153?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=22" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-08-05 10:24:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=22" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13435 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:51Z" ; + schema1:dateModified "2024-06-11T12:54:51Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=22" ; + schema1:version 22 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Differential abundance analysis" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/981?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/differentialabundance" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/differentialabundance" ; + schema1:sdDatePublished "2024-08-05 10:24:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/981/ro_crate?version=9" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16959 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:49Z" ; + schema1:dateModified "2024-06-11T12:54:49Z" ; + schema1:description "Differential abundance analysis" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/981?version=8" ; + schema1:keywords "ATAC-seq, ChIP-seq, deseq2, differential-abundance, differential-expression, gsea, limma, microarray, rna-seq, shiny" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/differentialabundance" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/981?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """This notebook is about pre-processing the Auditory Brainstem Response (ABR) raw data files provided by [Ingham et. al](https://journals.plos.org/plosbiology/article?id=10.1371/journal.pbio.3000194) to create a data set for Deep Learning models.\r +\r +The unprocessed ABR data files are available at [Dryad](https://datadryad.org/stash/dataset/doi:10.5061/dryad.cv803rv).\r +\r +Since the ABR raw data are available as zip-archives, these have to be unzipped and the extracted raw data files parsed so that the time series corresponding to the ABR audiograms can be saved in a single csv file.\r +\r +The final data set contains the ABR time series, an individual mouse identifier, stimulus frequency, stimulus sound pressure level (SPL) and a manually determined hearing threshold. For each mouse there are different time series corresponding to six different sound stimuli: broadband click, 6, 12, 18, 24, and 30 kHz, each of which was measured for a range of sound pressure levels. The exact range of sound levels can vary between the different mice and stimuli. 
\r +\r +The following is done: \r +\r +* The zip archives are unpacked.\r +* The extracted ABR raw data files are parsed and collected in one csv file per archive.\r +* The csv files are merged into a data set of time series. Each time series corresponds to an ABR audiogram measured for a mouse at a specific frequency and sound level.\r +* The mouse phenotyping data are available in Excel format. The individual data sheets are combined into one mouse phenotyping data set, maintaining the mouse pipeline and the cohort type mapping. In addition, the hearing thresholds are added to the ABR audiogram data set.\r +* The data sets are curated: \r +\r + * there is a single curve per mouse, stimulus frequency and sound level,\r + * each sound level is included in the list of potential sound pressure levels,\r + * for each mouse for which an ABR audiogram has been measured, mouse phenotyping data are also provided.""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/216?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Preparing a data set for Deep Learning from zipped ABR raw data files" ; + schema1:sdDatePublished "2024-08-05 10:33:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/216/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4736972 ; + schema1:creator ; + schema1:dateCreated "2021-10-19T10:47:54Z" ; + schema1:dateModified "2023-01-16T13:53:24Z" ; + schema1:description """This notebook is about pre-processing the Auditory Brainstem Response (ABR) raw data files provided by [Ingham et. 
al](https://journals.plos.org/plosbiology/article?id=10.1371/journal.pbio.3000194) to create a data set for Deep Learning models.\r +\r +The unprocessed ABR data files are available at [Dryad](https://datadryad.org/stash/dataset/doi:10.5061/dryad.cv803rv).\r +\r +Since the ABR raw data are available as zip-archives, these have to be unzipped and the extracted raw data files parsed so that the time series corresponding to the ABR audiograms can be saved in a single csv file.\r +\r +The final data set contains the ABR time series, an individual mouse identifier, stimulus frequency, stimulus sound pressure level (SPL) and a manually determined hearing threshold. For each mouse there are different time series corresponding to six different sound stimuli: broadband click, 6, 12, 18, 24, and 30 kHz, each of which was measured for a range of sound pressure levels. The exact range of sound levels can vary between the different mice and stimuli. \r +\r +The following is done: \r +\r +* The zip archives are unpacked.\r +* The extracted ABR raw data files are parsed and collected in one csv file per archive.\r +* The csv files are merged into a data set of time series. Each time series corresponds to an ABR audiogram measured for a mouse at a specific frequency and sound level.\r +* The mouse phenotyping data are available in Excel format. The individual data sheets are combined into one mouse phenotyping data set, maintaining the mouse pipeline and the cohort type mapping. 
In addition, the hearing thresholds are added to the ABR audiogram data set.\r +* The data sets are curated: \r +\r + * there is a single curve per mouse, stimulus frequency and sound level,\r + * each sound level is included in the list of potential sound pressure levels,\r + * for each mouse for which an ABR audiogram has been measured, mouse phenotyping data are also provided.""" ; + schema1:keywords "ABR, DL" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Preparing a data set for Deep Learning from zipped ABR raw data files" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/216?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/991?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/hlatyping" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hlatyping" ; + schema1:sdDatePublished "2024-08-05 10:22:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/991/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3311 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:55Z" ; + schema1:dateModified "2024-06-11T12:54:55Z" ; + schema1:description "Precision HLA typing from next-generation sequencing data." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/991?version=9" ; + schema1:keywords "DNA, hla, hla-typing, immunology, optitype, personalized-medicine, rna" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hlatyping" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/991?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-variation-reporting" ; + schema1:mainEntity ; + schema1:name "COVID-19-VARIATION-REPORTING (v0.1)" ; + schema1:sdDatePublished "2021-03-12 13:41:20 +0000" ; + schema1:softwareVersion "v0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 77834 ; + schema1:name "COVID-19-VARIATION-REPORTING" ; + schema1:programmingLanguage . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 386785 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "application/pdf" ; + schema1:name "complete_graph.pdf" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# MMV Im2Im Transformation\r +\r +[![Build Status](https://github.com/MMV-Lab/mmv_im2im/workflows/Build%20Main/badge.svg)](https://github.com/MMV-Lab/mmv_im2im/actions)\r +\r +A generic python package for deep learning based image-to-image transformation in biomedical applications\r +\r +The main branch will be further developed in order to be able to use the latest state of the art techniques and methods in the future. To reproduce the results of our manuscript, we refer to the branch [paper_version](https://github.com/MMV-Lab/mmv_im2im/tree/paper_version).\r +(We are actively working on the documentation and tutorials. 
Submit a feature request if there is anything you need.)\r +\r +---\r +\r +## Overview\r +\r +The overall package is designed with a generic image-to-image transformation framework, which could be directly used for semantic segmentation, instance segmentation, image restoration, image generation, labelfree prediction, staining transformation, etc.. The implementation takes advantage of the state-of-the-art ML engineering techniques for users to focus on researches without worrying about the engineering details. In our pre-print [arxiv link](https://arxiv.org/abs/2209.02498), we demonstrated the effectiveness of *MMV_Im2Im* in more than ten different biomedical problems/datasets. \r +\r +* For computational biomedical researchers (e.g., AI algorithm development or bioimage analysis workflow development), we hope this package could serve as the starting point for their specific problems, since the image-to-image "boilerplates" can be easily extended further development or adapted for users' specific problems.\r +* For experimental biomedical researchers, we hope this work provides a comprehensive view of the image-to-image transformation concept through diversified examples and use cases, so that deep learning based image-to-image transformation could be integrated into the assay development process and permit new biomedical studies that can hardly be done only with traditional experimental methods\r +\r +\r +## Installation\r +\r +Before starting, we recommend to [create a new conda environment](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#creating-an-environment-with-commands) or [a virtual environment](https://docs.python.org/3/library/venv.html) with Python 3.9+.\r +\r +Please note that the proper setup of hardware is beyond the scope of this pacakge. This package was tested with GPU/CPU on Linux/Windows and CPU on MacOS. 
[Special note for MacOS users: Directly pip install in MacOS may need [additional setup of xcode](https://developer.apple.com/forums/thread/673827).]\r +\r +### Install MONAI\r +\r +To reproduce our results, we need to install MONAI's code version of a specific commit. To do this:\r +```\r +git clone https://github.com/Project-MONAI/MONAI.git\r +cd ./MONAI\r +git checkout 37b58fcec48f3ec1f84d7cabe9c7ad08a93882c0\r +pip install .\r +```\r +\r +We will remove this step for the main branch in the future to ensure a simplified installation of our tool.\r +\r +### Install MMV_Im2Im for basic usage:\r +\r +(For users only using this package, not planning to change any code or make any extension):\r +\r +**Option 1: core functionality only** `pip install mmv_im2im`
\r +**Option 2: advanced functionality (core + logger)** `pip install mmv_im2im[advance]`
\r +**Option 3: to reproduce paper:** `pip install mmv_im2im[paper]`
\r +**Option 4: install everything:** `pip install mmv_im2im[all]`
\r +\r +For MacOS users, additional ' ' marks are need when using installation tags in zsh. For example, `pip install mmv_im2im[paper]` should be `pip install mmv_im2im'[paper]'` in MacOS.\r +\r +### Install MMV_Im2Im for customization or extension:\r +\r +\r +```\r +git clone https://github.com/MMV-Lab/mmv_im2im.git\r +cd mmv_im2im\r +pip install -e .[all]\r +```\r +\r +Note: The `-e` option is the so-called "editable" mode. This will allow code changes taking effect immediately. The installation tags, `advance`, `paper`, `all`, are be selected based on your needs.\r +\r +### (Optional) Install using Docker\r +\r +It is also possible to use our package through [docker](https://www.docker.com/). The installation tutorial is [here](docker/tutorial.md).\r +\r +### (Optional) Use MMV_Im2Im with Google Colab\r +\r +We provide a web-based demo, if cloud computing is preferred. you can [![Open a 2D labelfree DEMO in Google Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/MMV-Lab/mmv_im2im/blob/main/tutorials/colab/labelfree_2d.ipynb). The same demo can de adapted for different applications.\r +\r +## Quick start\r +\r +You can try out on a simple example following [the quick start guide](tutorials/quick_start.md)\r +\r +Basically, you can specify your training configuration in a yaml file and run training with `run_im2im --config /path/to/train_config.yaml`. Then, you can specify the inference configuration in another yaml file and run inference with `run_im2im --config /path/to/inference_config.yaml`. You can also run the inference as a function with the provided API. This will be useful if you want to run the inference within another python script or workflow. 
Here is an example:\r +\r +```\r +from pathlib import Path\r +from aicsimageio import AICSImage\r +from aicsimageio.writers import OmeTiffWriter\r +from mmv_im2im.configs.config_base import ProgramConfig, parse_adaptor, configuration_validation\r +from mmv_im2im import ProjectTester\r +\r +# load the inference configuration\r +cfg = parse_adaptor(config_class=ProgramConfig, config="./paper_configs/semantic_seg_2d_inference.yaml")\r +cfg = configuration_validation(cfg)\r +\r +# define the executor for inference\r +executor = ProjectTester(cfg)\r +executor.setup_model()\r +executor.setup_data_processing()\r +\r +# get the data, run inference, and save the result\r +fn = Path("./data/img_00_IM.tiff")\r +img = AICSImage(fn).get_image_data("YX", Z=0, C=0, T=0)\r +# or using delayed loading if the data is large\r +# img = AICSImage(fn).get_image_dask_data("YX", Z=0, C=0, T=0)\r +seg = executor.process_one_image(img)\r +OmeTiffWriter.save(seg, "output.tiff", dim_orders="YX")\r +```\r +\r +\r +## Tutorials, examples, demonstrations and documentations\r +\r +The overall package aims to achieve both simplicty and flexibilty with the modularized image-to-image boilerplates. 
To help different users to best use this package, we provide documentations from four different aspects:\r +\r +* [Examples (i.e., scripts and config files)](tutorials/example_by_use_case.md) for reproducing all the experiments in our [pre-print](https://arxiv.org/abs/2209.02498)\r +* A bottom-up tutorials on [how to understand the modularized image-to-image boilerplates](tutorials/how_to_understand_boilerplates.md) (for extending or adapting the package) and [how to understand the configuration system in details](tutorials/how_to_understand_config.md) (for advance usage to make specific customization).\r +* A top-down tutorials as [FAQ](tutorials/FAQ.md), which will continuously grow as we receive more questions.\r +* All the models used in the manuscript and sample data can be found here: [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.10034416.svg)](https://doi.org/10.5281/zenodo.10034416)\r +\r +\r +### Contribute models to [BioImage Model Zoo](https://bioimage.io/#/)\r +\r +We highly appreciate the BioImage Model Zoo's initiative to provide a comprehensive collection of pre-trained models for a wide range of applications. 
To make MMV_Im2Im trained models available as well, the first step involves extracting the state_dict from the PyTorch Lightning checkpoint.\r +This can be done via:\r +\r +```python\r +import torch\r +\r +ckpt_path = "./lightning_logs/version_0/checkpoints/last.ckpt"\r +checkpoint = torch.load(ckpt_path, map_location=torch.device('cpu'))\r +state_dict = checkpoint['state_dict']\r +torch.save(state_dict, "./state_dict.pt")\r +```\r +\r +All further steps to provide models can be found in the [official documentation](https://bioimage.io/docs/#/contribute_models/README).\r +\r +## Development\r +\r +See [CONTRIBUTING.md](CONTRIBUTING.md) for information related to developing the code.\r +\r +\r +**MIT license**\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.626.1" ; + schema1:isBasedOn "https://github.com/MMV-Lab/mmv_im2im.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for MMV_Im2Im" ; + schema1:sdDatePublished "2024-08-05 10:27:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/626/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 129932 ; + schema1:creator ; + schema1:dateCreated "2023-10-27T08:14:19Z" ; + schema1:dateModified "2023-10-27T12:05:57Z" ; + schema1:description """# MMV Im2Im Transformation\r +\r +[![Build Status](https://github.com/MMV-Lab/mmv_im2im/workflows/Build%20Main/badge.svg)](https://github.com/MMV-Lab/mmv_im2im/actions)\r +\r +A generic python package for deep learning based image-to-image transformation in biomedical applications\r +\r +The main branch will be further developed in order to be able to use the latest state of the art techniques and methods in the future. 
To reproduce the results of our manuscript, we refer to the branch [paper_version](https://github.com/MMV-Lab/mmv_im2im/tree/paper_version).\r +(We are actively working on the documentation and tutorials. Submit a feature request if there is anything you need.)\r +\r +---\r +\r +## Overview\r +\r +The overall package is designed with a generic image-to-image transformation framework, which could be directly used for semantic segmentation, instance segmentation, image restoration, image generation, labelfree prediction, staining transformation, etc.. The implementation takes advantage of the state-of-the-art ML engineering techniques for users to focus on researches without worrying about the engineering details. In our pre-print [arxiv link](https://arxiv.org/abs/2209.02498), we demonstrated the effectiveness of *MMV_Im2Im* in more than ten different biomedical problems/datasets. \r +\r +* For computational biomedical researchers (e.g., AI algorithm development or bioimage analysis workflow development), we hope this package could serve as the starting point for their specific problems, since the image-to-image "boilerplates" can be easily extended further development or adapted for users' specific problems.\r +* For experimental biomedical researchers, we hope this work provides a comprehensive view of the image-to-image transformation concept through diversified examples and use cases, so that deep learning based image-to-image transformation could be integrated into the assay development process and permit new biomedical studies that can hardly be done only with traditional experimental methods\r +\r +\r +## Installation\r +\r +Before starting, we recommend to [create a new conda environment](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#creating-an-environment-with-commands) or [a virtual environment](https://docs.python.org/3/library/venv.html) with Python 3.9+.\r +\r +Please note that the proper setup of hardware is 
beyond the scope of this pacakge. This package was tested with GPU/CPU on Linux/Windows and CPU on MacOS. [Special note for MacOS users: Directly pip install in MacOS may need [additional setup of xcode](https://developer.apple.com/forums/thread/673827).]\r +\r +### Install MONAI\r +\r +To reproduce our results, we need to install MONAI's code version of a specific commit. To do this:\r +```\r +git clone https://github.com/Project-MONAI/MONAI.git\r +cd ./MONAI\r +git checkout 37b58fcec48f3ec1f84d7cabe9c7ad08a93882c0\r +pip install .\r +```\r +\r +We will remove this step for the main branch in the future to ensure a simplified installation of our tool.\r +\r +### Install MMV_Im2Im for basic usage:\r +\r +(For users only using this package, not planning to change any code or make any extension):\r +\r +**Option 1: core functionality only** `pip install mmv_im2im`
\r +**Option 2: advanced functionality (core + logger)** `pip install mmv_im2im[advance]`
\r +**Option 3: to reproduce paper:** `pip install mmv_im2im[paper]`
\r +**Option 4: install everything:** `pip install mmv_im2im[all]`
\r +\r +For MacOS users, additional ' ' marks are need when using installation tags in zsh. For example, `pip install mmv_im2im[paper]` should be `pip install mmv_im2im'[paper]'` in MacOS.\r +\r +### Install MMV_Im2Im for customization or extension:\r +\r +\r +```\r +git clone https://github.com/MMV-Lab/mmv_im2im.git\r +cd mmv_im2im\r +pip install -e .[all]\r +```\r +\r +Note: The `-e` option is the so-called "editable" mode. This will allow code changes taking effect immediately. The installation tags, `advance`, `paper`, `all`, are be selected based on your needs.\r +\r +### (Optional) Install using Docker\r +\r +It is also possible to use our package through [docker](https://www.docker.com/). The installation tutorial is [here](docker/tutorial.md).\r +\r +### (Optional) Use MMV_Im2Im with Google Colab\r +\r +We provide a web-based demo, if cloud computing is preferred. you can [![Open a 2D labelfree DEMO in Google Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/MMV-Lab/mmv_im2im/blob/main/tutorials/colab/labelfree_2d.ipynb). The same demo can de adapted for different applications.\r +\r +## Quick start\r +\r +You can try out on a simple example following [the quick start guide](tutorials/quick_start.md)\r +\r +Basically, you can specify your training configuration in a yaml file and run training with `run_im2im --config /path/to/train_config.yaml`. Then, you can specify the inference configuration in another yaml file and run inference with `run_im2im --config /path/to/inference_config.yaml`. You can also run the inference as a function with the provided API. This will be useful if you want to run the inference within another python script or workflow. 
Here is an example:\r +\r +```\r +from pathlib import Path\r +from aicsimageio import AICSImage\r +from aicsimageio.writers import OmeTiffWriter\r +from mmv_im2im.configs.config_base import ProgramConfig, parse_adaptor, configuration_validation\r +from mmv_im2im import ProjectTester\r +\r +# load the inference configuration\r +cfg = parse_adaptor(config_class=ProgramConfig, config="./paper_configs/semantic_seg_2d_inference.yaml")\r +cfg = configuration_validation(cfg)\r +\r +# define the executor for inference\r +executor = ProjectTester(cfg)\r +executor.setup_model()\r +executor.setup_data_processing()\r +\r +# get the data, run inference, and save the result\r +fn = Path("./data/img_00_IM.tiff")\r +img = AICSImage(fn).get_image_data("YX", Z=0, C=0, T=0)\r +# or using delayed loading if the data is large\r +# img = AICSImage(fn).get_image_dask_data("YX", Z=0, C=0, T=0)\r +seg = executor.process_one_image(img)\r +OmeTiffWriter.save(seg, "output.tiff", dim_orders="YX")\r +```\r +\r +\r +## Tutorials, examples, demonstrations and documentations\r +\r +The overall package aims to achieve both simplicty and flexibilty with the modularized image-to-image boilerplates. 
To help different users to best use this package, we provide documentations from four different aspects:\r +\r +* [Examples (i.e., scripts and config files)](tutorials/example_by_use_case.md) for reproducing all the experiments in our [pre-print](https://arxiv.org/abs/2209.02498)\r +* A bottom-up tutorials on [how to understand the modularized image-to-image boilerplates](tutorials/how_to_understand_boilerplates.md) (for extending or adapting the package) and [how to understand the configuration system in details](tutorials/how_to_understand_config.md) (for advance usage to make specific customization).\r +* A top-down tutorials as [FAQ](tutorials/FAQ.md), which will continuously grow as we receive more questions.\r +* All the models used in the manuscript and sample data can be found here: [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.10034416.svg)](https://doi.org/10.5281/zenodo.10034416)\r +\r +\r +### Contribute models to [BioImage Model Zoo](https://bioimage.io/#/)\r +\r +We highly appreciate the BioImage Model Zoo's initiative to provide a comprehensive collection of pre-trained models for a wide range of applications. 
To make MMV_Im2Im trained models available as well, the first step involves extracting the state_dict from the PyTorch Lightning checkpoint.\r +This can be done via:\r +\r +```python\r +import torch\r +\r +ckpt_path = "./lightning_logs/version_0/checkpoints/last.ckpt"\r +checkpoint = torch.load(ckpt_path, map_location=torch.device('cpu'))\r +state_dict = checkpoint['state_dict']\r +torch.save(state_dict, "./state_dict.pt")\r +```\r +\r +All further steps to provide models can be found in the [official documentation](https://bioimage.io/docs/#/contribute_models/README).\r +\r +## Development\r +\r +See [CONTRIBUTING.md](CONTRIBUTING.md) for information related to developing the code.\r +\r +\r +**MIT license**\r +""" ; + schema1:keywords "Machine Learning, Python, image processing, Electron microscopy, imaging, jupyter" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "MMV_Im2Im" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/626?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/976?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/cutandrun" ; + schema1:sdDatePublished "2024-08-05 10:24:08 +0100" ; + schema1:url "https://workflowhub.eu/workflows/976/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11778 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:46Z" ; + schema1:dateModified "2024-06-11T12:54:46Z" ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/976?version=7" ; + schema1:keywords "cutandrun, cutandrun-seq, cutandtag, cutandtag-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/cutandrun" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/976?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Molecular Structure Checking using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **checking** a **molecular structure** before using it as an input for a **Molecular Dynamics** simulation. The workflow uses the **BioExcel Building Blocks library (biobb)**. The particular structure used is the crystal structure of **human Adenylate Kinase 1A (AK1A)**, in complex with the **AP5A inhibitor** (PDB code [1Z83](https://www.rcsb.org/structure/1z83)). \r +\r +**Structure checking** is a key step before setting up a protein system for **simulations**. 
A number of **common issues** found in structures at **Protein Data Bank** may compromise the success of the **simulation**, or may suggest that longer **equilibration** procedures are necessary.\r +\r +The **workflow** shows how to:\r +\r +- Run **basic manipulations on structures** (selection of models, chains, alternative locations\r +- Detect and fix **amide assignments** and **wrong chiralities**\r +- Detect and fix **protein backbone** issues (missing fragments, and atoms, capping)\r +- Detect and fix **missing side-chain atoms**\r +- **Add hydrogen atoms** according to several criteria\r +- Detect and classify **atomic clashes**\r +- Detect possible **disulfide bonds (SS)**\r +\r +An implementation of this workflow in a **web-based Graphical User Interface (GUI)** can be found in the [https://mmb.irbbarcelona.org/biobb-wfs/](https://mmb.irbbarcelona.org/biobb-wfs/) server (see [https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check](https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check)).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.829.1" ; + schema1:isBasedOn 
"https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_structure_checking/docker" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Docker Molecular Structure Checking" ; + schema1:sdDatePublished "2024-08-05 10:24:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/829/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 770 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-22T11:15:34Z" ; + schema1:dateModified "2024-05-22T13:51:08Z" ; + schema1:description """# Molecular Structure Checking using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **checking** a **molecular structure** before using it as an input for a **Molecular Dynamics** simulation. The workflow uses the **BioExcel Building Blocks library (biobb)**. The particular structure used is the crystal structure of **human Adenylate Kinase 1A (AK1A)**, in complex with the **AP5A inhibitor** (PDB code [1Z83](https://www.rcsb.org/structure/1z83)). \r +\r +**Structure checking** is a key step before setting up a protein system for **simulations**. 
A number of **common issues** found in structures at **Protein Data Bank** may compromise the success of the **simulation**, or may suggest that longer **equilibration** procedures are necessary.\r +\r +The **workflow** shows how to:\r +\r +- Run **basic manipulations on structures** (selection of models, chains, alternative locations\r +- Detect and fix **amide assignments** and **wrong chiralities**\r +- Detect and fix **protein backbone** issues (missing fragments, and atoms, capping)\r +- Detect and fix **missing side-chain atoms**\r +- **Add hydrogen atoms** according to several criteria\r +- Detect and classify **atomic clashes**\r +- Detect possible **disulfide bonds (SS)**\r +\r +An implementation of this workflow in a **web-based Graphical User Interface (GUI)** can be found in the [https://mmb.irbbarcelona.org/biobb-wfs/](https://mmb.irbbarcelona.org/biobb-wfs/) server (see [https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check](https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check)).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Docker Molecular Structure Checking" ; + 
schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_structure_checking/docker/Dockerfile" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """## ARA (Automated Record Analysis) : An automatic pipeline for exploration of SRA datasets with sequences as a query\r +\r +### Requirements\r +\r +- **Docker**\r +\r + - Please checkout the [Docker installation](https://docs.docker.com/get-docker/) guide.\r +\r + _or_\r +\r +- **Mamba package manager**\r +\r + - Please checkout the [mamba or micromamba](https://mamba.readthedocs.io/en/latest/installation.html) official installation guide.\r +\r + - We prefer `mamba` over [`conda`](https://docs.conda.io/en/latest/) since it is faster and uses `libsolv` to effectively resolve the dependencies.\r +\r + - `conda` can still be used to install the pipeline using the same commands as described in the installation section.\r +\r + > Note: **It is important to include the 'bioconda' channel in addition to the other channels as indicated in the [official manual](https://bioconda.github.io/#usage "Bioconda - Usage")**. Use the following commands in the given order to configure the channels (one-time setup).\r + >\r + > ```bash\r + > conda config --add channels defaults\r + > conda config --add channels bioconda\r + > conda config --add channels conda-forge\r + > conda config --set channel_priority strict\r + > ```\r +\r +---\r +\r +### Installation\r +\r +The user can install the pipeline by using either Docker or Mamba using the steps mentioned below.\r +\r +First, click the green "Code" button, then select "Download Zip" to begin downloading the contents of this repository. Once the download is complete, extract the zip file by into the desired location before starting the setup. 
Please use the commands shown below to begin installing the pipeline.\r +\r +Alternatively, the github repo can also be cloned through the options shown after clicking the "Code" button. Navigate inside the folder after by using the `cd ARA/` command before starting the setup.\r +\r +> _Warning: Before starting any analysis with the pipeline, please make sure that the system has enough disk space available for the data you wish to retrieve and process from the SRA repository._\r +\r +- **Using Docker**\r +\r + ```bash\r + cd ARA-main/\r + docker build -t ara_img .\r + ```\r +\r +_or_\r +\r +- **Using Mamba**\r +\r + ```bash\r + cd ARA-main/\r + mamba env create --file requirements.yaml\r + mamba activate ara_env\r + perl setup.pl\r + ```\r +\r + > _Note: After installation, the virtual environment consumes approximately 1.5 GB of disk space. The installation was tested on "Ubuntu 20.04.4 LTS", "Ubuntu 22.04.1 LTS" and "Fedora 37" using the procedure mentioned above._\r +\r +Please be patient because downloading and configuring the tools/modules may take several minutes. The warning messages that appear during the installation of certain Perl modules can be ignored by users.\r +\r +Optional: The user can also add the current directory to PATH for ease of use. Use the `chmod +x ara.pl` followed by `export PATH="$(pwd):$PATH"` command. 
Alternatively, the user is free to create symbolic, copy the executable to `/bin/`, or use any other method depending on their operating system.\r +\r +Refer the 'Troubleshooting' section in case of any installation related issues.\r +\r +---\r +\r +### Example usage\r +\r +- **Docker**\r +\r + `docker run -it ara_img /home/ARA-main/ara.pl --input /home/ARA-main/example/SraRunInfo.csv --sequences /home/ARA-main/example/Arabidopsis_thaliana.TAIR10.ncrna.fa`\r +\r +- **Mamba environment**\r +\r + `perl ara.pl --input example/SraRunInfo.csv --sequences example/Arabidopsis_thaliana.TAIR10.ncrna.fa`\r +\r +To get full usage info: `perl ara.pl --help`\r +\r +> _Note_: The user can delete the contents of `results/` directory after testing the tool using the example mentioned above.\r +\r +### Configuration file\r +\r +The configuration file `conf.txt` is automatically generated during the installation by setup script. It contains certain default parameters as well as the location to the executable binaries of the tools incorporated in the pipeline.\r +\r +The user can modify the default parameters in `conf.txt` and pass it to the pipeline as an input. For example, the `data_perc` option in the configuration refers to the default value of 5% of the dataset selected for analysis. However, the user has the flexibility to provide any integer value between 1 and 100 to specify the desired percentage of the dataset to be used.\r +\r +Similarly, the user can choose between _blastn_ or _bowtie2_ by changing the 'execute flag' to either 0 or 1 in the configuration file while leaving the rest of the parameters to default values. By default, both the tools are enabled _ie_. 
`execute = 1`.\r +\r +The `read_drop_perc_cutoff` in `conf.txt` config file denotes the cutoff to discard a sample if the total reads left after executing the trimmomatic are higher than the threshold (by default, if the more than 70% of reads are dropped as per the trimmomatic log, then the sample will fail the quality criteria and will not be processed downstream). Please refer the documentation of [Trimmomatic ](https://github.com/usadellab/Trimmomatic) for more details about the parameters present in the config file.\r +\r +Similarly, the criteria to check the minimal alignment rate are indicated by the `alignment perc cutoff` parameter under blastn and bowtie2 in the `conf.txt` configuration file (if the total alignment percentage is less than the threshold then the pipeline will report that the sample failed the quality criteria). More details about the parameters used in the `conf.txt` file can be found in the respective documentations of [Blastn](https://www.ncbi.nlm.nih.gov/books/NBK279690/) and [Bowtie2](https://bowtie-bio.sourceforge.net/bowtie2/manual.shtml).\r +\r +By default, the pipeline uses a pre-built Kraken2 viral genomic database ([release: 9/8/2022](https://genome-idx.s3.amazonaws.com/kraken/k2_viral_20220908.tar.gz)) from . Users can provide their own database by changing the `kraken2_db_path` parameter in the `conf.txt` file.\r +\r +> _Note:_ If the user wishes to use a different installation than Bioconda, the user can manually install the required tools and specify the absolute path of the executable binaries in the configuration.\r +\r +---\r +\r +### Pipeline parameters\r +\r +- **`--input`** (mandatory) The user can provide input in either of the following ways:\r +\r + - A single SRA run accession. eg: **`perl ara.pl --input SRR12548227 --sequences example/Arabidopsis_thaliana.TAIR10.ncrna.fa`**\r +\r + - A list of run accessions in a text file (1 run accession per line). 
eg: **`perl ara.pl --input example/list.txt --sequences example/Arabidopsis_thaliana.TAIR10.ncrna.fa`**\r +\r + - The SRA runInfo exported directly from the NCBI-SRA web portal. Goto the [SRA homepage](https://www.ncbi.nlm.nih.gov/sra "Home - NCBI - SRA") and search for the desired keyword. Export the `SraRunInfo.csv` by clicking 'Send to' =\\> File =\\> RunInfo). eg: **`perl ara.pl --input example/SraRunInfo.csv --sequences example/Arabidopsis_thaliana.TAIR10.ncrna.fa`**\r +\r +- **`--sequences`** (mandatory) The user should provide a fasta file containing the query sequences.\r +\r +- **`--output`** (optional) The output directory to store the results. By default, the output will be stored into the **`results/`** directory of the package. eg: **`perl ara.pl --input example/SraRunInfo.csv --sequences example/Arabidopsis_thaliana.TAIR10.ncrna.fa --output /src/main/test/`**\r +\r +- **`--mode`** (optional) Choose one of the three modes to run the pipeline.\r +\r + - The **`screen`** is the default mode which will only download a fraction of the data-set per SRA-run accession and analyse the file as per the given configuration.\r +\r + - The **`full`** mode will execute the pipeline by downloading the complete fastq file per SRA-run accession.\r +\r + - The **`both`** option searches for samples using a fraction of the data that meet the minimum alignment cutoff from either 'bowtie2' or 'blastn', and then automatically performs alignment by downloading the entire fastq file. eg: **`perl ara.pl --input example/SraRunInfo.csv --sequences example/Arabidopsis_thaliana.TAIR10.ncrna.fa --output /src/main/test/ --mode screen`**\r +\r + > _Note:_ There is a supporting **`summary`** mode, that will generate a unified alignment summary by examining the output files created by either screen-mode or full-mode. The summary mode should only be used when the user needs to recreate the summary stats from the pre-existing results. 
The user must enter **`–mode summary`** along with the previously used command parameters to re-generate the summary.\r +\r + - **`--config`** (optional) Pipeline configuration. By default it will use the **`conf.txt`** generated by the setup script. eg: **`perl ara.pl --input example/SraRunInfo.csv --sequences example/Arabidopsis_thaliana.TAIR10.ncrna.fa --output /src/main/test/ --mode screen --config conf.txt`**\r +\r +---\r +\r +### Output structure\r +\r +The pipeline will create folders per SRA run accession and generate results using the run accession as the prefix. The analysis related to the screening a fraction of data will be stored in `screening_results` directory whereas the analysis conducted on the whole dataset will be stored in `full_analyis_results` directory.\r +\r +An outline of directory structure containing the results is shown below-\r +\r + results/\r + `-- test/ (name derived from the input fasta sequence file)\r + |-- test.screening.analysis.stats.sorted.by.alignment.txt (combined metadata and analysis report generated after processing all the SRA run accessions, sorted in decreasing order of total alignment percentage)\r + |-- metadata/\r + | |-- test.metadata.txt (Combined metadata downloaded from SRA)\r + | |-- test.metadata.screened.txt (List of SRA accessions which qualify the filter criteria specified in the config.)\r + | |-- SRA_RUN.run.metadata.txt (unprocessed metadata on a single SRA accession as retrieved from NCBI)\r + |-- reference/\r + | |-- blastn_db/ (folder containing the blast database created from the input fasta sequence)\r + | |-- bowtie2_index/ (folder containing the bowtie index created from the input fasta sequence)\r + | |-- bowtie2_index.stdout.txt (stdout captured from bowtie2 index creation)\r + | `-- makeblastdb.stdout.txt (stdout captured from blastn database creation)\r + `-- screening_results/ (similar structure for screeing or full mode)\r + |-- SRA_RUN/ (each SRA run accession will be processed into a 
seperate folder)\r + | |-- blastn/\r + | | |-- SRA_RUN.blast.results.txt (output from NCBI Blastn)\r + | | `-- blast.stats.txt (blastn overall alignment stats)\r + | |-- bowtie2/\r + | | |-- SRA_RUN.bam (output from bowtie2)\r + | | |-- alignment.stats.txt (bowtie2 stdout)\r + | | `-- alignment.txt (bowtie2 overall alignment summary)\r + | |-- fastQC/\r + | | |-- \r + | | |-- \r + | |-- kraken2/\r + | | |-- SRA_RUN.kraken (kraken2 standard classification table)\r + | | |-- SRA_RUN.report (kraken2 classification report)\r + | | `-- SRA_RUN.stdout.txt (kraken2 stdout)\r + | |-- raw_fastq/\r + | | |-- \r + | | |-- fastq_dump.stdout.txt\r + | | |-- sra/\r + | | `-- wget.full.sra.stdout.txt\r + | `-- trimmed_data/\r + | |-- \r + | `-- SRA_RUN_trim_stdout_log.txt (trimmomatic stdout)\r + `-- runlog.SRA_RUN.txt (Complete run log of the pipeline per SRA run accession)\r +\r +For a thorough understanding of the results of the third-party tools, take a look at the following documentations:\r +\r +- [Blastn](https://www.ncbi.nlm.nih.gov/books/NBK279690/)\r +- [Bowtie2](https://bowtie-bio.sourceforge.net/bowtie2/manual.shtml)\r +- [FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)\r +- [Kraken2](https://github.com/DerrickWood/kraken2/blob/master/docs/MANUAL.markdown)\r +- [Trimmomatic](https://github.com/usadellab/Trimmomatic)\r +\r +---\r +\r +### Disk usage using the input from the example\r +\r +The table below provides a summary of the disk usage for different analyses conducted on varying dataset sizes. 
It demonstrates how disk usage can increase depending on the choice of the fraction of the dataset the user wishes to analyze.\r +\r +| RUN ACCESSION | 100% of dataset | 5% of dataset | 10% of dataset |\r +| ------------- | --------------- | ------------- | -------------- |\r +| SRR8392720 | 1.3G | 85M | 156M |\r +| SRR7289585 | 1.4G | 150M | 288M |\r +| SRR12548227 | 15M | 9.0M | 9.1M |\r +\r +This summary highlights how the disk usage (in megabytes or gigabytes) can vary depending on the chosen fraction of the dataset for analysis.\r +\r +---\r +\r +### Troubleshooting\r +\r +- Errors related to mamba/conda environment:\r +\r + Since `mamba` is a drop-in replacement and uses the same commands and configuration options as **conda**, it's possible to swap almost all commands between **conda** & **mamba**.\r +\r + Use **`conda list`** command to verify whether the packages mentioned in the `requirements.yaml` are successfully installed into your environment.\r +\r + > _Note:_ The `requirements.yaml` provided in this package was exported from `mamba 0.25.0` installation running on `Ubuntu 20.04.4 LTS`.\r +\r + In case of any missing tool/ conflicting dependencies in the environment, the user can try using **`conda search `** or `mamba repoquery search ` command to find the supported version of the tool and then manually install it by typing **`conda install `** or `mamba install ` inside the environment. Please refer the official [troubleshooting guide](https://conda.io/projects/conda/en/latest/user-guide/troubleshooting.html "User guide » Troubleshooting") for further help.\r +\r + > _Note:_ On macOS and Linux, the supported tools and their dependencies aren't always the same. Even when all of the requirements are completely aligned, the set of available versions isn't necessarily the same. 
User may try setting up the environment using any of the supplementary `requirements-*.txt` provided in the `src/main/resources/` directory.\r +\r +- Error installing Perl modules:\r +\r + Users must ensure that they have write permission to the `/Users/\\*/.cpan/` or similar directory, and the CPAN is properly configured.\r +\r + You might need to define the PERLLIB/PERL5LIB environment variable if you see an error similar to the following:\r +\r + ```bash\r + Cant locate My/Module.pm in @INC (@INC contains:\r + ...\r + ...\r + .).\r + BEGIN failed--compilation aborted.\r + ```\r +\r + > _Note about MAKE_: 'make' is an essential tool for building Perl modules. Please make sure that you have 'make' installed in your system. The setup script provided in this package utilizes 'cpan' to build the required Perl modules automatically.\r +\r + If the automatic setup provided in the package fails to install the required dependencies, you may need to install them manually by using the command `cpan install ` or searching the package on [Metacpan](https://metacpan.org/).\r +\r + Additionally, some Perl modules can also be installed through `mamba` (eg. 
the compatible version of Perl module `Config::Simple` can be searched on mamba by `mamba repoquery search perl-config-simple`)\r +\r +---\r +\r +### List of Perl modules and tools incorporated in the pipeline\r +\r +- Perl modules:\r +\r + - Config::Simple\r + - Parallel::ForkManager\r + - Log::Log4perl\r + - Getopt::Long\r + - Text::CSV\r + - Text::Unidecode\r +\r +- Tools:\r +\r + - [NCBI EDirect utilities \\>=16.2](https://www.ncbi.nlm.nih.gov/books/NBK179288/)\r + - [NCBI SRA Toolkit \\>=2.10.7](https://www.ncbi.nlm.nih.gov/home/tools/)\r + - [FastQC \\>=0.11.9](https://www.bioinformatics.babraham.ac.uk/projects/download.html#fastqc)\r + - [Trimmomatic \\>=0.39](http://www.usadellab.org/cms/?page=trimmomatic)\r + - [FASTX-Toolkit \\>=0.0.14](http://hannonlab.cshl.edu/fastx_toolkit/)\r + - [NCBI Blast \\>=2.10.1](https://blast.ncbi.nlm.nih.gov/Blast.cgi?PAGE_TYPE=BlastDocs&DOC_TYPE=Download)\r + - [Bowtie2 \\>=2.4.5](http://bowtie-bio.sourceforge.net/bowtie2/index.shtml)\r + - [Samtools \\>=1.15.1](http://www.htslib.org/download/)\r + - [Kraken2 \\>=2.1.2](https://ccb.jhu.edu/software/kraken2/)\r +\r +---\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.546.1" ; + schema1:isBasedOn "https://github.com/maurya-anand/ARA.git" ; + schema1:license "LGPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ARA (Automated Record Analysis)" ; + schema1:sdDatePublished "2024-08-05 10:29:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/546/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 23784 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-07-31T12:44:27Z" ; + schema1:dateModified "2023-07-31T12:49:45Z" ; + schema1:description """## ARA (Automated Record Analysis) : An automatic pipeline for exploration of SRA datasets with sequences as a query\r +\r +### Requirements\r +\r +- **Docker**\r +\r + - Please checkout the [Docker installation](https://docs.docker.com/get-docker/) guide.\r +\r + _or_\r +\r +- **Mamba package manager**\r +\r + - Please checkout the [mamba or micromamba](https://mamba.readthedocs.io/en/latest/installation.html) official installation guide.\r +\r + - We prefer `mamba` over [`conda`](https://docs.conda.io/en/latest/) since it is faster and uses `libsolv` to effectively resolve the dependencies.\r +\r + - `conda` can still be used to install the pipeline using the same commands as described in the installation section.\r +\r + > Note: **It is important to include the 'bioconda' channel in addition to the other channels as indicated in the [official manual](https://bioconda.github.io/#usage "Bioconda - Usage")**. Use the following commands in the given order to configure the channels (one-time setup).\r + >\r + > ```bash\r + > conda config --add channels defaults\r + > conda config --add channels bioconda\r + > conda config --add channels conda-forge\r + > conda config --set channel_priority strict\r + > ```\r +\r +---\r +\r +### Installation\r +\r +The user can install the pipeline by using either Docker or Mamba using the steps mentioned below.\r +\r +First, click the green "Code" button, then select "Download Zip" to begin downloading the contents of this repository. Once the download is complete, extract the zip file by into the desired location before starting the setup. 
Please use the commands shown below to begin installing the pipeline.\r +\r +Alternatively, the github repo can also be cloned through the options shown after clicking the "Code" button. Navigate inside the folder after by using the `cd ARA/` command before starting the setup.\r +\r +> _Warning: Before starting any analysis with the pipeline, please make sure that the system has enough disk space available for the data you wish to retrieve and process from the SRA repository._\r +\r +- **Using Docker**\r +\r + ```bash\r + cd ARA-main/\r + docker build -t ara_img .\r + ```\r +\r +_or_\r +\r +- **Using Mamba**\r +\r + ```bash\r + cd ARA-main/\r + mamba env create --file requirements.yaml\r + mamba activate ara_env\r + perl setup.pl\r + ```\r +\r + > _Note: After installation, the virtual environment consumes approximately 1.5 GB of disk space. The installation was tested on "Ubuntu 20.04.4 LTS", "Ubuntu 22.04.1 LTS" and "Fedora 37" using the procedure mentioned above._\r +\r +Please be patient because downloading and configuring the tools/modules may take several minutes. The warning messages that appear during the installation of certain Perl modules can be ignored by users.\r +\r +Optional: The user can also add the current directory to PATH for ease of use. Use the `chmod +x ara.pl` followed by `export PATH="$(pwd):$PATH"` command. 
Alternatively, the user is free to create symbolic, copy the executable to `/bin/`, or use any other method depending on their operating system.\r +\r +Refer the 'Troubleshooting' section in case of any installation related issues.\r +\r +---\r +\r +### Example usage\r +\r +- **Docker**\r +\r + `docker run -it ara_img /home/ARA-main/ara.pl --input /home/ARA-main/example/SraRunInfo.csv --sequences /home/ARA-main/example/Arabidopsis_thaliana.TAIR10.ncrna.fa`\r +\r +- **Mamba environment**\r +\r + `perl ara.pl --input example/SraRunInfo.csv --sequences example/Arabidopsis_thaliana.TAIR10.ncrna.fa`\r +\r +To get full usage info: `perl ara.pl --help`\r +\r +> _Note_: The user can delete the contents of `results/` directory after testing the tool using the example mentioned above.\r +\r +### Configuration file\r +\r +The configuration file `conf.txt` is automatically generated during the installation by setup script. It contains certain default parameters as well as the location to the executable binaries of the tools incorporated in the pipeline.\r +\r +The user can modify the default parameters in `conf.txt` and pass it to the pipeline as an input. For example, the `data_perc` option in the configuration refers to the default value of 5% of the dataset selected for analysis. However, the user has the flexibility to provide any integer value between 1 and 100 to specify the desired percentage of the dataset to be used.\r +\r +Similarly, the user can choose between _blastn_ or _bowtie2_ by changing the 'execute flag' to either 0 or 1 in the configuration file while leaving the rest of the parameters to default values. By default, both the tools are enabled _ie_. 
`execute = 1`.\r +\r +The `read_drop_perc_cutoff` in `conf.txt` config file denotes the cutoff to discard a sample if the total reads left after executing the trimmomatic are higher than the threshold (by default, if the more than 70% of reads are dropped as per the trimmomatic log, then the sample will fail the quality criteria and will not be processed downstream). Please refer the documentation of [Trimmomatic ](https://github.com/usadellab/Trimmomatic) for more details about the parameters present in the config file.\r +\r +Similarly, the criteria to check the minimal alignment rate are indicated by the `alignment perc cutoff` parameter under blastn and bowtie2 in the `conf.txt` configuration file (if the total alignment percentage is less than the threshold then the pipeline will report that the sample failed the quality criteria). More details about the parameters used in the `conf.txt` file can be found in the respective documentations of [Blastn](https://www.ncbi.nlm.nih.gov/books/NBK279690/) and [Bowtie2](https://bowtie-bio.sourceforge.net/bowtie2/manual.shtml).\r +\r +By default, the pipeline uses a pre-built Kraken2 viral genomic database ([release: 9/8/2022](https://genome-idx.s3.amazonaws.com/kraken/k2_viral_20220908.tar.gz)) from . Users can provide their own database by changing the `kraken2_db_path` parameter in the `conf.txt` file.\r +\r +> _Note:_ If the user wishes to use a different installation than Bioconda, the user can manually install the required tools and specify the absolute path of the executable binaries in the configuration.\r +\r +---\r +\r +### Pipeline parameters\r +\r +- **`--input`** (mandatory) The user can provide input in either of the following ways:\r +\r + - A single SRA run accession. eg: **`perl ara.pl --input SRR12548227 --sequences example/Arabidopsis_thaliana.TAIR10.ncrna.fa`**\r +\r + - A list of run accessions in a text file (1 run accession per line). 
eg: **`perl ara.pl --input example/list.txt --sequences example/Arabidopsis_thaliana.TAIR10.ncrna.fa`**\r +\r + - The SRA runInfo exported directly from the NCBI-SRA web portal. Goto the [SRA homepage](https://www.ncbi.nlm.nih.gov/sra "Home - NCBI - SRA") and search for the desired keyword. Export the `SraRunInfo.csv` by clicking 'Send to' =\\> File =\\> RunInfo). eg: **`perl ara.pl --input example/SraRunInfo.csv --sequences example/Arabidopsis_thaliana.TAIR10.ncrna.fa`**\r +\r +- **`--sequences`** (mandatory) The user should provide a fasta file containing the query sequences.\r +\r +- **`--output`** (optional) The output directory to store the results. By default, the output will be stored into the **`results/`** directory of the package. eg: **`perl ara.pl --input example/SraRunInfo.csv --sequences example/Arabidopsis_thaliana.TAIR10.ncrna.fa --output /src/main/test/`**\r +\r +- **`--mode`** (optional) Choose one of the three modes to run the pipeline.\r +\r + - The **`screen`** is the default mode which will only download a fraction of the data-set per SRA-run accession and analyse the file as per the given configuration.\r +\r + - The **`full`** mode will execute the pipeline by downloading the complete fastq file per SRA-run accession.\r +\r + - The **`both`** option searches for samples using a fraction of the data that meet the minimum alignment cutoff from either 'bowtie2' or 'blastn', and then automatically performs alignment by downloading the entire fastq file. eg: **`perl ara.pl --input example/SraRunInfo.csv --sequences example/Arabidopsis_thaliana.TAIR10.ncrna.fa --output /src/main/test/ --mode screen`**\r +\r + > _Note:_ There is a supporting **`summary`** mode, that will generate a unified alignment summary by examining the output files created by either screen-mode or full-mode. The summary mode should only be used when the user needs to recreate the summary stats from the pre-existing results. 
The user must enter **`–mode summary`** along with the previously used command parameters to re-generate the summary.\r +\r + - **`--config`** (optional) Pipeline configuration. By default it will use the **`conf.txt`** generated by the setup script. eg: **`perl ara.pl --input example/SraRunInfo.csv --sequences example/Arabidopsis_thaliana.TAIR10.ncrna.fa --output /src/main/test/ --mode screen --config conf.txt`**\r +\r +---\r +\r +### Output structure\r +\r +The pipeline will create folders per SRA run accession and generate results using the run accession as the prefix. The analysis related to the screening a fraction of data will be stored in `screening_results` directory whereas the analysis conducted on the whole dataset will be stored in `full_analyis_results` directory.\r +\r +An outline of directory structure containing the results is shown below-\r +\r + results/\r + `-- test/ (name derived from the input fasta sequence file)\r + |-- test.screening.analysis.stats.sorted.by.alignment.txt (combined metadata and analysis report generated after processing all the SRA run accessions, sorted in decreasing order of total alignment percentage)\r + |-- metadata/\r + | |-- test.metadata.txt (Combined metadata downloaded from SRA)\r + | |-- test.metadata.screened.txt (List of SRA accessions which qualify the filter criteria specified in the config.)\r + | |-- SRA_RUN.run.metadata.txt (unprocessed metadata on a single SRA accession as retrieved from NCBI)\r + |-- reference/\r + | |-- blastn_db/ (folder containing the blast database created from the input fasta sequence)\r + | |-- bowtie2_index/ (folder containing the bowtie index created from the input fasta sequence)\r + | |-- bowtie2_index.stdout.txt (stdout captured from bowtie2 index creation)\r + | `-- makeblastdb.stdout.txt (stdout captured from blastn database creation)\r + `-- screening_results/ (similar structure for screeing or full mode)\r + |-- SRA_RUN/ (each SRA run accession will be processed into a 
seperate folder)\r + | |-- blastn/\r + | | |-- SRA_RUN.blast.results.txt (output from NCBI Blastn)\r + | | `-- blast.stats.txt (blastn overall alignment stats)\r + | |-- bowtie2/\r + | | |-- SRA_RUN.bam (output from bowtie2)\r + | | |-- alignment.stats.txt (bowtie2 stdout)\r + | | `-- alignment.txt (bowtie2 overall alignment summary)\r + | |-- fastQC/\r + | | |-- \r + | | |-- \r + | |-- kraken2/\r + | | |-- SRA_RUN.kraken (kraken2 standard classification table)\r + | | |-- SRA_RUN.report (kraken2 classification report)\r + | | `-- SRA_RUN.stdout.txt (kraken2 stdout)\r + | |-- raw_fastq/\r + | | |-- \r + | | |-- fastq_dump.stdout.txt\r + | | |-- sra/\r + | | `-- wget.full.sra.stdout.txt\r + | `-- trimmed_data/\r + | |-- \r + | `-- SRA_RUN_trim_stdout_log.txt (trimmomatic stdout)\r + `-- runlog.SRA_RUN.txt (Complete run log of the pipeline per SRA run accession)\r +\r +For a thorough understanding of the results of the third-party tools, take a look at the following documentations:\r +\r +- [Blastn](https://www.ncbi.nlm.nih.gov/books/NBK279690/)\r +- [Bowtie2](https://bowtie-bio.sourceforge.net/bowtie2/manual.shtml)\r +- [FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)\r +- [Kraken2](https://github.com/DerrickWood/kraken2/blob/master/docs/MANUAL.markdown)\r +- [Trimmomatic](https://github.com/usadellab/Trimmomatic)\r +\r +---\r +\r +### Disk usage using the input from the example\r +\r +The table below provides a summary of the disk usage for different analyses conducted on varying dataset sizes. 
It demonstrates how disk usage can increase depending on the choice of the fraction of the dataset the user wishes to analyze.\r +\r +| RUN ACCESSION | 100% of dataset | 5% of dataset | 10% of dataset |\r +| ------------- | --------------- | ------------- | -------------- |\r +| SRR8392720 | 1.3G | 85M | 156M |\r +| SRR7289585 | 1.4G | 150M | 288M |\r +| SRR12548227 | 15M | 9.0M | 9.1M |\r +\r +This summary highlights how the disk usage (in megabytes or gigabytes) can vary depending on the chosen fraction of the dataset for analysis.\r +\r +---\r +\r +### Troubleshooting\r +\r +- Errors related to mamba/conda environment:\r +\r + Since `mamba` is a drop-in replacement and uses the same commands and configuration options as **conda**, it's possible to swap almost all commands between **conda** & **mamba**.\r +\r + Use **`conda list`** command to verify whether the packages mentioned in the `requirements.yaml` are successfully installed into your environment.\r +\r + > _Note:_ The `requirements.yaml` provided in this package was exported from `mamba 0.25.0` installation running on `Ubuntu 20.04.4 LTS`.\r +\r + In case of any missing tool/ conflicting dependencies in the environment, the user can try using **`conda search `** or `mamba repoquery search ` command to find the supported version of the tool and then manually install it by typing **`conda install `** or `mamba install ` inside the environment. Please refer the official [troubleshooting guide](https://conda.io/projects/conda/en/latest/user-guide/troubleshooting.html "User guide » Troubleshooting") for further help.\r +\r + > _Note:_ On macOS and Linux, the supported tools and their dependencies aren't always the same. Even when all of the requirements are completely aligned, the set of available versions isn't necessarily the same. 
User may try setting up the environment using any of the supplementary `requirements-*.txt` provided in the `src/main/resources/` directory.\r +\r +- Error installing Perl modules:\r +\r + Users must ensure that they have write permission to the `/Users/\\*/.cpan/` or similar directory, and the CPAN is properly configured.\r +\r + You might need to define the PERLLIB/PERL5LIB environment variable if you see an error similar to the following:\r +\r + ```bash\r + Cant locate My/Module.pm in @INC (@INC contains:\r + ...\r + ...\r + .).\r + BEGIN failed--compilation aborted.\r + ```\r +\r + > _Note about MAKE_: 'make' is an essential tool for building Perl modules. Please make sure that you have 'make' installed in your system. The setup script provided in this package utilizes 'cpan' to build the required Perl modules automatically.\r +\r + If the automatic setup provided in the package fails to install the required dependencies, you may need to install them manually by using the command `cpan install ` or searching the package on [Metacpan](https://metacpan.org/).\r +\r + Additionally, some Perl modules can also be installed through `mamba` (eg. 
the compatible version of Perl module `Config::Simple` can be searched on mamba by `mamba repoquery search perl-config-simple`)\r +\r +---\r +\r +### List of Perl modules and tools incorporated in the pipeline\r +\r +- Perl modules:\r +\r + - Config::Simple\r + - Parallel::ForkManager\r + - Log::Log4perl\r + - Getopt::Long\r + - Text::CSV\r + - Text::Unidecode\r +\r +- Tools:\r +\r + - [NCBI EDirect utilities \\>=16.2](https://www.ncbi.nlm.nih.gov/books/NBK179288/)\r + - [NCBI SRA Toolkit \\>=2.10.7](https://www.ncbi.nlm.nih.gov/home/tools/)\r + - [FastQC \\>=0.11.9](https://www.bioinformatics.babraham.ac.uk/projects/download.html#fastqc)\r + - [Trimmomatic \\>=0.39](http://www.usadellab.org/cms/?page=trimmomatic)\r + - [FASTX-Toolkit \\>=0.0.14](http://hannonlab.cshl.edu/fastx_toolkit/)\r + - [NCBI Blast \\>=2.10.1](https://blast.ncbi.nlm.nih.gov/Blast.cgi?PAGE_TYPE=BlastDocs&DOC_TYPE=Download)\r + - [Bowtie2 \\>=2.4.5](http://bowtie-bio.sourceforge.net/bowtie2/index.shtml)\r + - [Samtools \\>=1.15.1](http://www.htslib.org/download/)\r + - [Kraken2 \\>=2.1.2](https://ccb.jhu.edu/software/kraken2/)\r +\r +---\r +""" ; + schema1:keywords "Genomics, Pipeline, Perl, ncbi sra, sequence annotation, sequence search" ; + schema1:license "https://spdx.org/licenses/LGPL-3.0" ; + schema1:name "ARA (Automated Record Analysis)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/546?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """\r +

+nf-core/viralrecon +

\r +

GitHub Actions CI Status GitHub Actions Linting Status Nextflow install with bioconda

\r +

Docker DOI

\r +

nf-core/viralrecon is a bioinformatics analysis pipeline used to perform assembly and intrahost/low-frequency variant calling for viral samples. The pipeline currently supports metagenomics and amplicon sequencing data derived from the Illumina sequencing platform.

\r +

This pipeline is a re-implementation of the SARS_Cov2_consensus-nf and SARS_Cov2_assembly-nf pipelines initially developed by Sarai Varona and Sara Monzon from BU-ISCIII. Porting both of these pipelines to nf-core was an international collaboration between numerous contributors and developers, led by Harshil Patel from The Bioinformatics & Biostatistics Group at The Francis Crick Institute, London. We appreciated the need to have a portable, reproducible and scalable pipeline for the analysis of COVID-19 sequencing samples and so the Avengers Assembled! Please come and join us and add yourself to the contributor list :)

\r +

We have integrated a number of options in the pipeline to allow you to run specific aspects of the workflow if you so wish. For example, you can skip all of the assembly steps with the --skip_assembly parameter. See usage docs for all of the available options when running the pipeline.

\r +

Please click here to see an example MultiQC report generated using the parameters defined in this configuration file to run the pipeline on samples which were prepared from the ncov-2019 ARTIC Network V1 amplicon set and sequenced on the Illumina MiSeq platform in 301bp paired-end format.

\r +

The pipeline is built using Nextflow, a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with docker containers making installation trivial and results highly reproducible. Furthermore, automated continuous integration tests to run the pipeline on a full-sized dataset are passing on AWS cloud.

\r +

+Pipeline summary

\r +
    \r +
  1. Download samples via SRA, ENA or GEO ids (ENA FTP, parallel-fastq-dump; if required)
  2. \r +
  3. Merge re-sequenced FastQ files (cat; if required)
  4. \r +
  5. Read QC (FastQC)
  6. \r +
  7. Adapter trimming (fastp)
  8. \r +
  9. Variant calling
    \r +i. Read alignment (Bowtie 2)
    \r +ii. Sort and index alignments (SAMtools)
    \r +iii. Primer sequence removal (iVar; amplicon data only)
    \r +iv. Duplicate read marking (picard; removal optional)
    \r +v. Alignment-level QC (picard, SAMtools)
    \r +vi. Choice of multiple variant calling and consensus sequence generation routes (VarScan 2, BCFTools, BEDTools || iVar variants and consensus || BCFTools, BEDTools)
    \r +- Variant annotation (SnpEff, SnpSift)
    \r +- Consensus assessment report (QUAST)
  10. \r +
  11. +De novo assembly
    \r +i. Primer trimming (Cutadapt; amplicon data only)
    \r +ii. Removal of host reads (Kraken 2)
    \r +iii. Choice of multiple assembly tools (SPAdes || metaSPAdes || Unicycler || minia)
    \r +- Blast to reference genome (blastn)
    \r +- Contiguate assembly (ABACAS)
    \r +- Assembly report (PlasmidID)
    \r +- Assembly assessment report (QUAST)
    \r +- Call variants relative to reference (Minimap2, seqwish, vg, Bandage)
    \r +- Variant annotation (SnpEff, SnpSift)
  12. \r +
  13. Present QC and visualisation for raw read, alignment, assembly and variant calling results (MultiQC)
  14. \r +
\r +

+Quick Start

\r +

i. Install nextflow

\r +

ii. Install either Docker or Singularity for full pipeline reproducibility (please only use Conda as a last resort; see docs)

\r +

iii. Download the pipeline and test it on a minimal dataset with a single command

\r +
nextflow run nf-core/viralrecon -profile test,<docker/singularity/conda/institute>\r
+
\r +
\r +

Please check nf-core/configs to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use -profile <institute> in your command. This will enable either docker or singularity and set the appropriate execution settings for your local compute environment.

\r +
\r +

iv. Start running your own analysis!

\r +
nextflow run nf-core/viralrecon -profile <docker/singularity/conda/institute> --input samplesheet.csv --genome 'NC_045512.2' -profile docker\r
+
\r +

See usage docs for all of the available options when running the pipeline.

\r +

+Documentation

\r +

The nf-core/viralrecon pipeline comes with documentation about the pipeline, found in the docs/ directory:

\r +
    \r +
  1. Installation
  2. \r +
  3. Pipeline configuration\r +\r +
  4. \r +
  5. Running the pipeline
  6. \r +
  7. Output and how to interpret the results
  8. \r +
  9. Troubleshooting
  10. \r +
\r +

+Credits

\r +

These scripts were originally written by Sarai Varona, Miguel Juliá and Sara Monzon from BU-ISCIII and co-ordinated by Isabel Cuesta for the Institute of Health Carlos III, Spain. Through collaboration with the nf-core community the pipeline has now been updated substantially to include additional processing steps, to standardise inputs/outputs and to improve pipeline reporting; implemented primarily by Harshil Patel from The Bioinformatics & Biostatistics Group at The Francis Crick Institute, London.

\r +

Many thanks to others who have helped out and contributed along the way too, including (but not limited to):

\r +\r +\r +\r +Name\r +Affiliation\r +\r +\r +\r +\r +Alexander Peltzer\r +Boehringer Ingelheim, Germany\r +\r +\r +Alison Meynert\r +University of Edinburgh, Scotland\r +\r +\r +Edgar Garriga Nogales\r +Centre for Genomic Regulation, Spain\r +\r +\r +Erik Garrison\r +UCSC, USA\r +\r +\r +Gisela Gabernet\r +QBiC, University of Tübingen, Germany\r +\r +\r +Joao Curado\r +Flomics Biotech, Spain\r +\r +\r +Jose Espinosa-Carrasco\r +Centre for Genomic Regulation, Spain\r +\r +\r +Katrin Sameith\r +DRESDEN-concept Genome Center, Germany\r +\r +\r +Lluc Cabus\r +Flomics Biotech, Spain\r +\r +\r +Marta Pozuelo\r +Flomics Biotech, Spain\r +\r +\r +Maxime Garcia\r +SciLifeLab, Sweden\r +\r +\r +Michael Heuer\r +UC Berkeley, USA\r +\r +\r +Phil Ewels\r +SciLifeLab, Sweden\r +\r +\r +Simon Heumos\r +QBiC, University of Tübingen, Germany\r +\r +\r +Stephen Kelly\r +Memorial Sloan Kettering Cancer Center, USA\r +\r +\r +Thanh Le Viet\r +Quadram Institute, UK\r +\r +\r +\r +
\r +

Listed in alphabetical order

\r +
\r +

+Contributions and Support

\r +

If you would like to contribute to this pipeline, please see the contributing guidelines.

\r +

For further information or help, don’t hesitate to get in touch on Slack (you can join with this invite).

\r +

+Citation

\r +

If you use nf-core/viralrecon for your analysis, please cite it using the following doi: 10.5281/zenodo.3872730

\r +

An extensive list of references for the tools used by the pipeline can be found in the CITATIONS.md file.

\r +

You can cite the nf-core publication as follows:

\r +
\r +

The nf-core framework for community-curated bioinformatics pipelines.

\r +

Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.

\r +

Nat Biotechnol. 2020 Feb 13. doi: 10.1038/s41587-020-0439-x.
\r +ReadCube: Full Access Link

\r +
\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/19?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/viralrecon/blob/master/main.nf" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/viralrecon" ; + schema1:sdDatePublished "2024-08-05 10:33:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/19/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 122308 ; + schema1:creator ; + schema1:dateCreated "2020-05-14T14:10:58Z" ; + schema1:dateModified "2023-01-16T13:41:25Z" ; + schema1:description """\r +

+nf-core/viralrecon +

\r +

GitHub Actions CI Status GitHub Actions Linting Status Nextflow install with bioconda

\r +

Docker DOI

\r +

nf-core/viralrecon is a bioinformatics analysis pipeline used to perform assembly and intrahost/low-frequency variant calling for viral samples. The pipeline currently supports metagenomics and amplicon sequencing data derived from the Illumina sequencing platform.

\r +

This pipeline is a re-implementation of the SARS_Cov2_consensus-nf and SARS_Cov2_assembly-nf pipelines initially developed by Sarai Varona and Sara Monzon from BU-ISCIII. Porting both of these pipelines to nf-core was an international collaboration between numerous contributors and developers, led by Harshil Patel from The Bioinformatics & Biostatistics Group at The Francis Crick Institute, London. We appreciated the need to have a portable, reproducible and scalable pipeline for the analysis of COVID-19 sequencing samples and so the Avengers Assembled! Please come and join us and add yourself to the contributor list :)

\r +

We have integrated a number of options in the pipeline to allow you to run specific aspects of the workflow if you so wish. For example, you can skip all of the assembly steps with the --skip_assembly parameter. See usage docs for all of the available options when running the pipeline.

\r +

Please click here to see an example MultiQC report generated using the parameters defined in this configuration file to run the pipeline on samples which were prepared from the ncov-2019 ARTIC Network V1 amplicon set and sequenced on the Illumina MiSeq platform in 301bp paired-end format.

\r +

The pipeline is built using Nextflow, a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with docker containers making installation trivial and results highly reproducible. Furthermore, automated continuous integration tests to run the pipeline on a full-sized dataset are passing on AWS cloud.

\r +

+Pipeline summary

\r +
    \r +
  1. Download samples via SRA, ENA or GEO ids (ENA FTP, parallel-fastq-dump; if required)
  2. \r +
  3. Merge re-sequenced FastQ files (cat; if required)
  4. \r +
  5. Read QC (FastQC)
  6. \r +
  7. Adapter trimming (fastp)
  8. \r +
  9. Variant calling
    \r +i. Read alignment (Bowtie 2)
    \r +ii. Sort and index alignments (SAMtools)
    \r +iii. Primer sequence removal (iVar; amplicon data only)
    \r +iv. Duplicate read marking (picard; removal optional)
    \r +v. Alignment-level QC (picard, SAMtools)
    \r +vi. Choice of multiple variant calling and consensus sequence generation routes (VarScan 2, BCFTools, BEDTools || iVar variants and consensus || BCFTools, BEDTools)
    \r +- Variant annotation (SnpEff, SnpSift)
    \r +- Consensus assessment report (QUAST)
  10. \r +
  11. +De novo assembly
    \r +i. Primer trimming (Cutadapt; amplicon data only)
    \r +ii. Removal of host reads (Kraken 2)
    \r +iii. Choice of multiple assembly tools (SPAdes || metaSPAdes || Unicycler || minia)
    \r +- Blast to reference genome (blastn)
    \r +- Contiguate assembly (ABACAS)
    \r +- Assembly report (PlasmidID)
    \r +- Assembly assessment report (QUAST)
    \r +- Call variants relative to reference (Minimap2, seqwish, vg, Bandage)
    \r +- Variant annotation (SnpEff, SnpSift)
  12. \r +
  13. Present QC and visualisation for raw read, alignment, assembly and variant calling results (MultiQC)
  14. \r +
\r +

+Quick Start

\r +

i. Install nextflow

\r +

ii. Install either Docker or Singularity for full pipeline reproducibility (please only use Conda as a last resort; see docs)

\r +

iii. Download the pipeline and test it on a minimal dataset with a single command

\r +
nextflow run nf-core/viralrecon -profile test,<docker/singularity/conda/institute>\r
+
\r +
\r +

Please check nf-core/configs to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use -profile <institute> in your command. This will enable either docker or singularity and set the appropriate execution settings for your local compute environment.

\r +
\r +

iv. Start running your own analysis!

\r +
nextflow run nf-core/viralrecon -profile <docker/singularity/conda/institute> --input samplesheet.csv --genome 'NC_045512.2' -profile docker\r
+
\r +

See usage docs for all of the available options when running the pipeline.

\r +

+Documentation

\r +

The nf-core/viralrecon pipeline comes with documentation about the pipeline, found in the docs/ directory:

\r +
    \r +
  1. Installation
  2. \r +
  3. Pipeline configuration\r +\r +
  4. \r +
  5. Running the pipeline
  6. \r +
  7. Output and how to interpret the results
  8. \r +
  9. Troubleshooting
  10. \r +
\r +

+Credits

\r +

These scripts were originally written by Sarai Varona, Miguel Juliá and Sara Monzon from BU-ISCIII and co-ordinated by Isabel Cuesta for the Institute of Health Carlos III, Spain. Through collaboration with the nf-core community the pipeline has now been updated substantially to include additional processing steps, to standardise inputs/outputs and to improve pipeline reporting; implemented primarily by Harshil Patel from The Bioinformatics & Biostatistics Group at The Francis Crick Institute, London.

\r +

Many thanks to others who have helped out and contributed along the way too, including (but not limited to):

\r +\r +\r +\r +Name\r +Affiliation\r +\r +\r +\r +\r +Alexander Peltzer\r +Boehringer Ingelheim, Germany\r +\r +\r +Alison Meynert\r +University of Edinburgh, Scotland\r +\r +\r +Edgar Garriga Nogales\r +Centre for Genomic Regulation, Spain\r +\r +\r +Erik Garrison\r +UCSC, USA\r +\r +\r +Gisela Gabernet\r +QBiC, University of Tübingen, Germany\r +\r +\r +Joao Curado\r +Flomics Biotech, Spain\r +\r +\r +Jose Espinosa-Carrasco\r +Centre for Genomic Regulation, Spain\r +\r +\r +Katrin Sameith\r +DRESDEN-concept Genome Center, Germany\r +\r +\r +Lluc Cabus\r +Flomics Biotech, Spain\r +\r +\r +Marta Pozuelo\r +Flomics Biotech, Spain\r +\r +\r +Maxime Garcia\r +SciLifeLab, Sweden\r +\r +\r +Michael Heuer\r +UC Berkeley, USA\r +\r +\r +Phil Ewels\r +SciLifeLab, Sweden\r +\r +\r +Simon Heumos\r +QBiC, University of Tübingen, Germany\r +\r +\r +Stephen Kelly\r +Memorial Sloan Kettering Cancer Center, USA\r +\r +\r +Thanh Le Viet\r +Quadram Institute, UK\r +\r +\r +\r +
\r +

Listed in alphabetical order

\r +
\r +

+Contributions and Support

\r +

If you would like to contribute to this pipeline, please see the contributing guidelines.

\r +

For further information or help, don’t hesitate to get in touch on Slack (you can join with this invite).

\r +

+Citation

\r +

If you use nf-core/viralrecon for your analysis, please cite it using the following doi: 10.5281/zenodo.3872730

\r +

An extensive list of references for the tools used by the pipeline can be found in the CITATIONS.md file.

\r +

You can cite the nf-core publication as follows:

\r +
\r +

The nf-core framework for community-curated bioinformatics pipelines.

\r +

Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.

\r +

Nat Biotechnol. 2020 Feb 13. doi: 10.1038/s41587-020-0439-x.
\r +ReadCube: Full Access Link

\r +
\r +""" ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/viralrecon" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/19?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Taxonomic classification and profiling of shotgun metagenomic data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1025?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/taxprofiler" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/taxprofiler" ; + schema1:sdDatePublished "2024-08-05 10:23:08 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1025/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12656 ; + schema1:creator , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:19Z" ; + schema1:dateModified "2024-06-11T12:55:19Z" ; + schema1:description "Taxonomic classification and profiling of shotgun metagenomic data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1025?version=10" ; + schema1:keywords "Classification, illumina, long-reads, Metagenomics, microbiome, nanopore, pathogen, Profiling, shotgun, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/taxprofiler" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1025?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + ; + schema1:description " A version of V-pipe (analysis of next generation sequencing (NGS) data from viral pathogens) specifically adapted to analyze high-throughput sequencing data of SARS-CoV-2. " ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/11?version=1" ; + schema1:isBasedOn "https://github.com/cbg-ethz/V-pipe.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for (old) SARS-COV2 version of the V-Pipe workflow" ; + schema1:sdDatePublished "2024-08-05 10:32:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/11/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 99939 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 726 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2020-04-10T13:46:55Z" ; + schema1:dateModified "2023-01-16T13:40:31Z" ; + schema1:description " A version of V-pipe (analysis of next generation sequencing (NGS) data from viral pathogens) specifically adapted to analyze high-throughput sequencing data of SARS-CoV-2. " ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "(old) SARS-COV2 version of the V-Pipe workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/11?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2022-02-04T11:28:57.510371" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-consensus-from-variation" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-consensus-from-variation/COVID-19-CONSENSUS-CONSTRUCTION" . 
+ + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Snakemake workflow: dna-seq-varlociraptor\r +\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥6.3.0-brightgreen.svg)](https://snakemake.github.io)\r +[![GitHub actions status](https://github.com/snakemake-workflows/dna-seq-varlociraptor/workflows/Tests/badge.svg?branch=master)](https://github.com/snakemake-workflows/dna-seq-varlociraptor/actions?query=branch%3Amaster+workflow%3ATests)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.4675661.svg)](https://doi.org/10.5281/zenodo.4675661)\r +\r +\r +A Snakemake workflow for calling small and structural variants under any kind of scenario (tumor/normal, tumor/normal/relapse, germline, pedigree, populations) via the unified statistical model of [Varlociraptor](https://varlociraptor.github.io).\r +\r +\r +## Usage\r +\r +The usage of this workflow is described in the [Snakemake Workflow Catalog](https://snakemake.github.io/snakemake-workflow-catalog/?usage=snakemake-workflows%2Fdna-seq-varlociraptor).\r +\r +If you use this workflow in a paper, don't forget to give credits to the authors by citing the URL of this (original) repository and its DOI (see above).\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/686?version=2" ; + schema1:isBasedOn "https://github.com/snakemake-workflows/dna-seq-varlociraptor" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for dna-seq-varlociraptor" ; + schema1:sdDatePublished "2024-08-05 10:26:09 +0100" ; + schema1:url "https://workflowhub.eu/workflows/686/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1287 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-12-14T08:17:10Z" ; + schema1:dateModified "2023-12-14T08:17:10Z" ; + schema1:description """# Snakemake workflow: dna-seq-varlociraptor\r +\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥6.3.0-brightgreen.svg)](https://snakemake.github.io)\r +[![GitHub actions status](https://github.com/snakemake-workflows/dna-seq-varlociraptor/workflows/Tests/badge.svg?branch=master)](https://github.com/snakemake-workflows/dna-seq-varlociraptor/actions?query=branch%3Amaster+workflow%3ATests)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.4675661.svg)](https://doi.org/10.5281/zenodo.4675661)\r +\r +\r +A Snakemake workflow for calling small and structural variants under any kind of scenario (tumor/normal, tumor/normal/relapse, germline, pedigree, populations) via the unified statistical model of [Varlociraptor](https://varlociraptor.github.io).\r +\r +\r +## Usage\r +\r +The usage of this workflow is described in the [Snakemake Workflow Catalog](https://snakemake.github.io/snakemake-workflow-catalog/?usage=snakemake-workflows%2Fdna-seq-varlociraptor).\r +\r +If you use this workflow in a paper, don't forget to give credits to the authors by citing the URL of this (original) repository and its DOI (see above).\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/686?version=1" ; + schema1:keywords "Bioinformatics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "dna-seq-varlociraptor" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/686?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-01-16T18:24:17.024790" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/rnaseq-sr" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "rnaseq-sr/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.3" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Summary\r +\r +Common Workflow Language example that illustrate the process of setting up a simulation system containing a protein, step by step, using the BioExcel Building Blocks library (biobb). The particular example used is the Lysozyme protein (PDB code 1AKI).\r +\r +# Additional Links\r +\r +* [View Tutorial](http://mmb.irbbarcelona.org/biobb/workflows/tutorials/md_setup)\r +\r +* [View in Binder](https://mybinder.org/v2/gh/bioexcel/biobb_wf_md_setup/master?filepath=biobb_wf_md_setup%2Fnotebooks%2Fbiobb_MDsetup_tutorial.ipynb)\r +\r +* [GitHub Repository]( https://github.com/bioexcel/biobb-wf-md-setup-protein-cwl)\r +\r +* [Documentation](https://biobb-wf-md-setup.readthedocs.io/en/latest/index.html)""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/29?version=1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb-wf-md-setup-protein-cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Protein MD Setup tutorial using BioExcel Building Blocks (biobb)" ; + schema1:sdDatePublished "2024-08-05 10:32:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/29/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10361 ; + schema1:creator ; + schema1:dateCreated "2020-06-16T08:50:11Z" ; + schema1:dateModified "2021-05-07T15:18:57Z" ; + schema1:description """# Summary\r +\r +Common Workflow Language example that illustrate the process of setting up a simulation system containing a protein, step by step, using the BioExcel Building Blocks library (biobb). The particular example used is the Lysozyme protein (PDB code 1AKI).\r +\r +# Additional Links\r +\r +* [View Tutorial](http://mmb.irbbarcelona.org/biobb/workflows/tutorials/md_setup)\r +\r +* [View in Binder](https://mybinder.org/v2/gh/bioexcel/biobb_wf_md_setup/master?filepath=biobb_wf_md_setup%2Fnotebooks%2Fbiobb_MDsetup_tutorial.ipynb)\r +\r +* [GitHub Repository]( https://github.com/bioexcel/biobb-wf-md-setup-protein-cwl)\r +\r +* [Documentation](https://biobb-wf-md-setup.readthedocs.io/en/latest/index.html)""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/29?version=2" ; + schema1:isPartOf , + ; + schema1:keywords "molecular dynamics, trajectories, protein" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Protein MD Setup tutorial using BioExcel Building Blocks (biobb)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/29?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 60267 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-08-05 10:22:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=9" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9921 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:02Z" ; + schema1:dateModified "2024-06-11T12:55:02Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-08-05 10:22:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6500 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:01Z" ; + schema1:dateModified "2024-06-11T12:55:01Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + schema1:datePublished "2024-05-27T12:41:08.869616" ; + schema1:hasPart , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/qiime2-II-denoising" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + ; + schema1:name "qiime2-II-denoising/IIa-denoising-se" . + + a schema1:Person ; + schema1:name "Debjyoti Ghosh" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "qiime2-II-denoising/IIb-denoising-pe" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/qiime2-II-denoising" ; + schema1:version "0.2" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """Trim and filter reads; can run alone or as part of a combined workflow for large genome assembly. \r +\r +* What it does: Trims and filters raw sequence reads according to specified settings. \r +* Inputs: Long reads (format fastq); Short reads R1 and R2 (format fastq) \r +* Outputs: Trimmed and filtered reads: fastp_filtered_long_reads.fastq.gz (But note: no trimming or filtering is on by default), fastp_filtered_R1.fastq.gz, fastp_filtered_R2.fastq.gz\r +* Reports: fastp report on long reads, html; fastp report on short reads, html\r +* Tools used: fastp (Note. The latest version (0.20.1) of fastp has an issue displaying plot results. Using version 0.19.5 here instead until this is rectified). \r +* Input parameters: None required, but recommend removing the long reads from the workflow if not using any trimming/filtering settings. \r +\r +Workflow steps:\r +\r +Long reads: fastp settings: \r +* These settings have been changed from the defaults (so that all filtering and trimming settings are now disabled). \r +* Adapter trimming options: Disable adapter trimming: yes\r +* Filter options: Quality filtering options: Disable quality filtering: yes\r +* Filter options: Length filtering options: Disable length filtering: yes\r +* Read modification options: PolyG tail trimming: Disable\r +* Output options: output JSON report: yes\r +\r +Short reads: fastp settings:\r +* adapter trimming (default setting: adapters are auto-detected)\r +* quality filtering (default: phred quality 15), unqualified bases limit (default = 40%), number of Ns allowed in a read (default = 5)\r +* length filtering (default length = min 15)\r +* polyG tail trimming (default = on for NextSeq/NovaSeq data which is auto detected)\r +* Output options: output JSON report: yes\r +\r +Options:\r +* Change any settings in fastp for any of the input reads. 
\r +* Adapter trimming: input the actual adapter sequences. (Alternative tool for long read adapter trimming: Porechop.) \r +* Trimming n bases from ends of reads if quality less than value x (Alternative tool for trimming long reads: NanoFilt.)\r +* Discard post-trimmed reads if length is < x (e.g. for long reads, 1000 bp)\r +* Example filtering/trimming that you might do on long reads: remove adapters (can also be done with Porechop), trim bases from ends of the reads with low quality (can also be done with NanoFilt), after this can keep only reads of length x (e.g. 1000 bp) \r +\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.224.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Trim and filter reads - fastp" ; + schema1:sdDatePublished "2024-08-05 10:32:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/224/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14680 ; + schema1:creator ; + schema1:dateCreated "2021-11-08T04:56:09Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """Trim and filter reads; can run alone or as part of a combined workflow for large genome assembly. \r +\r +* What it does: Trims and filters raw sequence reads according to specified settings. \r +* Inputs: Long reads (format fastq); Short reads R1 and R2 (format fastq) \r +* Outputs: Trimmed and filtered reads: fastp_filtered_long_reads.fastq.gz (But note: no trimming or filtering is on by default), fastp_filtered_R1.fastq.gz, fastp_filtered_R2.fastq.gz\r +* Reports: fastp report on long reads, html; fastp report on short reads, html\r +* Tools used: fastp (Note. The latest version (0.20.1) of fastp has an issue displaying plot results. 
Using version 0.19.5 here instead until this is rectified). \r +* Input parameters: None required, but recommend removing the long reads from the workflow if not using any trimming/filtering settings. \r +\r +Workflow steps:\r +\r +Long reads: fastp settings: \r +* These settings have been changed from the defaults (so that all filtering and trimming settings are now disabled). \r +* Adapter trimming options: Disable adapter trimming: yes\r +* Filter options: Quality filtering options: Disable quality filtering: yes\r +* Filter options: Length filtering options: Disable length filtering: yes\r +* Read modification options: PolyG tail trimming: Disable\r +* Output options: output JSON report: yes\r +\r +Short reads: fastp settings:\r +* adapter trimming (default setting: adapters are auto-detected)\r +* quality filtering (default: phred quality 15), unqualified bases limit (default = 40%), number of Ns allowed in a read (default = 5)\r +* length filtering (default length = min 15)\r +* polyG tail trimming (default = on for NextSeq/NovaSeq data which is auto detected)\r +* Output options: output JSON report: yes\r +\r +Options:\r +* Change any settings in fastp for any of the input reads. \r +* Adapter trimming: input the actual adapter sequences. (Alternative tool for long read adapter trimming: Porechop.) \r +* Trimming n bases from ends of reads if quality less than value x (Alternative tool for trimming long reads: NanoFilt.)\r +* Discard post-trimmed reads if length is < x (e.g. for long reads, 1000 bp)\r +* Example filtering/trimming that you might do on long reads: remove adapters (can also be done with Porechop), trim bases from ends of the reads with low quality (can also be done with NanoFilt), after this can keep only reads of length x (e.g. 
1000 bp) \r +\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "Large-genome-assembly" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Trim and filter reads - fastp" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/224?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 282014 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A pipeline to demultiplex, QC and map Nanopore data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1002?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/nanoseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nanoseq" ; + schema1:sdDatePublished "2024-08-05 10:23:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1002/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8657 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:05Z" ; + schema1:dateModified "2024-06-11T12:55:05Z" ; + schema1:description "A pipeline to demultiplex, QC and map Nanopore data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1002?version=5" ; + schema1:keywords "Alignment, demultiplexing, nanopore, QC" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nanoseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1002?version=3" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:description """### Workflow Kallisto RNAseq \r +**(pseudoalignment on transcripts)**\r + - Workflow Illumina Quality: https://workflowhub.eu/workflows/336?version=1 \r + - kallisto\r +\r +**All tool CWL files and other workflows can be found here:**
\r + Tools: https://git.wur.nl/unlock/cwl/-/tree/master/cwl
\r + Workflows: https://git.wur.nl/unlock/cwl/-/tree/master/cwl/workflows\r +\r +**How to setup and use an UNLOCK workflow:**
\r +https://m-unlock.gitlab.io/docs/setup/setup.html\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/344?version=1" ; + schema1:isBasedOn "https://gitlab.com/m-unlock/cwl/-/blob/master/cwl/workflows/workflow_RNAseq_kallisto.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Kallisto RNAseq Workflow" ; + schema1:sdDatePublished "2024-08-05 10:31:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/344/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 16611 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4610 ; + schema1:creator , + ; + schema1:dateCreated "2022-05-20T10:10:26Z" ; + schema1:dateModified "2023-01-16T13:59:53Z" ; + schema1:description """### Workflow Kallisto RNAseq \r +**(pseudoalignment on transcripts)**\r + - Workflow Illumina Quality: https://workflowhub.eu/workflows/336?version=1 \r + - kallisto\r +\r +**All tool CWL files and other workflows can be found here:**
\r + Tools: https://git.wur.nl/unlock/cwl/-/tree/master/cwl
\r + Workflows: https://git.wur.nl/unlock/cwl/-/tree/master/cwl/workflows\r +\r +**How to setup and use an UNLOCK workflow:**
\r +https://m-unlock.gitlab.io/docs/setup/setup.html\r +\r +""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Kallisto RNAseq Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/344?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + ; + ns1:output , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "MetaDEGalaxy: Galaxy workflow for differential abundance analysis of 16s metagenomic data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/232?version=1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for 16S_biodiversity_for_overlap_paired_end" ; + schema1:sdDatePublished "2024-08-05 10:32:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/232/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 62961 ; + schema1:dateCreated "2021-11-10T00:15:14Z" ; + schema1:dateModified "2024-04-17T04:19:28Z" ; + schema1:description "MetaDEGalaxy: Galaxy workflow for differential abundance analysis of 16s metagenomic data" ; + schema1:isPartOf ; + schema1:keywords "MetaDEGalaxy" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "16S_biodiversity_for_overlap_paired_end" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/232?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description "This is a Galaxy workflow for performing molecular dynamics simulations and analysis with coronavirus helicases in the Apo or unbound state. The associated input files can be found at: https://zenodo.org/records/7492987. The associated output files can be found at: https://zenodo.org/records/7851000." ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.764.1" ; + schema1:isBasedOn "https://zenodo.org/records/7492987" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for coronavirushelicase_apo" ; + schema1:sdDatePublished "2024-08-05 10:25:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/764/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 75856 ; + schema1:creator ; + schema1:dateCreated "2024-02-16T19:18:45Z" ; + schema1:dateModified "2024-03-02T16:49:26Z" ; + schema1:description "This is a Galaxy workflow for performing molecular dynamics simulations and analysis with coronavirus helicases in the Apo or unbound state. The associated input files can be found at: https://zenodo.org/records/7492987. The associated output files can be found at: https://zenodo.org/records/7851000." 
; + schema1:keywords "covid-19, SARS-CoV-2, covid19.galaxyproject.org, NSP13, helicase, coronavirus, rna virus, MERS, molecular dynamics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "coronavirushelicase_apo" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/764?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=10" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9386 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:12Z" ; + schema1:dateModified "2024-06-11T12:55:12Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=10" ; + schema1:version 10 . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-04-29T08:35:24.526954" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-sr/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/963?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/airrflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/airrflow" ; + schema1:sdDatePublished "2024-08-05 10:24:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/963/ro_crate?version=8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9868 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:37Z" ; + schema1:dateModified "2024-06-11T12:54:37Z" ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/963?version=13" ; + schema1:keywords "airr, b-cell, immcantation, immunorepertoire, repseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/airrflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/963?version=8" ; + schema1:version 8 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=12" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-08-05 10:23:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=12" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10982 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:50Z" ; + schema1:dateModified "2024-06-11T12:54:50Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=12" ; + schema1:version 12 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. 
\r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +****\r +\r +About this workflow:\r +\r +* Inputs: multiple transcriptome.gtfs from different tissues, genome.fasta, coding_seqs.fasta, non_coding_seqs.fasta \r +* Runs StringTie merge to combine transcriptomes, with default settings except for -m = 30 and -F = 0.1, to produce a merged_transcriptomes.gtf. \r +* Runs Convert GTF to BED12 with default settings, to produce a merged_transcriptomes.bed. \r +* Runs bedtools getfasta with default settings except for -name = yes, -s = yes, -split - yes, to produce a merged_transcriptomes.fasta\r +* Runs CPAT to generate seqs with high coding probability. \r +* Filters out non-coding seqs from the merged_transcriptomes.fasta\r +* Output: filtered_merged_transcriptomes.fasta""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.878.1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Combine transcripts - TSI" ; + schema1:sdDatePublished "2024-08-05 10:24:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/878/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 22498 ; + schema1:creator , + ; + schema1:dateCreated "2024-05-08T07:07:37Z" ; + schema1:dateModified "2024-05-09T04:06:49Z" ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. 
\r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +****\r +\r +About this workflow:\r +\r +* Inputs: multiple transcriptome.gtfs from different tissues, genome.fasta, coding_seqs.fasta, non_coding_seqs.fasta \r +* Runs StringTie merge to combine transcriptomes, with default settings except for -m = 30 and -F = 0.1, to produce a merged_transcriptomes.gtf. \r +* Runs Convert GTF to BED12 with default settings, to produce a merged_transcriptomes.bed. \r +* Runs bedtools getfasta with default settings except for -name = yes, -s = yes, -split - yes, to produce a merged_transcriptomes.fasta\r +* Runs CPAT to generate seqs with high coding probability. \r +* Filters out non-coding seqs from the merged_transcriptomes.fasta\r +* Output: filtered_merged_transcriptomes.fasta""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "TSI-annotation" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Combine transcripts - TSI" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/878?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 641824 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "The workflow takes a trimmed HiFi reads collection, Hap1/Hap2 contigs, and the values for transition parameter and max coverage depth (calculated from WF1) to run Purge_Dups. It produces purged Hap1 and Hap2 contigs assemblies, and runs all the QC analysis (gfastats, BUSCO, and Merqury)." 
; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.606.2" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/galaxy" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ERGA HiFi Hap1Hap2 Purge+QC v2309 (WF3)" ; + schema1:sdDatePublished "2024-08-05 10:26:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/606/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 73154 ; + schema1:creator , + ; + schema1:dateCreated "2024-01-02T13:04:04Z" ; + schema1:dateModified "2024-03-13T09:53:44Z" ; + schema1:description "The workflow takes a trimmed HiFi reads collection, Hap1/Hap2 contigs, and the values for transition parameter and max coverage depth (calculated from WF1) to run Purge_Dups. It produces purged Hap1 and Hap2 contigs assemblies, and runs all the QC analysis (gfastats, BUSCO, and Merqury)." ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/606?version=1" ; + schema1:isPartOf ; + schema1:keywords "ERGA, Assembly+QC, HiFi" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ERGA HiFi Hap1Hap2 Purge+QC v2309 (WF3)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/3.Purging/Galaxy-Workflow-ERGA_HiFi_Hap1Hap2_Purge_QC_v2310_(WF3).ga" ; + schema1:version 2 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 235117 ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/3.Purging/pics/Purge_hifi_2309.png" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Predict variants and drug resistance from M. 
tuberculosis sequence samples (Illumina)" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1035?version=1" ; + schema1:license "AGPL-3.0-or-later" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for TB Variant Analysis v1.0" ; + schema1:sdDatePublished "2024-08-05 10:22:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1035/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 46378 ; + schema1:creator ; + schema1:dateCreated "2024-06-07T19:07:04Z" ; + schema1:dateModified "2024-06-12T13:38:29Z" ; + schema1:description "Predict variants and drug resistance from M. tuberculosis sequence samples (Illumina)" ; + schema1:keywords "pathogen, tuberculosis" ; + schema1:license "https://spdx.org/licenses/AGPL-3.0-or-later" ; + schema1:name "TB Variant Analysis v1.0" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1035?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1027?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/viralrecon" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/viralrecon" ; + schema1:sdDatePublished "2024-08-05 10:23:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1027/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9965 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:20Z" ; + schema1:dateModified "2024-06-11T12:55:20Z" ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1027?version=10" ; + schema1:keywords "Amplicon, ARTIC, Assembly, covid-19, covid19, illumina, long-read-sequencing, Metagenomics, nanopore, ONT, oxford-nanopore, SARS-CoV-2, variant-calling, viral, Virus" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/viralrecon" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1027?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-08-05 10:23:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7318 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:58Z" ; + schema1:dateModified "2024-06-11T12:54:58Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=22" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-08-05 10:23:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=22" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 18600 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-07-05T03:02:52Z" ; + schema1:dateModified "2024-07-05T03:02:52Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=22" ; + schema1:version 22 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:description "Analysis of variation within individual COVID-19 samples using Illumina Single End data. More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/8?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genomics - SE Variation" ; + schema1:sdDatePublished "2024-08-05 10:33:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/8/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 6391 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 33953 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T12:54:10Z" ; + schema1:dateModified "2023-01-16T13:40:12Z" ; + schema1:description "Analysis of variation within individual COVID-19 samples using Illumina Single End data. More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Genomics - SE Variation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/8?version=1" ; + schema1:version 1 ; + ns1:input , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 11562 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=15" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-08-05 10:22:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=15" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10491 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:39Z" ; + schema1:dateModified "2024-06-11T12:54:39Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=15" ; + schema1:version 15 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:description """Workflow for sequencing with ONT nanopore, from basecalling to assembly quality.\r +Steps:\r + - Kraken2 (taxonomic classification of FASTQ reads)\r + - Krona (classification visualization)\r + - Flye (de novo assembly)\r + - Medaka (assembly polishing)\r + - QUAST (assembly quality reports)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/254?version=2" ; + schema1:isBasedOn "https://git.wur.nl/unlock/cwl/-/blob/master/cwl/workflows/workflow_nanopore_assembly.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nanopore workflow with direct FASTQ reads inputs (without Guppy)" ; + schema1:sdDatePublished "2024-08-05 10:31:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/254/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 26748 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7733 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-01-07T09:32:14Z" ; + schema1:dateModified "2022-04-20T09:12:00Z" ; + schema1:description """Workflow for sequencing with ONT nanopore, from basecalling to assembly quality.\r +Steps:\r + - Kraken2 (taxonomic classification of FASTQ reads)\r + - Krona (classification visualization)\r + - Flye (de novo assembly)\r + - Medaka (assembly polishing)\r + - QUAST (assembly quality reports)\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/254?version=2" ; + schema1:keywords "nanopore, Genomics, Metagenomics" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "nanopore workflow with direct FASTQ reads inputs (without Guppy)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/254?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + ; + ns1:output , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=14" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-08-05 10:23:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=14" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11179 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:50Z" ; + schema1:dateModified "2024-06-11T12:54:50Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=14" ; + schema1:version 14 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Generates Dose-response curve fits on cell-based toxicity data. Outliers of replicate data-sets can be removed by setting a threshold for standard deviation (here set to 25). Curve fits for compounds showing low response can be removed by setting a threshold for minimum activity (here set to 75% confluence).\r +This workflow needs R-Server to run in the back-end. Start R and run the following command: library(Rserve); Rserve(args = "--vanilla")""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/387?version=1" ; + schema1:isBasedOn "https://hub.knime.com/fraunhoferitmp/spaces/Public/latest/Dose_Response_Cell-based-Assay/DRC_template_toxicity~pmQpY43FY6lczWF8" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for DRC_template_toxicity" ; + schema1:sdDatePublished "2024-08-05 10:31:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/387/ro_crate?version=1" . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 48768 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 681435 ; + schema1:creator ; + schema1:dateCreated "2022-09-26T09:15:13Z" ; + schema1:dateModified "2023-01-16T14:02:31Z" ; + schema1:description """Generates Dose-response curve fits on cell-based toxicity data. Outliers of replicate data-sets can be removed by setting a threshold for standard deviation (here set to 25). Curve fits for compounds showing low response can be removed by setting a threshold for minimum activity (here set to 75% confluence).\r +This workflow needs R-Server to run in the back-end. Start R and run the following command: library(Rserve); Rserve(args = "--vanilla")""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "DRC_template_toxicity" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/387?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.127.6" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (Cluster90)" ; + schema1:sdDatePublished "2024-08-05 10:30:58 
+0100" ; + schema1:url "https://workflowhub.eu/workflows/127/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 39259 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-03-04T14:30:29Z" ; + schema1:dateModified "2024-05-14T10:11:15Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in 
Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/127?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (Cluster90)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_virtual-screening/blob/main/biobb_wf_virtual-screening/notebooks/clusterBindingSite/wf_vs_clusterBindingSite.ipynb" ; + schema1:version 6 . + + a schema1:Dataset ; + schema1:datePublished "2024-04-09T08:37:08.173460" ; + schema1:hasPart , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/fastq-to-matrix-10x" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + ; + schema1:name "fastq-to-matrix-10x/scrna-seq-fastq-to-matrix-10x-cellplex" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + , + ; + schema1:name "fastq-to-matrix-10x/scrna-seq-fastq-to-matrix-10x-v3" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/fastq-to-matrix-10x" ; + schema1:version "0.4" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Generic variant calling\r +\r +\r +A generic workflow for identification of variants in a haploid genome such as genomes of bacteria or viruses. It can be readily used on MonkeyPox. The workflow accepts two inputs:\r +\r +- A genbank file with the reference genomes\r +- A collection of paired fastqsanger files\r +\r +The workflow outputs a collection of VCF files for each sample (each fastq pair). These VCF files serve as input to the [Reporting workflow](https://workflowhub.eu/workflows/354). \r +\r +Workflow can be accessed directly on [usegalaxy.org](https://usegalaxy.org/u/aun1/w/generic-variation-analysis-on-wgs-pe-data)\r +\r +The general idea of the workflow is:\r +\r +![](https://i.imgur.com/rk40Y4t.png)""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/353?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Generic variation analysis on WGS PE data" ; + schema1:sdDatePublished "2024-08-05 10:32:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/353/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 38255 ; + schema1:creator ; + schema1:dateCreated "2022-06-01T12:46:37Z" ; + schema1:dateModified "2023-02-13T14:06:44Z" ; + schema1:description """# Generic variant calling\r +\r +\r +A generic workflow for identification of variants in a haploid genome such as genomes of bacteria or viruses. It can be readily used on MonkeyPox. The workflow accepts two inputs:\r +\r +- A genbank file with the reference genomes\r +- A collection of paired fastqsanger files\r +\r +The workflow outputs a collection of VCF files for each sample (each fastq pair). 
These VCF files serve as input to the [Reporting workflow](https://workflowhub.eu/workflows/354). \r +\r +Workflow can be accessed directly on [usegalaxy.org](https://usegalaxy.org/u/aun1/w/generic-variation-analysis-on-wgs-pe-data)\r +\r +The general idea of the workflow is:\r +\r +![](https://i.imgur.com/rk40Y4t.png)""" ; + schema1:keywords "mpxv, generic" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Generic variation analysis on WGS PE data" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/353?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-08-05 10:22:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3868 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:01Z" ; + schema1:dateModified "2024-06-11T12:55:01Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=13" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-08-05 10:22:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=13" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11292 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:02Z" ; + schema1:dateModified "2024-06-11T12:55:02Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=13" ; + schema1:version 13 . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 337112 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.0.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.0.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.0.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.0.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.0.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.0.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.0.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.0.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.1.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.1.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.1.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.1.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.1.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.1.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.1.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.1.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.2.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.2.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.2.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.2.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.2.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.2.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.2.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.2.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.3.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.3.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.3.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.3.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.3.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.3.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.3.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.3.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.4.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.4.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.4.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.4.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.4.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.4.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.4.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.4.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.5.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.5.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.5.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.5.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.5.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.5.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.5.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.5.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.6.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.6.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.6.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.6.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.6.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.6.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.6.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.6.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.7.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.7.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.7.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.7.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.7.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.7.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.7.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "A.7.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.0.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.0.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.0.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.0.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.0.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.0.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.0.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.0.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.1.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.1.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.1.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.1.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.1.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.1.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.1.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.1.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.2.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.2.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.2.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.2.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.2.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.2.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.2.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.2.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.3.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.3.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.3.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.3.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.3.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.3.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.3.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.3.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.4.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.4.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.4.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.4.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.4.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.4.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.4.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.4.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.5.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.5.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.5.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.5.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.5.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.5.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.5.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.5.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.6.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.6.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.6.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.6.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.6.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.6.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.6.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.6.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.7.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.7.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.7.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.7.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.7.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.7.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.7.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16384 ; + schema1:dateModified "2023-05-30T08:47:37" ; + schema1:name "B.7.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Post-genome assembly quality control workflow using Quast, BUSCO, Meryl, Merqury and Fasta Statistics" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.403.2" ; + schema1:isBasedOn "https://github.com/AustralianBioCommons/Genome-assessment-post-assembly.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genome assessment post assembly v1.1" ; + schema1:sdDatePublished "2024-08-05 10:22:13 +0100" ; + schema1:url "https://workflowhub.eu/workflows/403/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15117 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2023-05-09T00:59:04Z" ; + schema1:dateModified "2023-05-09T01:34:26Z" ; + schema1:description "Post-genome assembly quality control workflow using Quast, BUSCO, Meryl, Merqury and Fasta Statistics" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/403?version=3" ; + schema1:isPartOf , + ; + schema1:keywords "HiFi, hifiasm, QC, Quast, Meryl, Merqury, BUSCO" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Genome assessment post assembly v1.1" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/403?version=2" ; + schema1:version 2 ; + ns1:input , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# PacBio-HiFi-genome-assembly-using-hifiasm\r +\r +## General recommendations for using `PacBio-HiFi-genome-assembly-using-hifiasm`\r +Please see the [`Genome assembly with hifiasm on Galaxy Australia`](https://australianbiocommons.github.io/how-to-guides/genome_assembly/hifi_assembly) guide.\r +\r +## Acknowledgements\r +\r +The workflow & the [doc_guidelines template used](https://github.com/AustralianBioCommons/doc_guidelines) are \r +supported by the Australian BioCommons via Bioplatforms Australia funding, the Australian Research Data Commons \r +(https://doi.org/10.47486/PL105) and the Queensland Government RICF programme. Bioplatforms Australia and the \r +Australian Research Data Commons are enabled by the National Collaborative Research Infrastructure Strategy (NCRIS).\r +\r +\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/221?version=2" ; + schema1:isBasedOn "https://github.com/AustralianBioCommons/PacBio-HiFi-genome-assembly-using-hifiasm" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PacBio HiFi genome assembly using hifiasm v2.0" ; + schema1:sdDatePublished "2024-08-05 10:31:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/221/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 30627 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-09-20T00:59:58Z" ; + schema1:dateModified "2022-09-20T01:01:01Z" ; + schema1:description """# PacBio-HiFi-genome-assembly-using-hifiasm\r +\r +## General recommendations for using `PacBio-HiFi-genome-assembly-using-hifiasm`\r +Please see the [`Genome assembly with hifiasm on Galaxy Australia`](https://australianbiocommons.github.io/how-to-guides/genome_assembly/hifi_assembly) guide.\r +\r +## Acknowledgements\r +\r +The workflow & the [doc_guidelines template used](https://github.com/AustralianBioCommons/doc_guidelines) are \r +supported by the Australian BioCommons via Bioplatforms Australia funding, the Australian Research Data Commons \r +(https://doi.org/10.47486/PL105) and the Queensland Government RICF programme. Bioplatforms Australia and the \r +Australian Research Data Commons are enabled by the National Collaborative Research Infrastructure Strategy (NCRIS).\r +\r +\r +\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/221?version=2" ; + schema1:isPartOf , + ; + schema1:keywords "FASTQ, hifiasm, HiFi, genome_assembly" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "PacBio HiFi genome assembly using hifiasm v2.0" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/221?version=2" ; + schema1:version 2 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Just the cleaning then assembly of all reads. 
TO explore further follow one of the paths described in \"Global view\" (WF 0) " ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/103?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for 3: Plant virus exploration" ; + schema1:sdDatePublished "2024-08-05 10:31:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/103/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8538 ; + schema1:dateCreated "2021-02-04T09:11:37Z" ; + schema1:dateModified "2023-02-13T14:06:45Z" ; + schema1:description "Just the cleaning then assembly of all reads. TO explore further follow one of the paths described in \"Global view\" (WF 0) " ; + schema1:keywords "Virology, exploration, DE_NOVO" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "3: Plant virus exploration" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/103?version=1" ; + schema1:version 1 ; + ns1:output , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=28" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-08-05 10:24:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=28" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13588 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:51Z" ; + schema1:dateModified "2024-06-11T12:54:51Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=28" ; + schema1:version 28 . + + a schema1:Dataset ; + schema1:datePublished "2024-03-26T19:05:29.978485" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/assembly-with-flye" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "assembly-with-flye/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Abstract CWL Automatically generated from the Galaxy workflow file: Copy of Genome-wide alternative splicing analysis" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/483?version=1" ; + schema1:license "CC-BY-NC-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for StringTie workflow" ; + schema1:sdDatePublished "2024-08-05 10:30:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/483/ro_crate?version=1" . 
+ + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 5852 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 23236 ; + schema1:creator ; + schema1:dateCreated "2023-05-25T23:15:06Z" ; + schema1:dateModified "2023-05-25T23:15:06Z" ; + schema1:description "Abstract CWL Automatically generated from the Galaxy workflow file: Copy of Genome-wide alternative splicing analysis" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-NC-4.0" ; + schema1:name "StringTie workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/483?version=1" ; + schema1:version 1 ; + ns1:input , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 15124 . + + a schema1:Dataset ; + schema1:datePublished "2024-04-29T08:20:28.253806" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 320859 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 769 ; + schema1:dateModified "2024-03-21T11:54:38+00:00" ; + schema1:name "matmul_case1.csv" ; + schema1:sdDatePublished "2024-03-22T11:45:33+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 765 ; + schema1:dateModified "2023-11-28T00:25:59+00:00" ; + schema1:name "matmul_case2.csv" ; + schema1:sdDatePublished "2024-03-22T11:45:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 717 ; + schema1:dateModified "2023-11-28T19:49:11+00:00" ; + schema1:name "matmul_case3.csv" ; + schema1:sdDatePublished "2024-03-22T11:45:33+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 
[Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.128.5" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (PDBe REST API)" ; + schema1:sdDatePublished "2024-08-05 10:30:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/128/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 36817 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-03-04T14:32:45Z" ; + schema1:dateModified "2024-05-14T10:12:11Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/128?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (PDBe REST API)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_wf_virtual-screening/blob/main/biobb_wf_virtual-screening/notebooks/ebi_api/wf_vs_ebi_api.ipynb" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/965?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/atacseq" ; + schema1:sdDatePublished "2024-08-05 10:24:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/965/ro_crate?version=7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11439 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:42Z" ; + schema1:dateModified "2024-06-11T12:54:42Z" ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/965?version=8" ; + schema1:keywords "ATAC-seq, chromatin-accessibiity" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/atacseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/965?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + schema1:datePublished "2023-09-21T11:29:52.138601" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/repeatmasking" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "repeatmasking/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.12" . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-11-23T13:33:07.956730" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-decontamination-VGP9" ; + schema1:license "BSD-3-Clause" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-decontamination-VGP9/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# scRNA-Seq pipelines\r +\r +Here we forge the tools to analyze single cell RNA-Seq experiments. The analysis workflow is based on the Bioconductor packages [*scater*](https://bioconductor.org/packages/devel/bioc/vignettes/scater/inst/doc/overview.html) and [*scran*](https://bioconductor.org/packages/devel/bioc/vignettes/scran/inst/doc/scran.html) as well as the Bioconductor workflows by Lun ATL, McCarthy DJ, & Marioni JC [*A step-by-step workflow for low-level analysis of single-cell RNA-seq data.*](http://doi.org/10.12688/f1000research.9501.1) F1000Res. 2016 Aug 31 [revised 2016 Oct 31];5:2122 and Amezquita RA, Lun ATL et al. [*Orchestrating Single-Cell Analysis with Bioconductor*](https://osca.bioconductor.org/index.html) Nat Methods. 2020 Feb;17(2):137-145.\r +\r +## Implemented protocols\r + - MARS-Seq (massively parallel single-cell RNA-sequencing): The protocol is based on the publications of Jaitin DA, et al. (2014). *Massively parallel single-cell RNA-seq for marker-free decomposition of tissues into cell types.* Science (New York, N.Y.), 343(6172), 776–779. https://doi.org/10.1126/science.1247651 and Keren-Shaul H., et al. (2019). *MARS-seq2.0: an experimental and analytical pipeline for indexed sorting combined with single-cell RNA sequencing.* Nature Protocols. https://doi.org/10.1038/s41596-019-0164-4. 
The MARS-Seq library preparation protocol is given [here](https://github.com/imbforge/NGSpipe2go/blob/master/resources/MARS-Seq_protocol_Step-by-Step_MML.pdf). The sequencing reads are demultiplexed according to the respective pool barcodes before they are used as input for the analysis pipeline. \r +- Smart-seq2: Libraries are generated using the [Smart-seq2 kit](http://www.nature.com/nmeth/journal/v10/n11/full/nmeth.2639.html). \r +\r +## Pipeline Workflow\r +All analysis steps are illustrated in the pipeline [flowchart](https://www.draw.io/?lightbox=1&highlight=0000ff&edit=_blank&layers=1&nav=1&title=scRNA-Seq#R7R3ZcpvK8mtUlTxIxSIh6dF2oiyVODm2Uzk5LykEI4kYAWGxrXz9nZ6FdUBIQoDjm%2BTeIwYYZqZ7eu%2BegXq1fXrn697ms2sie6BI5tNAfTNQFFmdTPB%2FoGVHW6bKjDasfctkDyUNt9YfxBol1hpZJgoyD4aua4eWl200XMdBRphp033ffcw%2BtnLt7Fc9fY0KDbeGbhdbv1tmuGGtsjZPbrxH1nrDPj1TpvTGUjfu174bOex7jusgemer827YHIONbrqPqSb17UC98l03pL%2B2T1fIhmXlK0bfW5TcjYfsIyes88J76eNqcz0L7YfNzfL6%2Fe3i55%2F3Qw6AB92O2FoMFM3GHV6a1gOsrm2tHXJD%2Bx3BUC99sgzxJf61Zv8lry39fAseEumLt5LFCHd87Tfh1sa%2FZHzP1pfIvoyX9Mq1XZ88pC7IH%2FxIEPrufQwkvIiXK9cJGUbJGoxbDzbIZD2SfuKrlWXbqU4R%2BRN3yu8QGKqXhru1DHwp4d9rWw8C9juGo0T6D%2FXQcmGFhnMpnl4aKgxQD8gP0VOqiUHpHXK3KPR3%2BBF2dzbR6CtsMw1nDAsfU6g5ZW2bFFaOJZVtCbYd1nHfCV7gHww1xGjyYFz%2Fc%2Fs9%2Bhi9%2BfPf3Ye7xY%2Fg4b%2Bh%2FKLx5K0Gf8vwZO3rpoWhfQz6NIAtykzKYEtMU1PYMpYE2KLxB0%2FBlj%2Ffwzd30r%2F%2FfVzcvPW3snn3bvdhOJELwEMmprfs0vXDjbt2Hd1%2Bm7SmIAALkzzzyXU9BpZfKAx3DIR6FLq4KYUU6MkK%2F4XXRxN29SN1580T65lc7PiFg%2Bebegkuf6TvJa%2BRq%2BQ98wLYDr40ALYAaWhcWDYfThEBMwhXCvnAjXwDVexFtsdD3V%2BjsOo59iAsfSUi%2BcjGNOwhywpFSEFexfPWd6kHPNdywiDV81doSPBzPJtk8VMZZ9lU7vnJbFb1PP5BR5CgZzyVWhj75e7L%2Fa%2FN7Ofvf%2B9CZfHz%2BrusLYez54KwRaw6PyKNZ00j0km8SN3DiRIgctYDlH8YEDBc4AdkzXsq50vwcK1exsJerpTBxeVWtxwAiuUh28LcADdeptkc%2FcY%2Bflh7JJXzCTzdybfpwAp8tMr0uglDkLYvADrKYm2FmMuOfNsbORg7lIW1Xa5cjCn45%2FW7W5iasnbxxRD%2Fb2m7S%2BDe6AH0ggWfN6YKi8C4ub4I0G%2F4udX9EP8c8fsjzL7dh1167JLBWGXSiFF%2BGPqRY%2Bgh3m2FeVb0Gq%2FrIlkAKfUvhocuEEVE61YCurzg0gU2VvbSDbwxYILGwV
3e6UnQbh6sYvLQCJEZEmJshfoSv50iM1j%2FRivLsYhCokivlnDndaXA%2Fezmbbu6GRBeE3jIINwGa%2BEwUdC5XTOyAQ2f%2F%2FwQxnIntHRgh7pjxpMbwrStFRY4CdPXMU9F%2FlETbgAHn5ARhbDeKRwsGUm5MncGHUgTqEBc20mrQPK4ARVILDMJhJTcEmBF0IOfWFH0sMoIk6cCI5fuZrwhtjyNhctVKSXVXkO5sGSKYMV422E6Q0HIHypZpWCa7cBdrQJgQjkgHCboiw0Zky4lff77R0bq36eaDtKKaaKnlqimTWoIslJX12xJQ%2FgWIP%2FL8heYgDGpBCtQPQ1g6T4BRbOcNaVpS9c3kT%2FEzSAaEEmBIT6heBLG8kl8J23jvSCihIrG8Dd%2BwtNNM%2B5b6Z8kRuEXjMKnjEHucOkr29FJEpeAI3GBpgi%2FcuDkmRXaZmeIUQdL8gaBTqpDqfBUDMPUU0o5IMOla%2B4KjQURKzRjPUXfejYaVAljycPF13mLxRsMSu14t5KDRYFUX1at%2FqGxOOaKWSx133DN5qcR9yu98tHvyPIB%2BzBwcf%2FS54ub2%2BEtIHL57GoO5OD5eq5rNz5Z2qn04U3ldF93Ml9MnxuELuuOTdWJtkvkdzIvsNU2PivSqfTKxry%2Fo2kZTWInnxbuNNo6HQOMEO7m5iZhfWkhk%2F%2BDn9IrA%2FMNn2zDR%2FhSJ3MEocJrbo6sOwn8EWtni5ywi0mNRqPmpqQ7IDW64QZ0XMnUQ%2F1cU4JGEUsn0hMRS9KabLWOoaSEX6aUpPQNUMksQ7cvmMc0BCXikvtPbbSCz7j4qZVNHEwr4lNKqxOPGytEt1iqgR4ffd2Lx3WaFq1kdDJ5pglUQpFOKM8PFuTxZUqWP0CBUzp11Tw3BY6D9NkocJ5Q%2FN%2Fi4VsOlfAlL9HZaPuQ7CC4p6XuYaQPh2xXXRAZ36GWsrJ4BN7wNTGnigxsy1pUx8u3bQpPHTVVSh7g5kysZl6mNK9ulM3Ycjl60P2gEaN%2FWZcnKaAFEJ0HIHkhy3cfLKLopC28IkSrZG%2BXsUH4Gr%2BYWRZmoyj9Ln0N5iq0JEuvXC8cHS4PpZZzL4dUqjnkqbyQKfyfyOuNxdlkueNYntbjjvIRQVnHc0eRc7xVuzP%2FYG8Nz%2BMsGM9geBZGmMjqy5Nbmgp4qh3xpPVEiollhIUehP9cEZJcR4D4HWHSF%2B6IDs7VU3cFA9YfybCJbxAGi%2Fnu%2FYB50nL8fBMtR3hvV%2FByxse3eHBA8Rex2xQeA88dvvHbGNHmmNvupeoUTqVUXUR3cpQ%2BS7hVrYS%2Bk0hO1rGURfgGCP1EygWgjYuUXtYEJEo73JlYh9ALQyy5ylVCTpC9dB9boCRKNSk5H0kY1yQJE6lpknBkEGTWQT3WpBxWnBbUKMSReDVfNo40DPpiwKqcg62a82ZTJGVvJUA%2FFYfU6bRRHBKKLT1yl%2B%2FBozKxRe1MbKlLo8Z9CdTO0ygljV%2F78XE8OT8%2Bys8mtaDRSO3aqNSWBFyAlqb52q%2FVxtve6xvVfHKt%2B2t1qPWHehzKhFoAljrvxb6fqrl9n%2BUr%2B59vOEHj9uHi8Z186%2F78cjn%2F8k19%2BrrZ3g97pD3%2FvXxo2gt8PJUPTWYn8aHauv3t3cVNbc3%2BBqvuxGjseRBkdE4dPsm3wBA%2FXIkfpyBzTiW%2B4QjguZxFmo6VdvHaMmk6ZZ0lfhMa3CR9TfwpOVoHvqzUQjNXVtYQAguZdy1vLdMkpDAHk6Tlhq2EOijJbiUJzsQBTskU2714WJNL%2FA%2Bv3hUw1wke5hW%2BlpNr%2FA8e98Mr18Fd6xYBLsII%2B4iCcFBMyG0ADdQZfDxrvSnggdBMr44PxoN6MtALlVi1ukyncQPNacmFWmGL1ncPkkQJ5iSUjnBFf4lC
LwK2A%2FUrYINfFJgOvvnbqGI53fmeY7%2BiG0%2FDIhyJxTUm467jPzw9YCZNXnj%2B%2F0LfWjbg%2B3tkPyDoddBGZM2Uc6MKk7KQKh3BnWri%2BbRneC4VUN1HQWSHQT%2BR3cAEOo3i4DS3gELbOwJWDFVFirYWVLGh6WpE%2FjPR0Iw8G%2BMdPF2dmtdqfMKL35Iaz3XZuyUPD3aruSVnvd%2BStrvu6X7EIxvyvWi4to0l81RIPdIN2I%2BcQ%2FULwc%2BAzBO1Rf4ilnqVLqXerkx%2FXJjdK%2FXOO7PTVo67N6SnKPX6CFTJnlKfVsMumUixiFdkEWwsZ%2FczMHxHD9Dvn%2FQGVu5%2FhiTRaBEYI9o2utmap0Vi5rsqDbDMByv2eXHII%2FjGaINsD%2FnB6ObkRRJ2WViYfsWqfsXjxUQv3mywJX0wQqYrTBAmO0qNOI4O%2Feya1gpoFsEM8mAiChtREEJXmr4FRuosAyrHJZGnwLlpoYMQ40ewwcQgoLROkRyETJRGN1F2Yy%2BYeVvS6nxWj8HLSgMcXlzk7fREjF%2FR1uPPM3NjazGOcf029uGkeNtZnTFn8vUXnXS84h93zk%2FqBaU06B%2BxnBXy0ROmfhbJRavrKjGBGmwptQG7MaEJPFadRkeSmEjbWvo6Q%2BSz%2B1JykznYrTJuKzbyvLGQqtKtW%2BX%2B92P4aG23MlI%2Baab0dbwcXg6nz0XVODayOkN7yvSVHEURAL5xIqNpWa5UiHyj5LBAZIrUapyteKtMzxNCN82hMx9w6bjyocCnhdzVJp1BtASDoYJl1nDnoeAo2klMlKQ2E0nrwaQMZKudh9qhmPk5HE4yJ38FyRxLPQwf597O%2Fjslj63Q26hZp7Yzs%2FGQ45OMcMqzib%2FrQ650bSirPYmT4gSDG31Z2fjSOKnc89q4VWb2k7rMDgyYIi%2BRiCmw3mMNIGXVT9xrZ%2BRn6fq1dCKHMzLtWTKyfEjVRCnWg2iTkYnjwV9gOYimTBHTmvRuJveC3k3lrEljMt8jvOeeH8%2FboXdLfXsFxkRy5ExNamdgwkJqLy2t9Xdr3UaSJx6mwYZ5OEWbPkuKlhfNJ1q3ormQolUndv5fMhceTZCmYcJVbUswr00lLkzz2%2BcPl7Ts4R3esBLkiv%2FOBl6IiEa6Lr1uEh8J7mjAnSdJIUXmu7kh6wMfIGndBLN1k5gEiBn1Rq6MAcqWwW9H0MLTwrIdm0noknEfTqNm7dOo3LlBqwn8HRSPCML3luTvIB91je9o5A%2FsY%2Fh%2BCNtm0lQNk5xIpwhKfHVuzv3rJbr%2BmnOztVEm8tHmXDWLZi2Zc%2FmA%2B2bOXSMHXbrmDouFSm2R0OcKMBMlMbl%2BIFQbeoMPImcNrvU2bLnwSSiOiAejHEyIJ9KgdUJ8BmFxPOtWWBRb%2BP5631ej0mLtzNx%2BWvg0tZ8WPiysHWbcw9hHKtXS0DAeS8%2FKGxUE2cCzrTA8d9pkSv7k8zlc5uwgeTInJVaJmmXiabkIegaZU5O6lTkrTVy9CYAtxt6Hvm7c9zT%2BlVqtYMpskMxC%2F2AFUNEsoEfk9j7VpAFszwfqjQUmprNF4gstHz2qnNB%2FCWFWW0JoWkA4KX6fD%2Fso8nX2gso3CGLnHAN4%2BTvkuJlzSiqkhO7pGpVPSKEHDAG8JZ6I%2B3GdmUQ%2B0n2F9DDyc6fxkW%2Bsw1XbMd2FKO2kDjAL0LZdmowYkCkCTlcVWx4cWQ64weq%2BTcgkSoZIT%2BW60dRSA1RavIOLpSEKC3fewr0xDXm5hXvFKnaPHDIxzzzIISO3yEDngiAacYJgvzLg5uV1H3p%2FplwSaRMnZxFMqlYVXtr5a3RJssUpBuT8LbxT2flb1GOWy3HjKUz3mJXDWUj8GvN2Urs%2FrlNA%2FWk6QTZRJWdq19DxW7vACsg6
Iy84%2FbScc53k8%2F%2BBnX5Q0HM492fOSxrE4lCxZpI8E7D22fR0aix0AT7%2F7PH97rs6x6%2Fy02rrqaNd8c54mJ1Yzz44dbLH9cfc4WE9sp7xuIy4lswfy%2FOQ%2BbpqZK3oio6%2BpaG3mIc9gE5IjpWnpj1PZ4dWIibC1M3NbVPXdV1bHt3IIxpBsv7T51EqR4zyWZtKc0q4ILq4ZR2c1%2BPtsRMA6odmEuF7RMaW%2BjYhYi%2FC1J9NWlTnNUtNnq%2FoTqdZXU3Y%2BqdtGvu5Y7KDGpWNhANMp4eFA0xnsxyKnSnhB2qf3LACLHVDAhi0QNag1mkiZWDKc2%2B6j84gqUkCE2oh14fVb4E5HBwGME%2Bjx%2FOJeMp76qfjolm8TU%2F9e%2BnjanM9C%2B2Hzc3y%2Bv3t4uef98NyR30BuYAPCPlc3mY1FrGzT2gNom2CvLS7AjtblrIzVrU5BSRhheV0VAZrKhR6Li3sLMKKLEVvhNNlCYlAVBvLFab%2FUxidEAmKpxwuYX8PUodbxgU5oa8VFUxsolTV8KoctxubWOlZdgvynZXegWPBSvPakY2v9CmOZS0vEqcsvnyFk0ay1iJtKVMxKlWJKgFY56LvkfLuIba%2FMyCXzGsE7cMudrBP89jVptOzEr1frs9TuCzP0BnGbO8L6tap2I4vze9VuRgv1ovT24F16F5qgMXMtWzmz0SQM8EjJDMcRjkTh%2BmyzL1FPRMLN2fa62Wg2wsw2OWRUyz%2FiLBTOxN2HlrxHSTfIR8ZQVHKkyp8DHV6EXfyIcSANNIR54k3grwWBSQfjaF5sUhrAZfLhIfCgDTxiN7QxEli%2F0E0XtGEoxSAd4sCLkh8RTXrL4yxEt9P1%2BjZnhBvnP3K%2FMb1rT94fnr8%2FVjGUJvZJAUlYVzcJMpsXtwkZ7Nq%2F%2FX5wEWbdrow7rratpC2cv%2BY2x%2FvrA9vl7e3T%2B7O3X388eXbsK6Rm5sQuq7ykjtdcpo9VXbv87ymx7mN3OyQCWKQ%2BATHYtQu9eJ6OwKP4B4%2BQU7UoPUZYjmBNXJx4YwW760VgA6iO8iN4JrNiljTYru3IrR8lyPbszN851N9px2XbBSegNrxMXIHEUJ88ZVUSibW64aPRT2eFAqSe6pIZvuhyULAF21izxPwchbwyplcvXUhX%2BkSbh%2F0wjjIjqu0ngv0x0L%2B0KDKuqggXPu2QiqrBlnLMB759u4S8m5hOvu4ZYId9Cqkqbnqm%2BH8wD1Xn72Op2ruUNaxoO6Qohb5qzptwPMgrhspdbu1zn4qa1UVtP26QGd1jIXD7lMd4x6f%2Fn4ayM9zMMnBMU6T3LklMyWNPnufV7STDn%2BvZ5GY9Acdz3K2XV1EElNWpReIlLcLqPuqxeafnyinIFJtO8JVFOqm7oW17Qe8JV2Y0Udb9wGsoKQrYpVluWyspk7ccRflHA02xao4unKUe3bWhHwYndrHKrN8l%2FY41j3EysS2r8Hu8eCklxf2nq1woyoyl%2B67C3wXBazkVjm1CoQeX8ZBJ6nCVwvyR8g508ut5eWzrFqVq7P1VoO%2FgxJnUb5gF2vGVM0yGBtfswMoCUvf6CbBAimrvzUU6jmf5tigKAJxKnD%2FqHzJGwcuZ8yl0UgHxBKStyI732LHns1%2F8m5EcoCS7dL8bohDxzwtQo5BE%2BNYkU6bnj5asfvtQiZX0nIHtIT0d0Xoj%2BfRzmm%2BePbIeILT%2BFcQZl2zVf0X5pSfkkGhwGbBvvvPFSVrmIKT4Rw5uWvX3%2BLP%2F9Ez2YPx2X0SCaGq19UHE3IQV8lKsIR681cUhOSIQZqCXzhjFE97BRuULuuD7lsQSUNSLS3HsEfJEhg7g0SCeRs9QK9rjusNySyV0BNeVJ%2BPjRUJ%2BHp1QcEaDm%2Bv38Zj%2Fvb54msChVpfubKjgMOC%2Bo
ssuh67ZPhBtBx6rhfZvKLPIeu6o8zvPi4ym9Rx46tDh1Cz2wvHAfKE9g7vYJx6Y61IRSkoUkQX3kdBwHEiLq9AyjFAkMKQFGIi3JqNBAi%2Fd%2BgAcGORdOTDCnJc56%2FJAZhP8vpRkTEImb7aRBaA0ELbIzX8SKtQI2e%2FdFApfDzOWl20ec5OW79SeNYw3FalcD7gvlUK%2FxzZoYU5b%2B2suWiLSTZhoTHDRu0diFUzSW6vf6VV5b5A82rtmLJqtjOBfNz1CQpyp7ECXdb8PNRDKFw%2BRtt6U3SlapD9rPj5NZ2l5aysdeQXJNIeGXNo3U7%2BBZDlNz5aZT5QIKGYio982xs5kIFTSkaHCSk1qXa1oAsC7fDYiF4mVTZTySFM608aMdYPQz9yIOTVLE5D3CFfW73Oep9%2FdbMG6HZXHKsJ%2BhY0v0aWW9Bb1VqLJp4vHdv2gpDjMxtZjFxPPUC6OAK%2BpIHqhMC5Xf%2FedokdZ4u1xzXRf1PjFemB5d0e0YCVV8KSggO%2B%2B6q5AeKF0LHYCQyHjYCD3I78TFYbCJgklxor9oMk4F8K9n8EUBQr6sjAYoOftdW5BmjxKQDWmtfr2JYiuptUOY4TwuPtGr%2BIBcGAFT6mDelSyLSWY2JMoAiOf3x2A2poChHYG%2B4RAvxNFjFA5KyKpK5jyUxa3QZnTEw2fde746ISDK48Nf40yVvJCN4TVZSKK6oXNW0ge0csZosM07mVPCl9ec96lcuFpYv43NOX8SVmMWH6cYxgm8%2BuCSr12%2F8B). Specify desired analysis details for your data in the respective *essential.vars.groovy* file (see below) and run the selected pipeline *marsseq.pipeline.groovy* or *smartsseq.pipeline.groovy* as described [here](https://gitlab.rlp.net/imbforge/NGSpipe2go/-/blob/master/README.md). The analysis allows further parameter fine-tuning subsequent the initial analysis e.g. for plotting and QC thresholding. Therefore, a customisable *sc.report.Rmd* file will be generated in the output reports folder after running the pipeline. Go through the steps and modify the default settings where appropriate. Subsequently, the *sc.report.Rmd* file can be converted to a final html report using the *knitr* R-package.\r +\r +### The pipelines includes:\r +- FastQC, MultiQC and other tools for rawdata quality control\r +- Adapter trimming with Cutadapt\r +- Mapping to the genome using STAR\r +- generation of bigWig tracks for visualisation of alignment\r +- Quantification with featureCounts (Subread) and UMI-tools (if UMIs are used for deduplication)\r +- Downstream analysis in R using a pre-designed markdown report file (*sc.report.Rmd*). 
Modify this file to fit your custom parameter and thresholds and render it to your final html report. The Rmd file uses, among others, the following tools and methods:\r + - QC: the [scater](http://bioconductor.org/packages/release/bioc/html/scater.html) package.\r + - Normalization: the [scran](http://bioconductor.org/packages/release/bioc/html/scran.html) package.\r + - Differential expression analysis: the [scde](http://bioconductor.org/packages/release/bioc/html/scde.html) package.\r + - Trajectory analysis (pseudotime): the [monocle](https://bioconductor.org/packages/release/bioc/html/monocle.html) package.\r +\r +### Pipeline parameter settings\r +- essential.vars.groovy: essential parameter describing the experiment \r + - project folder name\r + - reference genome\r + - experiment design\r + - adapter sequence, etc.\r +- additional (more specialized) parameter can be given in the var.groovy-files of the individual pipeline modules \r +- targets.txt: comma-separated txt-file giving information about the analysed samples. The following columns are required \r + - sample: sample identifier. Must be a unique substring of the input sample file name (e.g. common prefixes and suffixes may be removed). These names are grebbed against the count file names to merge targets.txt to the count data.\r + - plate: plate ID (number) \r + - row: plate row (letter)\r + - col: late column (number)\r + - cells: 0c/1c/10c (control wells)\r + - group: default variable for cell grouping (e.g. by condition)\r + \r + for pool-based libraries like MARSseq required additionally:\r + - pool: the pool ID comprises all cells from 1 library pool (i.e. a set of unique cell barcodes; the cell barcodes are re-used in other pools). Must be a unique substring of the input sample file name. For pool-based design, the pool ID is grebbed against the respective count data filename instead of the sample name as stated above.\r + - barcode: cell barcodes used as cell identifier in the count files. 
After merging the count data with targets.txt, the barcodes are replaced with sample IDs given in the sample column (i.e. here, sample names need not be a substring of input sample file name).\r +\r +### Programs required\r +- FastQC\r +- STAR\r +- Samtools\r +- Bedtools\r +- Subread\r +- Picard\r +- UCSC utilities\r +- RSeQC\r +- UMI-tools\r +- R\r +\r +## Resources\r +- QC: the [scater](http://bioconductor.org/packages/release/bioc/html/scater.html) package.\r +- Normalization: the [scran](http://bioconductor.org/packages/release/bioc/html/scran.html) package.\r +- Trajectory analysis (pseudotime): the [monocle](https://bioconductor.org/packages/release/bioc/html/monocle.html) package.\r +- A [tutorial](https://scrnaseq-course.cog.sanger.ac.uk/website/index.html) from Hemberg lab\r +- Luecken and Theis 2019 [Current best practices in single‐cell RNA‐seq analysis: a tutorial](https://www.embopress.org/doi/10.15252/msb.20188746)\r +\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/62?version=1" ; + schema1:isBasedOn "https://gitlab.rlp.net/imbforge/NGSpipe2go/-/tree/master/pipelines/scRNAseq" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNA-seq Smart-seq 2" ; + schema1:sdDatePublished "2024-08-05 10:32:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/62/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2160 ; + schema1:creator ; + schema1:dateCreated "2020-10-07T07:47:46Z" ; + schema1:dateModified "2023-01-16T13:44:52Z" ; + schema1:description """# scRNA-Seq pipelines\r +\r +Here we forge the tools to analyze single cell RNA-Seq experiments. 
The analysis workflow is based on the Bioconductor packages [*scater*](https://bioconductor.org/packages/devel/bioc/vignettes/scater/inst/doc/overview.html) and [*scran*](https://bioconductor.org/packages/devel/bioc/vignettes/scran/inst/doc/scran.html) as well as the Bioconductor workflows by Lun ATL, McCarthy DJ, & Marioni JC [*A step-by-step workflow for low-level analysis of single-cell RNA-seq data.*](http://doi.org/10.12688/f1000research.9501.1) F1000Res. 2016 Aug 31 [revised 2016 Oct 31];5:2122 and Amezquita RA, Lun ATL et al. [*Orchestrating Single-Cell Analysis with Bioconductor*](https://osca.bioconductor.org/index.html) Nat Methods. 2020 Feb;17(2):137-145.\r +\r +## Implemented protocols\r + - MARS-Seq (massively parallel single-cell RNA-sequencing): The protocol is based on the publications of Jaitin DA, et al. (2014). *Massively parallel single-cell RNA-seq for marker-free decomposition of tissues into cell types.* Science (New York, N.Y.), 343(6172), 776–779. https://doi.org/10.1126/science.1247651 and Keren-Shaul H., et al. (2019). *MARS-seq2.0: an experimental and analytical pipeline for indexed sorting combined with single-cell RNA sequencing.* Nature Protocols. https://doi.org/10.1038/s41596-019-0164-4. The MARS-Seq library preparation protocol is given [here](https://github.com/imbforge/NGSpipe2go/blob/master/resources/MARS-Seq_protocol_Step-by-Step_MML.pdf). The sequencing reads are demultiplexed according to the respective pool barcodes before they are used as input for the analysis pipeline. \r +- Smart-seq2: Libraries are generated using the [Smart-seq2 kit](http://www.nature.com/nmeth/journal/v10/n11/full/nmeth.2639.html). 
\r +\r +## Pipeline Workflow\r +All analysis steps are illustrated in the pipeline [flowchart](https://www.draw.io/?lightbox=1&highlight=0000ff&edit=_blank&layers=1&nav=1&title=scRNA-Seq#R7R3ZcpvK8mtUlTxIxSIh6dF2oiyVODm2Uzk5LykEI4kYAWGxrXz9nZ6FdUBIQoDjm%2BTeIwYYZqZ7eu%2BegXq1fXrn697ms2sie6BI5tNAfTNQFFmdTPB%2FoGVHW6bKjDasfctkDyUNt9YfxBol1hpZJgoyD4aua4eWl200XMdBRphp033ffcw%2BtnLt7Fc9fY0KDbeGbhdbv1tmuGGtsjZPbrxH1nrDPj1TpvTGUjfu174bOex7jusgemer827YHIONbrqPqSb17UC98l03pL%2B2T1fIhmXlK0bfW5TcjYfsIyes88J76eNqcz0L7YfNzfL6%2Fe3i55%2F3Qw6AB92O2FoMFM3GHV6a1gOsrm2tHXJD%2Bx3BUC99sgzxJf61Zv8lry39fAseEumLt5LFCHd87Tfh1sa%2FZHzP1pfIvoyX9Mq1XZ88pC7IH%2FxIEPrufQwkvIiXK9cJGUbJGoxbDzbIZD2SfuKrlWXbqU4R%2BRN3yu8QGKqXhru1DHwp4d9rWw8C9juGo0T6D%2FXQcmGFhnMpnl4aKgxQD8gP0VOqiUHpHXK3KPR3%2BBF2dzbR6CtsMw1nDAsfU6g5ZW2bFFaOJZVtCbYd1nHfCV7gHww1xGjyYFz%2Fc%2Fs9%2Bhi9%2BfPf3Ye7xY%2Fg4b%2Bh%2FKLx5K0Gf8vwZO3rpoWhfQz6NIAtykzKYEtMU1PYMpYE2KLxB0%2FBlj%2Ffwzd30r%2F%2FfVzcvPW3snn3bvdhOJELwEMmprfs0vXDjbt2Hd1%2Bm7SmIAALkzzzyXU9BpZfKAx3DIR6FLq4KYUU6MkK%2F4XXRxN29SN1580T65lc7PiFg%2Bebegkuf6TvJa%2BRq%2BQ98wLYDr40ALYAaWhcWDYfThEBMwhXCvnAjXwDVexFtsdD3V%2BjsOo59iAsfSUi%2BcjGNOwhywpFSEFexfPWd6kHPNdywiDV81doSPBzPJtk8VMZZ9lU7vnJbFb1PP5BR5CgZzyVWhj75e7L%2Fa%2FN7Ofvf%2B9CZfHz%2BrusLYez54KwRaw6PyKNZ00j0km8SN3DiRIgctYDlH8YEDBc4AdkzXsq50vwcK1exsJerpTBxeVWtxwAiuUh28LcADdeptkc%2FcY%2Bflh7JJXzCTzdybfpwAp8tMr0uglDkLYvADrKYm2FmMuOfNsbORg7lIW1Xa5cjCn45%2FW7W5iasnbxxRD%2Fb2m7S%2BDe6AH0ggWfN6YKi8C4ub4I0G%2F4udX9EP8c8fsjzL7dh1167JLBWGXSiFF%2BGPqRY%2Bgh3m2FeVb0Gq%2FrIlkAKfUvhocuEEVE61YCurzg0gU2VvbSDbwxYILGwV3e6UnQbh6sYvLQCJEZEmJshfoSv50iM1j%2FRivLsYhCokivlnDndaXA%2Fezmbbu6GRBeE3jIINwGa%2BEwUdC5XTOyAQ2f%2F%2FwQxnIntHRgh7pjxpMbwrStFRY4CdPXMU9F%2FlETbgAHn5ARhbDeKRwsGUm5MncGHUgTqEBc20mrQPK4ARVILDMJhJTcEmBF0IOfWFH0sMoIk6cCI5fuZrwhtjyNhctVKSXVXkO5sGSKYMV422E6Q0HIHypZpWCa7cBdrQJgQjkgHCboiw0Zky4lff77R0bq36eaDtKKaaKnlqimTWoIslJX12xJQ%2FgWIP%2FL8heYgDGpBCtQPQ1g6T4BRbOcNaVpS9c3kT%2FEzSAaEEmBIT6heBLG8kl8J23jvSCihIrG8Dd%2BwtNNM%2B5b6Z8kRuEXjMKnjE
HucOkr29FJEpeAI3GBpgi%2FcuDkmRXaZmeIUQdL8gaBTqpDqfBUDMPUU0o5IMOla%2B4KjQURKzRjPUXfejYaVAljycPF13mLxRsMSu14t5KDRYFUX1at%2FqGxOOaKWSx133DN5qcR9yu98tHvyPIB%2BzBwcf%2FS54ub2%2BEtIHL57GoO5OD5eq5rNz5Z2qn04U3ldF93Ml9MnxuELuuOTdWJtkvkdzIvsNU2PivSqfTKxry%2Fo2kZTWInnxbuNNo6HQOMEO7m5iZhfWkhk%2F%2BDn9IrA%2FMNn2zDR%2FhSJ3MEocJrbo6sOwn8EWtni5ywi0mNRqPmpqQ7IDW64QZ0XMnUQ%2F1cU4JGEUsn0hMRS9KabLWOoaSEX6aUpPQNUMksQ7cvmMc0BCXikvtPbbSCz7j4qZVNHEwr4lNKqxOPGytEt1iqgR4ffd2Lx3WaFq1kdDJ5pglUQpFOKM8PFuTxZUqWP0CBUzp11Tw3BY6D9NkocJ5Q%2FN%2Fi4VsOlfAlL9HZaPuQ7CC4p6XuYaQPh2xXXRAZ36GWsrJ4BN7wNTGnigxsy1pUx8u3bQpPHTVVSh7g5kysZl6mNK9ulM3Ycjl60P2gEaN%2FWZcnKaAFEJ0HIHkhy3cfLKLopC28IkSrZG%2BXsUH4Gr%2BYWRZmoyj9Ln0N5iq0JEuvXC8cHS4PpZZzL4dUqjnkqbyQKfyfyOuNxdlkueNYntbjjvIRQVnHc0eRc7xVuzP%2FYG8Nz%2BMsGM9geBZGmMjqy5Nbmgp4qh3xpPVEiollhIUehP9cEZJcR4D4HWHSF%2B6IDs7VU3cFA9YfybCJbxAGi%2Fnu%2FYB50nL8fBMtR3hvV%2FByxse3eHBA8Rex2xQeA88dvvHbGNHmmNvupeoUTqVUXUR3cpQ%2BS7hVrYS%2Bk0hO1rGURfgGCP1EygWgjYuUXtYEJEo73JlYh9ALQyy5ylVCTpC9dB9boCRKNSk5H0kY1yQJE6lpknBkEGTWQT3WpBxWnBbUKMSReDVfNo40DPpiwKqcg62a82ZTJGVvJUA%2FFYfU6bRRHBKKLT1yl%2B%2FBozKxRe1MbKlLo8Z9CdTO0ygljV%2F78XE8OT8%2Bys8mtaDRSO3aqNSWBFyAlqb52q%2FVxtve6xvVfHKt%2B2t1qPWHehzKhFoAljrvxb6fqrl9n%2BUr%2B59vOEHj9uHi8Z186%2F78cjn%2F8k19%2BrrZ3g97pD3%2FvXxo2gt8PJUPTWYn8aHauv3t3cVNbc3%2BBqvuxGjseRBkdE4dPsm3wBA%2FXIkfpyBzTiW%2B4QjguZxFmo6VdvHaMmk6ZZ0lfhMa3CR9TfwpOVoHvqzUQjNXVtYQAguZdy1vLdMkpDAHk6Tlhq2EOijJbiUJzsQBTskU2714WJNL%2FA%2Bv3hUw1wke5hW%2BlpNr%2FA8e98Mr18Fd6xYBLsII%2B4iCcFBMyG0ADdQZfDxrvSnggdBMr44PxoN6MtALlVi1ukyncQPNacmFWmGL1ncPkkQJ5iSUjnBFf4lCLwK2A%2FUrYINfFJgOvvnbqGI53fmeY7%2BiG0%2FDIhyJxTUm467jPzw9YCZNXnj%2B%2F0LfWjbg%2B3tkPyDoddBGZM2Uc6MKk7KQKh3BnWri%2BbRneC4VUN1HQWSHQT%2BR3cAEOo3i4DS3gELbOwJWDFVFirYWVLGh6WpE%2FjPR0Iw8G%2BMdPF2dmtdqfMKL35Iaz3XZuyUPD3aruSVnvd%2BStrvu6X7EIxvyvWi4to0l81RIPdIN2I%2BcQ%2FULwc%2BAzBO1Rf4ilnqVLqXerkx%2FXJjdK%2FXOO7PTVo67N6SnKPX6CFTJnlKfVsMumUixiFdkEWwsZ%2FczMHxHD9Dvn%2FQGVu5%2FhiTRaBEYI9o2utmap0Vi5rsqDbDMByv2eXHII%2FjGaINsD%2FnB6ObkRRJ2WViY
fsWqfsXjxUQv3mywJX0wQqYrTBAmO0qNOI4O%2Feya1gpoFsEM8mAiChtREEJXmr4FRuosAyrHJZGnwLlpoYMQ40ewwcQgoLROkRyETJRGN1F2Yy%2BYeVvS6nxWj8HLSgMcXlzk7fREjF%2FR1uPPM3NjazGOcf029uGkeNtZnTFn8vUXnXS84h93zk%2FqBaU06B%2BxnBXy0ROmfhbJRavrKjGBGmwptQG7MaEJPFadRkeSmEjbWvo6Q%2BSz%2B1JykznYrTJuKzbyvLGQqtKtW%2BX%2B92P4aG23MlI%2Baab0dbwcXg6nz0XVODayOkN7yvSVHEURAL5xIqNpWa5UiHyj5LBAZIrUapyteKtMzxNCN82hMx9w6bjyocCnhdzVJp1BtASDoYJl1nDnoeAo2klMlKQ2E0nrwaQMZKudh9qhmPk5HE4yJ38FyRxLPQwf597O%2Fjslj63Q26hZp7Yzs%2FGQ45OMcMqzib%2FrQ650bSirPYmT4gSDG31Z2fjSOKnc89q4VWb2k7rMDgyYIi%2BRiCmw3mMNIGXVT9xrZ%2BRn6fq1dCKHMzLtWTKyfEjVRCnWg2iTkYnjwV9gOYimTBHTmvRuJveC3k3lrEljMt8jvOeeH8%2FboXdLfXsFxkRy5ExNamdgwkJqLy2t9Xdr3UaSJx6mwYZ5OEWbPkuKlhfNJ1q3ormQolUndv5fMhceTZCmYcJVbUswr00lLkzz2%2BcPl7Ts4R3esBLkiv%2FOBl6IiEa6Lr1uEh8J7mjAnSdJIUXmu7kh6wMfIGndBLN1k5gEiBn1Rq6MAcqWwW9H0MLTwrIdm0noknEfTqNm7dOo3LlBqwn8HRSPCML3luTvIB91je9o5A%2FsY%2Fh%2BCNtm0lQNk5xIpwhKfHVuzv3rJbr%2BmnOztVEm8tHmXDWLZi2Zc%2FmA%2B2bOXSMHXbrmDouFSm2R0OcKMBMlMbl%2BIFQbeoMPImcNrvU2bLnwSSiOiAejHEyIJ9KgdUJ8BmFxPOtWWBRb%2BP5631ej0mLtzNx%2BWvg0tZ8WPiysHWbcw9hHKtXS0DAeS8%2FKGxUE2cCzrTA8d9pkSv7k8zlc5uwgeTInJVaJmmXiabkIegaZU5O6lTkrTVy9CYAtxt6Hvm7c9zT%2BlVqtYMpskMxC%2F2AFUNEsoEfk9j7VpAFszwfqjQUmprNF4gstHz2qnNB%2FCWFWW0JoWkA4KX6fD%2Fso8nX2gso3CGLnHAN4%2BTvkuJlzSiqkhO7pGpVPSKEHDAG8JZ6I%2B3GdmUQ%2B0n2F9DDyc6fxkW%2Bsw1XbMd2FKO2kDjAL0LZdmowYkCkCTlcVWx4cWQ64weq%2BTcgkSoZIT%2BW60dRSA1RavIOLpSEKC3fewr0xDXm5hXvFKnaPHDIxzzzIISO3yEDngiAacYJgvzLg5uV1H3p%2FplwSaRMnZxFMqlYVXtr5a3RJssUpBuT8LbxT2flb1GOWy3HjKUz3mJXDWUj8GvN2Urs%2FrlNA%2FWk6QTZRJWdq19DxW7vACsg6Iy84%2FbScc53k8%2F%2BBnX5Q0HM492fOSxrE4lCxZpI8E7D22fR0aix0AT7%2F7PH97rs6x6%2Fy02rrqaNd8c54mJ1Yzz44dbLH9cfc4WE9sp7xuIy4lswfy%2FOQ%2BbpqZK3oio6%2BpaG3mIc9gE5IjpWnpj1PZ4dWIibC1M3NbVPXdV1bHt3IIxpBsv7T51EqR4zyWZtKc0q4ILq4ZR2c1%2BPtsRMA6odmEuF7RMaW%2BjYhYi%2FC1J9NWlTnNUtNnq%2FoTqdZXU3Y%2BqdtGvu5Y7KDGpWNhANMp4eFA0xnsxyKnSnhB2qf3LACLHVDAhi0QNag1mkiZWDKc2%2B6j84gqUkCE2oh14fVb4E5HBwGME%2Bjx%2FOJeMp76qfjolm8TU%2F9e%2BnjanM9C%2B2Hzc3y%2Bv3t4u
ef98NyR30BuYAPCPlc3mY1FrGzT2gNom2CvLS7AjtblrIzVrU5BSRhheV0VAZrKhR6Li3sLMKKLEVvhNNlCYlAVBvLFab%2FUxidEAmKpxwuYX8PUodbxgU5oa8VFUxsolTV8KoctxubWOlZdgvynZXegWPBSvPakY2v9CmOZS0vEqcsvnyFk0ay1iJtKVMxKlWJKgFY56LvkfLuIba%2FMyCXzGsE7cMudrBP89jVptOzEr1frs9TuCzP0BnGbO8L6tap2I4vze9VuRgv1ovT24F16F5qgMXMtWzmz0SQM8EjJDMcRjkTh%2BmyzL1FPRMLN2fa62Wg2wsw2OWRUyz%2FiLBTOxN2HlrxHSTfIR8ZQVHKkyp8DHV6EXfyIcSANNIR54k3grwWBSQfjaF5sUhrAZfLhIfCgDTxiN7QxEli%2F0E0XtGEoxSAd4sCLkh8RTXrL4yxEt9P1%2BjZnhBvnP3K%2FMb1rT94fnr8%2FVjGUJvZJAUlYVzcJMpsXtwkZ7Nq%2F%2FX5wEWbdrow7rratpC2cv%2BY2x%2FvrA9vl7e3T%2B7O3X388eXbsK6Rm5sQuq7ykjtdcpo9VXbv87ymx7mN3OyQCWKQ%2BATHYtQu9eJ6OwKP4B4%2BQU7UoPUZYjmBNXJx4YwW760VgA6iO8iN4JrNiljTYru3IrR8lyPbszN851N9px2XbBSegNrxMXIHEUJ88ZVUSibW64aPRT2eFAqSe6pIZvuhyULAF21izxPwchbwyplcvXUhX%2BkSbh%2F0wjjIjqu0ngv0x0L%2B0KDKuqggXPu2QiqrBlnLMB759u4S8m5hOvu4ZYId9Cqkqbnqm%2BH8wD1Xn72Op2ruUNaxoO6Qohb5qzptwPMgrhspdbu1zn4qa1UVtP26QGd1jIXD7lMd4x6f%2Fn4ayM9zMMnBMU6T3LklMyWNPnufV7STDn%2BvZ5GY9Acdz3K2XV1EElNWpReIlLcLqPuqxeafnyinIFJtO8JVFOqm7oW17Qe8JV2Y0Udb9wGsoKQrYpVluWyspk7ccRflHA02xao4unKUe3bWhHwYndrHKrN8l%2FY41j3EysS2r8Hu8eCklxf2nq1woyoyl%2B67C3wXBazkVjm1CoQeX8ZBJ6nCVwvyR8g508ut5eWzrFqVq7P1VoO%2FgxJnUb5gF2vGVM0yGBtfswMoCUvf6CbBAimrvzUU6jmf5tigKAJxKnD%2FqHzJGwcuZ8yl0UgHxBKStyI732LHns1%2F8m5EcoCS7dL8bohDxzwtQo5BE%2BNYkU6bnj5asfvtQiZX0nIHtIT0d0Xoj%2BfRzmm%2BePbIeILT%2BFcQZl2zVf0X5pSfkkGhwGbBvvvPFSVrmIKT4Rw5uWvX3%2BLP%2F9Ez2YPx2X0SCaGq19UHE3IQV8lKsIR681cUhOSIQZqCXzhjFE97BRuULuuD7lsQSUNSLS3HsEfJEhg7g0SCeRs9QK9rjusNySyV0BNeVJ%2BPjRUJ%2BHp1QcEaDm%2Bv38Zj%2Fvb54msChVpfubKjgMOC%2Bossuh67ZPhBtBx6rhfZvKLPIeu6o8zvPi4ym9Rx46tDh1Cz2wvHAfKE9g7vYJx6Y61IRSkoUkQX3kdBwHEiLq9AyjFAkMKQFGIi3JqNBAi%2Fd%2BgAcGORdOTDCnJc56%2FJAZhP8vpRkTEImb7aRBaA0ELbIzX8SKtQI2e%2FdFApfDzOWl20ec5OW79SeNYw3FalcD7gvlUK%2FxzZoYU5b%2B2suWiLSTZhoTHDRu0diFUzSW6vf6VV5b5A82rtmLJqtjOBfNz1CQpyp7ECXdb8PNRDKFw%2BRtt6U3SlapD9rPj5NZ2l5aysdeQXJNIeGXNo3U7%2BBZDlNz5aZT5QIKGYio982xs5kIFTSkaHCSk1qXa1oAsC7fDYiF4mVTZTySFM608aMdYPQz9yIOTVLE5D3CFfW73Oep9%2FdbMG6H
ZXHKsJ%2BhY0v0aWW9Bb1VqLJp4vHdv2gpDjMxtZjFxPPUC6OAK%2BpIHqhMC5Xf%2FedokdZ4u1xzXRf1PjFemB5d0e0YCVV8KSggO%2B%2B6q5AeKF0LHYCQyHjYCD3I78TFYbCJgklxor9oMk4F8K9n8EUBQr6sjAYoOftdW5BmjxKQDWmtfr2JYiuptUOY4TwuPtGr%2BIBcGAFT6mDelSyLSWY2JMoAiOf3x2A2poChHYG%2B4RAvxNFjFA5KyKpK5jyUxa3QZnTEw2fde746ISDK48Nf40yVvJCN4TVZSKK6oXNW0ge0csZosM07mVPCl9ec96lcuFpYv43NOX8SVmMWH6cYxgm8%2BuCSr12%2F8B). Specify desired analysis details for your data in the respective *essential.vars.groovy* file (see below) and run the selected pipeline *marsseq.pipeline.groovy* or *smartsseq.pipeline.groovy* as described [here](https://gitlab.rlp.net/imbforge/NGSpipe2go/-/blob/master/README.md). The analysis allows further parameter fine-tuning subsequent the initial analysis e.g. for plotting and QC thresholding. Therefore, a customisable *sc.report.Rmd* file will be generated in the output reports folder after running the pipeline. Go through the steps and modify the default settings where appropriate. Subsequently, the *sc.report.Rmd* file can be converted to a final html report using the *knitr* R-package.\r +\r +### The pipelines includes:\r +- FastQC, MultiQC and other tools for rawdata quality control\r +- Adapter trimming with Cutadapt\r +- Mapping to the genome using STAR\r +- generation of bigWig tracks for visualisation of alignment\r +- Quantification with featureCounts (Subread) and UMI-tools (if UMIs are used for deduplication)\r +- Downstream analysis in R using a pre-designed markdown report file (*sc.report.Rmd*). Modify this file to fit your custom parameter and thresholds and render it to your final html report. 
The Rmd file uses, among others, the following tools and methods:\r + - QC: the [scater](http://bioconductor.org/packages/release/bioc/html/scater.html) package.\r + - Normalization: the [scran](http://bioconductor.org/packages/release/bioc/html/scran.html) package.\r + - Differential expression analysis: the [scde](http://bioconductor.org/packages/release/bioc/html/scde.html) package.\r + - Trajectory analysis (pseudotime): the [monocle](https://bioconductor.org/packages/release/bioc/html/monocle.html) package.\r +\r +### Pipeline parameter settings\r +- essential.vars.groovy: essential parameter describing the experiment \r + - project folder name\r + - reference genome\r + - experiment design\r + - adapter sequence, etc.\r +- additional (more specialized) parameter can be given in the var.groovy-files of the individual pipeline modules \r +- targets.txt: comma-separated txt-file giving information about the analysed samples. The following columns are required \r + - sample: sample identifier. Must be a unique substring of the input sample file name (e.g. common prefixes and suffixes may be removed). These names are grebbed against the count file names to merge targets.txt to the count data.\r + - plate: plate ID (number) \r + - row: plate row (letter)\r + - col: late column (number)\r + - cells: 0c/1c/10c (control wells)\r + - group: default variable for cell grouping (e.g. by condition)\r + \r + for pool-based libraries like MARSseq required additionally:\r + - pool: the pool ID comprises all cells from 1 library pool (i.e. a set of unique cell barcodes; the cell barcodes are re-used in other pools). Must be a unique substring of the input sample file name. For pool-based design, the pool ID is grebbed against the respective count data filename instead of the sample name as stated above.\r + - barcode: cell barcodes used as cell identifier in the count files. 
After merging the count data with targets.txt, the barcodes are replaced with sample IDs given in the sample column (i.e. here, sample names need not be a substring of input sample file name).\r +\r +### Programs required\r +- FastQC\r +- STAR\r +- Samtools\r +- Bedtools\r +- Subread\r +- Picard\r +- UCSC utilities\r +- RSeQC\r +- UMI-tools\r +- R\r +\r +## Resources\r +- QC: the [scater](http://bioconductor.org/packages/release/bioc/html/scater.html) package.\r +- Normalization: the [scran](http://bioconductor.org/packages/release/bioc/html/scran.html) package.\r +- Trajectory analysis (pseudotime): the [monocle](https://bioconductor.org/packages/release/bioc/html/monocle.html) package.\r +- A [tutorial](https://scrnaseq-course.cog.sanger.ac.uk/website/index.html) from Hemberg lab\r +- Luecken and Theis 2019 [Current best practices in single‐cell RNA‐seq analysis: a tutorial](https://www.embopress.org/doi/10.15252/msb.20188746)\r +\r +\r +""" ; + schema1:keywords "scRNA-seq, smart-seq 2, bpipe, groovy" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "scRNA-seq Smart-seq 2" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/62?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-07-15T07:41:28.679773" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . + + a schema1:Dataset ; + schema1:datePublished "2024-03-26T20:04:38.057044" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-only-VGP3/main" . 
+ + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Rare disease researchers workflow is that they submit their raw data (fastq), run the mapping and variant calling RD-Connect pipeline and obtain unannotated gvcf files to further submit to the RD-Connect GPAP or analyse on their own.\r +\r +This demonstrator focuses on the variant calling pipeline. The raw genomic data is processed using the RD-Connect pipeline ([Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/27604516)) running on the standards (GA4GH) compliant, interoperable container orchestration platform.\r +\r +This demonstrator will be aligned with the current implementation study on [Development of Architecture for Software Containers at ELIXIR and its use by EXCELERATE use-case communities](docs/Appendix%201%20-%20Project%20Plan%202018-biocontainers%2020171117.pdf) \r +\r +For this implementation, different steps are required:\r +\r +1. Adapt the pipeline to CWL and dockerise elements \r +2. Align with IS efforts on software containers to package the different components (Nextflow) \r +3. Submit trio of Illumina NA12878 Platinum Genome or Exome to the GA4GH platform cloud (by Aspera or ftp server)\r +4. Run the RD-Connect pipeline on the container platform\r +5. Return corresponding gvcf files\r +6. OPTIONAL: annotate and update to RD-Connect playground instance\r +\r +N.B: The demonstrator might have some manual steps, which will not be in production. \r +\r +## RD-Connect pipeline\r +\r +Detailed information about the RD-Connect pipeline can be found in [Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/?term=27604516)\r +\r +![alt text](https://raw.githubusercontent.com/inab/Wetlab2Variations/eosc-life/docs/RD-Connect_pipeline.jpg)\r +\r +## The applications\r +\r +**1\\. 
Name of the application: Adaptor removal**\r +Function: remove sequencing adaptors \r +Container (readiness status, location, version): [cutadapt (v.1.18)](https://hub.docker.com/r/cnag/cutadapt) \r +Required resources in cores and RAM: current container size 169MB \r +Input data (amount, format, directory..): raw fastq \r +Output data: paired fastq without adaptors \r +\r +**2\\. Name of the application: Mapping and bam sorting**\r +Function: align data to reference genome \r +Container : [bwa-mem (v.0.7.17)](https://hub.docker.com/r/cnag/bwa) / [Sambamba (v. 0.6.8 )](https://hub.docker.com/r/cnag/sambamba)(or samtools) \r +Resources :current container size 111MB / 32MB \r +Input data: paired fastq without adaptors \r +Output data: sorted bam \r +\r +**3\\. Name of the application: MarkDuplicates** \r +Function: Mark (and remove) duplicates \r +Container: [Picard (v.2.18.25)](https://hub.docker.com/r/cnag/picard)\r +Resources: current container size 261MB \r +Input data:sorted bam \r +Output data: Sorted bam with marked (or removed) duplicates \r +\r +**4\\. Name of the application: Base quality recalibration (BQSR)** \r +Function: Base quality recalibration \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data: Sorted bam with marked (or removed) duplicates \r +Output data: Sorted bam with marked duplicates & base quality recalculated \r +\r +**5\\. Name of the application: Variant calling** \r +Function: variant calling \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data:Sorted bam with marked duplicates & base quality recalculated \r +Output data: unannotated gvcf per sample \r +\r +**6\\. 
(OPTIONAL)Name of the application: Quality of the fastq** \r +Function: report on the sequencing quality \r +Container: [fastqc 0.11.8](https://hub.docker.com/r/cnag/fastqc)\r +Resources: current container size 173MB \r +Input data: raw fastq \r +Output data: QC report \r +\r +## Licensing\r +\r +GATK declares that archived packages are made available for free to academic researchers under a limited license for non-commercial use. If you need to use one of these packages for commercial use. https://software.broadinstitute.org/gatk/download/archive """ ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.106.3" ; + schema1:isBasedOn "https://github.com/inab/Wetlab2Variations/tree/eosc-life/nextflow" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for VariantCaller_GATK3.6" ; + schema1:sdDatePublished "2024-08-05 10:33:15 +0100" ; + schema1:url "https://workflowhub.eu/workflows/106/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 22429 ; + schema1:creator ; + schema1:dateCreated "2021-05-21T08:45:49Z" ; + schema1:dateModified "2023-01-16T13:47:03Z" ; + schema1:description """Rare disease researchers workflow is that they submit their raw data (fastq), run the mapping and variant calling RD-Connect pipeline and obtain unannotated gvcf files to further submit to the RD-Connect GPAP or analyse on their own.\r +\r +This demonstrator focuses on the variant calling pipeline. 
The raw genomic data is processed using the RD-Connect pipeline ([Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/27604516)) running on the standards (GA4GH) compliant, interoperable container orchestration platform.\r +\r +This demonstrator will be aligned with the current implementation study on [Development of Architecture for Software Containers at ELIXIR and its use by EXCELERATE use-case communities](docs/Appendix%201%20-%20Project%20Plan%202018-biocontainers%2020171117.pdf) \r +\r +For this implementation, different steps are required:\r +\r +1. Adapt the pipeline to CWL and dockerise elements \r +2. Align with IS efforts on software containers to package the different components (Nextflow) \r +3. Submit trio of Illumina NA12878 Platinum Genome or Exome to the GA4GH platform cloud (by Aspera or ftp server)\r +4. Run the RD-Connect pipeline on the container platform\r +5. Return corresponding gvcf files\r +6. OPTIONAL: annotate and update to RD-Connect playground instance\r +\r +N.B: The demonstrator might have some manual steps, which will not be in production. \r +\r +## RD-Connect pipeline\r +\r +Detailed information about the RD-Connect pipeline can be found in [Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/?term=27604516)\r +\r +![alt text](https://raw.githubusercontent.com/inab/Wetlab2Variations/eosc-life/docs/RD-Connect_pipeline.jpg)\r +\r +## The applications\r +\r +**1\\. Name of the application: Adaptor removal**\r +Function: remove sequencing adaptors \r +Container (readiness status, location, version): [cutadapt (v.1.18)](https://hub.docker.com/r/cnag/cutadapt) \r +Required resources in cores and RAM: current container size 169MB \r +Input data (amount, format, directory..): raw fastq \r +Output data: paired fastq without adaptors \r +\r +**2\\. Name of the application: Mapping and bam sorting**\r +Function: align data to reference genome \r +Container : [bwa-mem (v.0.7.17)](https://hub.docker.com/r/cnag/bwa) / [Sambamba (v. 
0.6.8 )](https://hub.docker.com/r/cnag/sambamba)(or samtools) \r +Resources :current container size 111MB / 32MB \r +Input data: paired fastq without adaptors \r +Output data: sorted bam \r +\r +**3\\. Name of the application: MarkDuplicates** \r +Function: Mark (and remove) duplicates \r +Container: [Picard (v.2.18.25)](https://hub.docker.com/r/cnag/picard)\r +Resources: current container size 261MB \r +Input data:sorted bam \r +Output data: Sorted bam with marked (or removed) duplicates \r +\r +**4\\. Name of the application: Base quality recalibration (BQSR)** \r +Function: Base quality recalibration \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data: Sorted bam with marked (or removed) duplicates \r +Output data: Sorted bam with marked duplicates & base quality recalculated \r +\r +**5\\. Name of the application: Variant calling** \r +Function: variant calling \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data:Sorted bam with marked duplicates & base quality recalculated \r +Output data: unannotated gvcf per sample \r +\r +**6\\. (OPTIONAL)Name of the application: Quality of the fastq** \r +Function: report on the sequencing quality \r +Container: [fastqc 0.11.8](https://hub.docker.com/r/cnag/fastqc)\r +Resources: current container size 173MB \r +Input data: raw fastq \r +Output data: QC report \r +\r +## Licensing\r +\r +GATK declares that archived packages are made available for free to academic researchers under a limited license for non-commercial use. If you need to use one of these packages for commercial use. 
https://software.broadinstitute.org/gatk/download/archive """ ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/106?version=2" ; + schema1:keywords "Nextflow, variant_calling" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "VariantCaller_GATK3.6" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/106?version=3" ; + schema1:version 3 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 83411 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission 
(EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.127.4" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (Cluster90)" ; + schema1:sdDatePublished "2024-08-05 10:30:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/127/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 39368 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-04-14T08:09:43Z" ; + schema1:dateModified "2023-04-14T08:11:55Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/127?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (Cluster90)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_wf_virtual-screening/blob/master/biobb_wf_virtual-screening/notebooks/clusterBindingSite/wf_vs_clusterBindingSite.ipynb" ; + schema1:version 4 . + + a schema1:Dataset ; + schema1:datePublished "2024-03-11T14:51:55.650444" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/dada2" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "dada2/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:Dataset ; + schema1:datePublished "2024-02-05T15:22:27.802677" ; + schema1:hasPart , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/fastq-to-matrix-10x" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + ; + schema1:name "fastq-to-matrix-10x/scrna-seq-fastq-to-matrix-10x-cellplex" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.20" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.20" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + , + ; + schema1:name "fastq-to-matrix-10x/scrna-seq-fastq-to-matrix-10x-v3" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/fastq-to-matrix-10x" ; + schema1:version "0.2" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Workflow for the GTN training \"Antibiotic resistance detection\"" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/406?version=1" ; + schema1:isBasedOn "https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/plasmid-metagenomics-nanopore/workflows/Workflow-plasmid-metagenomics-nanopore.ga" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for GTN Training - Antibiotic Resistance Detection" ; + schema1:sdDatePublished "2024-08-05 10:31:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/406/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 25395 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-24T13:28:47Z" ; + schema1:dateModified "2023-02-13T14:06:44Z" ; + schema1:description "Workflow for the GTN training \"Antibiotic resistance detection\"" ; + schema1:keywords "Metagenomics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "GTN Training - Antibiotic Resistance Detection" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/406?version=1" ; + schema1:version 1 ; + ns1:input . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "fgbio Best Practices FASTQ to Consensus Pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/985?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/fastquorum" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fastquorum" ; + schema1:sdDatePublished "2024-08-05 10:23:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/985/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10890 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:52Z" ; + schema1:dateModified "2024-06-11T12:54:52Z" ; + schema1:description "fgbio Best Practices FASTQ to Consensus Pipeline" ; + schema1:keywords "Consensus, umi, umis, unique-molecular-identifier" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fastquorum" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/985?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 1287940 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 1423529 ; + schema1:dateModified "2024-07-05T08:47:41+00:00" ; + schema1:name "housing.csv" ; + schema1:sdDatePublished "2024-07-05T11:38:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1266413 ; + schema1:dateModified "2024-07-05T11:38:26+00:00" ; + schema1:name "housing_one_hot_encoded.csv" ; + schema1:sdDatePublished "2024-07-05T11:38:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 141018 ; + schema1:dateModified "2024-07-05T11:38:46+00:00" ; + schema1:name "lat_lon_plot.png" ; + schema1:sdDatePublished "2024-07-05T11:38:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 22381 ; + schema1:dateModified "2024-07-05T11:38:46+00:00" ; + schema1:name "median_income_plot.png" ; + schema1:sdDatePublished "2024-07-05T11:38:51+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """Assembly polishing; can run alone or as part of a combined workflow for large genome assembly. \r +\r +* What it does: Polishes (corrects) an assembly, using long reads (with the tools Racon and Medaka) and short reads (with the tool Racon). (Note: medaka is only for nanopore reads, not PacBio reads). \r +* Inputs: assembly to be polished: assembly.fasta; long reads - the same set used in the assembly (e.g. may be raw or filtered) fastq.gz format; short reads, R1 only, in fastq.gz format\r +* Outputs: Racon+Medaka+Racon polished_assembly. fasta; Fasta statistics after each polishing tool\r +* Tools used: Minimap2, Racon, Fasta statistics, Medaka\r +* Input parameters: None required, but recommended to set the Medaka model correctly (default = r941_min_high_g360). See drop down list for options. \r +\r +Workflow steps:\r +\r +-1- Polish with long reads: using Racon\r +* Long reads and assembly contigs => Racon polishing (subworkflow): \r +* minimap2 : long reads are mapped to assembly => overlaps.paf. 
\r +* overaps, long reads, assembly => Racon => polished assembly 1\r +* using polished assembly 1 as input; repeat minimap2 + racon => polished assembly 2\r +* using polished assembly 2 as input, repeat minimap2 + racon => polished assembly 3\r +* using polished assembly 3 as input, repeat minimap2 + racon => polished assembly 4\r +* Racon long-read polished assembly => Fasta statistics\r +* Note: The Racon tool panel can be a bit confusing and is under review for improvement. Presently it requires sequences (= long reads), overlaps (= the paf file created by minimap2), and target sequences (= the contigs to be polished) as per "usage" described here https://github.com/isovic/racon/blob/master/README.md\r +* Note: Racon: the default setting for "output unpolished target sequences?" is No. This has been changed to Yes for all Racon steps in these polishing workflows. This means that even if no polishes are made in some contigs, they will be part of the output fasta file. \r +* Note: the contigs output by Racon have new tags in their headers. For more on this see https://github.com/isovic/racon/issues/85.\r +\r +-2- Polish with long reads: using Medaka\r +* Racon polished assembly + long reads => medaka polishing X1 => medaka polished assembly\r +* Medaka polished assembly => Fasta statistics\r +\r +-3- Polish with short reads: using Racon\r +* Short reads and Medaka polished assembly =>Racon polish (subworkflow):\r +* minimap2: short reads (R1 only) are mapped to the assembly => overlaps.paf. 
Minimap2 setting is for short reads.\r +* overlaps + short reads + assembly => Racon => polished assembly 1\r +* using polished assembly 1 as input; repeat minimap2 + racon => polished assembly 2\r +* Racon short-read polished assembly => Fasta statistics\r +\r +Options\r +* Change settings for Racon long read polishing if using PacBio reads: The default profile setting for Racon long read polishing: minimap2 read mapping is "Oxford Nanopore read to reference mapping", which is specified as an input parameter to the whole Assembly polishing workflow, as text: map-ont. If you are not using nanopore reads and/or need a different setting, change this input. To see the other available settings, open the minimap2 tool, find "Select a profile of preset options", and click on the drop down menu. For each described option, there is a short text in brackets at the end (e.g. map-pb). This is the text to enter into the assembly polishing workflow at runtime instead of the default (map-ont).\r +* Other options: change the number of polishes (in Racon and/or Medaka). There are ways to assess how much improvement in assembly quality has occurred per polishing round (for example, the number of corrections made; the change in Busco score - see section Genome quality assessment for more on Busco).\r +* Option: change polishing settings for any of these tools. Note: for Racon - these will have to be changed within those subworkflows first. Then, in the main workflow, update the subworkflows, and re-save. \r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.226.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Assembly polishing" ; + schema1:sdDatePublished "2024-08-05 10:32:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/226/ro_crate?version=1" . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 233028 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 68854 ; + schema1:creator ; + schema1:dateCreated "2021-11-08T05:32:22Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """Assembly polishing; can run alone or as part of a combined workflow for large genome assembly. \r +\r +* What it does: Polishes (corrects) an assembly, using long reads (with the tools Racon and Medaka) and short reads (with the tool Racon). (Note: medaka is only for nanopore reads, not PacBio reads). \r +* Inputs: assembly to be polished: assembly.fasta; long reads - the same set used in the assembly (e.g. may be raw or filtered) fastq.gz format; short reads, R1 only, in fastq.gz format\r +* Outputs: Racon+Medaka+Racon polished_assembly. fasta; Fasta statistics after each polishing tool\r +* Tools used: Minimap2, Racon, Fasta statistics, Medaka\r +* Input parameters: None required, but recommended to set the Medaka model correctly (default = r941_min_high_g360). See drop down list for options. \r +\r +Workflow steps:\r +\r +-1- Polish with long reads: using Racon\r +* Long reads and assembly contigs => Racon polishing (subworkflow): \r +* minimap2 : long reads are mapped to assembly => overlaps.paf. \r +* overaps, long reads, assembly => Racon => polished assembly 1\r +* using polished assembly 1 as input; repeat minimap2 + racon => polished assembly 2\r +* using polished assembly 2 as input, repeat minimap2 + racon => polished assembly 3\r +* using polished assembly 3 as input, repeat minimap2 + racon => polished assembly 4\r +* Racon long-read polished assembly => Fasta statistics\r +* Note: The Racon tool panel can be a bit confusing and is under review for improvement. 
Presently it requires sequences (= long reads), overlaps (= the paf file created by minimap2), and target sequences (= the contigs to be polished) as per "usage" described here https://github.com/isovic/racon/blob/master/README.md\r +* Note: Racon: the default setting for "output unpolished target sequences?" is No. This has been changed to Yes for all Racon steps in these polishing workflows. This means that even if no polishes are made in some contigs, they will be part of the output fasta file. \r +* Note: the contigs output by Racon have new tags in their headers. For more on this see https://github.com/isovic/racon/issues/85.\r +\r +-2- Polish with long reads: using Medaka\r +* Racon polished assembly + long reads => medaka polishing X1 => medaka polished assembly\r +* Medaka polished assembly => Fasta statistics\r +\r +-3- Polish with short reads: using Racon\r +* Short reads and Medaka polished assembly =>Racon polish (subworkflow):\r +* minimap2: short reads (R1 only) are mapped to the assembly => overlaps.paf. Minimap2 setting is for short reads.\r +* overlaps + short reads + assembly => Racon => polished assembly 1\r +* using polished assembly 1 as input; repeat minimap2 + racon => polished assembly 2\r +* Racon short-read polished assembly => Fasta statistics\r +\r +Options\r +* Change settings for Racon long read polishing if using PacBio reads: The default profile setting for Racon long read polishing: minimap2 read mapping is "Oxford Nanopore read to reference mapping", which is specified as an input parameter to the whole Assembly polishing workflow, as text: map-ont. If you are not using nanopore reads and/or need a different setting, change this input. To see the other available settings, open the minimap2 tool, find "Select a profile of preset options", and click on the drop down menu. For each described option, there is a short text in brackets at the end (e.g. map-pb). 
This is the text to enter into the assembly polishing workflow at runtime instead of the default (map-ont).\r +* Other options: change the number of polishes (in Racon and/or Medaka). There are ways to assess how much improvement in assembly quality has occurred per polishing round (for example, the number of corrections made; the change in Busco score - see section Genome quality assessment for more on Busco).\r +* Option: change polishing settings for any of these tools. Note: for Racon - these will have to be changed within those subworkflows first. Then, in the main workflow, update the subworkflows, and re-save. \r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "Large-genome-assembly" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Assembly polishing" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/226?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-07-17T13:14:59.673469" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-Trio-phasing-VGP5/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . + + a schema1:Dataset ; + schema1:datePublished "2023-11-23T09:05:16.346809" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/lcms-preprocessing" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "lcms-preprocessing/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.132.5" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Amber Constant pH MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/132/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 53518 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-04T15:33:00Z" ; + schema1:dateModified "2024-05-14T10:17:00Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/132?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Amber Constant pH MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_wf_amber_md_setup/blob/main/biobb_wf_amber_md_setup/notebooks/mdsetup_ph/biobb_amber_CpHMD_notebook.ipynb" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=20" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-08-05 10:22:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=20" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13459 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:40Z" ; + schema1:dateModified "2024-06-11T12:54:40Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=20" ; + schema1:version 20 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.255.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_ligand_parameterization/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL GMX Automatic Ligand Parameterization 
tutorial" ; + schema1:sdDatePublished "2024-08-05 10:32:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/255/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 12313 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2695 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-06T12:24:16Z" ; + schema1:dateModified "2023-06-06T12:33:16Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/255?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL GMX Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_ligand_parameterization/cwl/workflow.cwl" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + ; + ns1:output , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/976?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/cutandrun" ; + schema1:sdDatePublished "2024-08-05 10:24:08 +0100" ; + schema1:url "https://workflowhub.eu/workflows/976/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12785 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:46Z" ; + schema1:dateModified "2024-06-11T12:54:46Z" ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/976?version=7" ; + schema1:keywords "cutandrun, cutandrun-seq, cutandtag, cutandtag-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/cutandrun" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/976?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/278?version=6" ; + schema1:isBasedOn "https://gitlab.bsc.es/lrodrig1/structuralvariants_poc/-/tree/1.1.3/structuralvariants/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNV_pipeline" ; + schema1:sdDatePublished "2024-08-05 10:31:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/278/ro_crate?version=6" . 
+ + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9858 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9607 ; + schema1:creator , + ; + schema1:dateCreated "2022-05-11T14:27:15Z" ; + schema1:dateModified "2022-05-11T14:27:15Z" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/278?version=10" ; + schema1:keywords "cancer, CODEX2, ExomeDepth, manta, TransBioNet, variant calling, GRIDSS, structural variants" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CNV_pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/278?version=6" ; + schema1:version 6 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 74039 . + + a schema1:Dataset ; + schema1:datePublished "2022-10-20T14:11:41.337603" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-sr/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.11" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.280.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_ligand_parameterization/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python GMX Automatic Ligand Parameterization 
tutorial" ; + schema1:sdDatePublished "2024-08-05 10:31:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/280/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1728 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-17T09:41:27Z" ; + schema1:dateModified "2022-03-23T10:02:13Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/280?version=3" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python GMX Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_ligand_parameterization/python/workflow.py" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 655617 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + schema1:dateModified "2023-10-20T11:03:48" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + ; + schema1:name "data" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 372 ; + schema1:dateModified "2023-10-20T11:03:48" ; + schema1:name "ConvectionDiffusionMaterials_center.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 365 ; + schema1:dateModified "2023-10-20T11:03:48" ; + schema1:name "ConvectionDiffusionMaterials_outside.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106382 ; + schema1:dateModified "2023-10-20T11:03:48" ; + schema1:name "GUI_test_center.mdpa" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 221975 ; + schema1:dateModified "2023-10-20T11:03:48" ; + schema1:name "GUI_test_outside.mdpa" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4504 ; + schema1:dateModified "2023-10-20T11:03:48" ; + schema1:name "ProjectParameters_CoSimulation.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4642 ; + schema1:dateModified "2023-10-20T11:03:48" ; + schema1:name "ProjectParameters_CoSimulation_workflow.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4608 ; + schema1:dateModified "2023-10-20T11:03:48" ; + schema1:name "ProjectParameters_CoSimulation_workflow_ROM.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4784 ; + schema1:dateModified "2023-10-20T11:03:48" ; + schema1:name "ProjectParameters_center.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 5551 ; + schema1:dateModified "2023-10-20T11:03:48" ; + schema1:name "ProjectParameters_center_workflow.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 6440 ; + schema1:dateModified "2023-10-20T11:03:48" ; + schema1:name "ProjectParameters_outside.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 7605 ; + schema1:dateModified "2023-10-20T11:03:48" ; + schema1:name "ProjectParameters_outside_workflow.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 179 ; + schema1:dateModified "2023-10-13T07:13:07" ; + schema1:name "load_parameters.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 228 ; + schema1:dateModified "2023-10-13T07:13:07" ; + schema1:name "run_cosim.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:Dataset ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:name "rom_data" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 128 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "HROM_ConditionIds.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 128 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "HROM_ConditionWeights.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1952 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "HROM_ElementIds.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1952 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "HROM_ElementWeights.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 4680 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "NodeIds.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 63856 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "RightBasisMatrix.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1125 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "RomParameters.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 937 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "attrs.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1952 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "aux_w.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1952 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "aux_z.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 456 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "HROM_ConditionIds.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 456 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "HROM_ConditionWeights.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1608 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "HROM_ElementIds.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1608 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "HROM_ElementWeights.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 9600 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "NodeIds.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123264 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "RightBasisMatrix.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1127 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "RomParameters.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 947 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "attrs.json" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1936 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "aux_w.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1936 ; + schema1:dateModified "2023-10-20T11:05:03" ; + schema1:name "aux_z.npy" ; + schema1:sdDatePublished "2023-10-20T11:05:06+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:description "This workflow employs a recombination detection algorithm (GARD) developed by Kosakovsky Pond et al. and implemented in the hyphy package. More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/10?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genomics - Recombination and selection analysis" ; + schema1:sdDatePublished "2024-08-05 10:33:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/10/ro_crate?version=1" . 
+ + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 2947 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14263 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T13:30:37Z" ; + schema1:dateModified "2023-01-16T13:40:25Z" ; + schema1:description "This workflow employs a recombination detection algorithm (GARD) developed by Kosakovsky Pond et al. and implemented in the hyphy package. More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Genomics - Recombination and selection analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/10?version=1" ; + schema1:version 1 ; + ns1:input . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 6047 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """The workflows in this collection are from the '16S Microbial Analysis with mothur' tutorial for analysis of 16S data (Saskia Hiltemann, Bérénice Batut, Dave Clements), adapted for piepline use on galaxy australia (Ahmed Mehdi). The workflows developed in galaxy use mothur software package developed by Schloss et al https://pubmed.ncbi.nlm.nih.gov/19801464/. 
\r +\r +Please also refer to the 16S tutorials available at Galaxy https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html and [https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop/tutorial.html\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/649?version=1" ; + schema1:isBasedOn "https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Workflow 2: Data Cleaning And Chimera Removal [16S Microbial Analysis With Mothur]" ; + schema1:sdDatePublished "2024-08-05 10:27:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/649/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 27542 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2023-11-09T05:11:57Z" ; + schema1:dateModified "2023-11-09T05:11:57Z" ; + schema1:description """The workflows in this collection are from the '16S Microbial Analysis with mothur' tutorial for analysis of 16S data (Saskia Hiltemann, Bérénice Batut, Dave Clements), adapted for piepline use on galaxy australia (Ahmed Mehdi). The workflows developed in galaxy use mothur software package developed by Schloss et al https://pubmed.ncbi.nlm.nih.gov/19801464/. 
\r +\r +Please also refer to the 16S tutorials available at Galaxy https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html and [https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop/tutorial.html\r +""" ; + schema1:isPartOf ; + schema1:keywords "Metagenomics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Workflow 2: Data Cleaning And Chimera Removal [16S Microbial Analysis With Mothur]" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/649?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2024-05-22T15:30:59.439533" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Scaffolding-HiC-VGP8/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + schema1:datePublished "2022-11-29T12:08:35.581759" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "atacseq/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.2" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """Assembly with Flye; can run alone or as part of a combined workflow for large genome assembly. 
\r +\r +* What it does: Assembles long reads with the tool Flye\r +* Inputs: long reads (may be raw, or filtered, and/or corrected); fastq.gz format\r +* Outputs: Flye assembly fasta; Fasta stats on assembly.fasta; Assembly graph image from Bandage; Bar chart of contig sizes; Quast reports of genome assembly\r +* Tools used: Flye, Fasta statistics, Bandage, Bar chart, Quast\r +* Input parameters: None required, but recommend setting assembly mode to match input sequence type\r +\r +Workflow steps:\r +* Long reads are assembled with Flye, using default tool settings. Note: the default setting for read type ("mode") is nanopore raw. Change this at runtime if required. \r +* Statistics are computed from the assembly.fasta file output, using Fasta Statistics and Quast (is genome large: Yes; distinguish contigs with more that 50% unaligned bases: no)\r +* The graphical fragment assembly file is visualized with the tool Bandage. \r +* Assembly information sent to bar chart to visualize contig sizes\r +\r +Options\r +* See other Flye options. \r +* Use a different assembler (in a different workflow). \r +* Bandage image options - change size (max size is 32767), labels - add (e.g. node lengths). You can also install Bandage on your own computer and donwload the "graphical fragment assembly" file to view in greater detail. \r +\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.225.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Assembly with Flye" ; + schema1:sdDatePublished "2024-08-05 10:32:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/225/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 567125 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14840 ; + schema1:creator ; + schema1:dateCreated "2021-11-08T05:07:16Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """Assembly with Flye; can run alone or as part of a combined workflow for large genome assembly. \r +\r +* What it does: Assembles long reads with the tool Flye\r +* Inputs: long reads (may be raw, or filtered, and/or corrected); fastq.gz format\r +* Outputs: Flye assembly fasta; Fasta stats on assembly.fasta; Assembly graph image from Bandage; Bar chart of contig sizes; Quast reports of genome assembly\r +* Tools used: Flye, Fasta statistics, Bandage, Bar chart, Quast\r +* Input parameters: None required, but recommend setting assembly mode to match input sequence type\r +\r +Workflow steps:\r +* Long reads are assembled with Flye, using default tool settings. Note: the default setting for read type ("mode") is nanopore raw. Change this at runtime if required. \r +* Statistics are computed from the assembly.fasta file output, using Fasta Statistics and Quast (is genome large: Yes; distinguish contigs with more that 50% unaligned bases: no)\r +* The graphical fragment assembly file is visualized with the tool Bandage. \r +* Assembly information sent to bar chart to visualize contig sizes\r +\r +Options\r +* See other Flye options. \r +* Use a different assembler (in a different workflow). \r +* Bandage image options - change size (max size is 32767), labels - add (e.g. node lengths). You can also install Bandage on your own computer and donwload the "graphical fragment assembly" file to view in greater detail. 
\r +\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)\r +\r +""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "Large-genome-assembly" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Assembly with Flye" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/225?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-08-05 10:23:17 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=8" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9540 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:15Z" ; + schema1:dateModified "2024-06-11T12:55:15Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Workflow converts one or multiple bam/cram files to fastq format" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/968?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/bamtofastq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bamtofastq" ; + schema1:sdDatePublished "2024-08-05 10:24:15 +0100" ; + schema1:url "https://workflowhub.eu/workflows/968/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10671 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:44Z" ; + schema1:dateModified "2024-06-11T12:54:44Z" ; + schema1:description "Workflow converts one or multiple bam/cram files to fastq format" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/968?version=5" ; + schema1:keywords "bamtofastq, Conversion, cramtofastq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bamtofastq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/968?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1021?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/scrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/scrnaseq" ; + schema1:sdDatePublished "2024-08-05 10:23:15 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1021/ro_crate?version=9" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10279 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:17Z" ; + schema1:dateModified "2024-06-11T12:55:17Z" ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:keywords "10x-genomics, 10xgenomics, alevin, bustools, Cellranger, kallisto, rna-seq, single-cell, star-solo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/scrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1021?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/987?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/funcscan" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/funcscan" ; + schema1:sdDatePublished "2024-08-05 10:23:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/987/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16170 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:54Z" ; + schema1:dateModified "2024-06-11T12:54:54Z" ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/987?version=8" ; + schema1:keywords "amp, AMR, antibiotic-resistance, antimicrobial-peptides, antimicrobial-resistance-genes, arg, Assembly, bgc, biosynthetic-gene-clusters, contigs, function, Metagenomics, natural-products, screening, secondary-metabolites" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/funcscan" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/987?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Genome-wide alternative splicing analysis v.2" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/482?version=6" ; + schema1:license "CC-BY-NC-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genome-wide alternative splicing analysis v.2" ; + schema1:sdDatePublished "2024-08-05 10:30:12 +0100" ; + schema1:url "https://workflowhub.eu/workflows/482/ro_crate?version=6" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 214419 . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 30115 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 109284 ; + schema1:creator ; + schema1:dateCreated "2023-06-11T12:35:19Z" ; + schema1:dateModified "2023-06-11T12:35:37Z" ; + schema1:description "Genome-wide alternative splicing analysis v.2" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/482?version=6" ; + schema1:keywords "Transcriptomics, Alternative splicing, isoform switching" ; + schema1:license "https://spdx.org/licenses/CC-BY-NC-4.0" ; + schema1:name "Genome-wide alternative splicing analysis v.2" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/482?version=6" ; + schema1:version 6 ; + ns1:input , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# Generic consensus building\r +\r +This workflow generates consensus sequences using a list of variants generated by [Variant Calling Workflow](https://workflowhub.eu/workflows/353). \r +\r +The workflow accepts a single input:\r +\r +- A collection of VCF files\r +\r +The workflow produces a single output:\r +\r +- Consensus sequence for each input VCF file\r +\r +The workflow can be accessed at [usegalaxy.org](https://usegalaxy.org/u/aun1/w/consensus-construction)""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/356?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Generic consensus construction from VCF calls" ; + schema1:sdDatePublished "2024-08-05 10:32:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/356/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 59786 ; + schema1:creator ; + schema1:dateCreated "2022-06-03T09:33:10Z" ; + schema1:dateModified "2023-02-13T14:06:44Z" ; + schema1:description """# Generic consensus building\r +\r +This workflow generates consensus sequences using a list of variants generated by [Variant Calling Workflow](https://workflowhub.eu/workflows/353). \r +\r +The workflow accepts a single input:\r +\r +- A collection of VCF files\r +\r +The workflow produces a single output:\r +\r +- Consensus sequence for each input VCF file\r +\r +The workflow can be accessed at [usegalaxy.org](https://usegalaxy.org/u/aun1/w/consensus-construction)""" ; + schema1:keywords "mlxv, generic" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Generic consensus construction from VCF calls" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/356?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A pipeline to demultiplex, QC and map Nanopore data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1002?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/nanoseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nanoseq" ; + schema1:sdDatePublished "2024-08-05 10:23:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1002/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5158 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:05Z" ; + schema1:dateModified "2024-06-11T12:55:05Z" ; + schema1:description "A pipeline to demultiplex, QC and map Nanopore data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1002?version=5" ; + schema1:keywords "Alignment, demultiplexing, nanopore, QC" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nanoseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1002?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Detects SNPs and INDELs." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/35?version=1" ; + schema1:isBasedOn "https://usegalaxy.eu/u/ambarishk/w/gatk4" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for COVID-19: GATK4" ; + schema1:sdDatePublished "2024-08-05 10:33:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/35/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 27629 ; + schema1:dateCreated "2020-06-18T15:49:59Z" ; + schema1:dateModified "2023-02-13T14:06:46Z" ; + schema1:description "Detects SNPs and INDELs." 
; + schema1:image ; + schema1:keywords "Galaxy, SNPs, INDELs, GATK4" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "COVID-19: GATK4" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/35?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 203637 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """In this analysis, we created an extended pathway, using the WikiPathways repository (Version 20210110) and the three -omics datasets. For this, each of the three -omics datasets was first analyzed to identify differentially expressed elements, and pathways associated with the significant miRNA-protein links were detected. A miRNA-protein link is deemed significant, and may possibly be implying causality, if both a miRNA and its target are significantly differentially expressed. \r +\r +The peptidome and the proteome datasets were quantile normalized and log2 transformed (Pan and Zhang 2018; Zhao, Wong, and Goh 2020). Before transformation, peptide IDs were mapped to protein IDs, using the information provided by the data uploaders, and were summarized into single protein-level values using geometric mean. The miRNome dataset was already normalized and transformed; thus, the information of their targeting genes was simply added to each miRNA ID, using the information provided by miTaRBase (Huang et al. 2019). As a result, all three datasets had been mapped to their appropriate gene product-level (or, protein-level) identifiers. 
""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/331?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for EJP-RD WP13 case-study: CAKUT proteome, peptidome and miRNome data analysis using WikiPathways" ; + schema1:sdDatePublished "2024-08-05 10:31:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/331/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1069 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2022-04-20T16:59:50Z" ; + schema1:dateModified "2023-01-16T13:59:42Z" ; + schema1:description """In this analysis, we created an extended pathway, using the WikiPathways repository (Version 20210110) and the three -omics datasets. For this, each of the three -omics datasets was first analyzed to identify differentially expressed elements, and pathways associated with the significant miRNA-protein links were detected. A miRNA-protein link is deemed significant, and may possibly be implying causality, if both a miRNA and its target are significantly differentially expressed. \r +\r +The peptidome and the proteome datasets were quantile normalized and log2 transformed (Pan and Zhang 2018; Zhao, Wong, and Goh 2020). Before transformation, peptide IDs were mapped to protein IDs, using the information provided by the data uploaders, and were summarized into single protein-level values using geometric mean. The miRNome dataset was already normalized and transformed; thus, the information of their targeting genes was simply added to each miRNA ID, using the information provided by miTaRBase (Huang et al. 2019). As a result, all three datasets had been mapped to their appropriate gene product-level (or, protein-level) identifiers. 
""" ; + schema1:keywords "rare diseases, Pathway Analysis, workflow, Proteomics, protein, mirna prediction" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "EJP-RD WP13 case-study: CAKUT proteome, peptidome and miRNome data analysis using WikiPathways" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/331?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.284.2" ; + schema1:isBasedOn 
"https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_complex_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/284/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7128 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-23T08:45:13Z" ; + schema1:dateModified "2023-01-16T13:58:31Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn 
"https://workflowhub.eu/workflows/284?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_complex_md_setup/python/workflow.py" ; + schema1:version 2 . + + a schema1:Dataset ; + schema1:datePublished "2024-07-24T01:31:55.544589" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-decontamination-VGP9" ; + schema1:license "BSD-3-Clause" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-decontamination-VGP9/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "The workflow takes a trimmed HiFi reads collection, runs Meryl to create a K-mer database, Genomescope2 to estimate genome properties and Smudgeplot to estimate ploidy. The main results are K-mer database and genome profiling plots, tables, and values useful for downstream analysis. Default K-mer length and ploidy for Genomescope are 21 and 2, respectively. " ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.603.1" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/galaxy" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ERGA Profiling HiFi v2309 (WF1)" ; + schema1:sdDatePublished "2024-08-05 10:27:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/603/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 42208 ; + schema1:creator , + ; + schema1:dateCreated "2023-10-06T13:25:56Z" ; + schema1:dateModified "2024-03-13T09:07:03Z" ; + schema1:description "The workflow takes a trimmed HiFi reads collection, runs Meryl to create a K-mer database, Genomescope2 to estimate genome properties and Smudgeplot to estimate ploidy. The main results are K-mer database and genome profiling plots, tables, and values useful for downstream analysis. Default K-mer length and ploidy for Genomescope are 21 and 2, respectively. " ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "ERGA, Profiling, HiFi" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ERGA Profiling HiFi v2309 (WF1)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/1.Genome_Profiling/Galaxy-Workflow-ERGA_Profiling_HiFi_v2309_(WF1).ga" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 117463 ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/1.Genome_Profiling/pics/Prof_hifi_2309.png" . + + a schema1:Dataset ; + schema1:datePublished "2024-06-25T09:57:05.229158" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/pathogen-detection-pathogfair-samples-aggregation-and-visualisation" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "pathogen-detection-pathogfair-samples-aggregation-and-visualisation/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.283.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Amber Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/283/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6933 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-23T08:41:32Z" ; + schema1:dateModified "2023-04-14T08:32:35Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/283?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Amber Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_md_setup/python/workflow.py" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-08-05 10:23:18 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=10" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11484 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:15Z" ; + schema1:dateModified "2024-06-11T12:55:15Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=10" ; + schema1:version 10 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/965?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/atacseq" ; + schema1:sdDatePublished "2024-08-05 10:24:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/965/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5755 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:41Z" ; + schema1:dateModified "2024-06-11T12:54:41Z" ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/965?version=8" ; + schema1:keywords "ATAC-seq, chromatin-accessibiity" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/atacseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/965?version=4" ; + schema1:version 4 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """## Summary\r +\r +The data preparation pipeline contains tasks for two distinct scenarios: [leukaemia](https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE425) that contains microarray data for 119 patients and [ovarian](https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE140082) cancer that contains next generation sequencing data for 380 patients.\r +\r +The disease outcome prediction pipeline offers two strategies for this task:\r +\r +**Graph kernel method**: It starts generating personalized networks for each patient using the interactome file provided and generate the patient network checking if each PPI of the interactome has both proteins up regulated or down regulated according to the gene expression table provided. The first step generate a set of graphs for the patients that are evaluated with 4 distinct kernels for graph classification, which are: Linear kernel between edge histograms, Linear kernel between vertex histograms and the Weisfeiler lehman. These kernels functions calculate a similarity matrix for the graphs and then this matrix is used by the support vector machine classifier. Then the predictions are delivered to the last task that exports a report with the accuracy reached by each kernel. It allows some customizations about the network parameters to be used, such as the DEG cutoff to determine up and down regulated based on the log2 fold change, which will determine the topology and the labels distribution in the specific sample graphs. It is also possible customize the type of node/edge attributes passed to the kernel function, which may be only label, only weight or both.\r +\r +**GSEA based pathway scores method**: This method is faster and do not rely on tensor inputs such as the previous method. 
It uses geneset enrichment analysis on the pathways from KEGG 2021 of Human, and uses the scores of the pathways found enriched for the samples to build the numerical features matrix, that is then delivered to the AdaBoost classifier. The user may choose balance the dataset using oversampling strategy provided by SMOTE.\r +\r +## Usage Instructions\r +### Preparation:\r +1. ````git clone https://github.com/YasCoMa/screendop.git````\r +2. ````cd screendop````\r +3. Decompress screening_ovarian/raw_expression_table.tsv.tar.xz\r +4. Create conda environment to handle dependencies: ````conda env create -f drugresponse_env.yml````\r +5. ````conda activate drugresponse_env````\r +6. Setup an environment variable named "path_workflow_screendop" with the full path to this workflow folder\r +\r +### Data preparation - File ````data_preparation_for_pipeline.py```` :\r +\r +#### Files decompression\r +\r +- Decompress data_preparation/lekaemia.tar.xz\r +- Decompress data_preparation/ovarian/GSE140082_data.tar.xz\r + - Put the decompressed file GSE140082_series_matrix.txt in data_preparation/ovarian/\r + \r +#### Pipeline parameters\r +\r +- __-rt__ or __--running_type__
\r + Use to prepare data for the desired scenario:
\r + 1 - Run with Leukaemia data
\r + 2 - Run with Ovarian cancer data\r +\r +#### Running modes examples\r +\r +1. Run for Leukaemia data:
\r +````python3 data_preparation_for_pipeline.py -rt 1 ```` \r +\r +In this case, you must have [R](https://www.r-project.org/) installed and also the library [limma](https://bioconductor.org/packages/release/bioc/html/limma.html), it is used to determine DEGs from microarray data. For this dataset, the files are already prepared in the folder.\r +\r +2. Run for Ovarian cancer data:
\r +````python3 data_preparation_for_pipeline.py -rt 2 ```` \r +\r +In this case, you must have [R](https://www.r-project.org/) installed and also the library [DESeq](https://bioconductor.org/packages/release/bioc/html/DESeq.html), because this scenario treats next generation sequencing data\r +\r +### Disease outcome prediction execution - File ````main.py````:\r +\r +#### Pipeline parameters\r +\r +- __-rt__ or __--running_step__
\r + Use to prepare data for the desired scenario:
\r + 1 - Run graph kernel method
\r + 2 - Run gsea based pathway scores method\r +\r +- __-cf__ or __--configuration_file__
\r + File with the expression values for the genes by sample/patient in tsv format
\r + \r + Example of this file: config.json\r + \r +#### Input configuration file\r +\r +- Configuration file keys (see also the example in config.json):\r + - **folder** (mandatory for both methods): working directory\r + - **identifier**: project identifier to be used in the result files\r + - **mask_expression_table** (mandatory for both methods): Gene expression values file with the result of the fold change normalized value of a certain gene for each sample, already pruned by the significance (p-value). \r + - **raw_expression_table** (mandatory for both methods): Raw gene expression values already normalized following the method pf preference of the user.\r + - **labels_file** (mandatory for both methods): File with the prognosis label for each sample\r + - **deg_cutoff_up**: Cutoff value to determine up regulated gene. Default value is 1.\r + - **deg_cutoff_down**: Cutoff value to determine down regulated gene. Default value is -1.\r + - **nodes_enrichment**: Node attributes to be used in the screening evaluation. It may be a list combining the options "label", "weight" or "all". Examples: ["all", "weight"], ["label"], ["label", "weight"]. Default value is ["all"].\r + - **edges_enrichment**: Edge attributes to be used in the screening evaluation. It may be a list combining the options "label", "weight" or "all". Examples: ["all", "weight"], ["label"], ["label", "weight"]. Default value is ["all"].\r + - **flag_balance**: Flag to indicate whether the user wants to balance the samples in each outcome class, by SMOTE oversampling. Values may be false or true. Default value is false.\r +\r +#### Running modes examples\r +1. Running disease outcome prediction by graph kernel method:
\r + ````python3 main.py -rt 1 -cf config.json````\r +\r +2. Running disease outcome prediction by gsea enriched network method:
\r + ````python3 main.py -rt 2 -cf config.json````\r +\r +## Reference\r +Martins, Y. C. (2023). Multi-task analysis of gene expression data on cancer public datasets. medRxiv, 2023-09.\r +\r +## Bug Report\r +Please, use the [Issue](https://github.com/YasCoMa/screendop/issues) tab to report any bug.""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/621?version=1" ; + schema1:isBasedOn "https://github.com/YasCoMa/screendop" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ScreenDOP - Screening of strategies for disease outcome prediction" ; + schema1:sdDatePublished "2024-08-05 10:27:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/621/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9152 ; + schema1:creator ; + schema1:dateCreated "2023-10-22T00:18:50Z" ; + schema1:dateModified "2023-10-22T00:19:15Z" ; + schema1:description """## Summary\r +\r +The data preparation pipeline contains tasks for two distinct scenarios: [leukaemia](https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE425) that contains microarray data for 119 patients and [ovarian](https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE140082) cancer that contains next generation sequencing data for 380 patients.\r +\r +The disease outcome prediction pipeline offers two strategies for this task:\r +\r +**Graph kernel method**: It starts generating personalized networks for each patient using the interactome file provided and generate the patient network checking if each PPI of the interactome has both proteins up regulated or down regulated according to the gene expression table provided. 
The first step generate a set of graphs for the patients that are evaluated with 4 distinct kernels for graph classification, which are: Linear kernel between edge histograms, Linear kernel between vertex histograms and the Weisfeiler lehman. These kernels functions calculate a similarity matrix for the graphs and then this matrix is used by the support vector machine classifier. Then the predictions are delivered to the last task that exports a report with the accuracy reached by each kernel. It allows some customizations about the network parameters to be used, such as the DEG cutoff to determine up and down regulated based on the log2 fold change, which will determine the topology and the labels distribution in the specific sample graphs. It is also possible customize the type of node/edge attributes passed to the kernel function, which may be only label, only weight or both.\r +\r +**GSEA based pathway scores method**: This method is faster and do not rely on tensor inputs such as the previous method. It uses geneset enrichment analysis on the pathways from KEGG 2021 of Human, and uses the scores of the pathways found enriched for the samples to build the numerical features matrix, that is then delivered to the AdaBoost classifier. The user may choose balance the dataset using oversampling strategy provided by SMOTE.\r +\r +## Usage Instructions\r +### Preparation:\r +1. ````git clone https://github.com/YasCoMa/screendop.git````\r +2. ````cd screendop````\r +3. Decompress screening_ovarian/raw_expression_table.tsv.tar.xz\r +4. Create conda environment to handle dependencies: ````conda env create -f drugresponse_env.yml````\r +5. ````conda activate drugresponse_env````\r +6. 
Setup an environment variable named "path_workflow_screendop" with the full path to this workflow folder\r +\r +### Data preparation - File ````data_preparation_for_pipeline.py```` :\r +\r +#### Files decompression\r +\r +- Decompress data_preparation/lekaemia.tar.xz\r +- Decompress data_preparation/ovarian/GSE140082_data.tar.xz\r + - Put the decompressed file GSE140082_series_matrix.txt in data_preparation/ovarian/\r + \r +#### Pipeline parameters\r +\r +- __-rt__ or __--running_type__
\r + Use to prepare data for the desired scenario:
\r + 1 - Run with Leukaemia data
\r + 2 - Run with Ovarian cancer data\r +\r +#### Running modes examples\r +\r +1. Run for Leukaemia data:
\r +````python3 data_preparation_for_pipeline.py -rt 1 ```` \r +\r +In this case, you must have [R](https://www.r-project.org/) installed and also the library [limma](https://bioconductor.org/packages/release/bioc/html/limma.html), it is used to determine DEGs from microarray data. For this dataset, the files are already prepared in the folder.\r +\r +2. Run for Ovarian cancer data:
\r +````python3 data_preparation_for_pipeline.py -rt 2 ```` \r +\r +In this case, you must have [R](https://www.r-project.org/) installed and also the library [DESeq](https://bioconductor.org/packages/release/bioc/html/DESeq.html), because this scenario treats next generation sequencing data\r +\r +### Disease outcome prediction execution - File ````main.py````:\r +\r +#### Pipeline parameters\r +\r +- __-rt__ or __--running_step__
\r + Use to prepare data for the desired scenario:
\r + 1 - Run graph kernel method
\r + 2 - Run gsea based pathway scores method\r +\r +- __-cf__ or __--configuration_file__
\r + File with the expression values for the genes by sample/patient in tsv format
\r + \r + Example of this file: config.json\r + \r +#### Input configuration file\r +\r +- Configuration file keys (see also the example in config.json):\r + - **folder** (mandatory for both methods): working directory\r + - **identifier**: project identifier to be used in the result files\r + - **mask_expression_table** (mandatory for both methods): Gene expression values file with the result of the fold change normalized value of a certain gene for each sample, already pruned by the significance (p-value). \r + - **raw_expression_table** (mandatory for both methods): Raw gene expression values already normalized following the method pf preference of the user.\r + - **labels_file** (mandatory for both methods): File with the prognosis label for each sample\r + - **deg_cutoff_up**: Cutoff value to determine up regulated gene. Default value is 1.\r + - **deg_cutoff_down**: Cutoff value to determine down regulated gene. Default value is -1.\r + - **nodes_enrichment**: Node attributes to be used in the screening evaluation. It may be a list combining the options "label", "weight" or "all". Examples: ["all", "weight"], ["label"], ["label", "weight"]. Default value is ["all"].\r + - **edges_enrichment**: Edge attributes to be used in the screening evaluation. It may be a list combining the options "label", "weight" or "all". Examples: ["all", "weight"], ["label"], ["label", "weight"]. Default value is ["all"].\r + - **flag_balance**: Flag to indicate whether the user wants to balance the samples in each outcome class, by SMOTE oversampling. Values may be false or true. Default value is false.\r +\r +#### Running modes examples\r +1. Running disease outcome prediction by graph kernel method:
\r + ````python3 main.py -rt 1 -cf config.json````\r +\r +2. Running disease outcome prediction by gsea enriched network method:
\r + ````python3 main.py -rt 2 -cf config.json````\r +\r +## Reference\r +Martins, Y. C. (2023). Multi-task analysis of gene expression data on cancer public datasets. medRxiv, 2023-09.\r +\r +## Bug Report\r +Please, use the [Issue](https://github.com/YasCoMa/screendop/issues) tab to report any bug.""" ; + schema1:keywords "Bioinformatics, personalized medicine, gene set enrichment analysis, disease outcome prediction, public cancer datasets exploration, data wrangling, data transformation" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ScreenDOP - Screening of strategies for disease outcome prediction" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/621?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/986?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/fetchngs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fetchngs" ; + schema1:sdDatePublished "2024-08-05 10:23:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/986/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6338 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:52Z" ; + schema1:dateModified "2024-06-11T12:54:52Z" ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/986?version=13" ; + schema1:keywords "ddbj, download, ena, FASTQ, geo, sra, synapse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fetchngs" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/986?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# COVID-19 Multiscale Modelling of the Virus and Patients’ Tissue Workflow\r +\r +## Table of Contents\r +\r +- [COVID-19 Multiscale Modelling of the Virus and Patients’ Tissue Workflow](#covid-19-multiscale-modelling-of-the-virus-and-patients-tissue-workflow)\r + - [Table of Contents](#table-of-contents)\r + - [Description](#description)\r + - [Contents](#contents)\r + - [Building Blocks](#building-blocks)\r + - [Workflows](#workflows)\r + - [Resources](#resources)\r + - [Tests](#tests)\r + - [Instructions](#instructions)\r + - [Local machine](#local-machine)\r + - [Requirements](#requirements)\r + - [Usage steps](#usage-steps)\r + - [MareNostrum 4](#marenostrum-4)\r + - [Requirements in MN4](#requirements-in-mn4)\r + - [Usage steps in MN4](#usage-steps-in-mn4)\r + - [Mahti or Puhti](#mahti-or-puhti)\r + - [Requirements](#requirements)\r + - [Steps](#steps)\r + - [License](#license)\r + - [Contact](#contact)\r +\r +## Description\r +\r +Uses multiscale simulations to predict patient-specific SARS‑CoV‑2 severity subtypes\r +(moderate, severe or control), using single-cell RNA-Seq data, MaBoSS and PhysiBoSS.\r +Boolean models are 
used to determine the behaviour of individual agents as a function\r +of extracellular conditions and the concentration of different substrates, including\r +the number of virions. Predictions of severity subtypes are based on a meta-analysis of\r +personalised model outputs simulating cellular apoptosis regulation in epithelial cells\r +infected by SARS‑CoV‑2.\r +\r +The workflow uses the following building blocks, described in order of execution:\r +\r +1. High-throughput mutant analysis\r +2. Single-cell processing\r +3. Personalise patient\r +4. PhysiBoSS\r +5. Analysis of all simulations\r +\r +For details on individual workflow steps, see the user documentation for each building block.\r +\r +[`GitHub repository`]()\r +\r +\r +## Contents\r +\r +### Building Blocks\r +\r +The ``BuildingBlocks`` folder contains the script to install the\r +Building Blocks used in the COVID-19 Workflow.\r +\r +### Workflows\r +\r +The ``Workflow`` folder contains the workflows implementations.\r +\r +Currently contains the implementation using PyCOMPSs and Snakemake (in progress).\r +\r +### Resources\r +\r +The ``Resources`` folder contains dataset files.\r +\r +### Tests\r +\r +The ``Tests`` folder contains the scripts that run each Building Block\r +used in the workflow for the given small dataset.\r +They can be executed individually for testing purposes.\r +\r +## Instructions\r +\r +### Local machine\r +\r +This section explains the requirements and usage for the COVID19 Workflow in a laptop or desktop computer.\r +\r +#### Requirements\r +\r +- [`permedcoe`](https://github.com/PerMedCoE/permedcoe) package\r +- [PyCOMPSs](https://pycompss.readthedocs.io/en/stable/Sections/00_Quickstart.html) / [Snakemake](https://snakemake.readthedocs.io/en/stable/)\r +- [Singularity](https://sylabs.io/guides/3.0/user-guide/installation.html)\r +\r +#### Usage steps\r +\r +1. 
Clone this repository:\r +\r + ```bash\r + git clone https://github.com/PerMedCoE/covid-19-workflow.git\r + ```\r +\r +2. Install the Building Blocks required for the COVID19 Workflow:\r +\r + ```bash\r + covid-19-workflow/BuildingBlocks/./install_BBs.sh\r + ```\r +\r +3. Get the required Building Block images from the project [B2DROP](https://b2drop.bsc.es/index.php/f/444350):\r +\r + - Required images:\r + - MaBoSS.singularity\r + - meta_analysis.singularity\r + - PhysiCell-COVID19.singularity\r + - single_cell.singularity\r +\r + The path where these files are stored **MUST be exported in the `PERMEDCOE_IMAGES`** environment variable.\r +\r + > :warning: **TIP**: These containers can be built manually as follows (be patient since some of them may take some time):\r + 1. Clone the `BuildingBlocks` repository\r + ```bash\r + git clone https://github.com/PerMedCoE/BuildingBlocks.git\r + ```\r + 2. Build the required Building Block images\r + ```bash\r + cd BuildingBlocks/Resources/images\r + sudo singularity build MaBoSS.sif MaBoSS.singularity\r + sudo singularity build meta_analysis.sif meta_analysis.singularity\r + sudo singularity build PhysiCell-COVID19.sif PhysiCell-COVID19.singularity\r + sudo singularity build single_cell.sif single_cell.singularity\r + cd ../../..\r + ```\r +\r +**If using PyCOMPSs in local PC** (make sure that PyCOMPSs in installed):\r +\r +4. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflows/PyCOMPSs\r + ```\r +\r +5. Execute `./run.sh`\r +\r +**If using Snakemake in local PC** (make sure that SnakeMake is installed):\r +\r +4. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflows/SnakeMake\r + ```\r +\r +5. 
Execute `./run.sh`\r + > **TIP**: If you want to run the workflow with a different dataset, please update the `run.sh` script setting the `dataset` variable to the new dataset folder and their file names.\r +\r +\r +### MareNostrum 4\r +\r +This section explains the requirements and usage for the COVID19 Workflow in the MareNostrum 4 supercomputer.\r +\r +#### Requirements in MN4\r +\r +- Access to MN4\r +\r +All Building Blocks are already installed in MN4, and the COVID19 Workflow available.\r +\r +#### Usage steps in MN4\r +\r +1. Load the `COMPSs`, `Singularity` and `permedcoe` modules\r +\r + ```bash\r + export COMPSS_PYTHON_VERSION=3\r + module load COMPSs/3.1\r + module load singularity/3.5.2\r + module use /apps/modules/modulefiles/tools/COMPSs/libraries\r + module load permedcoe\r + ```\r +\r + > **TIP**: Include the loading into your `${HOME}/.bashrc` file to load it automatically on the session start.\r +\r + This commands will load COMPSs and the permedcoe package which provides all necessary dependencies, as well as the path to the singularity container images (`PERMEDCOE_IMAGES` environment variable) and testing dataset (`COVID19WORKFLOW_DATASET` environment variable).\r +\r +2. Get a copy of the pilot workflow into your desired folder\r +\r + ```bash\r + mkdir desired_folder\r + cd desired_folder\r + get_covid19workflow\r + ```\r +\r +3. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflow/PyCOMPSs\r + ```\r +\r +4. Execute `./launch.sh`\r +\r + This command will launch a job into the job queuing system (SLURM) requesting 2 nodes (one node acting half master and half worker, and other full worker node) for 20 minutes, and is prepared to use the singularity images that are already deployed in MN4 (located into the `PERMEDCOE_IMAGES` environment variable). 
It uses the dataset located into `../../Resources/data` folder.\r +\r + > :warning: **TIP**: If you want to run the workflow with a different dataset, please edit the `launch.sh` script and define the appropriate dataset path.\r +\r + After the execution, a `results` folder will be available with with COVID19 Workflow results.\r +\r +### Mahti or Puhti\r +\r +This section explains how to run the COVID19 workflow on CSC supercomputers using SnakeMake.\r +\r +#### Requirements\r +\r +- Install snakemake (or check if there is a version installed using `module spider snakemake`)\r +- Install workflow, using the same steps as for the local machine. With the exception that containers have to be built elsewhere.\r +\r +#### Steps\r +\r +\r +1. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflow/SnakeMake\r + ```\r +\r +2. Edit `launch.sh` with the correct partition, account, and resource specifications. \r +\r +3. Execute `./launch.sh`\r +\r + > :warning: Snakemake provides a `--cluster` flag, but this functionality should be avoided as it's really not suited for HPC systems.\r +\r +## License\r +\r +[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\r +\r +## Contact\r +\r +\r +\r +This software has been developed for the [PerMedCoE project](https://permedcoe.eu/), funded by the European Commission (EU H2020 [951773](https://cordis.europa.eu/project/id/951773)).\r +\r +![](https://permedcoe.eu/wp-content/uploads/2020/11/logo_1.png "PerMedCoE")\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/480?version=1" ; + schema1:isBasedOn "https://github.com/PerMedCoE/covid-19-workflow.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PerMedCoE Covid19 Pilot workflow (Nextflow)" ; + schema1:sdDatePublished "2024-08-05 10:30:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/480/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3353 ; + schema1:creator ; + schema1:dateCreated "2023-05-23T13:23:14Z" ; + schema1:dateModified "2023-05-23T13:23:14Z" ; + schema1:description """# COVID-19 Multiscale Modelling of the Virus and Patients’ Tissue Workflow\r +\r +## Table of Contents\r +\r +- [COVID-19 Multiscale Modelling of the Virus and Patients’ Tissue Workflow](#covid-19-multiscale-modelling-of-the-virus-and-patients-tissue-workflow)\r + - [Table of Contents](#table-of-contents)\r + - [Description](#description)\r + - [Contents](#contents)\r + - [Building Blocks](#building-blocks)\r + - [Workflows](#workflows)\r + - [Resources](#resources)\r + - [Tests](#tests)\r + - [Instructions](#instructions)\r + - [Local machine](#local-machine)\r + - [Requirements](#requirements)\r + - [Usage steps](#usage-steps)\r + - [MareNostrum 4](#marenostrum-4)\r + - [Requirements in MN4](#requirements-in-mn4)\r + - [Usage steps in MN4](#usage-steps-in-mn4)\r + - [Mahti or Puhti](#mahti-or-puhti)\r + - [Requirements](#requirements)\r + - [Steps](#steps)\r + - [License](#license)\r + - [Contact](#contact)\r +\r +## Description\r +\r +Uses multiscale simulations to predict patient-specific SARS‑CoV‑2 severity subtypes\r +(moderate, severe or control), using single-cell RNA-Seq data, MaBoSS and PhysiBoSS.\r +Boolean models are used to determine the behaviour of individual agents as a function\r +of extracellular conditions and the concentration of different substrates, including\r +the number of virions. Predictions of severity subtypes are based on a meta-analysis of\r +personalised model outputs simulating cellular apoptosis regulation in epithelial cells\r +infected by SARS‑CoV‑2.\r +\r +The workflow uses the following building blocks, described in order of execution:\r +\r +1. High-throughput mutant analysis\r +2. Single-cell processing\r +3. 
Personalise patient\r +4. PhysiBoSS\r +5. Analysis of all simulations\r +\r +For details on individual workflow steps, see the user documentation for each building block.\r +\r +[`GitHub repository`]()\r +\r +\r +## Contents\r +\r +### Building Blocks\r +\r +The ``BuildingBlocks`` folder contains the script to install the\r +Building Blocks used in the COVID-19 Workflow.\r +\r +### Workflows\r +\r +The ``Workflow`` folder contains the workflows implementations.\r +\r +Currently contains the implementation using PyCOMPSs and Snakemake (in progress).\r +\r +### Resources\r +\r +The ``Resources`` folder contains dataset files.\r +\r +### Tests\r +\r +The ``Tests`` folder contains the scripts that run each Building Block\r +used in the workflow for the given small dataset.\r +They can be executed individually for testing purposes.\r +\r +## Instructions\r +\r +### Local machine\r +\r +This section explains the requirements and usage for the COVID19 Workflow in a laptop or desktop computer.\r +\r +#### Requirements\r +\r +- [`permedcoe`](https://github.com/PerMedCoE/permedcoe) package\r +- [PyCOMPSs](https://pycompss.readthedocs.io/en/stable/Sections/00_Quickstart.html) / [Snakemake](https://snakemake.readthedocs.io/en/stable/)\r +- [Singularity](https://sylabs.io/guides/3.0/user-guide/installation.html)\r +\r +#### Usage steps\r +\r +1. Clone this repository:\r +\r + ```bash\r + git clone https://github.com/PerMedCoE/covid-19-workflow.git\r + ```\r +\r +2. Install the Building Blocks required for the COVID19 Workflow:\r +\r + ```bash\r + covid-19-workflow/BuildingBlocks/./install_BBs.sh\r + ```\r +\r +3. 
Get the required Building Block images from the project [B2DROP](https://b2drop.bsc.es/index.php/f/444350):\r +\r + - Required images:\r + - MaBoSS.singularity\r + - meta_analysis.singularity\r + - PhysiCell-COVID19.singularity\r + - single_cell.singularity\r +\r + The path where these files are stored **MUST be exported in the `PERMEDCOE_IMAGES`** environment variable.\r +\r + > :warning: **TIP**: These containers can be built manually as follows (be patient since some of them may take some time):\r + 1. Clone the `BuildingBlocks` repository\r + ```bash\r + git clone https://github.com/PerMedCoE/BuildingBlocks.git\r + ```\r + 2. Build the required Building Block images\r + ```bash\r + cd BuildingBlocks/Resources/images\r + sudo singularity build MaBoSS.sif MaBoSS.singularity\r + sudo singularity build meta_analysis.sif meta_analysis.singularity\r + sudo singularity build PhysiCell-COVID19.sif PhysiCell-COVID19.singularity\r + sudo singularity build single_cell.sif single_cell.singularity\r + cd ../../..\r + ```\r +\r +**If using PyCOMPSs in local PC** (make sure that PyCOMPSs in installed):\r +\r +4. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflows/PyCOMPSs\r + ```\r +\r +5. Execute `./run.sh`\r +\r +**If using Snakemake in local PC** (make sure that SnakeMake is installed):\r +\r +4. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflows/SnakeMake\r + ```\r +\r +5. Execute `./run.sh`\r + > **TIP**: If you want to run the workflow with a different dataset, please update the `run.sh` script setting the `dataset` variable to the new dataset folder and their file names.\r +\r +\r +### MareNostrum 4\r +\r +This section explains the requirements and usage for the COVID19 Workflow in the MareNostrum 4 supercomputer.\r +\r +#### Requirements in MN4\r +\r +- Access to MN4\r +\r +All Building Blocks are already installed in MN4, and the COVID19 Workflow available.\r +\r +#### Usage steps in MN4\r +\r +1. 
Load the `COMPSs`, `Singularity` and `permedcoe` modules\r +\r + ```bash\r + export COMPSS_PYTHON_VERSION=3\r + module load COMPSs/3.1\r + module load singularity/3.5.2\r + module use /apps/modules/modulefiles/tools/COMPSs/libraries\r + module load permedcoe\r + ```\r +\r + > **TIP**: Include the loading into your `${HOME}/.bashrc` file to load it automatically on the session start.\r +\r + This commands will load COMPSs and the permedcoe package which provides all necessary dependencies, as well as the path to the singularity container images (`PERMEDCOE_IMAGES` environment variable) and testing dataset (`COVID19WORKFLOW_DATASET` environment variable).\r +\r +2. Get a copy of the pilot workflow into your desired folder\r +\r + ```bash\r + mkdir desired_folder\r + cd desired_folder\r + get_covid19workflow\r + ```\r +\r +3. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflow/PyCOMPSs\r + ```\r +\r +4. Execute `./launch.sh`\r +\r + This command will launch a job into the job queuing system (SLURM) requesting 2 nodes (one node acting half master and half worker, and other full worker node) for 20 minutes, and is prepared to use the singularity images that are already deployed in MN4 (located into the `PERMEDCOE_IMAGES` environment variable). It uses the dataset located into `../../Resources/data` folder.\r +\r + > :warning: **TIP**: If you want to run the workflow with a different dataset, please edit the `launch.sh` script and define the appropriate dataset path.\r +\r + After the execution, a `results` folder will be available with with COVID19 Workflow results.\r +\r +### Mahti or Puhti\r +\r +This section explains how to run the COVID19 workflow on CSC supercomputers using SnakeMake.\r +\r +#### Requirements\r +\r +- Install snakemake (or check if there is a version installed using `module spider snakemake`)\r +- Install workflow, using the same steps as for the local machine. 
With the exception that containers have to be built elsewhere.\r +\r +#### Steps\r +\r +\r +1. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflow/SnakeMake\r + ```\r +\r +2. Edit `launch.sh` with the correct partition, account, and resource specifications. \r +\r +3. Execute `./launch.sh`\r +\r + > :warning: Snakemake provides a `--cluster` flag, but this functionality should be avoided as it's really not suited for HPC systems.\r +\r +## License\r +\r +[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\r +\r +## Contact\r +\r +\r +\r +This software has been developed for the [PerMedCoE project](https://permedcoe.eu/), funded by the European Commission (EU H2020 [951773](https://cordis.europa.eu/project/id/951773)).\r +\r +![](https://permedcoe.eu/wp-content/uploads/2020/11/logo_1.png "PerMedCoE")\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "PerMedCoE Covid19 Pilot workflow (Nextflow)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/480?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# Pangenome databases provide superior host removal and mycobacteria classification from clinical metagenomic data\r +\r +> Hall, M, Coin, L., Pangenome databases provide superior host removal and mycobacteria classification from clinical metagenomic data. bioRxiv 2023. doi: [10.1101/2023.09.18.558339][doi]\r +\r +Benchmarking different ways of doing read (taxonomic) classification, with a focus on\r +removal of contamination and classification of _M. 
tuberculosis_ reads.\r +\r +This repository contains the code and snakemake pipeline to build/download the\r +databases, obtain all results from [the paper][doi], along with accompanying configuration\r +files.\r +\r +Custom databases have all been uploaded to Zenodo, along with the simulated reads:\r +\r +- Nanopore simulated metagenomic reads - https://doi.org/10.5281/zenodo.8339788\r +- Illumina simulated metagenomic reads - https://doi.org/10.5281/zenodo.8339790\r +- Kraken2 database built from the Human Pangenome Reference Consortium\r + genomes - https://doi.org/10.5281/zenodo.8339731\r +- Kraken2 database built from the kraken2 Human\r + library - https://doi.org/10.5281/zenodo.8339699\r +- Kraken2 database built from a *Mycobacterium* representative set of\r + genomes - https://doi.org/10.5281/zenodo.8339821\r +- A (fasta) database of representative genomes from the *Mycobacterium*\r + genus - https://doi.org/10.5281/zenodo.8339940\r +- A (fasta) database of *M. tuberculosis* genomes from a variety of\r + lineages - https://doi.org/10.5281/zenodo.8339947\r +- The fasta file built from the [Clockwork](https://github.com/iqbal-lab-org/clockwork)\r + decontamination pipeline - https://doi.org/10.5281/zenodo.8339802\r +\r +## Example usage\r +\r +We provide some usage examples showing how to download the databases and then use them\r +on your reads.\r +\r +### Human read removal\r +\r +The method we found to give the best balance of runtime, memory usage, and precision and\r +recall was kraken2 with a database built from the Human Pangenome Reference Consortium\r +genomes.\r +\r +This example has been wrapped into a standalone tool called [`nohuman`](https://github.com/mbhall88/nohuman/) which takes a fastq as input and returns a fastq with human reads removed.\r +\r +#### Download human database\r +\r +```\r +mkdir HPRC_db/\r +cd HPRC_db\r +URL="https://zenodo.org/record/8339732/files/k2_HPRC_20230810.tar.gz"\r +wget "$URL"\r +tar -xzf 
k2_HPRC_20230810.tar.gz\r +rm k2_HPRC_20230810.tar.gz\r +```\r +\r +#### Run kraken2 with HPRC database\r +\r +You'll need [kraken2](https://github.com/DerrickWood/kraken2) installed for this step.\r +\r +```\r +kraken2 --threads 4 --db HPRC_db/ --output classifications.tsv reads.fq\r +```\r +\r +If you are using Illumina reads, a slight adjustment is needed\r +\r +```\r +kraken2 --paired --threads 4 --db HPRC_db/ --output classifications.tsv reads_1.fq reads_2.fq\r +```\r +\r +#### Extract non-human reads\r +\r +You'll need [seqkit](https://github.com/shenwei356/seqkit) installed for this step\r +\r +For Nanopore data\r +\r +```\r +awk -F'\\t' '$1=="U" {print $2}' classifications.tsv | \\\r + seqkit grep -f - -o reads.depleted.fq reads.fq\r +```\r +\r +For Illumina data\r +\r +```\r +awk -F'\\t' '$1=="U" {print $2}' classifications.tsv > ids.txt\r +seqkit grep --id-regexp '^(\\S+)/[12]' -f ids.txt -o reads_1.depleted.fq reads_1.fq\r +seqkit grep --id-regexp '^(\\S+)/[12]' -f ids.txt -o reads_2.depleted.fq reads_2.fq\r +```\r +\r +### *M. tuberculosis* classification/enrichment\r +\r +For this step we recommend either [minimap2](https://github.com/lh3/minimap2) or kraken2\r +with a *Mycobacterium* genus database. 
We leave it to the user to decide which approach\r +they prefer based on the results in our manuscript.\r +\r +#### Download databases\r +\r +```\r +mkdir Mycobacterium_db\r +cd Mycobacterium_db\r +# download database for use with minimap2\r +URL="https://zenodo.org/record/8339941/files/Mycobacterium.rep.fna.gz"\r +wget "$URL"\r +IDS_URL="https://zenodo.org/record/8343322/files/mtb.ids"\r +wget "$IDS_URL"\r +# download kraken database\r +URL="https://zenodo.org/record/8339822/files/k2_Mycobacterium_20230817.tar.gz"\r +wget "$URL"\r +tar -xzf k2_Mycobacterium_20230817.tar.gz\r +rm k2_Mycobacterium_20230817.tar.gz\r +```\r +\r +#### Classify reads\r +\r +**minimap2**\r +\r +```\r +# nanopore\r +minimap2 --secondary=no -c -t 4 -x map-ont -o reads.aln.paf Mycobacterium_db/Mycobacterium.rep.fna.gz reads.depleted.fq\r +# illumina\r +minimap2 --secondary=no -c -t 4 -x sr -o reads.aln.paf Mycobacterium_db/Mycobacterium.rep.fna.gz reads_1.depleted.fq reads_2.depleted.fq\r +```\r +\r +**kraken2**\r +\r +```\r +# nanopore\r +kraken2 --db Mycobacterium_db --threads 4 --report myco.kreport --output classifications.myco.tsv reads.depleted.fq\r +# illumina\r +kraken2 --db Mycobacterium_db --paired --threads 4 --report myco.kreport --output classifications.myco.tsv reads_1.depleted.fq reads_2.depleted.fq\r +```\r +\r +#### Extract *M. 
tuberculosis* reads\r +\r +**minimap2**\r +\r +```\r +# nanopore\r +grep -Ff Mycobacterium_db/mtb.ids reads.aln.paf | cut -f1 | \\\r + seqkit grep -f - -o reads.enriched.fq reads.depleted.fq\r +# illumina\r +grep -Ff Mycobacterium_db/mtb.ids reads.aln.paf | cut -f1 > keep.ids\r +seqkit grep -f keep.ids -o reads_1.enriched.fq reads_1.depleted.fq\r +seqkit grep -f keep.ids -o reads_2.enriched.fq reads_2.depleted.fq\r +```\r +\r +**kraken2**\r +\r +We'll use\r +the [`extract_kraken_reads.py` script](https://github.com/jenniferlu717/KrakenTools#extract_kraken_readspy)\r +for this\r +\r +```\r +# nanopore\r +python extract_kraken_reads.py -k classifications.myco.tsv -1 reads.depleted.fq -o reads.enriched.fq -t 1773 -r myco.kreport --include-children\r +# illumina\r +python extract_kraken_reads.py -k classifications.myco.tsv -1 reads_1.depleted.fq -2 reads_2.depleted.fq -o reads_1.enriched.fq -o2 reads_2.enriched.fq -t 1773 -r myco.kreport --include-children\r +```\r +\r +[doi]: https://doi.org/10.1101/2023.09.18.558339 \r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.700.1" ; + schema1:isBasedOn "https://github.com/mbhall88/classification_benchmark.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Pangenome databases provide superior host removal and mycobacteria classification from clinical metagenomic data" ; + schema1:sdDatePublished "2024-08-05 10:26:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/700/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1531 ; + schema1:creator ; + schema1:dateCreated "2024-01-09T05:43:00Z" ; + schema1:dateModified "2024-01-09T20:38:42Z" ; + schema1:description """# Pangenome databases provide superior host removal and mycobacteria classification from clinical metagenomic data\r +\r +> Hall, M, Coin, L., Pangenome databases provide superior host removal and mycobacteria classification from clinical metagenomic data. bioRxiv 2023. doi: [10.1101/2023.09.18.558339][doi]\r +\r +Benchmarking different ways of doing read (taxonomic) classification, with a focus on\r +removal of contamination and classification of _M. tuberculosis_ reads.\r +\r +This repository contains the code and snakemake pipeline to build/download the\r +databases, obtain all results from [the paper][doi], along with accompanying configuration\r +files.\r +\r +Custom databases have all been uploaded to Zenodo, along with the simulated reads:\r +\r +- Nanopore simulated metagenomic reads - https://doi.org/10.5281/zenodo.8339788\r +- Illumina simulated metagenomic reads - https://doi.org/10.5281/zenodo.8339790\r +- Kraken2 database built from the Human Pangenome Reference Consortium\r + genomes - https://doi.org/10.5281/zenodo.8339731\r +- Kraken2 database built from the kraken2 Human\r + library - https://doi.org/10.5281/zenodo.8339699\r +- Kraken2 database built from a *Mycobacterium* representative set of\r + genomes - https://doi.org/10.5281/zenodo.8339821\r +- A (fasta) database of representative genomes from the *Mycobacterium*\r + genus - https://doi.org/10.5281/zenodo.8339940\r +- A (fasta) database of *M. 
tuberculosis* genomes from a variety of\r + lineages - https://doi.org/10.5281/zenodo.8339947\r +- The fasta file built from the [Clockwork](https://github.com/iqbal-lab-org/clockwork)\r + decontamination pipeline - https://doi.org/10.5281/zenodo.8339802\r +\r +## Example usage\r +\r +We provide some usage examples showing how to download the databases and then use them\r +on your reads.\r +\r +### Human read removal\r +\r +The method we found to give the best balance of runtime, memory usage, and precision and\r +recall was kraken2 with a database built from the Human Pangenome Reference Consortium\r +genomes.\r +\r +This example has been wrapped into a standalone tool called [`nohuman`](https://github.com/mbhall88/nohuman/) which takes a fastq as input and returns a fastq with human reads removed.\r +\r +#### Download human database\r +\r +```\r +mkdir HPRC_db/\r +cd HPRC_db\r +URL="https://zenodo.org/record/8339732/files/k2_HPRC_20230810.tar.gz"\r +wget "$URL"\r +tar -xzf k2_HPRC_20230810.tar.gz\r +rm k2_HPRC_20230810.tar.gz\r +```\r +\r +#### Run kraken2 with HPRC database\r +\r +You'll need [kraken2](https://github.com/DerrickWood/kraken2) installed for this step.\r +\r +```\r +kraken2 --threads 4 --db HPRC_db/ --output classifications.tsv reads.fq\r +```\r +\r +If you are using Illumina reads, a slight adjustment is needed\r +\r +```\r +kraken2 --paired --threads 4 --db HPRC_db/ --output classifications.tsv reads_1.fq reads_2.fq\r +```\r +\r +#### Extract non-human reads\r +\r +You'll need [seqkit](https://github.com/shenwei356/seqkit) installed for this step\r +\r +For Nanopore data\r +\r +```\r +awk -F'\\t' '$1=="U" {print $2}' classifications.tsv | \\\r + seqkit grep -f - -o reads.depleted.fq reads.fq\r +```\r +\r +For Illumina data\r +\r +```\r +awk -F'\\t' '$1=="U" {print $2}' classifications.tsv > ids.txt\r +seqkit grep --id-regexp '^(\\S+)/[12]' -f ids.txt -o reads_1.depleted.fq reads_1.fq\r +seqkit grep --id-regexp '^(\\S+)/[12]' -f ids.txt -o 
reads_2.depleted.fq reads_2.fq\r +```\r +\r +### *M. tuberculosis* classification/enrichment\r +\r +For this step we recommend either [minimap2](https://github.com/lh3/minimap2) or kraken2\r +with a *Mycobacterium* genus database. We leave it to the user to decide which approach\r +they prefer based on the results in our manuscript.\r +\r +#### Download databases\r +\r +```\r +mkdir Mycobacterium_db\r +cd Mycobacterium_db\r +# download database for use with minimap2\r +URL="https://zenodo.org/record/8339941/files/Mycobacterium.rep.fna.gz"\r +wget "$URL"\r +IDS_URL="https://zenodo.org/record/8343322/files/mtb.ids"\r +wget "$IDS_URL"\r +# download kraken database\r +URL="https://zenodo.org/record/8339822/files/k2_Mycobacterium_20230817.tar.gz"\r +wget "$URL"\r +tar -xzf k2_Mycobacterium_20230817.tar.gz\r +rm k2_Mycobacterium_20230817.tar.gz\r +```\r +\r +#### Classify reads\r +\r +**minimap2**\r +\r +```\r +# nanopore\r +minimap2 --secondary=no -c -t 4 -x map-ont -o reads.aln.paf Mycobacterium_db/Mycobacterium.rep.fna.gz reads.depleted.fq\r +# illumina\r +minimap2 --secondary=no -c -t 4 -x sr -o reads.aln.paf Mycobacterium_db/Mycobacterium.rep.fna.gz reads_1.depleted.fq reads_2.depleted.fq\r +```\r +\r +**kraken2**\r +\r +```\r +# nanopore\r +kraken2 --db Mycobacterium_db --threads 4 --report myco.kreport --output classifications.myco.tsv reads.depleted.fq\r +# illumina\r +kraken2 --db Mycobacterium_db --paired --threads 4 --report myco.kreport --output classifications.myco.tsv reads_1.depleted.fq reads_2.depleted.fq\r +```\r +\r +#### Extract *M. 
tuberculosis* reads\r +\r +**minimap2**\r +\r +```\r +# nanopore\r +grep -Ff Mycobacterium_db/mtb.ids reads.aln.paf | cut -f1 | \\\r + seqkit grep -f - -o reads.enriched.fq reads.depleted.fq\r +# illumina\r +grep -Ff Mycobacterium_db/mtb.ids reads.aln.paf | cut -f1 > keep.ids\r +seqkit grep -f keep.ids -o reads_1.enriched.fq reads_1.depleted.fq\r +seqkit grep -f keep.ids -o reads_2.enriched.fq reads_2.depleted.fq\r +```\r +\r +**kraken2**\r +\r +We'll use\r +the [`extract_kraken_reads.py` script](https://github.com/jenniferlu717/KrakenTools#extract_kraken_readspy)\r +for this\r +\r +```\r +# nanopore\r +python extract_kraken_reads.py -k classifications.myco.tsv -1 reads.depleted.fq -o reads.enriched.fq -t 1773 -r myco.kreport --include-children\r +# illumina\r +python extract_kraken_reads.py -k classifications.myco.tsv -1 reads_1.depleted.fq -2 reads_2.depleted.fq -o reads_1.enriched.fq -o2 reads_2.enriched.fq -t 1773 -r myco.kreport --include-children\r +```\r +\r +[doi]: https://doi.org/10.1101/2023.09.18.558339 \r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/700?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Pangenome databases provide superior host removal and mycobacteria classification from clinical metagenomic data" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/700?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """Kmer counting step, can run alone or as part of a combined workflow for large genome assembly. \r +\r +* What it does: Estimates genome size and heterozygosity based on counts of kmers\r +* Inputs: One set of short reads: e.g. 
R1.fq.gz\r +* Outputs: GenomeScope graphs\r +* Tools used: Meryl, GenomeScope\r +* Input parameters: None required\r +* Workflow steps: The tool meryl counts kmers in the input reads (k=21), then converts this into a histogram. GenomeScope: runs a model on the histogram; reports estimates. k-mer size set to 21. \r +* Options: Use a different kmer counting tool. e.g. khmer.\r +\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.223.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for kmer counting - meryl" ; + schema1:sdDatePublished "2024-08-05 10:32:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/223/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9477 ; + schema1:creator ; + schema1:dateCreated "2021-11-08T04:47:27Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """Kmer counting step, can run alone or as part of a combined workflow for large genome assembly. \r +\r +* What it does: Estimates genome size and heterozygosity based on counts of kmers\r +* Inputs: One set of short reads: e.g. R1.fq.gz\r +* Outputs: GenomeScope graphs\r +* Tools used: Meryl, GenomeScope\r +* Input parameters: None required\r +* Workflow steps: The tool meryl counts kmers in the input reads (k=21), then converts this into a histogram. GenomeScope: runs a model on the histogram; reports estimates. k-mer size set to 21. \r +* Options: Use a different kmer counting tool. e.g. 
khmer.\r +\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "Large-genome-assembly" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "kmer counting - meryl" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/223?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 368783 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """## Description\r +\r +The workflow takes an input file with Cancer Driver Genes predictions (i.e. the results provided by a participant), computes a set of metrics, and compares them against the data currently stored in OpenEBench within the TCGA community. Two assessment metrics are provided for that predicitions. Also, some plots (which are optional) that allow to visualize the performance of the tool are generated. The workflow consists in three standard steps, defined by OpenEBench. The tools needed to run these steps must be in one or more Docker images, generated from [Docker containers](https://github.com/inab/TCGA_benchmarking_dockers ). Separated instances are spawned from these images for each step:\r +1. **Validation**: the input file format is checked and, if required, the content of the file is validated (e.g check whether the submitted gene IDs exist)\r +2. **Metrics Computation**: the predictions are compared with the 'Gold Standards' provided by the community, which results in two performance metrics - precision (Positive Predictive Value) and recall(True Positive Rate).\r +3. **Results Consolidation**: the benchmark itself is performed by merging the tool metrics with the rest of TCGA data. 
The results are provided in JSON format and SVG format (scatter plot).\r +\r +![alt text](https://raw.githubusercontent.com/inab/TCGA_benchmarking_workflow/1.0.4/workflow.jpg)\r +\r +\r +## Data\r +\r +* [TCGA_sample_data](./TCGA_sample_data) folder contains all the reference data required by the steps. It is derived from the manuscript:\r +[Comprehensive Characterization of Cancer Driver Genes and Mutations](https://www.cell.com/cell/fulltext/S0092-8674%2818%2930237-X?code=cell-site), Bailey et al, 2018, Cell [![doi:10.1016/j.cell.2018.02.060](https://img.shields.io/badge/doi-10.1016%2Fj.cell.2018.02.060-green.svg)](https://doi.org/10.1016/j.cell.2018.02.060) \r +* [TCGA_sample_out](./TCGA_sample_out) folder contains an example output for a worklow run, with two cancer types / challenges selected (ACC, BRCA). Results obtained from the default execution should be similar to those ones available in this directory. Results found in [TCGA_sample_out/results](./TCGA_sample_out/results) can be visualized in the browser using [`benchmarking_workflows_results_visualizer` javascript library](https://github.com/inab/benchmarking_workflows_results_visualizer).\r +\r +\r +## Usage\r +In order to use the workflow you need to:\r +* Install [Nextflow](https://www.nextflow.io/), which depends on Java virtual machine (>=8 , <15 ). You can automate their installation for local testing using [run_local_nextflow.bash](run_local_nextflow.bash).\r +* Clone [TCGA benchmarking Docker definitions repository](https://github.com/inab/TCGA_benchmarking_dockers) from tag 1.0.3 in a separate directory, and build locally the three Docker images found in it, running the `build.sh 1.0.3` script within that repo.\r +* Run it just using either *`nextflow run main.nf -profile docker`* or *`./run_local_nextflow.bash run main.nf -profile docker`*. 
Arguments specifications:\r +\r +```\r +Usage:\r +Run the pipeline with default parameters:\r +nextflow run main.nf -profile docker\r +\r +Run with user parameters:\r +nextflow run main.nf -profile docker --predictionsFile {driver.genes.file} --public_ref_dir {validation.reference.file} --participant_name {tool.name} --metrics_ref_dir {gold.standards.dir} --cancer_types {analyzed.cancer.types} --assess_dir {benchmark.data.dir} --results_dir {output.dir}\r +\r +Mandatory arguments:\r + --input List of cancer genes prediction\r + --community_id Name or OEB permanent ID for the benchmarking community\r + --public_ref_dir Directory with list of cancer genes used to validate the predictions\r + --participant_id Name of the tool used for prediction\r + --goldstandard_dir Dir that contains metrics reference datasets for all cancer types\r + --event_id List of types of cancer selected by the user, separated by spaces\r + --assess_dir Dir where the data for the benchmark are stored\r +\r +Other options:\r + --validation_result The output file where the results from validation step will be saved\r + --assessment_results The output file where the results from the computed metrics step will be saved\r + --aggregation_results The output file where the consolidation of the benchmark will be saved\r + --statistics_results The output directory with nextflow statistics\r + --outdir The output directory where the consolidation of the benchmark will be saved\r + --statsdir The output directory with nextflow statistics\r + --data_model_export_dir The output dir where json file with benchmarking data model contents will be saved\r + --otherdir The output directory where custom results will be saved (no directory inside)\r +Flags:\r + --help Display this message\r +```\r +\r +Default input parameters and Docker images to use for each step can be specified in the [config](./nextflow.config) file\r +**NOTE: In order to make your workflow compatible with the [OpenEBench VRE Nextflow 
Executor](https://github.com/inab/vre-process_nextflow-executor), please make sure to use the same parameter names in your workflow.**""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/244?version=1" ; + schema1:isBasedOn "https://github.com/inab/TCGA_benchmarking_workflow/tree/1.0.8" ; + schema1:license "LGPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for OpenEBench assessment workflow" ; + schema1:sdDatePublished "2024-08-05 10:32:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/244/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5597 ; + schema1:creator , + , + ; + schema1:dateCreated "2021-11-23T16:55:30Z" ; + schema1:dateModified "2021-11-24T09:23:33Z" ; + schema1:description """## Description\r +\r +The workflow takes an input file with Cancer Driver Genes predictions (i.e. the results provided by a participant), computes a set of metrics, and compares them against the data currently stored in OpenEBench within the TCGA community. Two assessment metrics are provided for that predicitions. Also, some plots (which are optional) that allow to visualize the performance of the tool are generated. The workflow consists in three standard steps, defined by OpenEBench. The tools needed to run these steps must be in one or more Docker images, generated from [Docker containers](https://github.com/inab/TCGA_benchmarking_dockers ). Separated instances are spawned from these images for each step:\r +1. **Validation**: the input file format is checked and, if required, the content of the file is validated (e.g check whether the submitted gene IDs exist)\r +2. 
**Metrics Computation**: the predictions are compared with the 'Gold Standards' provided by the community, which results in two performance metrics - precision (Positive Predictive Value) and recall(True Positive Rate).\r +3. **Results Consolidation**: the benchmark itself is performed by merging the tool metrics with the rest of TCGA data. The results are provided in JSON format and SVG format (scatter plot).\r +\r +![alt text](https://raw.githubusercontent.com/inab/TCGA_benchmarking_workflow/1.0.4/workflow.jpg)\r +\r +\r +## Data\r +\r +* [TCGA_sample_data](./TCGA_sample_data) folder contains all the reference data required by the steps. It is derived from the manuscript:\r +[Comprehensive Characterization of Cancer Driver Genes and Mutations](https://www.cell.com/cell/fulltext/S0092-8674%2818%2930237-X?code=cell-site), Bailey et al, 2018, Cell [![doi:10.1016/j.cell.2018.02.060](https://img.shields.io/badge/doi-10.1016%2Fj.cell.2018.02.060-green.svg)](https://doi.org/10.1016/j.cell.2018.02.060) \r +* [TCGA_sample_out](./TCGA_sample_out) folder contains an example output for a worklow run, with two cancer types / challenges selected (ACC, BRCA). Results obtained from the default execution should be similar to those ones available in this directory. Results found in [TCGA_sample_out/results](./TCGA_sample_out/results) can be visualized in the browser using [`benchmarking_workflows_results_visualizer` javascript library](https://github.com/inab/benchmarking_workflows_results_visualizer).\r +\r +\r +## Usage\r +In order to use the workflow you need to:\r +* Install [Nextflow](https://www.nextflow.io/), which depends on Java virtual machine (>=8 , <15 ). 
You can automate their installation for local testing using [run_local_nextflow.bash](run_local_nextflow.bash).\r +* Clone [TCGA benchmarking Docker definitions repository](https://github.com/inab/TCGA_benchmarking_dockers) from tag 1.0.3 in a separate directory, and build locally the three Docker images found in it, running the `build.sh 1.0.3` script within that repo.\r +* Run it just using either *`nextflow run main.nf -profile docker`* or *`./run_local_nextflow.bash run main.nf -profile docker`*. Arguments specifications:\r +\r +```\r +Usage:\r +Run the pipeline with default parameters:\r +nextflow run main.nf -profile docker\r +\r +Run with user parameters:\r +nextflow run main.nf -profile docker --predictionsFile {driver.genes.file} --public_ref_dir {validation.reference.file} --participant_name {tool.name} --metrics_ref_dir {gold.standards.dir} --cancer_types {analyzed.cancer.types} --assess_dir {benchmark.data.dir} --results_dir {output.dir}\r +\r +Mandatory arguments:\r + --input List of cancer genes prediction\r + --community_id Name or OEB permanent ID for the benchmarking community\r + --public_ref_dir Directory with list of cancer genes used to validate the predictions\r + --participant_id Name of the tool used for prediction\r + --goldstandard_dir Dir that contains metrics reference datasets for all cancer types\r + --event_id List of types of cancer selected by the user, separated by spaces\r + --assess_dir Dir where the data for the benchmark are stored\r +\r +Other options:\r + --validation_result The output file where the results from validation step will be saved\r + --assessment_results The output file where the results from the computed metrics step will be saved\r + --aggregation_results The output file where the consolidation of the benchmark will be saved\r + --statistics_results The output directory with nextflow statistics\r + --outdir The output directory where the consolidation of the benchmark will be saved\r + --statsdir The output 
directory with nextflow statistics\r + --data_model_export_dir The output dir where json file with benchmarking data model contents will be saved\r + --otherdir The output directory where custom results will be saved (no directory inside)\r +Flags:\r + --help Display this message\r +```\r +\r +Default input parameters and Docker images to use for each step can be specified in the [config](./nextflow.config) file\r +**NOTE: In order to make your workflow compatible with the [OpenEBench VRE Nextflow Executor](https://github.com/inab/vre-process_nextflow-executor), please make sure to use the same parameter names in your workflow.**""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/244?version=3" ; + schema1:keywords "tcga, openebench, benchmarking" ; + schema1:license "https://spdx.org/licenses/LGPL-3.0" ; + schema1:name "OpenEBench assessment workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/244?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2021-12-20T17:04:47.633738" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-pe-illumina-artic-variant-calling/COVID-19-PE-ARTIC-ILLUMINA" ; + schema1:sdDatePublished "2021-12-21 03:01:00 +0000" ; + schema1:softwareVersion "v0.4.2" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "# beacon-omop-worker-workflows" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/882?version=1" ; + schema1:isBasedOn "https://github.com/Health-Informatics-UoN/beacon-omop-worker-workflows.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for beacon-workflow" ; + schema1:sdDatePublished "2024-08-05 10:24:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/882/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 6001 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 626 ; + schema1:dateCreated "2024-05-10T12:40:07Z" ; + schema1:dateModified "2024-05-10T12:40:07Z" ; + schema1:description "# beacon-omop-worker-workflows" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/882?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "beacon-workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/882?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + ; + ns1:output . + + a schema1:Dataset ; + schema1:datePublished "2023-11-03T14:21:07.903072" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Purge-duplicate-contigs-VGP6/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicate-contigs-VGP6/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:version "0.1.1" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Differential abundance analysis" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/981?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/differentialabundance" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/differentialabundance" ; + schema1:sdDatePublished "2024-08-05 10:24:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/981/ro_crate?version=7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15641 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:49Z" ; + schema1:dateModified "2024-06-11T12:54:49Z" ; + schema1:description "Differential abundance analysis" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/981?version=8" ; + schema1:keywords "ATAC-seq, ChIP-seq, deseq2, differential-abundance, differential-expression, gsea, limma, microarray, rna-seq, shiny" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/differentialabundance" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/981?version=7" ; + schema1:version 7 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Workflow for NonSpliced RNAseq data with multiple aligners.\r +\r +Steps: \r + - workflow_quality.cwl:\r + - FastQC (control)\r + - fastp (trimming)\r + - bowtie2 (read mapping)\r + - sam_to_sorted-bam\r + - featurecounts (transcript read counts)\r + - kallisto (transcript [pseudo]counts)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/77?version=1" ; + schema1:isBasedOn "https://git.wur.nl/unlock/cwl/-/blob/master/cwl/workflows/workflow_RNAseq_NonSpliced.cwl" ; + schema1:license "CC0-1.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for NonSpliced RNAseq workflow" ; + schema1:sdDatePublished "2024-08-05 10:33:13 +0100" ; + schema1:url "https://workflowhub.eu/workflows/77/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 34088 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7251 ; + schema1:creator , + ; + schema1:dateCreated "2020-11-24T11:05:56Z" ; + schema1:dateModified "2023-01-16T13:46:21Z" ; + schema1:description """Workflow for NonSpliced RNAseq data with multiple aligners.\r +\r +Steps: \r + - workflow_quality.cwl:\r + - FastQC (control)\r + - fastp (trimming)\r + - bowtie2 (read mapping)\r + - sam_to_sorted-bam\r + - featurecounts (transcript read counts)\r + - kallisto (transcript [pseudo]counts)\r +""" ; + schema1:image ; + schema1:keywords "Alignment, bowtie2, featurecounts, kallisto, nonspliced" ; + schema1:license "https://spdx.org/licenses/CC0-1.0" ; + schema1:name "NonSpliced RNAseq workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/77?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , 
+ , + , + , + , + ; + ns1:output , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2023-11-24T11:23:50.158837" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/gcms-metams" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "gcms-metams/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4744 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:11Z" ; + schema1:dateModified "2024-06-11T12:55:11Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. 
\r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +\r +Workflow information:\r +* Input = genome.fasta.\r +* Outputs = masked_genome.fasta and table of repeats found. \r +* Runs RepeatModeler with default settings, uses the output of this (repeat library) as input into RepeatMasker. \r +* Runs RepeatMasker with default settings except for: Skip masking of simple tandem repeats and low complexity regions. (-nolow) : default set to yes. Perform softmasking instead of hardmasking - set to yes. \r +* Workflow report displays an edited table of repeats found. Note: a known bug is that sometimes the workflow report text resets to default text. To restore, look for an earlier workflow version with correct workflow report text, and copy and paste report text into current version.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/875?version=1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Repeat masking - TSI" ; + schema1:sdDatePublished "2024-08-05 10:22:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/875/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8120 ; + schema1:creator , + ; + schema1:dateCreated "2024-05-08T03:59:07Z" ; + schema1:dateModified "2024-05-08T06:24:26Z" ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. 
\r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +\r +Workflow information:\r +* Input = genome.fasta.\r +* Outputs = masked_genome.fasta and table of repeats found. \r +* Runs RepeatModeler with default settings, uses the output of this (repeat library) as input into RepeatMasker. \r +* Runs RepeatMasker with default settings except for: Skip masking of simple tandem repeats and low complexity regions. (-nolow) : default set to yes. Perform softmasking instead of hardmasking - set to yes. \r +* Workflow report displays an edited table of repeats found. Note: a known bug is that sometimes the workflow report text resets to default text. To restore, look for an earlier workflow version with correct workflow report text, and copy and paste report text into current version.\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/875?version=2" ; + schema1:isPartOf ; + schema1:keywords "TSI-annotation" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Repeat masking - TSI" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/875?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 403507 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Summary\r +\r +Jupyter Notebook containing a tutorial to illustrate the process of ligand parameterization for a small molecule, step by step, using the BioExcel Building Blocks library (biobb). 
The particular example used is the Sulfasalazine protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +OpenBabel and ACPype packages are used to add hydrogens, energetically minimize the structure, and generate parameters for the GROMACS package. With Generalized Amber Force Field (GAFF) forcefield and AM1-BCC charges.\r +\r +\r +# Parameters\r +\r +## Inputs: \r +\r +\r +* **ligandCode**: 3-letter code of the ligand structure (e.g. IBP)\r +\r +* **mol_charge**: Molecule net charge (e.g. -1)\r +\r +* **pH**: Acidity or alkalinity for the small molecule. Hydrogen atoms will be added according to this pH. (e.g. 7.4)\r +\r +## Outputs\r +\r +\r +* **IBPparams.gro**: Structure of the parameterized ligand in gro (GROMACS) format.\r +\r +* **IBPparams.top**: Topology of the parameterized ligand, including a reference to the IBPparams.itp.\r +\r +* **IBPparams.itp**: Include Topology File (itp) of the parameterized ligand, including the parameters information: bonds, angles, dihedrals, etc.\r +\r +# Additional Resources\r +\r +* [Tutorial Page for this workflow](http://mmb.irbbarcelona.org/biobb/availability/tutorials/ligand-parameterization)\r +\r +* [GitHub repository](https://github.com/bioexcel/biobb_wf_ligand_parameterization)\r +\r +* [View in Binder](https://mybinder.org/v2/gh/bioexcel/biobb_wf_ligand_parameterization/master?filepath=biobb_wf_ligand_parameterization%2Fnotebooks%2Fbiobb_ligand_parameterization_tutorial.ipynb)\r +\r +* [Documentation](https://biobb-wf-ligand-parameterization.readthedocs.io/en/latest/index.html)""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.54.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_ligand_parameterization" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)" ; + 
schema1:sdDatePublished "2024-08-05 10:25:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/54/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14669 ; + schema1:creator , + , + ; + schema1:dateCreated "2020-09-14T10:01:04Z" ; + schema1:dateModified "2021-05-13T08:14:49Z" ; + schema1:description """# Summary\r +\r +Jupyter Notebook containing a tutorial to illustrate the process of ligand parameterization for a small molecule, step by step, using the BioExcel Building Blocks library (biobb). The particular example used is the Sulfasalazine protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +OpenBabel and ACPype packages are used to add hydrogens, energetically minimize the structure, and generate parameters for the GROMACS package. With Generalized Amber Force Field (GAFF) forcefield and AM1-BCC charges.\r +\r +\r +# Parameters\r +\r +## Inputs: \r +\r +\r +* **ligandCode**: 3-letter code of the ligand structure (e.g. IBP)\r +\r +* **mol_charge**: Molecule net charge (e.g. -1)\r +\r +* **pH**: Acidity or alkalinity for the small molecule. Hydrogen atoms will be added according to this pH. (e.g. 
7.4)\r +\r +## Outputs\r +\r +\r +* **IBPparams.gro**: Structure of the parameterized ligand in gro (GROMACS) format.\r +\r +* **IBPparams.top**: Topology of the parameterized ligand, including a reference to the IBPparams.itp.\r +\r +* **IBPparams.itp**: Include Topology File (itp) of the parameterized ligand, including the parameters information: bonds, angles, dihedrals, etc.\r +\r +# Additional Resources\r +\r +* [Tutorial Page for this workflow](http://mmb.irbbarcelona.org/biobb/availability/tutorials/ligand-parameterization)\r +\r +* [GitHub repository](https://github.com/bioexcel/biobb_wf_ligand_parameterization)\r +\r +* [View in Binder](https://mybinder.org/v2/gh/bioexcel/biobb_wf_ligand_parameterization/master?filepath=biobb_wf_ligand_parameterization%2Fnotebooks%2Fbiobb_ligand_parameterization_tutorial.ipynb)\r +\r +* [Documentation](https://biobb-wf-ligand-parameterization.readthedocs.io/en/latest/index.html)""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/54?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/54?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-05-30T07:18:31.610037" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """MGnify (http://www.ebi.ac.uk/metagenomics) provides a free to use platform for the assembly, analysis and archiving of microbiome data derived from sequencing microbial populations that are present in particular environments. Over the past 2 years, MGnify (formerly EBI Metagenomics) has more than doubled the number of publicly available analysed datasets held within the resource. Recently, an updated approach to data analysis has been unveiled (version 5.0), replacing the previous single pipeline with multiple analysis pipelines that are tailored according to the input data, and that are formally described using the Common Workflow Language, enabling greater provenance, reusability, and reproducibility. MGnify's new analysis pipelines offer additional approaches for taxonomic assertions based on ribosomal internal transcribed spacer regions (ITS1/2) and expanded protein functional annotations. Biochemical pathways and systems predictions have also been added for assembled contigs. MGnify's growing focus on the assembly of metagenomic data has also seen the number of datasets it has assembled and analysed increase six-fold. The non-redundant protein database constructed from the proteins encoded by these assemblies now exceeds 1 billion sequences. 
Meanwhile, a newly developed contig viewer provides fine-grained visualisation of the assembled contigs and their enriched annotations.\r +\r +Documentation: https://docs.mgnify.org/en/latest/analysis.html#raw-reads-analysis-pipeline""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.362.1" ; + schema1:isBasedOn "https://github.com/EBI-Metagenomics/pipeline-v5.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for MGnify - raw-reads analysis pipeline" ; + schema1:sdDatePublished "2024-08-05 10:32:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/362/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 57591 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6196 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2022-06-07T08:40:40Z" ; + schema1:dateModified "2023-01-16T14:01:12Z" ; + schema1:description """MGnify (http://www.ebi.ac.uk/metagenomics) provides a free to use platform for the assembly, analysis and archiving of microbiome data derived from sequencing microbial populations that are present in particular environments. Over the past 2 years, MGnify (formerly EBI Metagenomics) has more than doubled the number of publicly available analysed datasets held within the resource. Recently, an updated approach to data analysis has been unveiled (version 5.0), replacing the previous single pipeline with multiple analysis pipelines that are tailored according to the input data, and that are formally described using the Common Workflow Language, enabling greater provenance, reusability, and reproducibility. 
MGnify's new analysis pipelines offer additional approaches for taxonomic assertions based on ribosomal internal transcribed spacer regions (ITS1/2) and expanded protein functional annotations. Biochemical pathways and systems predictions have also been added for assembled contigs. MGnify's growing focus on the assembly of metagenomic data has also seen the number of datasets it has assembled and analysed increase six-fold. The non-redundant protein database constructed from the proteins encoded by these assemblies now exceeds 1 billion sequences. Meanwhile, a newly developed contig viewer provides fine-grained visualisation of the assembled contigs and their enriched annotations.\r +\r +Documentation: https://docs.mgnify.org/en/latest/analysis.html#raw-reads-analysis-pipeline""" ; + schema1:image ; + schema1:keywords "Workflows, CWL, Metagenomics" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "MGnify - raw-reads analysis pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/362?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# COnSensus Interaction Network InFErence Service\r +Inference framework for reconstructing networks using a consensus approach between multiple methods and data sources.\r +\r +![alt text](https://github.com/PhosphorylatedRabbits/cosifer/raw/master/docs/_static/logo.png)\r +\r +## Reference\r +[Manica, Matteo, Charlotte, Bunne, Roland, Mathis, Joris, Cadow, Mehmet Eren, Ahsen, Gustavo A, Stolovitzky, and María Rodríguez, Martínez. 
"COSIFER: a python package for the consensus inference of molecular interaction networks".Bioinformatics (2020)](https://doi.org/10.1093/bioinformatics/btaa942).""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/118?version=1" ; + schema1:isBasedOn "https://github.com/inab/ipc_workflows/tree/main/cosifer/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for COSIFER" ; + schema1:sdDatePublished "2024-08-05 10:30:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/118/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 926 ; + schema1:creator , + ; + schema1:dateCreated "2021-05-05T15:50:31Z" ; + schema1:dateModified "2023-04-21T11:04:20Z" ; + schema1:description """# COnSensus Interaction Network InFErence Service\r +Inference framework for reconstructing networks using a consensus approach between multiple methods and data sources.\r +\r +![alt text](https://github.com/PhosphorylatedRabbits/cosifer/raw/master/docs/_static/logo.png)\r +\r +## Reference\r +[Manica, Matteo, Charlotte, Bunne, Roland, Mathis, Joris, Cadow, Mehmet Eren, Ahsen, Gustavo A, Stolovitzky, and María Rodríguez, Martínez. "COSIFER: a python package for the consensus inference of molecular interaction networks".Bioinformatics (2020)](https://doi.org/10.1093/bioinformatics/btaa942).""" ; + schema1:image ; + schema1:keywords "cosifer, cancer, pediatric, rna-seq" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "COSIFER" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/118?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + ; + ns1:output . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 7284 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Description: SSP-based RCP scenario with high radiative forcing by the end of century. Following approximately RCP8.5 global forcing pathway with SSP5 socioeconomic conditions. Concentration-driven.\r +Rationale: the scenario represents the high end of plausible future pathways. SSP5 is the only SSP with emissions high enough to produce the 8.5 W/m2 level of forcing in 2100.\r +\r +This workflow is answering to the following scientific question:\r +- Is it worth investing in artificial snowmaking equipment at RATECE-PLANICA?""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/47?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for RATECE-PLANICA ski station (Slovenia) under CMIP-6 SSP585 condition" ; + schema1:sdDatePublished "2024-08-05 10:33:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/47/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 834994 ; + schema1:dateCreated "2020-07-23T18:33:44Z" ; + schema1:dateModified "2023-01-16T13:44:15Z" ; + schema1:description """Description: SSP-based RCP scenario with high radiative forcing by the end of century. Following approximately RCP8.5 global forcing pathway with SSP5 socioeconomic conditions. Concentration-driven.\r +Rationale: the scenario represents the high end of plausible future pathways. 
SSP5 is the only SSP with emissions high enough to produce the 8.5 W/m2 level of forcing in 2100.\r +\r +This workflow is answering to the following scientific question:\r +- Is it worth investing in artificial snowmaking equipment at RATECE-PLANICA?""" ; + schema1:keywords "Climate, jupyter, cmip6" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "RATECE-PLANICA ski station (Slovenia) under CMIP-6 SSP585 condition" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/47?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +****\r +\r +About this workflow:\r +\r +* Input: merged_transcriptomes.fasta. \r +* Runs TransDecoder to produce longest_transcripts.fasta\r +* (Runs both the LongOrfs and Predict parts together. Default settings except Long Orfs options: -m =20)\r +* Runs Busco on output. """ ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.879.1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Extract transcripts - TSI" ; + schema1:sdDatePublished "2024-08-05 10:24:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/879/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9310 ; + schema1:creator , + ; + schema1:dateCreated "2024-05-08T07:15:41Z" ; + schema1:dateModified "2024-05-09T04:08:31Z" ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +****\r +\r +About this workflow:\r +\r +* Input: merged_transcriptomes.fasta. \r +* Runs TransDecoder to produce longest_transcripts.fasta\r +* (Runs both the LongOrfs and Predict parts together. Default settings except Long Orfs options: -m =20)\r +* Runs Busco on output. """ ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "TSI-annotation" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Extract transcripts - TSI" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/879?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 314187 . + + a schema1:Dataset ; + schema1:datePublished "2024-07-10T15:58:48.295940" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/amr_gene_detection" ; + schema1:license "GPL-3.0-or-later" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "amr_gene_detection/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Global Run-On sequencing analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1004?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/nascent" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nascent" ; + schema1:sdDatePublished "2024-08-05 10:23:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1004/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10409 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:06Z" ; + schema1:dateModified "2024-06-11T12:55:06Z" ; + schema1:description "Global Run-On sequencing analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1004?version=4" ; + schema1:keywords "gro-seq, nascent, pro-seq, rna, transcription, tss" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nascent" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1004?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A mapping-based pipeline for creating a phylogeny from bacterial whole genome sequences" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/967?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/bactmap" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bactmap" ; + schema1:sdDatePublished "2024-08-05 10:24:16 +0100" ; + schema1:url "https://workflowhub.eu/workflows/967/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6091 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:43Z" ; + schema1:dateModified "2024-06-11T12:54:43Z" ; + schema1:description "A mapping-based pipeline for creating a phylogeny from bacterial whole genome sequences" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/967?version=2" ; + schema1:keywords "bacteria, bacterial, bacterial-genome-analysis, Genomics, mapping, phylogeny, tree" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bactmap" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/967?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-protein-ligand-complex-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). 
\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/295?version=2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_protein_complex_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:name "Research Object Crate for Galaxy Protein Ligand Complex MD Setup" ; + schema1:sdDatePublished "2024-08-05 10:32:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/295/ro_crate?version=2" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Somatic-ShortV @ NCI-Gadi is a variant calling pipeline that calls somatic short variants (SNPs and indels) from tumour and matched normal BAM files following [GATK's Best Practice Workflow](https://gatk.broadinstitute.org/hc/en-us/articles/360035894731-Somatic-short-variant-discovery-SNVs-Indels-). This workflow is designed for the National Computational Infrastructure's (NCI) Gadi supercompter, leveraging multiple nodes on NCI Gadi to run all stages of the workflow in parallel. 
\r +\r +Infrastructure\\_deployment\\_metadata: Gadi (NCI)""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.148.1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/Somatic-ShortV" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Somatic-ShortV @ NCI-Gadi" ; + schema1:sdDatePublished "2024-08-05 10:33:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/148/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 246369 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 20568 ; + schema1:creator , + , + ; + schema1:dateCreated "2021-08-18T23:14:55Z" ; + schema1:dateModified "2023-01-16T13:51:45Z" ; + schema1:description """Somatic-ShortV @ NCI-Gadi is a variant calling pipeline that calls somatic short variants (SNPs and indels) from tumour and matched normal BAM files following [GATK's Best Practice Workflow](https://gatk.broadinstitute.org/hc/en-us/articles/360035894731-Somatic-short-variant-discovery-SNVs-Indels-). This workflow is designed for the National Computational Infrastructure's (NCI) Gadi supercompter, leveraging multiple nodes on NCI Gadi to run all stages of the workflow in parallel. \r +\r +Infrastructure\\_deployment\\_metadata: Gadi (NCI)""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "GATK4, SNPs, INDELs, Somatic, variant_calling, Mutect2, NCI, NCI Gadi, Gadi, cancer, tumour, NCI-Gadi, scalable, VCF" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Somatic-ShortV @ NCI-Gadi" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/148?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-08-05 10:23:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=10" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13140 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:59Z" ; + schema1:dateModified "2024-06-11T12:54:59Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=10" ; + schema1:version 10 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.261.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_complex_md_setup/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:32:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/261/ro_crate?version=2" . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 158190 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 32274 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-09T06:44:44Z" ; + schema1:dateModified "2023-06-09T07:10:36Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/261?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_complex_md_setup/cwl/workflow.cwl" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Automated quantitative analysis of DIA proteomics mass spectrometry measurements." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/980?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/diaproteomics" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/diaproteomics" ; + schema1:sdDatePublished "2024-08-05 10:24:04 +0100" ; + schema1:url "https://workflowhub.eu/workflows/980/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6252 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "Automated quantitative analysis of DIA proteomics mass spectrometry measurements." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/980?version=6" ; + schema1:keywords "data-independent-proteomics, dia-proteomics, openms, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/diaproteomics" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/980?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 41971 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 299 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.0.0" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 298 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.0.1" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 298 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.0.2" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 301 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.0.3" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 296 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.1.0" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 299 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.1.1" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 299 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.1.2" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 298 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.1.3" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 296 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.2.0" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 300 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.2.1" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 300 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.2.2" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 300 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.2.3" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 298 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.3.0" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 304 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.3.1" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 300 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.3.2" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 299 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "A.3.3" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 298 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.0.0" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 293 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.0.1" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 302 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.0.2" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 301 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.0.3" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 300 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.1.0" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 297 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.1.1" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 299 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.1.2" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 299 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.1.3" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 299 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.2.0" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 297 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.2.1" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 302 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.2.2" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 300 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.2.3" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 296 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.3.0" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 297 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.3.1" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 296 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.3.2" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 297 ; + schema1:dateModified "2023-05-30T07:15:29" ; + schema1:name "B.3.3" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:Dataset ; + schema1:datePublished "2024-04-29T15:54:18.200332" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Purge-duplicate-contigs-VGP6/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicate-contigs-VGP6/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:version "0.3.8" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. 
Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. 
\r +- At the **biological level**, it is important to link observed **conformational ensembles**, to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDKe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.490.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_flexdyn/galaxy" ; + schema1:license "other-open" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Protein conformational ensembles generation" ; + schema1:sdDatePublished "2024-08-05 10:30:17 +0100" ; + schema1:url "https://workflowhub.eu/workflows/490/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 92310 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-01T09:50:55Z" ; + schema1:dateModified "2023-06-01T09:53:56Z" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. 
Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. 
\r +- At the **biological level**, it is important to link observed **conformational ensembles**, to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDKe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "" ; + schema1:name "Galaxy Protein conformational ensembles generation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_flexdyn/galaxy/biobb_wf_flexdyn.ga" ; + schema1:version 1 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2021-12-20T17:04:47.631291" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-ivar-analysis" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-pe-illumina-artic-ivar-analysis/SARS-COV-2-ILLUMINA-AMPLICON-IVAR-PANGOLIN-NEXTCLADE" ; + schema1:sdDatePublished "2021-12-21 03:01:00 +0000" ; + schema1:softwareVersion "v0.2.2" . 
+ + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """This Galaxy-E workflow was made from the ["Cleaning GBIF data for the use in biogeography" tutorial](https://ropensci.github.io/CoordinateCleaner/articles/Cleaning_GBIF_data_with_CoordinateCleaner.html) and allows to:\r +- Use CoordinateCleaner to automatically flag problematic records\r +- Use GBIF provided meta-data to improve coordinate quality, tailored to your downstream analyses\r +- Use automated cleaning algorithms of CoordinateCleaner to identify problematic contributing datasets\r +- Visualize data on a map""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/404?version=1" ; + schema1:isBasedOn "https://ecology.usegalaxy.eu/u/ylebras/w/gbif-data-quality-check-and-filtering-workflow-feb-2020" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for GBIF data Quality check and filtering workflow Feb-2020" ; + schema1:sdDatePublished "2024-08-05 10:31:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/404/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16297 ; + schema1:creator ; + schema1:dateCreated "2022-11-18T14:39:27Z" ; + schema1:dateModified "2023-11-09T21:03:31Z" ; + schema1:description """This Galaxy-E workflow was made from the ["Cleaning GBIF data for the use in biogeography" tutorial](https://ropensci.github.io/CoordinateCleaner/articles/Cleaning_GBIF_data_with_CoordinateCleaner.html) and allows to:\r +- Use CoordinateCleaner to automatically flag problematic records\r +- Use GBIF provided meta-data to improve coordinate quality, tailored to your downstream analyses\r +- Use automated cleaning algorithms of CoordinateCleaner to identify problematic contributing datasets\r +- Visualize data on a map""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "GBIF data Quality check and filtering workflow Feb-2020" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/404?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Genes and transcripts annotation with Isoseq using uLTRA and TAMA" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/993?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/isoseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/isoseq" ; + schema1:sdDatePublished "2024-08-05 10:23:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/993/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7742 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:57Z" ; + schema1:dateModified "2024-06-11T12:54:57Z" ; + schema1:description "Genes and transcripts annotation with Isoseq using uLTRA and TAMA" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/993?version=6" ; + schema1:keywords "isoseq, isoseq-3, rna, tama, ultra" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/isoseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/993?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 31175 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 3 ; + schema1:dateModified "2024-04-26T13:10:25+00:00" ; + schema1:name "energy.selection" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3 ; + schema1:dateModified "2024-04-26T13:10:25+00:00" ; + schema1:name "genion.group" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1044 ; + schema1:dateModified "2024-04-26T13:10:25+00:00" ; + schema1:name "ions.mdp" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 971 ; + schema1:dateModified "2024-04-26T13:10:25+00:00" ; + schema1:name "minim.mdp" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 109917 ; + schema1:dateModified "2024-04-26T13:10:27+00:00" ; + schema1:name "1aki.pdb" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2998215 ; + schema1:dateModified "2024-04-26T13:10:27+00:00" ; + schema1:name "1u3m.pdb" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2918025 ; + schema1:dateModified "2024-04-26T13:10:27+00:00" ; + schema1:name "1xyw.pdb" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88246 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1aki.gro" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577599 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1aki.top" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1280044 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1aki_em.tpr" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1aki_em_energy.edr" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1279304 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1aki_ions.tpr" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88246 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1aki_newbox.gro" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 7939 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1aki_potential.png" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2154 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1aki_potential.xvg" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1525186 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1aki_solv.gro" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1524475 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1aki_solv_ions.gro" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 82046 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1u3m.gro" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 537933 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1u3m.top" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1416272 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1u3m_em.tpr" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1u3m_em_energy.edr" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1415196 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1u3m_ions.tpr" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 82046 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1u3m_newbox.gro" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 7464 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1u3m_potential.png" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2154 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1u3m_potential.xvg" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1837046 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1u3m_solv.gro" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1836965 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1u3m_solv_ions.gro" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 80112 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1xyw.gro" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 524009 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1xyw.top" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1408872 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1xyw_em.tpr" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1xyw_em_energy.edr" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1407844 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1xyw_ions.tpr" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 80112 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1xyw_newbox.gro" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8525 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1xyw_potential.png" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2130 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1xyw_potential.xvg" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1845237 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1xyw_solv.gro" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1845066 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "1xyw_solv_ions.gro" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4308 ; + schema1:dateModified "2024-07-12T11:16:05+00:00" ; + schema1:name "POTENTIAL_RESULTS.png" ; + schema1:sdDatePublished "2024-07-12T11:16:08+00:00" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Conformational Transitions calculations tutorial using BioExcel Building Blocks (biobb) and GOdMD\r +\r +This tutorial aims to illustrate the process of computing a conformational transition between two known structural conformations of a protein, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.550.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_godmd/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein Conformational Transitions calculations tutorial" ; + schema1:sdDatePublished "2024-08-05 10:29:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/550/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3599 ; + schema1:creator , + ; + schema1:dateCreated "2023-08-02T10:06:40Z" ; + schema1:dateModified "2023-08-02T10:10:41Z" ; + schema1:description """# Protein Conformational Transitions calculations tutorial using BioExcel Building Blocks (biobb) and GOdMD\r +\r +This tutorial aims to illustrate the process of computing a conformational transition between two known structural conformations of a protein, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein Conformational Transitions calculations tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_godmd/python/workflow.py" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-protein-ligand-complex-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.295.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_protein_complex_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name 
"Research Object Crate for Galaxy Protein Ligand Complex MD Setup" ; + schema1:sdDatePublished "2024-08-05 10:32:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/295/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 90663 ; + schema1:creator , + ; + schema1:dateCreated "2023-05-03T13:46:26Z" ; + schema1:dateModified "2023-05-03T13:47:32Z" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-protein-ligand-complex-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). 
\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/295?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Protein Ligand Complex MD Setup" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_protein_complex_md_setup/galaxy/biobb_wf_protein_complex_md_setup.ga" ; + schema1:version 3 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:description """# CroMaSt: A workflow for domain family curation through cross-mapping of structural instances between protein domain databases\r +\r +CroMaSt (**Cro**ss **Ma**pper of domain **St**ructural instances) is an automated iterative workflow to clarify domain definition by cross-mapping of domain structural instances between domain databases. 
CroMaSt (for Cross-Mapper of domain Structural instances) will classify all structural instances of a given domain type into 3 different categories (core, true and domain-like). \r +\r +\r +## Requirements\r +1. [Conda](https://docs.conda.io/projects/conda/en/latest/) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html)\r +2. [Kpax](http://kpax.loria.fr/download.php) \r +Download and install conda (or Miniconda) and Kpax by following the instructions from their official site.\r +\r +\r +## Get it running \r +(Considering the requirements are already met)\r +\r +1. Clone the repository and change the directory\r +\r +```\r +git clone https://gitlab.inria.fr/capsid.public_codes/CroMaSt.git\r +cd CroMaSt\r +```\r +\r +2. Create the conda environment for the workflow\r +```\r +conda env create --file yml/environment.yml\r +conda activate CroMaSt\r +```\r +\r +3. Change the path of variables in paramter file\r +```\r +sed -i 's/\\/home\\/hdhondge\\/CroMaSt\\//\\/YOUR\\/PATH\\/TO_CroMaSt\\//g' yml/CroMaSt_input.yml \r +```\r +\r +4. Create the directory to store files from PDB and SIFTS (if not already)\r +```\r +mkdir PDB_files SIFTS\r +```\r +\r +5. Download the source input data\r +```\r +cwl-runner Tools/download_data.cwl yml/download_data.yml\r +```\r +\r +## Basic example\r +\r +### 1. First, we will run the workflow for the KH domain with family identifiers `RRM_1` and `RRM` in Pfam and CATH, respectively.\r +Run the workflow -\r +\r +```\r +cwl-runner --parallel --outdir=Results/ CroMaSt.cwl yml/CroMaSt_input.yml\r +```\r +\r +### 2. 
Once the iteration is complete, check the `new_param.yml` file from the `outputdir` (Results), if there is any family identifier in either `pfam` or `cath`; run the next iteration using following command (Until there is no new families explored by workflow) -\r +\r +```\r +cwl-runner --parallel --outdir=Results/ CroMaSt.cwl Results/new_param.yml\r +```\r + \r +### **Extra:** Start the workflow with multiple families from one or both databases \r +If you would like to start the workflow with multiple families from one or both databases, then simply add a comma in between two family identifiers. \r +```\r +pfam: ['PF00076', 'PF08777']\r +cath: ['3.30.70.330']\r +```\r +\r +- **Pro Tip**: Don't forget to give different path to `--outdir` option while running the workflow multiple times or at least move the results to some other location after first run.\r +\r +## Run the workflow for protein domain of your choice \r +### 1. You can run the workflow for the domain of your choice by simply changing the family identifers in `yml/CroMaSt_input.yml` file.\r +\r +Simply replace the following values of family identifiers (for pfam and cath) with the family identifiers of your choice in `yml/CroMaSt_input.yml` file. \r +```\r +pfam: ['PF00076']\r +cath: ['3.30.70.330']\r +```\r +\r +\r +\r +## Data files used in current version are as follows:\r +**Files in Data directory can be downloaded as follows**:\r +\r +1. File used from Pfam database: [pdbmap.gz](http://ftp.ebi.ac.uk/pub/databases/Pfam/releases/Pfam33.0/pdbmap.gz) \r +\r +2. File used from CATH database: [cath-domain-description-file.txt](ftp://orengoftp.biochem.ucl.ac.uk:21/cath/releases/latest-release/cath-classification-data/cath-domain-description-file.txt) \r +\r +3. 
Obsolete entries from RCSB PDB\r +[obsolete_PDB_entry_ids.txt](https://data.rcsb.org/rest/v1/holdings/removed/entry_ids) \r +\r +\r +CATH Version - 4.3.0 (Ver_Date - 11-Sep-2019) [FTP site](ftp://orengoftp.biochem.ucl.ac.uk/cath/releases/latest-release/cath-classification-data/) \r +Pfam Version - 33.0 (Ver_Date - 18-Mar-2020) [FTP site](http://ftp.ebi.ac.uk/pub/databases/Pfam/releases/Pfam33.0/) \r +\r +## Reference\r +```\r +Poster - \r +1. Hrishikesh Dhondge, Isaure Chauvot de Beauchêne, Marie-Dominique Devignes. CroMaSt: A workflow for domain family curation through cross-mapping of structural instances between protein domain databases. 21st European Conference on Computational Biology, Sep 2022, Sitges, Spain. ⟨hal-03789541⟩\r +\r +```\r +\r +## Acknowledgements\r +This project has received funding from the Marie Skłodowska-Curie Innovative Training Network (MSCA-ITN) RNAct supported by European Union’s Horizon 2020 research and innovation programme under granta greement No 813239.\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.390.1" ; + schema1:isBasedOn "https://github.com/HrishiDhondge/CroMaSt.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CroMaSt: A workflow for domain family curation through cross-mapping of structural instances between protein domain databases" ; + schema1:sdDatePublished "2024-08-05 10:30:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/390/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 21416 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-09-28T11:34:54Z" ; + schema1:dateModified "2023-01-16T14:02:36Z" ; + schema1:description """# CroMaSt: A workflow for domain family curation through cross-mapping of structural instances between protein domain databases\r +\r +CroMaSt (**Cro**ss **Ma**pper of domain **St**ructural instances) is an automated iterative workflow to clarify domain definition by cross-mapping of domain structural instances between domain databases. CroMaSt (for Cross-Mapper of domain Structural instances) will classify all structural instances of a given domain type into 3 different categories (core, true and domain-like). \r +\r +\r +## Requirements\r +1. [Conda](https://docs.conda.io/projects/conda/en/latest/) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html)\r +2. [Kpax](http://kpax.loria.fr/download.php) \r +Download and install conda (or Miniconda) and Kpax by following the instructions from their official site.\r +\r +\r +## Get it running \r +(Considering the requirements are already met)\r +\r +1. Clone the repository and change the directory\r +\r +```\r +git clone https://gitlab.inria.fr/capsid.public_codes/CroMaSt.git\r +cd CroMaSt\r +```\r +\r +2. Create the conda environment for the workflow\r +```\r +conda env create --file yml/environment.yml\r +conda activate CroMaSt\r +```\r +\r +3. Change the path of variables in paramter file\r +```\r +sed -i 's/\\/home\\/hdhondge\\/CroMaSt\\//\\/YOUR\\/PATH\\/TO_CroMaSt\\//g' yml/CroMaSt_input.yml \r +```\r +\r +4. Create the directory to store files from PDB and SIFTS (if not already)\r +```\r +mkdir PDB_files SIFTS\r +```\r +\r +5. Download the source input data\r +```\r +cwl-runner Tools/download_data.cwl yml/download_data.yml\r +```\r +\r +## Basic example\r +\r +### 1. 
First, we will run the workflow for the KH domain with family identifiers `RRM_1` and `RRM` in Pfam and CATH, respectively.\r +Run the workflow -\r +\r +```\r +cwl-runner --parallel --outdir=Results/ CroMaSt.cwl yml/CroMaSt_input.yml\r +```\r +\r +### 2. Once the iteration is complete, check the `new_param.yml` file from the `outputdir` (Results), if there is any family identifier in either `pfam` or `cath`; run the next iteration using following command (Until there is no new families explored by workflow) -\r +\r +```\r +cwl-runner --parallel --outdir=Results/ CroMaSt.cwl Results/new_param.yml\r +```\r + \r +### **Extra:** Start the workflow with multiple families from one or both databases \r +If you would like to start the workflow with multiple families from one or both databases, then simply add a comma in between two family identifiers. \r +```\r +pfam: ['PF00076', 'PF08777']\r +cath: ['3.30.70.330']\r +```\r +\r +- **Pro Tip**: Don't forget to give different path to `--outdir` option while running the workflow multiple times or at least move the results to some other location after first run.\r +\r +## Run the workflow for protein domain of your choice \r +### 1. You can run the workflow for the domain of your choice by simply changing the family identifers in `yml/CroMaSt_input.yml` file.\r +\r +Simply replace the following values of family identifiers (for pfam and cath) with the family identifiers of your choice in `yml/CroMaSt_input.yml` file. \r +```\r +pfam: ['PF00076']\r +cath: ['3.30.70.330']\r +```\r +\r +\r +\r +## Data files used in current version are as follows:\r +**Files in Data directory can be downloaded as follows**:\r +\r +1. File used from Pfam database: [pdbmap.gz](http://ftp.ebi.ac.uk/pub/databases/Pfam/releases/Pfam33.0/pdbmap.gz) \r +\r +2. 
File used from CATH database: [cath-domain-description-file.txt](ftp://orengoftp.biochem.ucl.ac.uk:21/cath/releases/latest-release/cath-classification-data/cath-domain-description-file.txt) \r +\r +3. Obsolete entries from RCSB PDB\r +[obsolete_PDB_entry_ids.txt](https://data.rcsb.org/rest/v1/holdings/removed/entry_ids) \r +\r +\r +CATH Version - 4.3.0 (Ver_Date - 11-Sep-2019) [FTP site](ftp://orengoftp.biochem.ucl.ac.uk/cath/releases/latest-release/cath-classification-data/) \r +Pfam Version - 33.0 (Ver_Date - 18-Mar-2020) [FTP site](http://ftp.ebi.ac.uk/pub/databases/Pfam/releases/Pfam33.0/) \r +\r +## Reference\r +```\r +Poster - \r +1. Hrishikesh Dhondge, Isaure Chauvot de Beauchêne, Marie-Dominique Devignes. CroMaSt: A workflow for domain family curation through cross-mapping of structural instances between protein domain databases. 21st European Conference on Computational Biology, Sep 2022, Sitges, Spain. ⟨hal-03789541⟩\r +\r +```\r +\r +## Acknowledgements\r +This project has received funding from the Marie Skłodowska-Curie Innovative Training Network (MSCA-ITN) RNAct supported by European Union’s Horizon 2020 research and innovation programme under granta greement No 813239.\r +\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/390?version=1" ; + schema1:keywords "Pfam, CATH, Protein domains, data integration" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "CroMaSt: A workflow for domain family curation through cross-mapping of structural instances between protein domain databases" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/390?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 103397 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# Inclusion Body Myositis Active Subnetwork Identification Workflow\r +\r +Workflow for Creating a large disease network from various datasets and databases for IBM, and applying the active subnetwork identification method MOGAMUN.\r +\r +## Links\r +[WorkflowHub](https://workflowhub.eu/workflows/681)\r +\r +[Docker Image](https://hub.docker.com/R/jdwijnbergen/multi-omics_asi)\r +\r +[Docker Image build requirements](https://doi.org/10.5281/zenodo.10210364)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/681?version=3" ; + schema1:isBasedOn "https://github.com/jdwijnbergen/IBM_ASI_workflow.git" ; + schema1:license "notspecified" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Inclusion Body Myositis Active Subnetwork Identification Workflow" ; + schema1:sdDatePublished "2024-08-05 10:25:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/681/ro_crate?version=3" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 27177 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6953 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-27T15:59:18Z" ; + schema1:dateModified "2023-11-27T15:59:18Z" ; + schema1:description """# Inclusion Body Myositis Active Subnetwork Identification Workflow\r +\r +Workflow for Creating a large disease network from various datasets and databases for IBM, and applying the active subnetwork identification method MOGAMUN.\r +\r +## Links\r +[WorkflowHub](https://workflowhub.eu/workflows/681)\r +\r +[Docker Image](https://hub.docker.com/R/jdwijnbergen/multi-omics_asi)\r +\r +[Docker Image build requirements](https://doi.org/10.5281/zenodo.10210364)\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/681?version=6" ; + schema1:keywords "Bioinformatics, CWL, Genomics, Transcriptomics, Protein-Protein Interaction" ; + schema1:license "https://choosealicense.com/no-permission/" ; + schema1:name "Inclusion Body Myositis Active Subnetwork Identification Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/681?version=3" ; + schema1:version 3 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1021?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/scrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/scrnaseq" ; + schema1:sdDatePublished "2024-08-05 10:23:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1021/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8620 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:17Z" ; + schema1:dateModified "2024-06-11T12:55:17Z" ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:keywords "10x-genomics, 10xgenomics, alevin, bustools, Cellranger, kallisto, rna-seq, single-cell, star-solo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/scrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1021?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A Nanostring nCounter analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1003?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/nanostring" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nanostring" ; + schema1:sdDatePublished "2024-08-05 10:23:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1003/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8701 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:05Z" ; + schema1:dateModified "2024-06-11T12:55:05Z" ; + schema1:description "A Nanostring nCounter analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1003?version=4" ; + schema1:keywords "nanostring, nanostringnorm" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nanostring" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1003?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """\r +# Summary \r +\r +This notebook demonstrates how to recreate lineages published in the paper [Live imaging of remyelination in the adult mouse corpus callosum](https://www.pnas.org/content/118/28/e2025795118) and available at [idr0113-bottes-opcclones](https://idr.openmicroscopy.org/search/?query=Name:idr0113).\r +\r +The lineage is created from the metadata associated to the specified image.\r +\r +To load the data from the Image Data Resource, we use:\r +\r +* the [Python API](https://docs.openmicroscopy.org/omero/latest/developers/Python.html)\r +* the [JSON API](https://docs.openmicroscopy.org/omero/latest/developers/json-api.html)\r +\r +LPC-induced focal demyelination and in vivo imaging of genetically targeted OPCs and their progeny to describe the cellular dynamics of OPC-mediated remyelination in the CC.\r +\r +Longitudinal observation of OPCs and their progeny for up to two months reveals functional inter- and intraclonal heterogeneity and provides insights into the cell division capacity and the migration/differentiation dynamics of OPCs and their daughter cells in vivo.\r +\r +The majority 
of the clones remained quiescent or divided only few times. Some OPCs were highly proliferative. Large clones showed longer times between consecutive divisions compared to low proliferating clones.\r +\r +OPCs show distinct modes of cell division: from symmetric proliferative, to symmetric differentiating and also asymmetric cell division, where the OPC is self-renewed while the other daughter cell differentiates.\r +\r +Only 16.46% of OPC-derived cells differentiated into mature, remyelinating oligodendrocytes, with OPCs born at early divisions showing a higher probability to survive and to terminally differentiate.\r +\r +Cell death was associated with distinct cell division histories of different clones, with higher probability of death when generated at later divisions.\r +\r +Migratory behaviour was restricted to progenitors. Successfully differentiating progenitors moved shorter distances per day compared to dying cells.\r +\r +# Inputs\r +Parameters needed to configure the workflow:\r +\r +**imageId**: Identifier of an image in IDR.\r +\r +# Ouputs\r +Output file generated:\r +\r +**lineage_imageId.pdf**: A PDF with the generated lineage. Options to save as `png` or `svg` are also available.\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/267?version=1" ; + schema1:isBasedOn "https://github.com/IDR/idr0113-bottes-opcclones" ; + schema1:license "BSD-2-Clause" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Cell Lineage in the adult mouse corpus callosum" ; + schema1:sdDatePublished "2024-08-05 10:32:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/267/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 234305 ; + schema1:creator ; + schema1:dateCreated "2022-02-08T11:36:16Z" ; + schema1:dateModified "2023-01-16T13:57:29Z" ; + schema1:description """\r +# Summary \r +\r +This notebook demonstrates how to recreate lineages published in the paper [Live imaging of remyelination in the adult mouse corpus callosum](https://www.pnas.org/content/118/28/e2025795118) and available at [idr0113-bottes-opcclones](https://idr.openmicroscopy.org/search/?query=Name:idr0113).\r +\r +The lineage is created from the metadata associated to the specified image.\r +\r +To load the data from the Image Data Resource, we use:\r +\r +* the [Python API](https://docs.openmicroscopy.org/omero/latest/developers/Python.html)\r +* the [JSON API](https://docs.openmicroscopy.org/omero/latest/developers/json-api.html)\r +\r +LPC-induced focal demyelination and in vivo imaging of genetically targeted OPCs and their progeny to describe the cellular dynamics of OPC-mediated remyelination in the CC.\r +\r +Longitudinal observation of OPCs and their progeny for up to two months reveals functional inter- and intraclonal heterogeneity and provides insights into the cell division capacity and the migration/differentiation dynamics of OPCs and their daughter cells in vivo.\r +\r +The majority of the clones remained quiescent or divided only few times. Some OPCs were highly proliferative. 
Large clones showed longer times between consecutive divisions compared to low proliferating clones.\r +\r +OPCs show distinct modes of cell division: from symmetric proliferative, to symmetric differentiating and also asymmetric cell division, where the OPC is self-renewed while the other daughter cell differentiates.\r +\r +Only 16.46% of OPC-derived cells differentiated into mature, remyelinating oligodendrocytes, with OPCs born at early divisions showing a higher probability to survive and to terminally differentiate.\r +\r +Cell death was associated with distinct cell division histories of different clones, with higher probability of death when generated at later divisions.\r +\r +Migratory behaviour was restricted to progenitors. Successfully differentiating progenitors moved shorter distances per day compared to dying cells.\r +\r +# Inputs\r +Parameters needed to configure the workflow:\r +\r +**imageId**: Identifier of an image in IDR.\r +\r +# Ouputs\r +Output file generated:\r +\r +**lineage_imageId.pdf**: A PDF with the generated lineage. Options to save as `png` or `svg` are also available.\r +\r +""" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/BSD-2-Clause" ; + schema1:name "Cell Lineage in the adult mouse corpus callosum" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/IDR/idr0113-bottes-opcclones/blob/main/notebooks/idr0113_lineage.ipynb" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Generate possible metabolic routes for the production of a target molecule in an organism of choice" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/24?version=1" ; + schema1:isBasedOn "https://galaxy-synbiocad.org/u/mdulac/w/retropath2-3" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for RetroSynthesis" ; + schema1:sdDatePublished "2024-08-05 10:33:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/24/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8427 ; + schema1:creator ; + schema1:dateCreated "2020-05-29T10:08:17Z" ; + schema1:dateModified "2023-01-16T13:41:42Z" ; + schema1:description "Generate possible metabolic routes for the production of a target molecule in an organism of choice" ; + schema1:keywords "Retrosynthesis, pathway prediction, pathway design, Synthetic Biology, metabolic engineering" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "RetroSynthesis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/24?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-08-05 10:22:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5152 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:09Z" ; + schema1:dateModified "2024-06-11T12:55:09Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Simple bacterial assembly and annotation" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/966?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/bacass" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bacass" ; + schema1:sdDatePublished "2024-08-05 10:24:18 +0100" ; + schema1:url "https://workflowhub.eu/workflows/966/ro_crate?version=7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12331 ; + schema1:creator ; + schema1:dateCreated "2024-06-13T03:03:06Z" ; + schema1:dateModified "2024-06-13T03:03:06Z" ; + schema1:description "Simple bacterial assembly and annotation" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/966?version=7" ; + schema1:keywords "Assembly, bacterial-genomes, denovo, denovo-assembly, genome-assembly, hybrid-assembly, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bacass" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/966?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Exome Alignment Workflow\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/239?version=1" ; + schema1:isBasedOn "https://github.com/inab/ipc_workflows/tree/main/exome/alignment" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for exome-alignment" ; + schema1:sdDatePublished "2024-08-05 10:32:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/239/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 11999 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2241 ; + schema1:creator ; + schema1:dateCreated "2021-11-19T10:06:37Z" ; + schema1:dateModified "2023-01-16T13:55:02Z" ; + schema1:description """Exome Alignment Workflow\r +""" ; + schema1:image ; + schema1:keywords "cancer, pediatric, Alignment" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "exome-alignment" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/239?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + ; + ns1:output . + + a schema1:Dataset ; + schema1:datePublished "2023-11-09T11:09:07.998474" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/kmer-profiling-hifi-VGP1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "kmer-profiling-hifi-VGP1/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "kmer-profiling-hifi-VGP1/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/kmer-profiling-hifi-VGP1" ; + schema1:version "0.1.3" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/991?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/hlatyping" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hlatyping" ; + schema1:sdDatePublished "2024-08-05 10:22:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/991/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3699 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:56Z" ; + schema1:dateModified "2024-06-11T12:54:56Z" ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/991?version=9" ; + schema1:keywords "DNA, hla, hla-typing, immunology, optitype, personalized-medicine, rna" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hlatyping" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/991?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.195.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_dna_helparms" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Structural DNA helical parameters tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/195/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 72437 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T08:45:43Z" ; + schema1:dateModified "2023-04-14T08:47:02Z" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/195?version=4" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Structural DNA helical parameters tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_dna_helparms/master/biobb_wf_dna_helparms/notebooks/biobb_dna_helparms_tutorial.ipynb" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Simulations and figures supporting the manuscript \"Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake\"" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.511.4" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake" ; + schema1:sdDatePublished "2024-08-05 10:29:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/511/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 332 ; + schema1:creator , + , + , + , + , + ; + schema1:dateCreated "2024-01-11T09:54:04Z" ; + schema1:dateModified "2024-01-11T10:07:57Z" ; + schema1:description "Simulations and figures supporting the manuscript \"Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake\"" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/511?version=4" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/511?version=4" ; + schema1:version 4 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 75460 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Assembly, visualisation and quality control workflow for high fidelity reads built from circular consensus sequence (PacBio HiFi) data.\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/221?version=1" ; + schema1:isBasedOn "https://github.com/AustralianBioCommons/PacBio-HiFi-genome-assembly-using-hifiasm" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PacBio HiFi genome assembly using hifiasm (HiFi genome assembly stage 2)" ; + schema1:sdDatePublished "2024-08-05 10:31:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/221/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 27211 ; + schema1:creator , + ; + schema1:dateCreated "2021-10-26T00:25:23Z" ; + schema1:dateModified "2022-09-20T00:55:45Z" ; + schema1:description """Assembly, visualisation and quality control workflow for high fidelity reads built from circular consensus sequence (PacBio HiFi) data.\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/221?version=2" ; + schema1:isPartOf , + ; + schema1:keywords "FASTQ, hifiasm, HiFi, genome_assembly" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "PacBio HiFi genome assembly using hifiasm (HiFi genome assembly stage 2)" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/221?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 129485 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=17" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-08-05 10:23:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=17" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12455 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:50Z" ; + schema1:dateModified "2024-06-11T12:54:50Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=17" ; + schema1:version 17 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Metagenomic dataset taxonomic classification using kraken2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/124?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for 1: Plant virus detection with kraken2 (SE)" ; + schema1:sdDatePublished "2024-08-05 10:33:12 +0100" ; + schema1:url "https://workflowhub.eu/workflows/124/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9835 ; + schema1:dateCreated "2021-06-17T09:14:19Z" ; + schema1:dateModified "2023-02-13T14:06:45Z" ; + schema1:description "Metagenomic dataset taxonomic classification using kraken2" ; + schema1:keywords "Virology, kraken" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "1: Plant virus detection with kraken2 (SE)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/124?version=1" ; + schema1:version 1 ; + ns1:output , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Workflow converts one or multiple bam files back to the fastq format" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/968?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/bamtofastq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for qbic-pipelines/bamtofastq" ; + schema1:sdDatePublished "2024-08-05 10:24:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/968/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4298 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:44Z" ; + schema1:dateModified "2024-06-11T12:54:44Z" ; + schema1:description "Workflow converts one or multiple bam files back to the fastq format" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/968?version=5" ; + schema1:keywords "bamtofastq, Conversion, cramtofastq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "qbic-pipelines/bamtofastq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/968?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# HiFi *de novo* genome assembly workflow\r +\r +HiFi-assembly-workflow is a bioinformatics pipeline that can be used to analyse Pacbio CCS reads for *de novo* genome assembly using PacBio Circular Consensus Sequencing (CCS) reads. This workflow is implemented in Nextflow and has 3 major sections. \r + \r +Please refer to the following documentation for detailed description of each workflow section:\r + \r +- [Pre-assembly quality control (QC)](https://github.com/AusARG/hifi-assembly-workflow/blob/master/recommendations.md#stage-1-pre-assembly-quality-control)\r +- [Assembly](https://github.com/AusARG/hifi-assembly-workflow/blob/master/recommendations.md#stage-2-assembly)\r +- [Post-assembly QC](https://github.com/AusARG/hifi-assembly-workflow/blob/master/recommendations.md#stage-3-post-assembly-quality-control)\r +\r +## HiFi assembly workflow flowchart\r +\r +![](https://github.com/AusARG/hifi-assembly-workflow/blob/master/workflow.png?raw=true)\r +\r +# Quick Usage:\r +The pipeline has been tested on NCI Gadi and AGRF balder cluster. 
If needed to run on AGRF cluster, please contact us at bioinformatics@agrf.org.au.\r +Please note for running this on NCI Gadi you need access. Please refer to Gadi guidelines for account creation and usage: these can be found at https://opus.nci.org.au/display/Help/Access.\r +\r +Here is an example that can be used to run a phased assembly on Gadi:\r +\r +```\r +Module load nextflow/21.04.3\r +nextflow run Hifi_assembly.nf –bam_folder -profile gadi \r +\r +The workflow accepts 2 mandatory arguments:\r +--bam_folder -- Full Path to the CCS bam files\r +-profile -- gadi/balder/local\r +```\r +\r +Please note that you can either run jobs interactively or submit jobs to the cluster. This is determined by the -profile flag. By passing the gadi tag to the profile argument, the jobs are submitted and run on the cluster.\r +\r +# General recommendations for using the HiFi *de novo* genome assembly workflow\r +\r +## Example local profile usage\r +\r +```\r +Start a screen, submit a job, and run the workflow \r +Screen -S ‘name’\r +\r +qsub -I -qnormal -Pwz54 -lwalltime=48:00:00,ncpus=4,mem=200GB,storage=scratch/wz54+gdata/wz54,wd\r +export MODULEPATH=/apps/Modules/modulefiles:/g/data/wz54/groupResources/modules\r +\r +module load nextflow/21.04.3\r +nextflow run /g/data/wz54/groupResources/scripts/pl/hifi_assembly.nf --bam_folder -profile local\r +\r +#This load the scripts directory to the environmental PATH and load nextflow module\r +module load hifi_assembly/1.0.0 \r +```\r +\r +# Outputs\r +\r +Pipeline generates various files and folders here is a brief description: \r +The pipeline creates a folder called `secondary_analysis` that contains two sub folders named:\r +\r +- `exeReport` \r +- `Results` -- Contains preQC, assembly and postQC analysis files\r +\r +## exeReport\r +This folder contains a computation resource usage summary in various charts and a text file. 
\r +`report.html` provides a comprehensive summary.\r +\r +## Results\r +The `Results` folder contains three sub-directories preQC, assembly and postqc. As the name suggests, outputs from the respective workflow sections are placed in each of these folders.\r +\r +### preQC\r +The following table contains list of files and folder from preQC results\r +\r +| Output folder/file | File | Description |\r +| ------------------ | ---------------- | ------------------------------------------------------------------------------ |\r +| .fa | | Bam files converted to fasta format |\r +| kmer\\_analysis | | Folder containing kmer analysis outputs |\r +| | .jf | k-mer counts from each sample |\r +| | .histo | histogram of k-mer occurrence |\r +| genome\\_profiling | | genomescope profiling outputs |\r +| | summary.txt | Summary metrics of genome scope outputs |\r +| | linear\\_plot.png | Plot showing no. of times a k-mer observed by no. of k-mers with that coverage |\r +\r +\r +### Assembly\r +This folder contains final assembly results in format.\r +\r +- `_primary.fa` - Fasta file containing primary contigs\r +- `_associate.fa` - Fasta file containing associated contigs\r +\r +### postqc\r + \r +The postqc folder contains two sub folders \r +\r +- `assembly_completeness`\r +- `assembly_evaluation`\r +\r +#### assembly_completeness\r +This contains BUSCO evaluation results for primary and associate contig.\r +\r +#### assembly_evaluation\r +Assembly evaluation folder contains various file formats, here is a brief description for each of the outputs.\r +\r +| File | Description |\r +| ----------- | ----------------------------------------------------------------------------------------- |\r +| report.txt | Assessment summary in plain text format |\r +| report.tsv | Tab-separated version of the summary, suitable for spreadsheets (Google Docs, Excel, etc) |\r +| report.tex | LaTeX version of the summary |\r +| icarus.html | Icarus main menu with links to interactive viewers |\r 
+| report.html | HTML version of the report with interactive plots inside |\r +\r +\r +# Infrastructure usage and recommendations\r +\r +### NCI facility access\r +One should have a user account set with NCI to access gadi high performance computational facility. Setting up a NCI account is mentioned in detail at the following URL: https://opus.nci.org.au/display/Help/Setting+up+your+NCI+Account \r + \r +Documentation for a specific infrastructure should go into a infrastructure documentation template\r +https://github.com/AustralianBioCommons/doc_guidelines/blob/master/infrastructure_optimisation.md\r +\r +\r +## Compute resource usage across tested infrastructures\r +\r +| | Computational resource for plant case study |\r +| ------------------------------------- | ------------------------------------------- |\r +| | Time | CPU | Memory | I/O |\r +| Process | duration | realtime | %cpu | peak\\_rss | peak\\_vmem | rchar | wchar |\r +| Converting bam to fasta for sample | 12m 54s | 12m 48s | 99.80% | 5.2 MB | 197.7 MB | 43.3 GB | 50.1 GB |\r +| Generating k-mer counts and histogram | 26m 43s | 26m 36s | 1725.30% | 19.5 GB | 21 GB | 77.2 GB | 27.1 GB |\r +| Profiling genome characteristics | 34.7s | 13.2s | 89.00% | 135 MB | 601.2 MB | 8.5 MB | 845.9 KB |\r +| Denovo assembly | 6h 51m 15s | 6h 51m 11s | 4744.40% | 84.7 GB | 225.6 GB | 1.4 TB | 456 GB |\r +| evaluate\\_assemblies | 5m 18s | 4m 54s | 98.20% | 1.6 GB | 1.9 GB | 13.6 GB | 2.8 GB |\r +| assemblies\\_completeness | 25m 57s | 25m 53s | 2624.20% | 22 GB | 25.2 GB | 624.9 GB | 2.9 GB |\r +\r +\r +| | Computational resource for bird case study |\r +| ------------------------------------- | ------------------------------------------ |\r +| | Time | CPU | Memory | I/O |\r +| Process | duration | realtime | %cpu | peak\\_rss | peak\\_vmem | rchar | wchar |\r +| Converting bam to fasta for sample | 12m 54s | 7m 9s | 86.40% | 5.2 MB | 197.8 MB | 21.5 GB | 27.4 GB |\r +| Generating k-mer counts and histogram | 26m 
43s | 15m 34s | 1687.70% | 10.1 GB | 11.7 GB | 44 GB | 16.6 GB |\r +| Profiling genome characteristics | 34.7s | 1m 15s | 15.30% | 181.7 MB | 562.2 MB | 8.5 MB | 819.1 KB |\r +| De novo assembly | 6h 51m 15s | 9h 2m 47s | 1853.50% | 67.3 GB | 98.4 GB | 1 TB | 395.6 GB |\r +| evaluate assemblies | 5m 18s | 2m 48s | 97.50% | 1.1 GB | 1.4 GB | 8.7 GB | 1.8 GB |\r +| assemblies completeness | 25m 57s | 22m 36s | 2144.00% | 22.2 GB | 25 GB | 389.7 GB | 1.4 GB |\r +\r +\r +# Workflow summaries\r +\r +## Metadata\r +\r +| Metadata field | Pre-assembly quality control | Primary assembly | Post-assembly quality control |\r +| ---------------- | --------------------------------------------------------------------------------- | ------------------ | ----------------------------- |\r +| Version | 1.0 | 1.0 | 1.0 |\r +| Maturity | Production | Production | production |\r +| Creators | Naga, Kenneth | Naga, Kenneth | Naga, Kenneth |\r +| Source | [AusARG/hifi-assembly-workflow](https://github.com/AusARG/hifi-assembly-workflow) |\r +| License | MIT License | MIT License | MIT License |\r +| Workflow manager | NextFlow | NextFlow | NextFlow |\r +| Container | No containers used | No containers used | No containers used |\r +| Install method | Manual | Manual | Manual |\r +\r +\r +## Component tools\r +​\r +| Workflow element | Workflow element version | Workflow title |\r +| --------------------------------- | ------------------------ | ----------------------------- |\r +| Samtools, jellyfish, genomescope | 1.0 | Pre-assembly quality control |\r +| Improved phased assembler (pbipa) | 1.0 | Primary assembly |\r +| Quast and busco | 1.0 | Post-assembly quality control |\r +\r +\r +## Required (minimum) inputs/parameters\r + \r +PATH to HIFI bam folder is the minimum requirement for the processing the pipeline.\r +\r +## Third party tools / dependencies\r +\r +The following packages are used by the pipeline.\r +\r +- `nextflow/21.04.3`\r +- `samtools/1.12`\r +- `jellyfish/2.3.0`\r +- 
`genomescope/2.0`\r +- `ipa/1.3.1`\r +- `quast/5.0.2`\r +- `busco/5.2.2`\r +\r +The following paths contain all modules required for the pipeline.\r +\r +- `/apps/Modules/modulefiles`\r +- `/g/data/wz54/groupResources/modules`\r +\r +---\r +\r +# Help/FAQ/Troubleshooting\r +\r +Direct training and help is available if you are new to HPC and/or new to NCI/Gadi.\r +\r +- Basic information to get started with the NCI Gadi for bioinformatics can be found at https://github.com/AusARG/ABLeS/wiki/temppage.\r +- For NCI support, contact the NCI helpdesk directly at https://www.nci.org.au/users/nci-helpdesk\r +- Queue limits and structure explained at https://opus.nci.org.au/display/Help/4.+PBS+Jobs\r +\r +---\r +\r +# 3rd party Tutorials \r +\r +A tutorial by Andrew Severin on running GenomeScope 1.0 is available here:\r +https://github.com/AusARG/hifi-assembly-workflow.git\r +\r +Improved Phased Assembler tutorial is available at \r +https://github.com/PacificBiosciences/pbbioconda/wiki/Improved-Phased-Assembler\r +\r +Busco tutorial\r +https://wurmlab.com/genomicscourse/2016-SIB/practicals/busco/busco_tutorial\r +\r +---\r +\r +# Licence(s)\r +\r +MIT License\r +\r +Copyright (c) 2022 AusARG\r +\r +Permission is hereby granted, free of charge, to any person obtaining a copy\r +of this software and associated documentation files (the "Software"), to deal\r +in the Software without restriction, including without limitation the rights\r +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r +copies of the Software, and to permit persons to whom the Software is\r +furnished to do so, subject to the following conditions:\r +\r +The above copyright notice and this permission notice shall be included in all\r +copies or substantial portions of the Software.\r +\r +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r +FITNESS FOR A PARTICULAR PURPOSE AND 
NONINFRINGEMENT. IN NO EVENT SHALL THE\r +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r +SOFTWARE.\r +\r +---\r +\r +# Acknowledgements/citations/credits\r +\r +> Jung, H. et al. Twelve quick steps for genome assembly and annotation in the classroom. PLoS Comput. Biol. 16, 1–25 (2020).\r +\r +> 2020, G. A. W. No Title. https://ucdavis-bioinformatics-training.github.io/2020-Genome_Assembly_Workshop/kmers/kmers.\r +\r +> Sović, I. et al. Improved Phased Assembly using HiFi Data. (2020).\r +\r +> Gurevich, A., Saveliev, V., Vyahhi, N. & Tesler, G. QUAST: Quality assessment tool for genome assemblies. Bioinformatics 29, 1072–1075 (2013).\r +\r +> Waterhouse, R. M. et al. BUSCO applications from quality assessments to gene prediction and phylogenomics. Mol. Biol. Evol. 35, 543–548 (2018).\r +\r +---\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/340?version=1" ; + schema1:isBasedOn "https://github.com/AusARG/hifi-assembly-workflow.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for HiFi de novo genome assembly workflow" ; + schema1:sdDatePublished "2024-08-05 10:32:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/340/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2978 ; + schema1:dateCreated "2022-05-10T23:42:38Z" ; + schema1:dateModified "2023-01-16T13:59:52Z" ; + schema1:description """# HiFi *de novo* genome assembly workflow\r +\r +HiFi-assembly-workflow is a bioinformatics pipeline that can be used to analyse Pacbio CCS reads for *de novo* genome assembly using PacBio Circular Consensus Sequencing (CCS) reads. 
This workflow is implemented in Nextflow and has 3 major sections. \r + \r +Please refer to the following documentation for detailed description of each workflow section:\r + \r +- [Pre-assembly quality control (QC)](https://github.com/AusARG/hifi-assembly-workflow/blob/master/recommendations.md#stage-1-pre-assembly-quality-control)\r +- [Assembly](https://github.com/AusARG/hifi-assembly-workflow/blob/master/recommendations.md#stage-2-assembly)\r +- [Post-assembly QC](https://github.com/AusARG/hifi-assembly-workflow/blob/master/recommendations.md#stage-3-post-assembly-quality-control)\r +\r +## HiFi assembly workflow flowchart\r +\r +![](https://github.com/AusARG/hifi-assembly-workflow/blob/master/workflow.png?raw=true)\r +\r +# Quick Usage:\r +The pipeline has been tested on NCI Gadi and AGRF balder cluster. If needed to run on AGRF cluster, please contact us at bioinformatics@agrf.org.au.\r +Please note for running this on NCI Gadi you need access. Please refer to Gadi guidelines for account creation and usage: these can be found at https://opus.nci.org.au/display/Help/Access.\r +\r +Here is an example that can be used to run a phased assembly on Gadi:\r +\r +```\r +Module load nextflow/21.04.3\r +nextflow run Hifi_assembly.nf –bam_folder -profile gadi \r +\r +The workflow accepts 2 mandatory arguments:\r +--bam_folder -- Full Path to the CCS bam files\r +-profile -- gadi/balder/local\r +```\r +\r +Please note that you can either run jobs interactively or submit jobs to the cluster. This is determined by the -profile flag. 
By passing the gadi tag to the profile argument, the jobs are submitted and run on the cluster.\r +\r +# General recommendations for using the HiFi *de novo* genome assembly workflow\r +\r +## Example local profile usage\r +\r +```\r +Start a screen, submit a job, and run the workflow \r +Screen -S ‘name’\r +\r +qsub -I -qnormal -Pwz54 -lwalltime=48:00:00,ncpus=4,mem=200GB,storage=scratch/wz54+gdata/wz54,wd\r +export MODULEPATH=/apps/Modules/modulefiles:/g/data/wz54/groupResources/modules\r +\r +module load nextflow/21.04.3\r +nextflow run /g/data/wz54/groupResources/scripts/pl/hifi_assembly.nf --bam_folder -profile local\r +\r +#This load the scripts directory to the environmental PATH and load nextflow module\r +module load hifi_assembly/1.0.0 \r +```\r +\r +# Outputs\r +\r +Pipeline generates various files and folders here is a brief description: \r +The pipeline creates a folder called `secondary_analysis` that contains two sub folders named:\r +\r +- `exeReport` \r +- `Results` -- Contains preQC, assembly and postQC analysis files\r +\r +## exeReport\r +This folder contains a computation resource usage summary in various charts and a text file. \r +`report.html` provides a comprehensive summary.\r +\r +## Results\r +The `Results` folder contains three sub-directories preQC, assembly and postqc. 
As the name suggests, outputs from the respective workflow sections are placed in each of these folders.\r +\r +### preQC\r +The following table contains list of files and folder from preQC results\r +\r +| Output folder/file | File | Description |\r +| ------------------ | ---------------- | ------------------------------------------------------------------------------ |\r +| .fa | | Bam files converted to fasta format |\r +| kmer\\_analysis | | Folder containing kmer analysis outputs |\r +| | .jf | k-mer counts from each sample |\r +| | .histo | histogram of k-mer occurrence |\r +| genome\\_profiling | | genomescope profiling outputs |\r +| | summary.txt | Summary metrics of genome scope outputs |\r +| | linear\\_plot.png | Plot showing no. of times a k-mer observed by no. of k-mers with that coverage |\r +\r +\r +### Assembly\r +This folder contains final assembly results in format.\r +\r +- `_primary.fa` - Fasta file containing primary contigs\r +- `_associate.fa` - Fasta file containing associated contigs\r +\r +### postqc\r + \r +The postqc folder contains two sub folders \r +\r +- `assembly_completeness`\r +- `assembly_evaluation`\r +\r +#### assembly_completeness\r +This contains BUSCO evaluation results for primary and associate contig.\r +\r +#### assembly_evaluation\r +Assembly evaluation folder contains various file formats, here is a brief description for each of the outputs.\r +\r +| File | Description |\r +| ----------- | ----------------------------------------------------------------------------------------- |\r +| report.txt | Assessment summary in plain text format |\r +| report.tsv | Tab-separated version of the summary, suitable for spreadsheets (Google Docs, Excel, etc) |\r +| report.tex | LaTeX version of the summary |\r +| icarus.html | Icarus main menu with links to interactive viewers |\r +| report.html | HTML version of the report with interactive plots inside |\r +\r +\r +# Infrastructure usage and recommendations\r +\r +### NCI facility 
access\r +One should have a user account set with NCI to access gadi high performance computational facility. Setting up a NCI account is mentioned in detail at the following URL: https://opus.nci.org.au/display/Help/Setting+up+your+NCI+Account \r + \r +Documentation for a specific infrastructure should go into a infrastructure documentation template\r +https://github.com/AustralianBioCommons/doc_guidelines/blob/master/infrastructure_optimisation.md\r +\r +\r +## Compute resource usage across tested infrastructures\r +\r +| | Computational resource for plant case study |\r +| ------------------------------------- | ------------------------------------------- |\r +| | Time | CPU | Memory | I/O |\r +| Process | duration | realtime | %cpu | peak\\_rss | peak\\_vmem | rchar | wchar |\r +| Converting bam to fasta for sample | 12m 54s | 12m 48s | 99.80% | 5.2 MB | 197.7 MB | 43.3 GB | 50.1 GB |\r +| Generating k-mer counts and histogram | 26m 43s | 26m 36s | 1725.30% | 19.5 GB | 21 GB | 77.2 GB | 27.1 GB |\r +| Profiling genome characteristics | 34.7s | 13.2s | 89.00% | 135 MB | 601.2 MB | 8.5 MB | 845.9 KB |\r +| Denovo assembly | 6h 51m 15s | 6h 51m 11s | 4744.40% | 84.7 GB | 225.6 GB | 1.4 TB | 456 GB |\r +| evaluate\\_assemblies | 5m 18s | 4m 54s | 98.20% | 1.6 GB | 1.9 GB | 13.6 GB | 2.8 GB |\r +| assemblies\\_completeness | 25m 57s | 25m 53s | 2624.20% | 22 GB | 25.2 GB | 624.9 GB | 2.9 GB |\r +\r +\r +| | Computational resource for bird case study |\r +| ------------------------------------- | ------------------------------------------ |\r +| | Time | CPU | Memory | I/O |\r +| Process | duration | realtime | %cpu | peak\\_rss | peak\\_vmem | rchar | wchar |\r +| Converting bam to fasta for sample | 12m 54s | 7m 9s | 86.40% | 5.2 MB | 197.8 MB | 21.5 GB | 27.4 GB |\r +| Generating k-mer counts and histogram | 26m 43s | 15m 34s | 1687.70% | 10.1 GB | 11.7 GB | 44 GB | 16.6 GB |\r +| Profiling genome characteristics | 34.7s | 1m 15s | 15.30% | 181.7 MB | 562.2 MB | 
8.5 MB | 819.1 KB |\r +| De novo assembly | 6h 51m 15s | 9h 2m 47s | 1853.50% | 67.3 GB | 98.4 GB | 1 TB | 395.6 GB |\r +| evaluate assemblies | 5m 18s | 2m 48s | 97.50% | 1.1 GB | 1.4 GB | 8.7 GB | 1.8 GB |\r +| assemblies completeness | 25m 57s | 22m 36s | 2144.00% | 22.2 GB | 25 GB | 389.7 GB | 1.4 GB |\r +\r +\r +# Workflow summaries\r +\r +## Metadata\r +\r +| Metadata field | Pre-assembly quality control | Primary assembly | Post-assembly quality control |\r +| ---------------- | --------------------------------------------------------------------------------- | ------------------ | ----------------------------- |\r +| Version | 1.0 | 1.0 | 1.0 |\r +| Maturity | Production | Production | production |\r +| Creators | Naga, Kenneth | Naga, Kenneth | Naga, Kenneth |\r +| Source | [AusARG/hifi-assembly-workflow](https://github.com/AusARG/hifi-assembly-workflow) |\r +| License | MIT License | MIT License | MIT License |\r +| Workflow manager | NextFlow | NextFlow | NextFlow |\r +| Container | No containers used | No containers used | No containers used |\r +| Install method | Manual | Manual | Manual |\r +\r +\r +## Component tools\r +​\r +| Workflow element | Workflow element version | Workflow title |\r +| --------------------------------- | ------------------------ | ----------------------------- |\r +| Samtools, jellyfish, genomescope | 1.0 | Pre-assembly quality control |\r +| Improved phased assembler (pbipa) | 1.0 | Primary assembly |\r +| Quast and busco | 1.0 | Post-assembly quality control |\r +\r +\r +## Required (minimum) inputs/parameters\r + \r +PATH to HIFI bam folder is the minimum requirement for the processing the pipeline.\r +\r +## Third party tools / dependencies\r +\r +The following packages are used by the pipeline.\r +\r +- `nextflow/21.04.3`\r +- `samtools/1.12`\r +- `jellyfish/2.3.0`\r +- `genomescope/2.0`\r +- `ipa/1.3.1`\r +- `quast/5.0.2`\r +- `busco/5.2.2`\r +\r +The following paths contain all modules required for the pipeline.\r +\r 
+- `/apps/Modules/modulefiles`\r +- `/g/data/wz54/groupResources/modules`\r +\r +---\r +\r +# Help/FAQ/Troubleshooting\r +\r +Direct training and help is available if you are new to HPC and/or new to NCI/Gadi.\r +\r +- Basic information to get started with the NCI Gadi for bioinformatics can be found at https://github.com/AusARG/ABLeS/wiki/temppage.\r +- For NCI support, contact the NCI helpdesk directly at https://www.nci.org.au/users/nci-helpdesk\r +- Queue limits and structure explained at https://opus.nci.org.au/display/Help/4.+PBS+Jobs\r +\r +---\r +\r +# 3rd party Tutorials \r +\r +A tutorial by Andrew Severin on running GenomeScope 1.0 is available here:\r +https://github.com/AusARG/hifi-assembly-workflow.git\r +\r +Improved Phased Assembler tutorial is available at \r +https://github.com/PacificBiosciences/pbbioconda/wiki/Improved-Phased-Assembler\r +\r +Busco tutorial\r +https://wurmlab.com/genomicscourse/2016-SIB/practicals/busco/busco_tutorial\r +\r +---\r +\r +# Licence(s)\r +\r +MIT License\r +\r +Copyright (c) 2022 AusARG\r +\r +Permission is hereby granted, free of charge, to any person obtaining a copy\r +of this software and associated documentation files (the "Software"), to deal\r +in the Software without restriction, including without limitation the rights\r +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r +copies of the Software, and to permit persons to whom the Software is\r +furnished to do so, subject to the following conditions:\r +\r +The above copyright notice and this permission notice shall be included in all\r +copies or substantial portions of the Software.\r +\r +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\r +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r +SOFTWARE.\r +\r +---\r +\r +# Acknowledgements/citations/credits\r +\r +> Jung, H. et al. Twelve quick steps for genome assembly and annotation in the classroom. PLoS Comput. Biol. 16, 1–25 (2020).\r +\r +> 2020, G. A. W. No Title. https://ucdavis-bioinformatics-training.github.io/2020-Genome_Assembly_Workshop/kmers/kmers.\r +\r +> Sović, I. et al. Improved Phased Assembly using HiFi Data. (2020).\r +\r +> Gurevich, A., Saveliev, V., Vyahhi, N. & Tesler, G. QUAST: Quality assessment tool for genome assemblies. Bioinformatics 29, 1072–1075 (2013).\r +\r +> Waterhouse, R. M. et al. BUSCO applications from quality assessments to gene prediction and phylogenomics. Mol. Biol. Evol. 35, 543–548 (2018).\r +\r +---\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "HiFi de novo genome assembly workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/340?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "# beacon-omop-worker-workflows" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/882?version=2" ; + schema1:isBasedOn "https://github.com/Health-Informatics-UoN/beacon-omop-worker-workflows.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for beacon-workflow" ; + schema1:sdDatePublished "2024-08-05 10:24:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/882/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 2831 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 362 ; + schema1:dateCreated "2024-05-22T13:18:49Z" ; + schema1:dateModified "2024-05-22T13:18:49Z" ; + schema1:description "# beacon-omop-worker-workflows" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/882?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "beacon-workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/882?version=2" ; + schema1:version 2 ; + ns1:input ; + ns1:output . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. 
We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.55.4" ; + 
schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_pmx_tutorial" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)" ; + schema1:sdDatePublished "2024-08-05 10:25:30 +0100" ; + schema1:url "https://workflowhub.eu/workflows/55/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 52945 ; + schema1:creator , + ; + schema1:dateCreated "2022-09-16T09:35:38Z" ; + schema1:dateModified "2023-01-16T13:44:51Z" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. 
Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/55?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/biobb_wf_pmx_tutorial.ipynb" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "An example workflow for the Specimen Data Refinery tool, allowing an individual tool to be used" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.375.1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for HTR-Collections-test" ; + schema1:sdDatePublished "2024-08-05 10:31:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/375/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15329 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-07-08T13:05:11Z" ; + schema1:dateModified "2023-01-16T14:02:00Z" ; + schema1:description "An example workflow for the Specimen Data Refinery tool, allowing an individual tool to be used" ; + schema1:keywords "Default-SDR, multi-specimen-input, collections, validated-2022-06-29" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "HTR-Collections-test" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/375?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description "This repository contains the python code to reproduce the experiments in Dłotko, Gurnari \"Euler Characteristic Curves and Profiles: a stable shape invariant for big data problems\"" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.576.1" ; + schema1:isBasedOn "https://github.com/dioscuri-tda/ecp_experiments" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ECP experiments" ; + schema1:sdDatePublished "2024-08-05 10:27:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/576/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 185 ; + schema1:creator ; + schema1:dateCreated "2023-09-25T12:10:08Z" ; + schema1:dateModified "2023-09-25T14:35:37Z" ; + schema1:description "This repository contains the python code to reproduce the experiments in Dłotko, Gurnari \"Euler Characteristic Curves and Profiles: a stable shape invariant for big data problems\"" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ECP experiments" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/576?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/963?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/airrflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bcellmagic" ; + schema1:sdDatePublished "2024-08-05 10:24:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/963/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6709 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:36Z" ; + schema1:dateModified "2024-06-11T12:54:36Z" ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/963?version=13" ; + schema1:keywords "airr, b-cell, immcantation, immunorepertoire, repseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bcellmagic" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/963?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.195.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_dna_helparms" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Structural DNA helical parameters tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/195/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 72225 ; + schema1:creator , + ; + schema1:dateCreated "2021-09-28T08:43:19Z" ; + schema1:dateModified "2022-09-15T12:36:32Z" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/195?version=4" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Structural DNA helical parameters tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/195?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.281.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_protein_complex_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/281/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8747 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-17T09:57:03Z" ; + schema1:dateModified "2022-11-22T09:59:54Z" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/281?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher 
; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_protein_complex_md_setup/python/workflow.py" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "The workflow takes a trimmed Illumina WGS paired-end reads collection, Collapsed contigs, and the values for transition parameter and max coverage depth (calculated from WF1) to run Purge_Dups. It produces purged Collapsed contigs assemblies, and runs all the QC analysis (gfastats, BUSCO, and Merqury). " ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/701?version=1" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/galaxy" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ERGA ONT+Illumina Collapsed Purge+QC v2311 (WF3)" ; + schema1:sdDatePublished "2024-08-05 10:26:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/701/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 43492 ; + schema1:creator , + ; + schema1:dateCreated "2024-01-09T10:40:06Z" ; + schema1:dateModified "2024-01-09T10:44:51Z" ; + schema1:description "The workflow takes a trimmed Illumina WGS paired-end reads collection, Collapsed contigs, and the values for transition parameter and max coverage depth (calculated from WF1) to run Purge_Dups. It produces purged Collapsed contigs assemblies, and runs all the QC analysis (gfastats, BUSCO, and Merqury). 
" ; + schema1:image ; + schema1:isPartOf , + ; + schema1:keywords "name:ASSEMBLY+QC, ERGA, illumina" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ERGA ONT+Illumina Collapsed Purge+QC v2311 (WF3)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/3.Purging/Galaxy-Workflow-ERGA_Illumina_Collapsed_Purge_QC_v2311_(WF3).ga" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + ; + ns1:output . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 128917 ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/3.Purging/pics/Purge_illumina_2311.png" . + + a schema1:Dataset ; + schema1:datePublished "2024-02-16T10:01:26.347512" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Purge-duplicate-contigs-VGP6/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicate-contigs-VGP6/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:version "0.3.4" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """The containerised pipeline for profiling shotgun metagenomic data is derived from the [MGnify](https://www.ebi.ac.uk/metagenomics/) pipeline raw-reads analyses, a well-established resource used for analyzing microbiome data.\r +Key components:\r +- Quality control and decontamination\r +- rRNA and ncRNA detection using Rfam database\r +- Taxonomic classification of SSU and LSU regions \r +- Abundance analysis with mOTUs""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/450?version=1" ; + schema1:isBasedOn "https://github.com/EBI-Metagenomics/motus_pipeline" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for MGnify raw reads taxonomic profiling pipeline" ; + schema1:sdDatePublished "2024-08-05 10:31:10 +0100" ; + schema1:url "https://workflowhub.eu/workflows/450/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 385320 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 131 ; + schema1:creator , + ; + schema1:dateCreated "2023-03-30T20:49:19Z" ; + schema1:dateModified "2023-03-30T20:58:56Z" ; + schema1:description """The containerised pipeline for profiling shotgun metagenomic data is derived from the [MGnify](https://www.ebi.ac.uk/metagenomics/) pipeline raw-reads analyses, a well-established resource used for analyzing microbiome data.\r +Key components:\r +- Quality control and decontamination\r +- rRNA and ncRNA detection using Rfam database\r +- Taxonomic classification of SSU and LSU regions \r +- Abundance analysis with mOTUs""" ; + schema1:image ; + schema1:keywords "Nextflow, Metagenomics" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "MGnify raw reads taxonomic profiling pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/450?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# IndexReferenceFasta-nf\r +===========\r +\r + - [Description](#description)\r + - [Diagram](#diagram)\r + - [User guide](#user-guide)\r + - [Benchmarking](#benchmarking)\r + - [Workflow summaries](#workflow-summaries)\r + - [Metadata](#metadata)\r + - [Component tools](#component-tools)\r + - [Required (minimum)\r + inputs/parameters](#required-minimum-inputsparameters)\r + - [Additional notes](#additional-notes)\r + - [Help/FAQ/Troubleshooting](#helpfaqtroubleshooting)\r + - [Acknowledgements/citations/credits](#acknowledgementscitationscredits)\r +\r +---\r +\r +## Description\r +This is a flexible pipeline for generating common reference genome index files for WGS data analysis. 
IndexReferenceFasta-nf is a Nextflow (DSL2) pipeline that runs the following tools using Singularity containers:\r +* Samtools faidx\r +* BWA index\r +* GATK CreateSequenceDictionary \r +\r +## Diagram\r +

\r +\r +

\r +\r +## User guide\r +**1. Set up**\r +\r +Clone this repository by running:\r +```\r +git clone https://github.com/Sydney-Informatics-Hub/IndexReferenceFasta-nf.git\r +cd IndexReferenceFasta-nf\r +``` \r +\r +**2. Generate indexes** \r +\r +Users can specify which index files to create by using the `--samtools`, `--bwa`, and/or `--gatk` flags. All are optional. Run the pipeline with:\r +\r +```\r +nextflow run main.nf /path/to/ref.fasta --bwa --samtools --gatk \r +```\r +\r +## Benchmarking\r +\r +### Human hg38 reference assembly @ Pawsey's Nimbus (NCPU/task = 1)\r +|task_id|hash |native_id|name |status |exit|submit |duration |realtime |%cpu |peak_rss|peak_vmem|rchar |wchar |\r +|-------|---------|---------|--------------|---------|----|-------|----------|----------|-------|--------|---------|-------|-------|\r +|3 |27/33fffc|131621 |samtools_index|COMPLETED|0 |55:44.9|12.2s |12s |99.20% |6.3 MB |11.8 MB |3 GB |19.1 KB|\r +|1 |80/f03e46|131999 |gatk_index |COMPLETED|0 |55:46.7|22.6s |22.3s |231.90%|3.8 GB |37.1 GB |3.1 GB |726 KB |\r +|2 |ea/e29535|131594 |bwa_index |COMPLETED|0 |55:44.9|1h 50m 16s|1h 50m 15s|99.50% |4.5 GB |4.5 GB |12.1 GB|8.2 GB |\r +\r +## Workflow summaries\r +\r +### Metadata\r +|metadata field | workflow_name / workflow_version |\r +|-------------------|:---------------------------------:|\r +|Version | workflow_version |\r +|Maturity | under development |\r +|Creators | Georgie Samaha |\r +|Source | NA |\r +|License | GPL-3.0 license |\r +|Workflow manager | NextFlow |\r +|Container | None |\r +|Install method | Manual |\r +|GitHub | Sydney-Informatics-Hub/IndexReferenceFasta-nf |\r +|bio.tools | NA |\r +|BioContainers | NA | \r +|bioconda | NA |\r +\r +### Component tools\r +\r +* samtools/1.15.1\r +* gatk/4.2.6.1 \r +* bwa/0.7.17\r +\r +### Required (minimum) inputs/parameters\r +\r +* A reference genome file in fasta format.\r +\r +## Additional notes\r +\r +### Help/FAQ/Troubleshooting\r +\r +## Acknowledgements/citations/credits\r 
+### Authors \r +- Georgie Samaha (Sydney Informatics Hub, University of Sydney) \r +\r +### Acknowledgements \r +\r +- This pipeline was built using the [Nextflow DSL2 template](https://github.com/Sydney-Informatics-Hub/Nextflow_DSL2_template). \r +- Documentation was created following the [Australian BioCommons documentation guidelines](https://github.com/AustralianBioCommons/doc_guidelines). \r +\r +### Cite us to support us! \r +Acknowledgements (and co-authorship, where appropriate) are an important way for us to demonstrate the value we bring to your research. Your research outcomes are vital for ongoing funding of the Sydney Informatics Hub and national compute facilities. We suggest including the following acknowledgement in any publications that follow from this work: \r +\r +The authors acknowledge the technical assistance provided by the Sydney Informatics Hub, a Core Research Facility of the University of Sydney and the Australian BioCommons which is enabled by NCRIS via Bioplatforms Australia. \r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.393.1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/IndexReferenceFasta-nf" ; + schema1:license "LGPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for IndexReferenceFasta-nf" ; + schema1:sdDatePublished "2024-08-05 10:31:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/393/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2496 ; + schema1:creator ; + schema1:dateCreated "2022-10-12T02:34:32Z" ; + schema1:dateModified "2023-01-16T14:02:39Z" ; + schema1:description """# IndexReferenceFasta-nf\r +===========\r +\r + - [Description](#description)\r + - [Diagram](#diagram)\r + - [User guide](#user-guide)\r + - [Benchmarking](#benchmarking)\r + - [Workflow summaries](#workflow-summaries)\r + - [Metadata](#metadata)\r + - [Component tools](#component-tools)\r + - [Required (minimum)\r + inputs/parameters](#required-minimum-inputsparameters)\r + - [Additional notes](#additional-notes)\r + - [Help/FAQ/Troubleshooting](#helpfaqtroubleshooting)\r + - [Acknowledgements/citations/credits](#acknowledgementscitationscredits)\r +\r +---\r +\r +## Description\r +This is a flexible pipeline for generating common reference genome index files for WGS data analysis. IndexReferenceFasta-nf is a Nextflow (DSL2) pipeline that runs the following tools using Singularity containers:\r +* Samtools faidx\r +* BWA index\r +* GATK CreateSequenceDictionary \r +\r +## Diagram\r +

\r +\r +

\r +\r +## User guide\r +**1. Set up**\r +\r +Clone this repository by running:\r +```\r +git clone https://github.com/Sydney-Informatics-Hub/IndexReferenceFasta-nf.git\r +cd IndexReferenceFasta-nf\r +``` \r +\r +**2. Generate indexes** \r +\r +Users can specify which index files to create by using the `--samtools`, `--bwa`, and/or `--gatk` flags. All are optional. Run the pipeline with:\r +\r +```\r +nextflow run main.nf /path/to/ref.fasta --bwa --samtools --gatk \r +```\r +\r +## Benchmarking\r +\r +### Human hg38 reference assembly @ Pawsey's Nimbus (NCPU/task = 1)\r +|task_id|hash |native_id|name |status |exit|submit |duration |realtime |%cpu |peak_rss|peak_vmem|rchar |wchar |\r +|-------|---------|---------|--------------|---------|----|-------|----------|----------|-------|--------|---------|-------|-------|\r +|3 |27/33fffc|131621 |samtools_index|COMPLETED|0 |55:44.9|12.2s |12s |99.20% |6.3 MB |11.8 MB |3 GB |19.1 KB|\r +|1 |80/f03e46|131999 |gatk_index |COMPLETED|0 |55:46.7|22.6s |22.3s |231.90%|3.8 GB |37.1 GB |3.1 GB |726 KB |\r +|2 |ea/e29535|131594 |bwa_index |COMPLETED|0 |55:44.9|1h 50m 16s|1h 50m 15s|99.50% |4.5 GB |4.5 GB |12.1 GB|8.2 GB |\r +\r +## Workflow summaries\r +\r +### Metadata\r +|metadata field | workflow_name / workflow_version |\r +|-------------------|:---------------------------------:|\r +|Version | workflow_version |\r +|Maturity | under development |\r +|Creators | Georgie Samaha |\r +|Source | NA |\r +|License | GPL-3.0 license |\r +|Workflow manager | NextFlow |\r +|Container | None |\r +|Install method | Manual |\r +|GitHub | Sydney-Informatics-Hub/IndexReferenceFasta-nf |\r +|bio.tools | NA |\r +|BioContainers | NA | \r +|bioconda | NA |\r +\r +### Component tools\r +\r +* samtools/1.15.1\r +* gatk/4.2.6.1 \r +* bwa/0.7.17\r +\r +### Required (minimum) inputs/parameters\r +\r +* A reference genome file in fasta format.\r +\r +## Additional notes\r +\r +### Help/FAQ/Troubleshooting\r +\r +## Acknowledgements/citations/credits\r 
+### Authors \r +- Georgie Samaha (Sydney Informatics Hub, University of Sydney) \r +\r +### Acknowledgements \r +\r +- This pipeline was built using the [Nextflow DSL2 template](https://github.com/Sydney-Informatics-Hub/Nextflow_DSL2_template). \r +- Documentation was created following the [Australian BioCommons documentation guidelines](https://github.com/AustralianBioCommons/doc_guidelines). \r +\r +### Cite us to support us! \r +Acknowledgements (and co-authorship, where appropriate) are an important way for us to demonstrate the value we bring to your research. Your research outcomes are vital for ongoing funding of the Sydney Informatics Hub and national compute facilities. We suggest including the following acknowledgement in any publications that follow from this work: \r +\r +The authors acknowledge the technical assistance provided by the Sydney Informatics Hub, a Core Research Facility of the University of Sydney and the Australian BioCommons which is enabled by NCRIS via Bioplatforms Australia. \r +""" ; + schema1:keywords "Bioinformatics, Nextflow, WGS, index, referencegenome, SAMTools, GATK, BWA, Genomics" ; + schema1:license "https://spdx.org/licenses/LGPL-3.0" ; + schema1:name "IndexReferenceFasta-nf" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/393?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=14" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-08-05 10:23:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=14" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15644 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:15Z" ; + schema1:dateModified "2024-06-11T12:55:15Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=14" ; + schema1:version 14 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=15" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-08-05 10:23:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=15" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11341 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:50Z" ; + schema1:dateModified "2024-06-11T12:54:50Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=15" ; + schema1:version 15 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """\r +Author: AMBARISH KUMAR er.ambarish@gmail.com; ambari73_sit@jnu.ac.in\r +\r +This is a proposed standard operating procedure for genomic variant detection using VARSCAN.\r +\r +It is hoped to be effective and useful for getting SARS-CoV-2 genome variants.\r +\r +\r +\r +It uses Illumina RNASEQ reads and genome sequence.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/31?version=1" ; + schema1:isBasedOn "https://github.com/ambarishK/bio-cwl-tools/blob/release/varscanW.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genomic variants - SNPs and INDELs detection using VARSCAN2." ; + schema1:sdDatePublished "2024-08-05 10:33:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/31/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 57754 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2917 ; + schema1:creator ; + schema1:dateCreated "2020-06-17T06:24:44Z" ; + schema1:dateModified "2023-01-16T13:42:27Z" ; + schema1:description """\r +Author: AMBARISH KUMAR er.ambarish@gmail.com; ambari73_sit@jnu.ac.in\r +\r +This is a proposed standard operating procedure for genomic variant detection using VARSCAN.\r +\r +It is hoped to be effective and useful for getting SARS-CoV-2 genome variants.\r +\r +\r +\r +It uses Illumina RNASEQ reads and genome sequence.\r +""" ; + schema1:image ; + schema1:keywords "CWL, SNPs, INDELs, VARSCAN2" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Genomic variants - SNPs and INDELs detection using VARSCAN2." ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/31?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + ; + ns1:output , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# ProGFASTAGen\r +\r +The ProGFASTAGen (**Pro**tein-**G**raph-**FASTA**-**Gen**erator or **Pro**t**G**raph-**FASTA**-**Gen**erator) repository contains workflows to generate so-called precursor-specific-FASTAs (using the precursors from MGF-files) including feature-peptides, like VARIANTs or CONFLICTs if desired, or global-FASTAs (as described in [ProtGraph](https://github.com/mpc-bioinformatics/ProtGraph)). The single workflow scripts have been implemented with [Nextflow-DSL-2](https://www.nextflow.io/docs/latest/dsl2.html) and are independent to each other. Each of these workflows can be used on their own or can be imported to other workflows for other use-cases. Further, we included three main-workflows, to show how the single workflows can be chained together. 
The `main_workflow_protein_fasta.nf`-workflow converts Thermo-RAW-files into MGF, searches with Comet (and Percolator) and the identification results are then further summarized. The workflows `main_workflow_global_fasta.nf` and `main_workflow_precursor_specific_fasta.nf` generate specific FASTA-files before search-engine-identification. Below are example nextflow-calls, which can be used.\r +\r +Regarding the precursor-specific-FASTA-generation: The source-code of the C++ implementation for traversal can be found in `bin`. There, four implementations are present: `Float/Int`-Versions as well as `DryRun/VarLimitter`-Versions of the traversal. The `Float/Int`-Versions can be faster/slower depending on the processor-architecture and can be used via a flag in the `create_precursor_specific_fasta.nf`-workflow. The `DryRun`-Version does not generate a FASTA but tests the used system (depending on a query-timeout) to determine the maximum number of variants which can be used, while not timing out. The actual FASTA-generation happens in the `VarLimitter`-Version using the generated protein-graphs at hand.\r +\r +In **Prerequisites** a small description of dependencies and how to set up the host system is given. **Individual steps** describes the single workflows and how they can be called, while **Main Workflow Scripts** shows example-calls of the main workflows. In **Regenerate Results from Publication**, the calls and parameters are shown, which were used in the publication. Using the same FASTA or UniProt flat file format with a similar server-setting should yield similar results as used in the publication.\r +\r +## Prerequisites\r +\r +### Executing on Linux\r +\r +This workflow can be only executed on linux (tested on Ubuntu 22.04 and ArchLinux). Before setting up the `bin`-folder, some required binaries need to be present on the OS. 
(Focusing on Ubuntu:) The following packages need to be installed on Ubuntu (via `apt`), if not already:\r +\r +```text\r +build-essential\r +wget\r +curl\r +unzip\r +cmake\r +mono-complete\r +python3-pip (or any environment with Python3, where pip is available)\r +python-is-python3 (needed for ubuntu, so that python points to python3)\r +```\r +\r +If all packages are installed (and the python environment is set up), the setup-script needs to be executed, which downloads needed dependencies and compiles the source-code located in the `bin`-folder:\r +\r +```shell\r +chmod +x compile_and_setup_depencies.sh # In case this file is not executable\r +./compile_and_setup_depencies.sh # Downloads dependencies, compiles the C++-implementation and sets all binaries in the bin-folder as executable\r +```\r +\r +If the script exits without errors, the provided workflows can be executed with the command `nextflow`.\r +\r +### Executing in Docker\r +\r +Alternatively, docker can be used. For this, please follow the [installation guide](https://docs.docker.com/engine/install/ubuntu/) for docker. After installing docker, a local docker-container can be build with all needed dependencies for the workflows. We provide a `Dockerfile` in the `docker`-folder. To build it, execute (while beeing with a shell in the root-folder of this repository) the following:\r +\r +```shell\r +docker build -t progfastagen:local . -f docker/Dockerfile\r +```\r +\r +This command builds a local docker container, tagging it with `progfastagen:local`, which can be later used by nextflow. To use it with nextflow, make sure that `nextflow` is installed on the host-system. For each of the workflow example calls below, the `-with-docker progfastagen:local` then needs to be appended, to let `nextflow` know to use the local docker-container.\r +\r +## Individual Steps\r +\r +Each step has been implemented in such a way, that it can be executed on its own. 
Each subsection below, provides a brief overview and an example call of the required parameters to demonstrate how the workflow can be called. If you are interested for all the available parameters within a workflow and want modify or tune them, then please refer to the source of the workflows, where each parameter is described briefly.\r +\r +### Converting RAW-files to MGF\r +\r +The workflow `convert_to_mgf.nf` is a wrapper around the ThermoRawFileParser and converts RAW-files to the MGF-format. The `ctm_raws` parameter needs to be set, in order to generate the MGF-files:\r +\r +```text\r +nextflow run convert_to_mgf.nf \\\r + --ctm_raws < Folder containing RAW-files > \\\r + --ctm_outdir < Output-Folder, where the MGFs should be stored >\r +```\r +\r +### Generating a Precursor-Specific-FASTA\r +\r +The workflow `create_precursor_specific_fasta.nf` generates a precursor-specific-FASTA-file, tailored to a set of MGF-files. Here, Protein-Graphs are generated, using the UniProt flat file format (which can be downloaded from [UniProt](https://www.uniprot.org/) by selecting `Text` as format) and a python script prepares the queries, by extracting the MS2-precursors from the MGF-files (using a tolerance, in ppm). Using the Protein-Graphs and a `DryRun`-Version of the traversal, the maximum-variant-limits are determined for each Protein-Graph (and mass-query-range) using a binary-search. 
These limits are then used for the actual ms2-specific-FASTA-generation in conjunction with the extracted MS2-precursors and a compacted FASTA is returned, which is tailored to the MGF-files.\r +\r +Despite the complexity, the workflow only requires the following parameters to generate such a FASTA:\r +\r +```text\r +nextflow run create_precursor_specific_fasta.nf \\\r + --cmf_mgf_files < Folder containing MGF-files > \\\r + --cmf_sp_embl_file < Path to a SP-EMBL-File (UniProt flat file format) > \\\r + --cmf_outdir \r +```\r +\r +The optional parameter: `cmf_pg_additional_params` is added to ProtGraph directly, allowing every parameter, ProtGraph provides to be set there (e.g. useful if the digestion should be changed or features/PTMs should be included/excluded, etc...), allowing arbitrary settings to generate Protein-Graphs if desired. It defaults to use all features, ProtGraph can parse.\r +\r +**Note regarding PTMs/Tolerance**: The FASTA is tailored to the MS2-precursors, therefore variable and fixed modifications need to be set to the same settings as for the actual identification. This workflow defaults to carbamidomethylation (C, fixed) and oxidation (M, variable). See ProtGraph (and the workflow-parameter `cmf_pg_additional_params`) to set the PTMs accordingly in the Protein-Graphs. The same applies for the MS2-precursor-tolerance which can be set with `cmf_query_ppm` and defaults to `5ppm`.\r +\r +**Note regarding Limits**: This workflow defaults to allow up to 5 seconds per query and limits peptides to contain at most 5 variants (with a maximum of 5000 Da per peptide), resulting into FASTA-files which can be 15-200GB large (depending on dataset and species). Changing these settings can drastically increase/decrease the runtime/memory usage/disk usage. 
We advise to change those settings slightly and to pay attention on the runtime/memory usage/disk usage if run with the newly set limits (and dataset + species) the first time.\r +\r +**Note regarding identification**: If digestion is enabled (default is `Trypsin`), the resulting FASTA contains already digested entries, thus searching with a search-engine, the digestion should be set to `off/no_cut`.\r +\r +### Generating a Global-FASTA\r +\r +This workflow generates a so called global-FASTA, using ProtGraph, the UniProt flat file format and some global limits for writing out peptides/proteins. Global-FASTAs can be generated with the `create_global_fasta.nf`-workflow. To generate a global-FASTA, only a path to a single SP-EMBL-file (UniProt flat file format) is required. Such a file can be downloaded from [UniProt](https://www.uniprot.org/) directly, by selecting `Text` instead of `FASTA` as the download format.\r +\r +```text\r +nextflow run create_global_fasta.nf \\\r + --cgf_sp_embl_file < Path to a SP-EMBL-File (UniProt flat file format) > \\\r + --cgf_outdir < The output-folder, where the gloabl-FASTA and some Protein-Graph-statistics should be saved >\r +```\r +\r +Per default, this workflow does not export feature-peptides and is set to only export peptides with up to 5000 Da mass and maximum of two miscleavages. It is possible to generate global-FASTA with some specific features (like containing, `SIGNAL`, `PEPTIDE` or others) and other limits. The parameters `cgf_features_in_graphs` and `cgf_peptide_limits` can be set accordingly. These are added to ProtGraph directly, hence every parameter ProtGraph provides, can be set here (including different digestion settings).\r +\r +**Note**: A dry run with ProtGraph to generate statistics how many peptide would be theoretically exported is advised prior for testing. Some Protein-Graphs with some features (e.g. 
P53 using variants) can contain too many peptides, which could result in very long runtimes and huge FASTAs.\r +\r +**Note regarding identification**: If digestion is enabled (default is `Trypsin`), the resulting FASTA contains already digested entries, thus searching with a search-engine, the digestion should be set to `off/no_cut`.\r +\r +### Identification via Comet (and Percolator)\r +\r +We provide an identification workflow to showcase, that the generated FASTAs can be used with search-engines. The workflow `identification_via_comet.nf` identifies MGF-files individually, using custom search-settings for Comet (and if desired rescores the results with Percolator), applies an FDR-cutoff using the q-value (for each file) and exposes the identification results into an output-folder.\r +\r +Three parameters are required, to execute the workflow:\r +\r +1. The MGFs which should be identified\r +2. The Comet-Parameter file to set the search-settings\r +3. The FASTA-file which should be used for identification\r +\r +Below is an example call with all required parameters (Percolator is enabled by default):\r +\r +```text\r +nextflow run identification_via_comet.nf \\\r + --idc_mgf_folder < Folder containing MGF-files > \\\r + --idc_fasta_file < The FASTA which should be used for identification > \\\r + --idc_search_parameter_file < The Comet-Parameters file (Search Configuration) > \\\r + --idc_outdir < Output-Folder where the results of the identification files are stored >\r +```\r +\r +Here is another example call with all required parameters (this time, turning Percolator off):\r +\r +```text\r +nextflow run identification_via_comet.nf \\\r + --idc_mgf_folder < Folder containing MGF-files > \\\r + --idc_fasta_file < The FASTA which should be used for identification > \\\r + --idc_search_parameter_file < The Comet-Parameters file (Search Configuration) > \\\r + --idc_outdir < Output-Folder where the results of the identification files are stored > \\\r + 
--idc_use_percolator 0\r +```\r +\r +**Note**: This identification-workflow defaults to an FDR-cutoff (q-value) of `--idc_fdr "0.01"`, reporting only 1% filtered PSMs. Arbitrary and multiple FDR-cutoffs can be set and can be changed to the desired value.\r +\r +### Summarization of results\r +\r +The `summarize_ident_results.nf`-workflow genereates convenient summarization of the identification results. Here, the identification-results are binned into 4 groups:\r +\r +1. Unique PSMs (a match, which can only originate from one protein)\r +2. Shared PSMs (a match, which can originate from multiple proteins)\r +3. Unique Feature PSMs (as 1., but only containing peptides, which can be explained by a features)\r +4. Shared Feature PSMs (as 2., but only can be explained by features from all originating proteins)\r +\r +Furthermore, heatmaps are generated to provide an overview of found peptides across all MGFs/RAW-files.\r +\r +To call this method, a `glob` needs to be specified in this workflow:\r +\r +```text\r +nextflow run summarize_ident_results.nf \\\r + --sir_identified_files_glob < The glob matching the desired output from the identification results >\r + --sir_outdir < The output directory where the summarized results should be saved >\r +```\r +\r +In case, the identification workflow was executed using an FDR of 0.01, you could use the following `glob`:\r +\r +```text\r +nextflow run summarize_ident_results.nf \\\r + --sir_identified_files_glob "/*qvalue_no_decoys_fdr_0.01.tsv"\r + --sir_outdir < The output directory where the summarized results should be saved >\r +```\r +\r +**Note**: This step can be used only if specific columns are present in the tables. Furthermore, it distinguishes between the identification results from a FASTA by UniProt or by ProtGraph. 
The additional parameters control, whether to bin results in group 3 and 4, decide if variable modifications should be considered as unique, as well as if a peptide, which originates multiple times to the same protein should be considered as unique. The main-workflows set these parameters accordingly and can be used as an example.\r +\r +## Main Workflow Scripts\r +\r +Each individual step described above, is also imported and chained into three main-workflows:\r +\r +1. `main_workflow_protein_fasta.nf` (UniProt-FASTA-search)\r +2. `main_workflow_global_fasta.nf` (Generation of a global-FASTA and search)\r +3. `main_workflow_precursor_specific_fasta.nf` (Generation of a precursor-specific-FASTA and search)\r +\r +generating summarized identification results across multiple RAW-files.\r +\r +In each of these workflows, it is possible to modify the parameters of the imported subworkflows, by using the imported subworkflows parameters directly (as shown in the **Individual Steps** above).\r +\r +For protein-FASTA identification, only three parameters are required:\r +\r +```text\r +nextflow run main_workflow_protein_fasta.nf \\\r + --main_fasta_file < The FASTA-file, to be used for identification > \\\r + --main_raw_files_folder < The folder containing RAW-files > \\\r + --main_comet_params < The parameters file for comet (for identification) > \\\r + --main_outdir < Output-Folder where all the results from the workflows should be saved >\r +```\r +\r +This is also true for the other two workflows, where instead of a FASTA-file, the UniProt flat file format needs to be provided. 
Such a file can be downloaded from [UniProt](https://www.uniprot.org/) directly, by selecting the format `Text` instead of the format `FASTA`.\r +\r +Here are the correpsonding calls for global-FASTA and precurosr-specific-FASTA generation and identification:\r +\r +```text\r +# global-FASTA\r +nextflow run main_workflow_global_fasta.nf \\\r + --main_sp_embl_file < The SP-EMBL-file used for Protein-Graph- and FASTA-generation (UniProt flat file format) > \\\r + --main_raw_files_folder < The folder containing RAW-files > \\\r + --main_comet_params< The parameters file for comet (for identification) > \\\r + --main_outdir < Output-Folder where all the results from the workflows should be saved >\r +\r +# precursor-specific-FASTA\r +nextflow run main_workflow_precursor_specific_fasta.nf \\\r + --main_sp_embl_file < The SP-EMBL-file used for Protein-Graph- and FASTA-generation (UniProt flat file format) > \\\r + --main_raw_files_folder < The folder containing RAW-files > \\\r + --main_comet_params < The parameters file for comet (for identification) > \\\r + --main_outdir < Output-Folder where all the results from the workflows should be saved >\r +```\r +\r +**Note**: Only defining the required parameters, uses the default parameters for every other setting. For all workflows, this would mean, that the FDR-cutoff (q-value) is set to `0.01` resulting into both FDRs considered. Furthermore, the global-FASTA and precursor-specific-FASTA workflows assume Trypsin digestion. For the global-FASTA-workflow, no features are exported by default, which may not be desired, if someone whishes to search for peptide-features (like `SIGNAL`, etc..). For the precursor-specific-FASTA-workflow, the PTMs carbamidomethylation (C, fixed) and oxidation (M, variable) are assumed, which may need to be modified.\r +\r +**Note regarding example calls**: Further below you can find the calls as used in the publication. 
These set the most minimal parameters for a correct execution on custom datasets and can be used as an example.\r +\r +## Regenerate Results from Publication\r +\r +In this subsection you can find the nextflow-calls which were used to execute the 3 workflows. Executing this with the same UniProt flat file/FASTA-file should yield the similar/same results. For generated precursor-specific-FASTAs it may happen, that these are generated with slightly different variant-limits, therefore a slightly different FASTA to search with and slightly different identification results.\r +\r +The FASTA/UniProt flat file used for identification can be found [here](https://cloud.mpc.rub.de/s/LJ2bgGNmsxzSaod). The Comet configuration files are provided in the `example_configuration`-folder. The datasets can be retrieved from [PRIDE](https://www.ebi.ac.uk/pride/).\r +\r +### PXD002171\r +\r +```shell\r +# PXD002171 Precursor-Specific\r +nextflow run main_workflow_precursor_specific_fasta.nf \\\r + -with-report "PXD002171_results_precursor_specific/nextflow_report.html" \\\r + -with-timeline "PXD002171_results_precursor_specific/nextflow_timeline.html" \\\r + --main_sp_embl_file 20230619_homo_sapiens_proteome.txt \\\r + --main_raw_files_folder PXD002171 \\\r + --main_comet_params example_configurations/PXD002171_no_dig.txt \\\r + --main_outdir PXD002171_results_precursor_specific \\\r + --cmf_max_precursor_da 5000 \\\r + --cmf_query_ppm 5 \\\r + --cmf_timeout_for_single_query 5 \\\r + --cmf_maximum_variant_limit 5 \\\r + --cmf_pg_additional_params "-ft VARIANT -ft SIGNAL -ft INIT_MET -ft CONFLICT -ft VAR_SEQ -ft PEPTIDE -ft PROPEP -ft CHAIN -vm 'M:15.994915' -vm 'C:71.037114'" \\\r + --idc_fdr "0.01"\r + \r +# PXD002171 Global digested FASTA\r +nextflow run main_workflow_global_fasta.nf \\\r + -with-report "PXD002171_global_fasta/nextflow_report.html" \\\r + -with-timeline "PXD002171_global_fasta/nextflow_timeline.html" \\\r + --main_sp_embl_file 20230619_homo_sapiens_proteome.txt \\\r 
+ --main_raw_files_folder PXD002171 \\\r + --main_comet_params example_configurations/PXD002171_no_dig.txt \\\r + --main_outdir PXD002171_global_fasta \\\r + --cgf_features_in_graphs "-ft None" \\\r + --cgf_peptide_limits "--pep_miscleavages 2 --pep_min_pep_length 5" \\\r + --idc_fdr "0.01"\r +\r +# PXD002171 Protein FASTA\r +nextflow run main_workflow_protein_fasta.nf \\\r + -with-report "PXD002171_protein_fasta/nextflow_report.html" \\\r + -with-timeline "PXD002171_protein_fasta/nextflow_timeline.html" \\\r + --main_fasta_file 20230619_homo_sapiens_proteome.fasta \\\r + --main_raw_files_folder PXD002171 \\\r + --main_comet_params example_configurations/PXD002171_trypsin_dig.txt \\\r + --main_outdir PXD002171_protein_fasta \\\r + --idc_fdr "0.01"\r +```\r +\r +### PXD028605\r +\r +```shell\r +# PXD028605 Precursor-Specific\r +nextflow run main_workflow_precursor_specific_fasta.nf \\\r + -with-report "PXD028605_results_precursor_specific/nextflow_report.html" \\\r + -with-timeline "PXD028605_results_precursor_specific/nextflow_timeline.html" \\\r + --main_sp_embl_file 20230619_homo_sapiens_proteome.txt \\\r + --main_raw_files_folder PXD028605 \\\r + --main_comet_params example_configurations/PXD028605_no_dig.txt \\\r + --main_outdir PXD028605_results_precursor_specific \\\r + --cmf_max_precursor_da 5000 \\\r + --cmf_query_ppm 20 \\\r + --cmf_timeout_for_single_query 5 \\\r + --cmf_maximum_variant_limit 5 \\\r + --cmf_pg_additional_params "-ft VARIANT -ft SIGNAL -ft INIT_MET -ft CONFLICT -ft VAR_SEQ -ft PEPTIDE -ft PROPEP -ft CHAIN -fm 'C:57.021464' -vm 'M:15.9949'" \\\r + --idc_fdr "0.01"\r +\r +# PXD028605 Global digested FASTA\r +nextflow run main_workflow_global_fasta.nf \\\r + -with-report "PXD028605_global_fasta/nextflow_report.html" \\\r + -with-timeline "PXD028605_global_fasta/nextflow_timeline.html" \\\r + --main_sp_embl_file 20230619_homo_sapiens_proteome.txt \\\r + --main_raw_files_folder PXD028605 \\\r + --main_comet_params 
example_configurations/PXD028605_no_dig.txt \\\r + --main_outdir PXD028605_global_fasta \\\r + --cgf_features_in_graphs "-ft None" \\\r + --cgf_peptide_limits "--pep_miscleavages 2 --pep_min_pep_length 5" \\\r + --idc_fdr "0.01"\r +\r +# PXD028605 Protein FASTA\r +nextflow run main_workflow_protein_fasta.nf \\\r + -with-report "PXD028605_protein_fasta/nextflow_report.html" \\\r + -with-timeline "PXD028605_protein_fasta/nextflow_timeline.html" \\\r + --main_fasta_file 20230619_homo_sapiens_proteome.fasta \\\r + --main_raw_files_folder PXD028605 \\\r + --main_comet_params example_configurations/PXD028605_trypsin_dig.txt \\\r + --main_outdir PXD028605_protein_fasta \\\r + --idc_fdr "0.01"\r +```\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.837.1" ; + schema1:isBasedOn "https://github.com/mpc-bioinformatics/ProGFASTAGen" ; + schema1:license "BSD-3-Clause" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ProGFASTAGen - Protein-Graph FASTA Generation (and Identification) Workflows" ; + schema1:sdDatePublished "2024-08-05 10:24:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/837/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2815 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-26T09:54:07Z" ; + schema1:dateModified "2024-04-26T09:54:51Z" ; + schema1:description """# ProGFASTAGen\r +\r +The ProGFASTAGen (**Pro**tein-**G**raph-**FASTA**-**Gen**erator or **Pro**t**G**raph-**FASTA**-**Gen**erator) repository contains workflows to generate so-called precursor-specific-FASTAs (using the precursors from MGF-files) including feature-peptides, like VARIANTs or CONFLICTs if desired, or global-FASTAs (as described in [ProtGraph](https://github.com/mpc-bioinformatics/ProtGraph)). 
The single workflow scripts have been implemented with [Nextflow-DSL-2](https://www.nextflow.io/docs/latest/dsl2.html) and are independent to each other. Each of these workflows can be used on their own or can be imported to other workflows for other use-cases. Further, we included three main-workflows, to show how the single workflows can be chained together. The `main_workflow_protein_fasta.nf`-workflow converts Thermo-RAW-files into MGF, searches with Comet (and Percolator) and the identification results are then further summarized. The workflows `main_workflow_global_fasta.nf` and `main_workflow_precursor_specific_fasta.nf` generate specific FASTA-files before search-engine-identification. Below are example nextflow-calls, which can be used.\r +\r +Regarding the precursor-specific-FASTA-generation: The source-code of the C++ implementation for traversal can be found in `bin`. There, four implementations are present: `Float/Int`-Versions as well as `DryRun/VarLimitter`-Versions of the traversal. The `Float/Int`-Versions can be faster/slower depending on th processor-architecture and can be used via a flag in the `create_precursor_specific_fasta.nf`-workflow. The `DryRun`-Version does not generate a FASTA but tests the used system (depending on a query-timeout) to determine the maximum number of variants which can be used, while not timing out. The actual FASTA-generation happens in the `VarLimitter`-Version using the generated protein-graphs at hand.\r +\r +in **Prerequisites** a small description of dependencies and how to set up the host system is given. **Individual steps** describes the single workflows and how they can be called, while **Main Workflow Scripts** shows example-calls of the main workflows. In **Regenerate Results from Publication**, the calls and parameters are shown, which were used in the publication. 
Using the same FASTA or UniProt flat file format with a similar server-setting should yield similar results as used in the publication.\r +\r +## Prerequisites\r +\r +### Executing on Linux\r +\r +This workflow can be only executed on linux (tested on Ubuntu 22.04 and ArchLinux). Before setting up the `bin`-folder, some required binaries need to be present on the OS. (Focusing on Ubuntu:) The following packages need to be installed on Ubuntu (via `apt`), if not already:\r +\r +```text\r +build-essential\r +wget\r +curl\r +unzip\r +cmake\r +mono-complete\r +python3-pip (or any environment with Python3, where pip is available)\r +python-is-python3 (needed for ubuntu, so that python points to python3)\r +```\r +\r +If all packages are installed (and the python environment is set up), the setup-script needs to be executed, which downloads needed dependencies and compiles the source-code located in the `bin`-folder:\r +\r +```shell\r +chmod +x compile_and_setup_depencies.sh # In case this file is not executable\r +./compile_and_setup_depencies.sh # Downloads dependencies, compiles the C++-implementation and sets all binaries in the bin-folder as executable\r +```\r +\r +If the script exits without errors, the provided workflows can be executed with the command `nextflow`.\r +\r +### Executing in Docker\r +\r +Alternatively, docker can be used. For this, please follow the [installation guide](https://docs.docker.com/engine/install/ubuntu/) for docker. After installing docker, a local docker-container can be built with all needed dependencies for the workflows. We provide a `Dockerfile` in the `docker`-folder. To build it, execute (while being with a shell in the root-folder of this repository) the following:\r +\r +```shell\r +docker build -t progfastagen:local . -f docker/Dockerfile\r +```\r +\r +This command builds a local docker container, tagging it with `progfastagen:local`, which can be later used by nextflow. 
To use it with nextflow, make sure that `nextflow` is installed on the host-system. For each of the workflow example calls below, the `-with-docker progfastagen:local` then needs to be appended, to let `nextflow` know to use the local docker-container.\r +\r +## Individual Steps\r +\r +Each step has been implemented in such a way, that it can be executed on its own. Each subsection below, provides a brief overview and an example call of the required parameters to demonstrate how the workflow can be called. If you are interested for all the available parameters within a workflow and want modify or tune them, then please refer to the source of the workflows, where each parameter is described briefly.\r +\r +### Converting RAW-files to MGF\r +\r +The workflow `convert_to_mgf.nf` is a wrapper around the ThermoRawFileParser and converts RAW-files to the MGF-format. The `ctm_raws` parameter needs to be set, in order to generate the MGF-files:\r +\r +```text\r +nextflow run convert_to_mgf.nf \\\r + --ctm_raws < Folder containing RAW-files > \\\r + --ctm_outdir < Output-Folder, where the MGFs should be stored >\r +```\r +\r +### Generating a Precursor-Specific-FASTA\r +\r +The workflow `create_precursor_specific_fasta.nf` generates a precursor-specific-FASTA-file, tailored to a set of MGF-files. Here, Protein-Graphs are generated, using the UniProt flat file format (which can be downloaded from [UniProt](https://www.uniprot.org/) by selecting `Text` as format) and a python script prepares the queries, by extracting the MS2-precursors from the MGF-files (using a tolerance, in ppm). Using the Protein-Graphs and a `DryRun`-Version of the traversal, the maximum-variant-limits are determined for each Protein-Graph (and mass-query-range) using a binary-search. 
These limits are then used for the actual ms2-specific-FASTA-generation in conjunction with the extracted MS2-precursors and a compacted FASTA is returned, which is tailored to the MGF-files.\r +\r +Despite the complexity, the workflow only requires the following parameters to generate such a FASTA:\r +\r +```text\r +nextflow run create_precursor_specific_fasta.nf \\\r + --cmf_mgf_files < Folder containing MGF-files > \\\r + --cmf_sp_embl_file < Path to a SP-EMBL-File (UniProt flat file format) > \\\r + --cmf_outdir \r +```\r +\r +The optional parameter: `cmf_pg_additional_params` is added to ProtGraph directly, allowing every parameter, ProtGraph provides to be set there (e.g. useful if the digestion should be changed or features/PTMs should be included/excluded, etc...), allowing arbitrary settings to generate Protein-Graphs if desired. It defaults to use all features, ProtGraph can parse.\r +\r +**Note regarding PTMs/Tolerance**: The FASTA is tailored to the MS2-precursors, therefore variable and fixed modifications need to be set to the same settings as for the actual identification. This workflow defaults to carbamidomethylation (C, fixed) and oxidation (M, variable). See ProtGraph (and the workflow-parameter `cmf_pg_additional_params`) to set the PTMs accordingly in the Protein-Graphs. The same applies for the MS2-precursor-tolerance which can be set with `cmf_query_ppm` and defaults to `5ppm`.\r +\r +**Note regarding Limits**: This workflow defaults to allow up to 5 seconds per query and limits peptides to contain at most 5 variants (with a maximum of 5000 Da per peptide), resulting into FASTA-files which can be 15-200GB large (depending on dataset and species). Changing these settings can drastically increase/decrease the runtime/memory usage/disk usage. 
We advise to change those settings slightly and to pay attention to the runtime/memory usage/disk usage if run with the newly set limits (and dataset + species) the first time.\r +\r +**Note regarding identification**: If digestion is enabled (default is `Trypsin`), the resulting FASTA contains already digested entries, thus searching with a search-engine, the digestion should be set to `off/no_cut`.\r +\r +### Generating a Global-FASTA\r +\r +This workflow generates a so called global-FASTA, using ProtGraph, the UniProt flat file format and some global limits for writing out peptides/proteins. Global-FASTAs can be generated with the `create_global_fasta.nf`-workflow. To generate a global-FASTA, only a path to a single SP-EMBL-file (UniProt flat file format) is required. Such a file can be downloaded from [UniProt](https://www.uniprot.org/) directly, by selecting `Text` instead of `FASTA` as the download format.\r +\r +```text\r +nextflow run create_global_fasta.nf \\\r + --cgf_sp_embl_file < Path to a SP-EMBL-File (UniProt flat file format) > \\\r + --cgf_outdir < The output-folder, where the global-FASTA and some Protein-Graph-statistics should be saved >\r +```\r +\r +Per default, this workflow does not export feature-peptides and is set to only export peptides with up to 5000 Da mass and maximum of two miscleavages. It is possible to generate global-FASTA with some specific features (like containing, `SIGNAL`, `PEPTIDE` or others) and other limits. The parameters `cgf_features_in_graphs` and `cgf_peptide_limits` can be set accordingly. These are added to ProtGraph directly, hence every parameter ProtGraph provides, can be set here (including different digestion settings).\r +\r +**Note**: A dry run with ProtGraph to generate statistics how many peptides would be theoretically exported is advised prior to testing. Some Protein-Graphs with some features (e.g. 
P53 using variants) can contain too many peptides, which could result in very long runtimes and huge FASTAs.\r +\r +**Note regarding identification**: If digestion is enabled (default is `Trypsin`), the resulting FASTA contains already digested entries, thus searching with a search-engine, the digestion should be set to `off/no_cut`.\r +\r +### Identification via Comet (and Percolator)\r +\r +We provide an identification workflow to showcase, that the generated FASTAs can be used with search-engines. The workflow `identification_via_comet.nf` identifies MGF-files individually, using custom search-settings for Comet (and if desired rescores the results with Percolator), applies an FDR-cutoff using the q-value (for each file) and exposes the identification results into an output-folder.\r +\r +Three parameters are required, to execute the workflow:\r +\r +1. The MGFs which should be identified\r +2. The Comet-Parameter file to set the search-settings\r +3. The FASTA-file which should be used for identification\r +\r +Below is an example call with all required parameters (Percolator is enabled by default):\r +\r +```text\r +nextflow run identification_via_comet.nf \\\r + --idc_mgf_folder < Folder containing MGF-files > \\\r + --idc_fasta_file < The FASTA which should be used for identification > \\\r + --idc_search_parameter_file < The Comet-Parameters file (Search Configuration) > \\\r + --idc_outdir < Output-Folder where the results of the identification files are stored >\r +```\r +\r +Here is another example call with all required parameters (this time, turning Percolator off):\r +\r +```text\r +nextflow run identification_via_comet.nf \\\r + --idc_mgf_folder < Folder containing MGF-files > \\\r + --idc_fasta_file < The FASTA which should be used for identification > \\\r + --idc_search_parameter_file < The Comet-Parameters file (Search Configuration) > \\\r + --idc_outdir < Output-Folder where the results of the identification files are stored > \\\r + 
--idc_use_percolator 0\r +```\r +\r +**Note**: This identification-workflow defaults to an FDR-cutoff (q-value) of `--idc_fdr "0.01"`, reporting only 1% filtered PSMs. Arbitrary and multiple FDR-cutoffs can be set and can be changed to the desired value.\r +\r +### Summarization of results\r +\r +The `summarize_ident_results.nf`-workflow genereates convenient summarization of the identification results. Here, the identification-results are binned into 4 groups:\r +\r +1. Unique PSMs (a match, which can only originate from one protein)\r +2. Shared PSMs (a match, which can originate from multiple proteins)\r +3. Unique Feature PSMs (as 1., but only containing peptides, which can be explained by a features)\r +4. Shared Feature PSMs (as 2., but only can be explained by features from all originating proteins)\r +\r +Furthermore, heatmaps are generated to provide an overview of found peptides across all MGFs/RAW-files.\r +\r +To call this method, a `glob` needs to be specified in this workflow:\r +\r +```text\r +nextflow run summarize_ident_results.nf \\\r + --sir_identified_files_glob < The glob matching the desired output from the identification results >\r + --sir_outdir < The output directory where the summarized results should be saved >\r +```\r +\r +In case, the identification workflow was executed using an FDR of 0.01, you could use the following `glob`:\r +\r +```text\r +nextflow run summarize_ident_results.nf \\\r + --sir_identified_files_glob "/*qvalue_no_decoys_fdr_0.01.tsv"\r + --sir_outdir < The output directory where the summarized results should be saved >\r +```\r +\r +**Note**: This step can be used only if specific columns are present in the tables. Furthermore, it distinguishes between the identification results from a FASTA by UniProt or by ProtGraph. 
The additional parameters control, whether to bin results in group 3 and 4, decide if variable modifications should be considered as unique, as well as if a peptide, which originates multiple times to the same protein should be considered as unique. The main-workflows set these parameters accordingly and can be used as an example.\r +\r +## Main Workflow Scripts\r +\r +Each individual step described above, is also imported and chained into three main-workflows:\r +\r +1. `main_workflow_protein_fasta.nf` (UniProt-FASTA-search)\r +2. `main_workflow_global_fasta.nf` (Generation of a global-FASTA and search)\r +3. `main_workflow_precursor_specific_fasta.nf` (Generation of a precursor-specific-FASTA and search)\r +\r +generating summarized identification results across multiple RAW-files.\r +\r +In each of these workflows, it is possible to modify the parameters of the imported subworkflows, by using the imported subworkflows parameters directly (as shown in the **Individual Steps** above).\r +\r +For protein-FASTA identification, only three parameters are required:\r +\r +```text\r +nextflow run main_workflow_protein_fasta.nf \\\r + --main_fasta_file < The FASTA-file, to be used for identification > \\\r + --main_raw_files_folder < The folder containing RAW-files > \\\r + --main_comet_params < The parameters file for comet (for identification) > \\\r + --main_outdir < Output-Folder where all the results from the workflows should be saved >\r +```\r +\r +This is also true for the other two workflows, where instead of a FASTA-file, the UniProt flat file format needs to be provided. 
Such a file can be downloaded from [UniProt](https://www.uniprot.org/) directly, by selecting the format `Text` instead of the format `FASTA`.\r +\r +Here are the corresponding calls for global-FASTA and precursor-specific-FASTA generation and identification:\r +\r +```text\r +# global-FASTA\r +nextflow run main_workflow_global_fasta.nf \\\r + --main_sp_embl_file < The SP-EMBL-file used for Protein-Graph- and FASTA-generation (UniProt flat file format) > \\\r + --main_raw_files_folder < The folder containing RAW-files > \\\r + --main_comet_params < The parameters file for comet (for identification) > \\\r + --main_outdir < Output-Folder where all the results from the workflows should be saved >\r +\r +# precursor-specific-FASTA\r +nextflow run main_workflow_precursor_specific_fasta.nf \\\r + --main_sp_embl_file < The SP-EMBL-file used for Protein-Graph- and FASTA-generation (UniProt flat file format) > \\\r + --main_raw_files_folder < The folder containing RAW-files > \\\r + --main_comet_params < The parameters file for comet (for identification) > \\\r + --main_outdir < Output-Folder where all the results from the workflows should be saved >\r +```\r +\r +**Note**: Only defining the required parameters, uses the default parameters for every other setting. For all workflows, this would mean, that the FDR-cutoff (q-value) is set to `0.01` resulting into both FDRs considered. Furthermore, the global-FASTA and precursor-specific-FASTA workflows assume Trypsin digestion. For the global-FASTA-workflow, no features are exported by default, which may not be desired, if someone wishes to search for peptide-features (like `SIGNAL`, etc..). For the precursor-specific-FASTA-workflow, the PTMs carbamidomethylation (C, fixed) and oxidation (M, variable) are assumed, which may need to be modified.\r +\r +**Note regarding example calls**: Further below you can find the calls as used in the publication. 
These set the most minimal parameters for a correct execution on custom datasets and can be used as an example.\r +\r +## Regenerate Results from Publication\r +\r +In this subsection you can find the nextflow-calls which were used to execute the 3 workflows. Executing this with the same UniProt flat file/FASTA-file should yield the similar/same results. For generated precursor-specific-FASTAs it may happen, that these are generated with slightly different variant-limits, therefore a slightly different FASTA to search with and slightly different identification results.\r +\r +The FASTA/UniProt flat file used for identification can be found [here](https://cloud.mpc.rub.de/s/LJ2bgGNmsxzSaod). The Comet configuration files are provided in the `example_configuration`-folder. The datasets can be retrieved from [PRIDE](https://www.ebi.ac.uk/pride/).\r +\r +### PXD002171\r +\r +```shell\r +# PXD002171 Precursor-Specific\r +nextflow run main_workflow_precursor_specific_fasta.nf \\\r + -with-report "PXD002171_results_precursor_specific/nextflow_report.html" \\\r + -with-timeline "PXD002171_results_precursor_specific/nextflow_timeline.html" \\\r + --main_sp_embl_file 20230619_homo_sapiens_proteome.txt \\\r + --main_raw_files_folder PXD002171 \\\r + --main_comet_params example_configurations/PXD002171_no_dig.txt \\\r + --main_outdir PXD002171_results_precursor_specific \\\r + --cmf_max_precursor_da 5000 \\\r + --cmf_query_ppm 5 \\\r + --cmf_timeout_for_single_query 5 \\\r + --cmf_maximum_variant_limit 5 \\\r + --cmf_pg_additional_params "-ft VARIANT -ft SIGNAL -ft INIT_MET -ft CONFLICT -ft VAR_SEQ -ft PEPTIDE -ft PROPEP -ft CHAIN -vm 'M:15.994915' -vm 'C:71.037114'" \\\r + --idc_fdr "0.01"\r + \r +# PXD002171 Global digested FASTA\r +nextflow run main_workflow_global_fasta.nf \\\r + -with-report "PXD002171_global_fasta/nextflow_report.html" \\\r + -with-timeline "PXD002171_global_fasta/nextflow_timeline.html" \\\r + --main_sp_embl_file 20230619_homo_sapiens_proteome.txt \\\r 
+ --main_raw_files_folder PXD002171 \\\r + --main_comet_params example_configurations/PXD002171_no_dig.txt \\\r + --main_outdir PXD002171_global_fasta \\\r + --cgf_features_in_graphs "-ft None" \\\r + --cgf_peptide_limits "--pep_miscleavages 2 --pep_min_pep_length 5" \\\r + --idc_fdr "0.01"\r +\r +# PXD002171 Protein FASTA\r +nextflow run main_workflow_protein_fasta.nf \\\r + -with-report "PXD002171_protein_fasta/nextflow_report.html" \\\r + -with-timeline "PXD002171_protein_fasta/nextflow_timeline.html" \\\r + --main_fasta_file 20230619_homo_sapiens_proteome.fasta \\\r + --main_raw_files_folder PXD002171 \\\r + --main_comet_params example_configurations/PXD002171_trypsin_dig.txt \\\r + --main_outdir PXD002171_protein_fasta \\\r + --idc_fdr "0.01"\r +```\r +\r +### PXD028605\r +\r +```shell\r +# PXD028605 Precursor-Specific\r +nextflow run main_workflow_precursor_specific_fasta.nf \\\r + -with-report "PXD028605_results_precursor_specific/nextflow_report.html" \\\r + -with-timeline "PXD028605_results_precursor_specific/nextflow_timeline.html" \\\r + --main_sp_embl_file 20230619_homo_sapiens_proteome.txt \\\r + --main_raw_files_folder PXD028605 \\\r + --main_comet_params example_configurations/PXD028605_no_dig.txt \\\r + --main_outdir PXD028605_results_precursor_specific \\\r + --cmf_max_precursor_da 5000 \\\r + --cmf_query_ppm 20 \\\r + --cmf_timeout_for_single_query 5 \\\r + --cmf_maximum_variant_limit 5 \\\r + --cmf_pg_additional_params "-ft VARIANT -ft SIGNAL -ft INIT_MET -ft CONFLICT -ft VAR_SEQ -ft PEPTIDE -ft PROPEP -ft CHAIN -fm 'C:57.021464' -vm 'M:15.9949'" \\\r + --idc_fdr "0.01"\r +\r +# PXD028605 Global digested FASTA\r +nextflow run main_workflow_global_fasta.nf \\\r + -with-report "PXD028605_global_fasta/nextflow_report.html" \\\r + -with-timeline "PXD028605_global_fasta/nextflow_timeline.html" \\\r + --main_sp_embl_file 20230619_homo_sapiens_proteome.txt \\\r + --main_raw_files_folder PXD028605 \\\r + --main_comet_params 
example_configurations/PXD028605_no_dig.txt \\\r + --main_outdir PXD028605_global_fasta \\\r + --cgf_features_in_graphs "-ft None" \\\r + --cgf_peptide_limits "--pep_miscleavages 2 --pep_min_pep_length 5" \\\r + --idc_fdr "0.01"\r +\r +# PXD028605 Protein FASTA\r +nextflow run main_workflow_protein_fasta.nf \\\r + -with-report "PXD028605_protein_fasta/nextflow_report.html" \\\r + -with-timeline "PXD028605_protein_fasta/nextflow_timeline.html" \\\r + --main_fasta_file 20230619_homo_sapiens_proteome.fasta \\\r + --main_raw_files_folder PXD028605 \\\r + --main_comet_params example_configurations/PXD028605_trypsin_dig.txt \\\r + --main_outdir PXD028605_protein_fasta \\\r + --idc_fdr "0.01"\r +```\r +""" ; + schema1:keywords "Bioinformatics, Proteomics" ; + schema1:license "https://spdx.org/licenses/BSD-3-Clause" ; + schema1:name "ProGFASTAGen - Protein-Graph FASTA Generation (and Identification) Workflows" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/837?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:description """### - deprecated - \r +\r +Workflow for sequencing with ONT Nanopore, from basecalling to assembly.\r + - Guppy (basecalling of raw reads)\r + - MinIONQC (quality check)\r + - FASTQ merging from multi into one file\r + - Kraken2 (taxonomic classification)\r + - Krona (classification visualization)\r + - Flye (de novo assembly)\r + - Medaka (assembly polishing)\r + - QUAST (assembly quality reports)\r +\r +**All tool CWL files and other workflows can be found here:**
\r + Tools: https://git.wur.nl/unlock/cwl/-/tree/master/cwl
\r + Workflows: https://git.wur.nl/unlock/cwl/-/tree/master/cwl/workflows
\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/253?version=1" ; + schema1:isBasedOn "https://git.wur.nl/unlock/cwl/-/raw/dev/cwl/workflows/workflow_nanopore.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Nanopore Guppy Basecalling Assembly Workflow" ; + schema1:sdDatePublished "2024-08-05 10:31:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/253/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 18286 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6073 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-01-06T07:36:38Z" ; + schema1:dateModified "2023-01-16T13:56:37Z" ; + schema1:description """### - deprecated - \r +\r +Workflow for sequencing with ONT Nanopore, from basecalling to assembly.\r + - Guppy (basecalling of raw reads)\r + - MinIONQC (quality check)\r + - FASTQ merging from multi into one file\r + - Kraken2 (taxonomic classification)\r + - Krona (classification visualization)\r + - Flye (de novo assembly)\r + - Medaka (assembly polishing)\r + - QUAST (assembly quality reports)\r +\r +**All tool CWL files and other workflows can be found here:**
\r + Tools: https://git.wur.nl/unlock/cwl/-/tree/master/cwl
\r + Workflows: https://git.wur.nl/unlock/cwl/-/tree/master/cwl/workflows
\r +\r +""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Nanopore Guppy Basecalling Assembly Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/253?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """PAIRED-END workflow. Align reads on fasta reference/assembly using bwa mem, get a consensus, variants, mutation explanations.\r +\r +IMPORTANT: \r +* For "bcftools call" consensus step, the --ploidy file is in "Données partagées" (Shared Data) and must be imported in your history to use the worflow by providing this file (tells bcftools to consider haploid variant calling). \r +* SELECT THE MOST ADAPTED VADR MODEL for annotation (see vadr parameters).\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/518?version=1" ; + schema1:isBasedOn "https://github.com/ANSES-Ploufragan/vvv2_display" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for VVV2_align_PE" ; + schema1:sdDatePublished "2024-08-05 10:27:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/518/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 31003 ; + schema1:creator ; + schema1:dateCreated "2023-06-28T09:52:35Z" ; + schema1:dateModified "2023-10-16T11:59:29Z" ; + schema1:description """PAIRED-END workflow. 
Align reads on fasta reference/assembly using bwa mem, get a consensus, variants, mutation explanations.\r +\r +IMPORTANT: \r +* For "bcftools call" consensus step, the --ploidy file is in "Données partagées" (Shared Data) and must be imported in your history to use the worflow by providing this file (tells bcftools to consider haploid variant calling). \r +* SELECT THE MOST ADAPTED VADR MODEL for annotation (see vadr parameters).\r +""" ; + schema1:image ; + schema1:keywords "paired-end, variant_calling, Annotation, Virus, Alignment, Bioinformatics, Galaxy, SNPs, covid-19, variant calling, workflow" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "VVV2_align_PE" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/518?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 236295 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "16S rRNA amplicon sequencing analysis workflow using QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-08-05 10:22:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4145 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:38Z" ; + schema1:dateModified "2024-06-11T12:54:38Z" ; + schema1:description "16S rRNA amplicon sequencing analysis workflow using QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """Combined workflow for large genome assembly\r +\r +The tutorial document for this workflow is here: https://doi.org/10.5281/zenodo.5655813\r +\r +\r +What it does: A workflow for genome assembly, containing subworkflows:\r +* Data QC\r +* Kmer counting\r +* Trim and filter reads\r +* Assembly with Flye\r +* Assembly polishing\r +* Assess genome quality\r +\r +Inputs: \r +* long reads and short reads in fastq format\r +* reference genome for Quast\r +\r +Outputs: \r +* Data information - QC, kmers\r +* Filtered, trimmed reads\r +* Genome assembly, assembly graph, stats\r +* Polished assembly, stats\r +* Quality metrics - Busco, Quast\r +\r +Options\r +* Omit some steps - e.g. Data QC and kmer counting\r +* Replace a module with one using a different tool - e.g. 
change assembly tool\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.230.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Combined workflows for large genome assembly" ; + schema1:sdDatePublished "2024-08-05 10:32:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/230/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 440166 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 191181 ; + schema1:creator ; + schema1:dateCreated "2021-11-08T06:08:25Z" ; + schema1:dateModified "2023-01-16T13:54:36Z" ; + schema1:description """Combined workflow for large genome assembly\r +\r +The tutorial document for this workflow is here: https://doi.org/10.5281/zenodo.5655813\r +\r +\r +What it does: A workflow for genome assembly, containing subworkflows:\r +* Data QC\r +* Kmer counting\r +* Trim and filter reads\r +* Assembly with Flye\r +* Assembly polishing\r +* Assess genome quality\r +\r +Inputs: \r +* long reads and short reads in fastq format\r +* reference genome for Quast\r +\r +Outputs: \r +* Data information - QC, kmers\r +* Filtered, trimmed reads\r +* Genome assembly, assembly graph, stats\r +* Polished assembly, stats\r +* Quality metrics - Busco, Quast\r +\r +Options\r +* Omit some steps - e.g. Data QC and kmer counting\r +* Replace a module with one using a different tool - e.g. 
change assembly tool\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "Large-genome-assembly" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Combined workflows for large genome assembly" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/230?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Genes and transcripts annotation with Isoseq using uLTRA and TAMA" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/993?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/isoseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/isoseq" ; + schema1:sdDatePublished "2024-08-05 10:23:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/993/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8438 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:58Z" ; + schema1:dateModified "2024-06-11T12:54:58Z" ; + schema1:description "Genes and transcripts annotation with Isoseq using uLTRA and TAMA" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/993?version=6" ; + schema1:keywords "isoseq, isoseq-3, rna, tama, ultra" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/isoseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/993?version=5" ; + schema1:version 5 . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-11-16T14:32:18.215710" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Scaffolding-HiC-VGP8/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """This workflow can be used to fit dose-response curves from normalised biochemical assay data (%Inhibition) using the HCS extension. This workflow needs R-Server to run in the back-end. Start R and run the following command: library(Rserve); Rserve(args = "--vanilla")\r +IC50 values will not be extrapolated outside the tested concentration range\r +For activity classification the following criteria are applied:\r +- maximum (average % inhibion) >25 % and slope is >0 and IC50 > 5 µM or\r +- minimum (average % inhibion) >75 %\r +Results are formatted for upload to the European Chemical Biology Database (ECBD)""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/389?version=1" ; + schema1:isBasedOn "https://hub.knime.com/fraunhoferitmp/spaces/Public/latest/Dose_Response_Biochemical/DRCfit_biochemical_ECBD~6NLZB5Jkgn6j5a6Y" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for DRC_biochemical_toECBD" ; + schema1:sdDatePublished "2024-08-05 10:31:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/389/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 49286 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10087717 ; + schema1:creator ; + schema1:dateCreated "2022-09-26T10:15:37Z" ; + schema1:dateModified "2023-01-16T14:02:33Z" ; + schema1:description """This workflow can be used to fit dose-response curves from normalised biochemical assay data (%Inhibition) using the HCS extension. This workflow needs R-Server to run in the back-end. Start R and run the following command: library(Rserve); Rserve(args = "--vanilla")\r +IC50 values will not be extrapolated outside the tested concentration range\r +For activity classification the following criteria are applied:\r +- maximum (average % inhibion) >25 % and slope is >0 and IC50 > 5 µM or\r +- minimum (average % inhibion) >75 %\r +Results are formatted for upload to the European Chemical Biology Database (ECBD)""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "DRC_biochemical_toECBD" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/389?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-08-05 10:23:16 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6470 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:14Z" ; + schema1:dateModified "2024-06-11T12:55:14Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """The Regulatory Mendelian Mutation (ReMM) score was created for relevance prediction of non-coding variations (SNVs and small InDels) in the human genome (GRCh37) in terms of Mendelian diseases. This project updates the ReMM score for the genome build GRCh38 and combines GRCh37 and GRCh38 into one workflow.\r +\r +## Pre-requirements\r +\r +### Conda\r +We use Conda as software and dependency management tool. Conda installation guidelines can be found here:\r +\r +https://conda.io/projects/conda/en/latest/user-guide/install/index.html\r +\r +### Additional programs\r +These programs are used during the workflow. 
They usually need to be compiled, however, the repository already contains the executables or generated files.\r +\r +- [AttributeDB](https://github.com/visze/attributedb)\r +- [Jannovar](https://github.com/charite/jannovar) \r +- [parSMURF](https://github.com/AnacletoLAB/parSMURF)\r +\r +### Snakemake\r +\r +The workflow is managed by Snakemake - a workflow management system used to create reproducible and scalable data analyses. To install Snakemake as well as all other required packages, you need to create a working environment according to the description in the file env/ReMM.yaml. For that, first\r +\r +Clone the repository\r +```\r +git clone https://github.com/kircherlab/ReMM\r +cd ReMM\r +```\r +\r +Create a working environment and activate it\r +\r +```\r +conda env create -n ReMM --file workflow/envs/ReMM.yaml\r +conda activate ReMM\r +```\r +\r +All paths are relative to the Snakemake file so you do not need to change any path variables. Additionally, Snakemake creates all missing directories, so no need to create any aditional folders either.\r +\r +## Workflow\r +\r +The workflow consists of four main parts:\r +\r +- Download of feature data\r +- Data processing and cleaning\r +- Model training and validation\r +- Calculation of ReMM for the whole genome\r +\r +The `workflow` folder contains a graph of the workflow and more detailed information on the most important steps.\r +\r +To launch a snakemake workflow, you need to tell snakemake which file you want to generate. We defined all rules for multiple steps. They can be found here: `workflow/Snakefile`. For example, you want to generate all feature sets defined in a config file you can run:\r +\r +```\r +snakemake -c1 all_feature_sets\r +```\r +\r +To execute any step separately (see `README.md` in the `workflow` folder for details on workflow steps), you need to look up the name of the desired output file in the scripts and call Snakemake with the exact name. 
Using a flag `-n`, you can initiate a 'dry run': Snakemake will check the consistency of all rules and files and show the number of steps. However, a clean dry run does not necessarily mean that no errors will occur during a normal run. ReMM score is not allele-specific so that you get only one score independent of the variant itself. The workflow from the download of data up to computing the scores may take several days or weeks depending on the computing power and internet connection.\r +\r +\r +### The config files\r +\r +The main config file can be found in `config/config.yaml`. This config file was used to generate the ReMM score. Here most of the configuration magic happens. There is a second config file `config/features.yaml` where all features are listed (with additional description). Config files are controled via [json-schema](http://json-schema.org). \r +\r +We also provide a slurm config file for runtimes, memory and number of threads per rule: `config/slurm.yaml`.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.414.1" ; + schema1:isBasedOn "https://github.com/kircherlab/ReMM.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ReMM score" ; + schema1:sdDatePublished "2024-08-05 10:31:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/414/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6006 ; + schema1:creator ; + schema1:dateCreated "2023-01-03T09:09:05Z" ; + schema1:dateModified "2023-01-16T14:04:58Z" ; + schema1:description """The Regulatory Mendelian Mutation (ReMM) score was created for relevance prediction of non-coding variations (SNVs and small InDels) in the human genome (GRCh37) in terms of Mendelian diseases. 
This project updates the ReMM score for the genome build GRCh38 and combines GRCh37 and GRCh38 into one workflow.\r +\r +## Pre-requirements\r +\r +### Conda\r +We use Conda as software and dependency management tool. Conda installation guidelines can be found here:\r +\r +https://conda.io/projects/conda/en/latest/user-guide/install/index.html\r +\r +### Additional programs\r +These programs are used during the workflow. They usually need to be compiled, however, the repository already contains the executables or generated files.\r +\r +- [AttributeDB](https://github.com/visze/attributedb)\r +- [Jannovar](https://github.com/charite/jannovar) \r +- [parSMURF](https://github.com/AnacletoLAB/parSMURF)\r +\r +### Snakemake\r +\r +The workflow is managed by Snakemake - a workflow management system used to create reproducible and scalable data analyses. To install Snakemake as well as all other required packages, you need to create a working environment according to the description in the file env/ReMM.yaml. For that, first\r +\r +Clone the repository\r +```\r +git clone https://github.com/kircherlab/ReMM\r +cd ReMM\r +```\r +\r +Create a working environment and activate it\r +\r +```\r +conda env create -n ReMM --file workflow/envs/ReMM.yaml\r +conda activate ReMM\r +```\r +\r +All paths are relative to the Snakemake file so you do not need to change any path variables. Additionally, Snakemake creates all missing directories, so no need to create any aditional folders either.\r +\r +## Workflow\r +\r +The workflow consists of four main parts:\r +\r +- Download of feature data\r +- Data processing and cleaning\r +- Model training and validation\r +- Calculation of ReMM for the whole genome\r +\r +The `workflow` folder contains a graph of the workflow and more detailed information on the most important steps.\r +\r +To launch a snakemake workflow, you need to tell snakemake which file you want to generate. We defined all rules for multiple steps. 
They can be found here: `workflow/Snakefile`. For example, you want to generate all feature sets defined in a config file you can run:\r +\r +```\r +snakemake -c1 all_feature_sets\r +```\r +\r +To execute any step separately (see `README.md` in the `workflow` folder for details on workflow steps), you need to look up the name of the desired output file in the scripts and call Snakemake with the exact name. Using a flag `-n`, you can initiate a 'dry run': Snakemake will check the consistency of all rules and files and show the number of steps. However, a clean dry run does not necessarily mean that no errors will occur during a normal run. ReMM score is not allele-specific so that you get only one score independent of the variant itself. The workflow from the download of data up to computing the scores may take several days or weeks depending on the computing power and internet connection.\r +\r +\r +### The config files\r +\r +The main config file can be found in `config/config.yaml`. This config file was used to generate the ReMM score. Here most of the configuration magic happens. There is a second config file `config/features.yaml` where all features are listed (with additional description). Config files are controled via [json-schema](http://json-schema.org). \r +\r +We also provide a slurm config file for runtimes, memory and number of threads per rule: `config/slurm.yaml`.\r +""" ; + schema1:keywords "non-coding, pathogenicity score, variant pathogenicity prediction, Snakemake, ReMM, Regulatory Mendelian Mutation score" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ReMM score" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/414?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/protein-ligand-docking).\r +***\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), 
see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.296.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_virtual_screening/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Protein-ligand Docking tutorial (Fpocket)" ; + schema1:sdDatePublished "2024-08-05 10:30:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/296/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 26367 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-22T10:13:05Z" ; + schema1:dateModified "2023-01-16T13:58:43Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/protein-ligand-docking).\r +***\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/296?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Protein-ligand Docking tutorial (Fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_virtual_screening/galaxy/biobb_wf_virtual_screening.ga" ; + schema1:version 2 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "De novo assembly pipeline for 10X linked-reads, used at the SciLifeLab National Genomics Infrastructure." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1005?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/neutronstar" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/neutronstar" ; + schema1:sdDatePublished "2024-08-05 10:23:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1005/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3657 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:06Z" ; + schema1:dateModified "2024-06-11T12:55:06Z" ; + schema1:description "De novo assembly pipeline for 10X linked-reads, used at the SciLifeLab National Genomics Infrastructure." ; + schema1:keywords "10x-genomics, 10xgenomics, denovo-assembly, genome-assembly, linked-reads, Supernova" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/neutronstar" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1005?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """## Description\r +\r +The workflow takes an input file with Cancer Driver Genes predictions (i.e. the results provided by a participant), computes a set of metrics, and compares them against the data currently stored in OpenEBench within the TCGA community. 
Two assessment metrics are provided for that predicitions. Also, some plots (which are optional) that allow to visualize the performance of the tool are generated. The workflow consists in three standard steps, defined by OpenEBench. The tools needed to run these steps must be in one or more Docker images, generated from [Docker containers](https://github.com/inab/TCGA_benchmarking_dockers ). Separated instances are spawned from these images for each step:\r +1. **Validation**: the input file format is checked and, if required, the content of the file is validated (e.g check whether the submitted gene IDs exist)\r +2. **Metrics Computation**: the predictions are compared with the 'Gold Standards' provided by the community, which results in two performance metrics - precision (Positive Predictive Value) and recall(True Positive Rate).\r +3. **Results Consolidation**: the benchmark itself is performed by merging the tool metrics with the rest of TCGA data. The results are provided in JSON format and SVG format (scatter plot).\r +\r +![alt text](https://raw.githubusercontent.com/inab/TCGA_benchmarking_workflow/1.0.7/workflow.jpg)\r +\r +\r +## Data\r +\r +* [TCGA_sample_data](./TCGA_sample_data) folder contains all the reference data required by the steps. It is derived from the manuscript:\r +[Comprehensive Characterization of Cancer Driver Genes and Mutations](https://www.cell.com/cell/fulltext/S0092-8674%2818%2930237-X?code=cell-site), Bailey et al, 2018, Cell [![doi:10.1016/j.cell.2018.02.060](https://img.shields.io/badge/doi-10.1016%2Fj.cell.2018.02.060-green.svg)](https://doi.org/10.1016/j.cell.2018.02.060) \r +* [TCGA_sample_out](./TCGA_sample_out) folder contains an example output for a worklow run, with two cancer types / challenges selected (ACC, BRCA). Results obtained from the default execution should be similar to those ones available in this directory. 
Results found in [TCGA_sample_out/results](./TCGA_sample_out/results) can be visualized in the browser using [`benchmarking_workflows_results_visualizer` javascript library](https://github.com/inab/benchmarking_workflows_results_visualizer).\r +\r +\r +## Usage\r +In order to use the workflow you need to:\r +* Install [Nextflow](https://www.nextflow.io/), which depends on Java virtual machine (>=8 , <15 ). You can automate their installation for local testing using [run_local_nextflow.bash](run_local_nextflow.bash).\r +* Clone [TCGA benchmarking Docker definitions repository](https://github.com/inab/TCGA_benchmarking_dockers) from tag 1.0.3 in a separate directory, and build locally the three Docker images found in it, running the `build.sh 1.0.3` script within that repo.\r +* Run it just using either *`nextflow run main.nf -profile docker`* or *`./run_local_nextflow.bash run main.nf -profile docker`*. Arguments specifications:\r +\r +```\r +Usage:\r +Run the pipeline with default parameters:\r +nextflow run main.nf -profile docker\r +\r +Run with user parameters:\r +nextflow run main.nf -profile docker --predictionsFile {driver.genes.file} --public_ref_dir {validation.reference.file} --participant_name {tool.name} --metrics_ref_dir {gold.standards.dir} --cancer_types {analyzed.cancer.types} --assess_dir {benchmark.data.dir} --results_dir {output.dir}\r +\r +Mandatory arguments:\r + --input List of cancer genes prediction\r + --community_id Name or OEB permanent ID for the benchmarking community\r + --public_ref_dir Directory with list of cancer genes used to validate the predictions\r + --participant_id Name of the tool used for prediction\r + --goldstandard_dir Dir that contains metrics reference datasets for all cancer types\r + --event_id List of types of cancer selected by the user, separated by spaces\r + --assess_dir Dir where the data for the benchmark are stored\r +\r +Other options:\r + --validation_result The output file where the results from validation 
step will be saved\r + --assessment_results The output file where the results from the computed metrics step will be saved\r + --aggregation_results The output file where the consolidation of the benchmark will be saved\r + --statistics_results The output directory with nextflow statistics\r + --outdir The output directory where the consolidation of the benchmark will be saved\r + --statsdir The output directory with nextflow statistics\r + --data_model_export_dir The output dir where json file with benchmarking data model contents will be saved\r + --otherdir The output directory where custom results will be saved (no directory inside)\r +Flags:\r + --help Display this message\r +```\r +\r +Default input parameters and Docker images to use for each step can be specified in the [config](./nextflow.config) file\r +**NOTE: In order to make your workflow compatible with the [OpenEBench VRE Nextflow Executor](https://github.com/inab/vre-process_nextflow-executor), please make sure to use the same parameter names in your workflow.**""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/244?version=3" ; + schema1:isBasedOn "https://github.com/inab/TCGA_benchmarking_workflow/tree/1.0.8" ; + schema1:license "LGPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for OpenEBench assessment workflow" ; + schema1:sdDatePublished "2024-08-05 10:32:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/244/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6150 ; + schema1:creator , + , + ; + schema1:dateCreated "2021-11-25T13:31:19Z" ; + schema1:dateModified "2021-11-26T09:08:21Z" ; + schema1:description """## Description\r +\r +The workflow takes an input file with Cancer Driver Genes predictions (i.e. 
the results provided by a participant), computes a set of metrics, and compares them against the data currently stored in OpenEBench within the TCGA community. Two assessment metrics are provided for that predicitions. Also, some plots (which are optional) that allow to visualize the performance of the tool are generated. The workflow consists in three standard steps, defined by OpenEBench. The tools needed to run these steps must be in one or more Docker images, generated from [Docker containers](https://github.com/inab/TCGA_benchmarking_dockers ). Separated instances are spawned from these images for each step:\r +1. **Validation**: the input file format is checked and, if required, the content of the file is validated (e.g check whether the submitted gene IDs exist)\r +2. **Metrics Computation**: the predictions are compared with the 'Gold Standards' provided by the community, which results in two performance metrics - precision (Positive Predictive Value) and recall(True Positive Rate).\r +3. **Results Consolidation**: the benchmark itself is performed by merging the tool metrics with the rest of TCGA data. The results are provided in JSON format and SVG format (scatter plot).\r +\r +![alt text](https://raw.githubusercontent.com/inab/TCGA_benchmarking_workflow/1.0.7/workflow.jpg)\r +\r +\r +## Data\r +\r +* [TCGA_sample_data](./TCGA_sample_data) folder contains all the reference data required by the steps. It is derived from the manuscript:\r +[Comprehensive Characterization of Cancer Driver Genes and Mutations](https://www.cell.com/cell/fulltext/S0092-8674%2818%2930237-X?code=cell-site), Bailey et al, 2018, Cell [![doi:10.1016/j.cell.2018.02.060](https://img.shields.io/badge/doi-10.1016%2Fj.cell.2018.02.060-green.svg)](https://doi.org/10.1016/j.cell.2018.02.060) \r +* [TCGA_sample_out](./TCGA_sample_out) folder contains an example output for a worklow run, with two cancer types / challenges selected (ACC, BRCA). 
Results obtained from the default execution should be similar to those ones available in this directory. Results found in [TCGA_sample_out/results](./TCGA_sample_out/results) can be visualized in the browser using [`benchmarking_workflows_results_visualizer` javascript library](https://github.com/inab/benchmarking_workflows_results_visualizer).\r +\r +\r +## Usage\r +In order to use the workflow you need to:\r +* Install [Nextflow](https://www.nextflow.io/), which depends on Java virtual machine (>=8 , <15 ). You can automate their installation for local testing using [run_local_nextflow.bash](run_local_nextflow.bash).\r +* Clone [TCGA benchmarking Docker definitions repository](https://github.com/inab/TCGA_benchmarking_dockers) from tag 1.0.3 in a separate directory, and build locally the three Docker images found in it, running the `build.sh 1.0.3` script within that repo.\r +* Run it just using either *`nextflow run main.nf -profile docker`* or *`./run_local_nextflow.bash run main.nf -profile docker`*. 
Arguments specifications:\r +\r +```\r +Usage:\r +Run the pipeline with default parameters:\r +nextflow run main.nf -profile docker\r +\r +Run with user parameters:\r +nextflow run main.nf -profile docker --predictionsFile {driver.genes.file} --public_ref_dir {validation.reference.file} --participant_name {tool.name} --metrics_ref_dir {gold.standards.dir} --cancer_types {analyzed.cancer.types} --assess_dir {benchmark.data.dir} --results_dir {output.dir}\r +\r +Mandatory arguments:\r + --input List of cancer genes prediction\r + --community_id Name or OEB permanent ID for the benchmarking community\r + --public_ref_dir Directory with list of cancer genes used to validate the predictions\r + --participant_id Name of the tool used for prediction\r + --goldstandard_dir Dir that contains metrics reference datasets for all cancer types\r + --event_id List of types of cancer selected by the user, separated by spaces\r + --assess_dir Dir where the data for the benchmark are stored\r +\r +Other options:\r + --validation_result The output file where the results from validation step will be saved\r + --assessment_results The output file where the results from the computed metrics step will be saved\r + --aggregation_results The output file where the consolidation of the benchmark will be saved\r + --statistics_results The output directory with nextflow statistics\r + --outdir The output directory where the consolidation of the benchmark will be saved\r + --statsdir The output directory with nextflow statistics\r + --data_model_export_dir The output dir where json file with benchmarking data model contents will be saved\r + --otherdir The output directory where custom results will be saved (no directory inside)\r +Flags:\r + --help Display this message\r +```\r +\r +Default input parameters and Docker images to use for each step can be specified in the [config](./nextflow.config) file\r +**NOTE: In order to make your workflow compatible with the [OpenEBench VRE Nextflow 
Executor](https://github.com/inab/vre-process_nextflow-executor), please make sure to use the same parameter names in your workflow.**""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/244?version=3" ; + schema1:keywords "tcga, openebench, benchmarking" ; + schema1:license "https://spdx.org/licenses/LGPL-3.0" ; + schema1:name "OpenEBench assessment workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/244?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-ont-artic-variant-calling" ; + schema1:mainEntity ; + schema1:name "COVID-19-ARTIC-ONT (v0.1)" ; + schema1:sdDatePublished "2021-03-12 13:41:26 +0000" ; + schema1:softwareVersion "v0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 45258 ; + schema1:name "COVID-19-ARTIC-ONT" ; + schema1:programmingLanguage . + + a schema1:Dataset ; + schema1:datePublished "2024-05-27T12:41:08.866362" ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/qiime2-I-import" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + , + , + ; + schema1:name "qiime2-I-import/Ia-import-multiplexed-se" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "qiime2-I-import/Ib-import-multiplexed-pe" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/qiime2-I-import" ; + schema1:version "0.2" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "qiime2-I-import/Ic-import-demultiplexed-se" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/qiime2-I-import" ; + schema1:version "0.2" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "qiime2-I-import/Id-import-demultiplexed-pe" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/qiime2-I-import" ; + schema1:version "0.2" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly and annotation of metatranscriptomic data, both prokaryotic and eukaryotic" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/997?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/metatdenovo" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/metatdenovo" ; + schema1:sdDatePublished "2024-08-05 10:23:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/997/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11793 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:00Z" ; + schema1:dateModified "2024-06-11T12:55:00Z" ; + schema1:description "Assembly and annotation of metatranscriptomic data, both prokaryotic and eukaryotic" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/997?version=1" ; + schema1:keywords "eukaryotes, Metagenomics, metatranscriptomics, prokaryotes, viruses" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/metatdenovo" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/997?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """\r +# BridgeDb tutorial: Gene HGNC name to Ensembl identifier\r +\r +This tutorial explains how to use the BridgeDb identifier mapping service to translate HGNC names to Ensembl identifiers. This step is part of the OpenRiskNet use case to link Adverse Outcome Pathways to [WikiPathways](https://wikipathways.org/).\r +\r +First we need to load the Python library to allow calls to the [BridgeDb REST webservice](http://bridgedb.prod.openrisknet.org/swagger/):\r +\r +\r +```python\r +import requests\r +```\r +\r +Let's assume we're interested in the gene with HGNC MECP2 (FIXME: look up a gene in AOPWiki), the API call to make mappings is given below as `callUrl`. 
Here, the `H` indicates that the query (`MECP2`) is an HGNC symbol:\r +\r +\r +```python\r +callUrl = 'http://bridgedb.prod.openrisknet.org/Human/xrefs/H/MECP2'\r +```\r +\r +The default call returns all identifiers, not just for Ensembl:\r +\r +\r +```python\r +response = requests.get(callUrl)\r +response.text\r +```\r +\r +\r +\r +\r + 'GO:0001964\\tGeneOntology\\nuc065cav.1\\tUCSC Genome Browser\\n312750\\tOMIM\\nGO:0042551\\tGeneOntology\\nuc065car.1\\tUCSC Genome Browser\\nA0A087X1U4\\tUniprot-TrEMBL\\n4204\\tWikiGenes\\nGO:0043524\\tGeneOntology\\nILMN_1702715\\tIllumina\\n34355_at\\tAffy\\nGO:0007268\\tGeneOntology\\nMECP2\\tHGNC\\nuc065caz.1\\tUCSC Genome Browser\\nA_33_P3339036\\tAgilent\\nGO:0006576\\tGeneOntology\\nuc065cbg.1\\tUCSC Genome Browser\\nGO:0006342\\tGeneOntology\\n300496\\tOMIM\\nGO:0035176\\tGeneOntology\\nuc065cbc.1\\tUCSC Genome Browser\\nGO:0033555\\tGeneOntology\\nGO:0045892\\tGeneOntology\\nA_23_P114361\\tAgilent\\nGO:0045893\\tGeneOntology\\nENSG00000169057\\tEnsembl\\nGO:0090063\\tGeneOntology\\nGO:0005515\\tGeneOntology\\nGO:0002087\\tGeneOntology\\nGO:0005634\\tGeneOntology\\nGO:0007416\\tGeneOntology\\nGO:0008104\\tGeneOntology\\nGO:0042826\\tGeneOntology\\nGO:0007420\\tGeneOntology\\nGO:0035067\\tGeneOntology\\n300005\\tOMIM\\nNP_001104262\\tRefSeq\\nA0A087WVW7\\tUniprot-TrEMBL\\nNP_004983\\tRefSeq\\nGO:0046470\\tGeneOntology\\nGO:0010385\\tGeneOntology\\n11722682_at\\tAffy\\nGO:0051965\\tGeneOntology\\nNM_001316337\\tRefSeq\\nuc065caw.1\\tUCSC Genome Browser\\nA0A0D9SFX7\\tUniprot-TrEMBL\\nA0A140VKC4\\tUniprot-TrEMBL\\nGO:0003723\\tGeneOntology\\nGO:0019233\\tGeneOntology\\nGO:0001666\\tGeneOntology\\nGO:0003729\\tGeneOntology\\nGO:0021591\\tGeneOntology\\nuc065cas.1\\tUCSC Genome Browser\\nGO:0019230\\tGeneOntology\\nGO:0003682\\tGeneOntology\\nGO:0001662\\tGeneOntology\\nuc065cbh.1\\tUCSC Genome Browser\\nX99687_at\\tAffy\\nGO:0008344\\tGeneOntology\\nGO:0009791\\tGeneOntology\\nuc065cbd.1\\tUCSC Genome 
Browser\\nGO:0019904\\tGeneOntology\\nGO:0030182\\tGeneOntology\\nGO:0035197\\tGeneOntology\\n8175998\\tAffy\\nGO:0016358\\tGeneOntology\\nNM_004992\\tRefSeq\\nGO:0003714\\tGeneOntology\\nGO:0005739\\tGeneOntology\\nGO:0005615\\tGeneOntology\\nGO:0005737\\tGeneOntology\\nuc004fjv.3\\tUCSC Genome Browser\\n202617_s_at\\tAffy\\nGO:0050905\\tGeneOntology\\nGO:0008327\\tGeneOntology\\nD3YJ43\\tUniprot-TrEMBL\\nGO:0003677\\tGeneOntology\\nGO:0006541\\tGeneOntology\\nGO:0040029\\tGeneOntology\\nA_33_P3317211\\tAgilent\\nNP_001303266\\tRefSeq\\n11722683_a_at\\tAffy\\nGO:0008211\\tGeneOntology\\nGO:0051151\\tGeneOntology\\nNM_001110792\\tRefSeq\\nX89430_at\\tAffy\\nGO:2000820\\tGeneOntology\\nuc065cat.1\\tUCSC Genome Browser\\nGO:0003700\\tGeneOntology\\nGO:0047485\\tGeneOntology\\n4204\\tEntrez Gene\\nGO:0009405\\tGeneOntology\\nA0A0D9SEX1\\tUniprot-TrEMBL\\nGO:0098794\\tGeneOntology\\n3C2I\\tPDB\\nHs.200716\\tUniGene\\nGO:0000792\\tGeneOntology\\nuc065cax.1\\tUCSC Genome Browser\\n300055\\tOMIM\\n5BT2\\tPDB\\nGO:0006020\\tGeneOntology\\nGO:0031175\\tGeneOntology\\nuc065cbe.1\\tUCSC Genome Browser\\nGO:0008284\\tGeneOntology\\nuc065cba.1\\tUCSC Genome Browser\\nGO:0060291\\tGeneOntology\\n202618_s_at\\tAffy\\nGO:0016573\\tGeneOntology\\n17115453\\tAffy\\nA0A1B0GTV0\\tUniprot-TrEMBL\\nuc065cbi.1\\tUCSC Genome Browser\\nGO:0048167\\tGeneOntology\\nGO:0007616\\tGeneOntology\\nGO:0016571\\tGeneOntology\\nuc004fjw.3\\tUCSC Genome Browser\\nGO:0007613\\tGeneOntology\\nGO:0007612\\tGeneOntology\\nGO:0021549\\tGeneOntology\\n11722684_a_at\\tAffy\\nGO:0001078\\tGeneOntology\\nX94628_rna1_s_at\\tAffy\\nGO:0007585\\tGeneOntology\\nGO:0010468\\tGeneOntology\\nGO:0031061\\tGeneOntology\\nA_24_P237486\\tAgilent\\nGO:0050884\\tGeneOntology\\nGO:0000930\\tGeneOntology\\nGO:0005829\\tGeneOntology\\nuc065cau.1\\tUCSC Genome Browser\\nH7BY72\\tUniprot-TrEMBL\\n202616_s_at\\tAffy\\nGO:0006355\\tGeneOntology\\nuc065cay.1\\tUCSC Genome 
Browser\\nGO:0010971\\tGeneOntology\\n300673\\tOMIM\\nGO:0008542\\tGeneOntology\\nGO:0060079\\tGeneOntology\\nuc065cbf.1\\tUCSC Genome Browser\\nGO:0006122\\tGeneOntology\\nuc065cbb.1\\tUCSC Genome Browser\\nGO:0007052\\tGeneOntology\\nC9JH89\\tUniprot-TrEMBL\\nB5MCB4\\tUniprot-TrEMBL\\nGO:0032048\\tGeneOntology\\nGO:0050432\\tGeneOntology\\nGO:0001976\\tGeneOntology\\nI6LM39\\tUniprot-TrEMBL\\nGO:0005813\\tGeneOntology\\nILMN_1682091\\tIllumina\\nP51608\\tUniprot-TrEMBL\\n1QK9\\tPDB\\nGO:0006349\\tGeneOntology\\nGO:1900114\\tGeneOntology\\nGO:0000122\\tGeneOntology\\nGO:0006351\\tGeneOntology\\nGO:0008134\\tGeneOntology\\nILMN_1824898\\tIllumina\\n300260\\tOMIM\\n0006510725\\tIllumina\\n'\r +\r +\r +\r +You can also see the results are returned as a TSV file, consisting of two columns, the identifier and the matching database.\r +\r +We will want to convert this reply into a Python dictionary (with the identifier as key, as one database may have multiple identifiers):\r +\r +\r +```python\r +lines = response.text.split("\\n")\r +mappings = {}\r +for line in lines:\r + if ('\\t' in line):\r + tuple = line.split('\\t')\r + identifier = tuple[0]\r + database = tuple[1]\r + if (database == "Ensembl"):\r + mappings[identifier] = database\r +\r +print(mappings)\r +```\r +\r + {'ENSG00000169057': 'Ensembl'}\r +\r +\r +Alternatively, we can restrivct the return values from the BridgeDb webservice to just return Ensembl identifiers (system code `En`). 
For this, we add the `?dataSource=En` call parameter:\r +\r +\r +```python\r +callUrl = 'http://bridgedb-swagger.prod.openrisknet.org/Human/xrefs/H/MECP2?dataSource=En'\r +response = requests.get(callUrl)\r +response.text\r +```\r +\r +\r +\r +\r + 'ENSG00000169057\\tEnsembl\\n'\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.326.3" ; + schema1:isBasedOn "https://github.com/OpenRiskNet/notebooks.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for BridgeDb tutorial: Gene HGNC name to Ensembl identifier" ; + schema1:sdDatePublished "2024-08-05 10:32:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/326/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8112 ; + schema1:creator , + ; + schema1:dateCreated "2022-04-06T13:12:39Z" ; + schema1:dateModified "2023-01-16T13:59:41Z" ; + schema1:description """\r +# BridgeDb tutorial: Gene HGNC name to Ensembl identifier\r +\r +This tutorial explains how to use the BridgeDb identifier mapping service to translate HGNC names to Ensembl identifiers. This step is part of the OpenRiskNet use case to link Adverse Outcome Pathways to [WikiPathways](https://wikipathways.org/).\r +\r +First we need to load the Python library to allow calls to the [BridgeDb REST webservice](http://bridgedb.prod.openrisknet.org/swagger/):\r +\r +\r +```python\r +import requests\r +```\r +\r +Let's assume we're interested in the gene with HGNC MECP2 (FIXME: look up a gene in AOPWiki), the API call to make mappings is given below as `callUrl`. 
Here, the `H` indicates that the query (`MECP2`) is an HGNC symbol:\r +\r +\r +```python\r +callUrl = 'http://bridgedb.prod.openrisknet.org/Human/xrefs/H/MECP2'\r +```\r +\r +The default call returns all identifiers, not just for Ensembl:\r +\r +\r +```python\r +response = requests.get(callUrl)\r +response.text\r +```\r +\r +\r +\r +\r + 'GO:0001964\\tGeneOntology\\nuc065cav.1\\tUCSC Genome Browser\\n312750\\tOMIM\\nGO:0042551\\tGeneOntology\\nuc065car.1\\tUCSC Genome Browser\\nA0A087X1U4\\tUniprot-TrEMBL\\n4204\\tWikiGenes\\nGO:0043524\\tGeneOntology\\nILMN_1702715\\tIllumina\\n34355_at\\tAffy\\nGO:0007268\\tGeneOntology\\nMECP2\\tHGNC\\nuc065caz.1\\tUCSC Genome Browser\\nA_33_P3339036\\tAgilent\\nGO:0006576\\tGeneOntology\\nuc065cbg.1\\tUCSC Genome Browser\\nGO:0006342\\tGeneOntology\\n300496\\tOMIM\\nGO:0035176\\tGeneOntology\\nuc065cbc.1\\tUCSC Genome Browser\\nGO:0033555\\tGeneOntology\\nGO:0045892\\tGeneOntology\\nA_23_P114361\\tAgilent\\nGO:0045893\\tGeneOntology\\nENSG00000169057\\tEnsembl\\nGO:0090063\\tGeneOntology\\nGO:0005515\\tGeneOntology\\nGO:0002087\\tGeneOntology\\nGO:0005634\\tGeneOntology\\nGO:0007416\\tGeneOntology\\nGO:0008104\\tGeneOntology\\nGO:0042826\\tGeneOntology\\nGO:0007420\\tGeneOntology\\nGO:0035067\\tGeneOntology\\n300005\\tOMIM\\nNP_001104262\\tRefSeq\\nA0A087WVW7\\tUniprot-TrEMBL\\nNP_004983\\tRefSeq\\nGO:0046470\\tGeneOntology\\nGO:0010385\\tGeneOntology\\n11722682_at\\tAffy\\nGO:0051965\\tGeneOntology\\nNM_001316337\\tRefSeq\\nuc065caw.1\\tUCSC Genome Browser\\nA0A0D9SFX7\\tUniprot-TrEMBL\\nA0A140VKC4\\tUniprot-TrEMBL\\nGO:0003723\\tGeneOntology\\nGO:0019233\\tGeneOntology\\nGO:0001666\\tGeneOntology\\nGO:0003729\\tGeneOntology\\nGO:0021591\\tGeneOntology\\nuc065cas.1\\tUCSC Genome Browser\\nGO:0019230\\tGeneOntology\\nGO:0003682\\tGeneOntology\\nGO:0001662\\tGeneOntology\\nuc065cbh.1\\tUCSC Genome Browser\\nX99687_at\\tAffy\\nGO:0008344\\tGeneOntology\\nGO:0009791\\tGeneOntology\\nuc065cbd.1\\tUCSC Genome 
Browser\\nGO:0019904\\tGeneOntology\\nGO:0030182\\tGeneOntology\\nGO:0035197\\tGeneOntology\\n8175998\\tAffy\\nGO:0016358\\tGeneOntology\\nNM_004992\\tRefSeq\\nGO:0003714\\tGeneOntology\\nGO:0005739\\tGeneOntology\\nGO:0005615\\tGeneOntology\\nGO:0005737\\tGeneOntology\\nuc004fjv.3\\tUCSC Genome Browser\\n202617_s_at\\tAffy\\nGO:0050905\\tGeneOntology\\nGO:0008327\\tGeneOntology\\nD3YJ43\\tUniprot-TrEMBL\\nGO:0003677\\tGeneOntology\\nGO:0006541\\tGeneOntology\\nGO:0040029\\tGeneOntology\\nA_33_P3317211\\tAgilent\\nNP_001303266\\tRefSeq\\n11722683_a_at\\tAffy\\nGO:0008211\\tGeneOntology\\nGO:0051151\\tGeneOntology\\nNM_001110792\\tRefSeq\\nX89430_at\\tAffy\\nGO:2000820\\tGeneOntology\\nuc065cat.1\\tUCSC Genome Browser\\nGO:0003700\\tGeneOntology\\nGO:0047485\\tGeneOntology\\n4204\\tEntrez Gene\\nGO:0009405\\tGeneOntology\\nA0A0D9SEX1\\tUniprot-TrEMBL\\nGO:0098794\\tGeneOntology\\n3C2I\\tPDB\\nHs.200716\\tUniGene\\nGO:0000792\\tGeneOntology\\nuc065cax.1\\tUCSC Genome Browser\\n300055\\tOMIM\\n5BT2\\tPDB\\nGO:0006020\\tGeneOntology\\nGO:0031175\\tGeneOntology\\nuc065cbe.1\\tUCSC Genome Browser\\nGO:0008284\\tGeneOntology\\nuc065cba.1\\tUCSC Genome Browser\\nGO:0060291\\tGeneOntology\\n202618_s_at\\tAffy\\nGO:0016573\\tGeneOntology\\n17115453\\tAffy\\nA0A1B0GTV0\\tUniprot-TrEMBL\\nuc065cbi.1\\tUCSC Genome Browser\\nGO:0048167\\tGeneOntology\\nGO:0007616\\tGeneOntology\\nGO:0016571\\tGeneOntology\\nuc004fjw.3\\tUCSC Genome Browser\\nGO:0007613\\tGeneOntology\\nGO:0007612\\tGeneOntology\\nGO:0021549\\tGeneOntology\\n11722684_a_at\\tAffy\\nGO:0001078\\tGeneOntology\\nX94628_rna1_s_at\\tAffy\\nGO:0007585\\tGeneOntology\\nGO:0010468\\tGeneOntology\\nGO:0031061\\tGeneOntology\\nA_24_P237486\\tAgilent\\nGO:0050884\\tGeneOntology\\nGO:0000930\\tGeneOntology\\nGO:0005829\\tGeneOntology\\nuc065cau.1\\tUCSC Genome Browser\\nH7BY72\\tUniprot-TrEMBL\\n202616_s_at\\tAffy\\nGO:0006355\\tGeneOntology\\nuc065cay.1\\tUCSC Genome 
Browser\\nGO:0010971\\tGeneOntology\\n300673\\tOMIM\\nGO:0008542\\tGeneOntology\\nGO:0060079\\tGeneOntology\\nuc065cbf.1\\tUCSC Genome Browser\\nGO:0006122\\tGeneOntology\\nuc065cbb.1\\tUCSC Genome Browser\\nGO:0007052\\tGeneOntology\\nC9JH89\\tUniprot-TrEMBL\\nB5MCB4\\tUniprot-TrEMBL\\nGO:0032048\\tGeneOntology\\nGO:0050432\\tGeneOntology\\nGO:0001976\\tGeneOntology\\nI6LM39\\tUniprot-TrEMBL\\nGO:0005813\\tGeneOntology\\nILMN_1682091\\tIllumina\\nP51608\\tUniprot-TrEMBL\\n1QK9\\tPDB\\nGO:0006349\\tGeneOntology\\nGO:1900114\\tGeneOntology\\nGO:0000122\\tGeneOntology\\nGO:0006351\\tGeneOntology\\nGO:0008134\\tGeneOntology\\nILMN_1824898\\tIllumina\\n300260\\tOMIM\\n0006510725\\tIllumina\\n'\r +\r +\r +\r +You can also see the results are returned as a TSV file, consisting of two columns, the identifier and the matching database.\r +\r +We will want to convert this reply into a Python dictionary (with the identifier as key, as one database may have multiple identifiers):\r +\r +\r +```python\r +lines = response.text.split("\\n")\r +mappings = {}\r +for line in lines:\r + if ('\\t' in line):\r + tuple = line.split('\\t')\r + identifier = tuple[0]\r + database = tuple[1]\r + if (database == "Ensembl"):\r + mappings[identifier] = database\r +\r +print(mappings)\r +```\r +\r + {'ENSG00000169057': 'Ensembl'}\r +\r +\r +Alternatively, we can restrivct the return values from the BridgeDb webservice to just return Ensembl identifiers (system code `En`). 
For this, we add the `?dataSource=En` call parameter:\r +\r +\r +```python\r +callUrl = 'http://bridgedb-swagger.prod.openrisknet.org/Human/xrefs/H/MECP2?dataSource=En'\r +response = requests.get(callUrl)\r +response.text\r +```\r +\r +\r +\r +\r + 'ENSG00000169057\\tEnsembl\\n'\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/326?version=2" ; + schema1:keywords "Toxicology, jupyter" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "BridgeDb tutorial: Gene HGNC name to Ensembl identifier" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/326?version=3" ; + schema1:version 3 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 1640625 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Workflow to take DataOne data packages (raw datasets + metadata written in Ecological Metadata Standard) as input and create a DwC occurence.csv file almost ready to put in a Dawrin core Archive using eml-annotations at the attribute level" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/117?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Workflow constructed from history 'test dwc from PNDB Data package EML DwC annotations'" ; + schema1:sdDatePublished "2024-08-05 10:33:17 +0100" ; + schema1:url "https://workflowhub.eu/workflows/117/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 49971 ; + schema1:creator ; + schema1:dateCreated "2021-04-09T15:45:27Z" ; + schema1:dateModified "2023-11-09T21:04:38Z" ; + schema1:description "Workflow to take DataOne data packages (raw datasets + metadata written in Ecological Metadata Standard) as input and create a DwC occurence.csv file almost ready to put in a Dawrin core Archive using eml-annotations at the attribute level" ; + schema1:keywords "DataOne, Data package, EML, Ecological metadata language, eml-annotation, Darwin core, Galaxy-E, Galaxy" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Workflow constructed from history 'test dwc from PNDB Data package EML DwC annotations'" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/117?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2024-06-25T09:57:05.226299" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/gene-based-pathogen-identification" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "gene-based-pathogen-identification/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """### Workflow (hybrid) metagenomic assembly and binning + GEMs\r +_Accepts both Illumina and Long reads (ONT/PacBio)_\r +\r +- **Workflow Illumina Quality:** https://workflowhub.eu/workflows/336?version=1 \r +- **Workflow LongRead Quality:** https://workflowhub.eu/workflows/337\r + \r +- Kraken2 taxonomic classification of FASTQ reads\r +- SPAdes/Flye (Assembly)\r +- QUAST (Assembly quality report)\r +\r +**Workflow binnning** https://workflowhub.eu/workflows/64?version=11 (optional)\r +- Metabat2/MaxBin2/SemiBin\r +- DAS Tool\r +- CheckM\r +- BUSCO\r +- GTDB-Tk\r + \r +**Workflow Genome-scale metabolic models** https://workflowhub.eu/workflows/372 (optional)\r +- CarveMe (GEM generation)\r +- MEMOTE (GEM test suite)\r +- SMETANA (Species METabolic interaction ANAlysis)\r +\r +Other UNLOCK workflows on WorkflowHub: https://workflowhub.eu/projects/16/workflows?view=default\r +\r +**All tool CWL files and other workflows can be found here:**
\r +https://gitlab.com/m-unlock/cwl
\r +\r +**How to setup and use an UNLOCK workflow:**
\r +https://m-unlock.gitlab.io/docs/setup/setup.html
\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/367?version=1" ; + schema1:isBasedOn "https://gitlab.com/m-unlock/cwl/-/blob/master/cwl/workflows/workflow_metagenomics_assembly.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for (Hybrid) Metagenomics workflow" ; + schema1:sdDatePublished "2024-08-05 10:31:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/367/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 175595 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 34941 ; + schema1:creator , + ; + schema1:dateCreated "2022-06-14T08:14:10Z" ; + schema1:dateModified "2024-01-02T12:20:13Z" ; + schema1:description """### Workflow (hybrid) metagenomic assembly and binning + GEMs\r +_Accepts both Illumina and Long reads (ONT/PacBio)_\r +\r +- **Workflow Illumina Quality:** https://workflowhub.eu/workflows/336?version=1 \r +- **Workflow LongRead Quality:** https://workflowhub.eu/workflows/337\r + \r +- Kraken2 taxonomic classification of FASTQ reads\r +- SPAdes/Flye (Assembly)\r +- QUAST (Assembly quality report)\r +\r +**Workflow binnning** https://workflowhub.eu/workflows/64?version=11 (optional)\r +- Metabat2/MaxBin2/SemiBin\r +- DAS Tool\r +- CheckM\r +- BUSCO\r +- GTDB-Tk\r + \r +**Workflow Genome-scale metabolic models** https://workflowhub.eu/workflows/372 (optional)\r +- CarveMe (GEM generation)\r +- MEMOTE (GEM test suite)\r +- SMETANA (Species METabolic interaction ANAlysis)\r +\r +Other UNLOCK workflows on WorkflowHub: https://workflowhub.eu/projects/16/workflows?view=default\r +\r +**All tool CWL files and other workflows can be found here:**
\r +https://gitlab.com/m-unlock/cwl
\r +\r +**How to setup and use an UNLOCK workflow:**
\r +https://m-unlock.gitlab.io/docs/setup/setup.html
\r +""" ; + schema1:image ; + schema1:keywords "Metagenomics, Assembly, illumina, binning" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "(Hybrid) Metagenomics workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/367?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis of ribosome profiling, or Ribo-seq (also named ribosome footprinting)" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1016?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/riboseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/riboseq" ; + schema1:sdDatePublished "2024-08-05 10:23:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1016/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12175 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:09Z" ; + schema1:dateModified "2024-06-11T12:55:09Z" ; + schema1:description "Analysis of ribosome profiling, or Ribo-seq (also named ribosome footprinting)" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1016?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/riboseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1016?version=1" ; + schema1:version 1 . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 7130329 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + schema1:datePublished "2024-05-09T20:55:41.110246" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Purge-duplicates-one-haplotype-VGP6b/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=11" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-08-05 10:22:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=11" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9397 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:39Z" ; + schema1:dateModified "2024-06-11T12:54:39Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=11" ; + schema1:version 11 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "call and score variants from WGS/WES of rare disease patients" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1014?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/raredisease" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/raredisease" ; + schema1:sdDatePublished "2024-08-05 10:23:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1014/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11550 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:08Z" ; + schema1:dateModified "2024-06-11T12:55:08Z" ; + schema1:description "call and score variants from WGS/WES of rare disease patients" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1014?version=5" ; + schema1:keywords "diagnostics, rare-disease, snv, structural-variants, variant-annotation, variant-calling, wes, WGS" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/raredisease" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1014?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# CLAWS (CNAG's Long-read Assembly Workflow in Snakemake)\r + Snakemake Pipeline used for de novo genome assembly @CNAG. It has been developed for Snakemake v6.0.5.\r +\r +It accepts Oxford Nanopore Technologies (ONT) reads, PacBio HFi reads, illumina paired-end data, illumina 10X data and Hi-C reads. It does the preprocessing of the reads, assembly, polishing, purge_dups, scaffodling and different evaluation steps. By default it will preprocess the reads, run Flye + Hypo + purge_dups + yahs and evaluate the resulting assemblies with BUSCO, MERQURY, Nseries and assembly_stats. \r +It needs a config file and a spec file (json file with instructions on which resources should slurm use for each of the jobs). Both files are created by the script "create_config_assembly.py" that is located in the bin directory. 
To check all the options accepted by the script, do:\r +\r +```\r +bin/create_config_assembly.py -h\r +```\r +\r +Once the 2 config files are produced, the pipeline can be launched using snakemake like this:\r +\r +``snakemake --notemp -j 999 --snakefile assembly_pipeline.smk --configfile assembly.config --is --cluster-conf assembly.spec --use-conda --use-envmodules``\r +\r +If you are using an HPC cluster, please check how should you run snakemake to launch the jobs to the cluster. \r +\r +Most of the tools used will be installed via conda using the environments of the "envs" directory after providing the "--use-conda" option to snakemake. However, a few tools cannot be installed via conda and will have to be available in your PATH, or as a module in the cluster. Those tools are:\r +\r +- NextDenovo/2.5.0\r +- NextPolish/1.4.1\r +\r +# How to provide input data:\r +\r +There are several ways of providing the reads.\r +\r +### 1- ONT reads\r +\r +1.1 Using the option ``--ont-dir {DIR}`` in create_config_assembly.py.\r +\r +If you do so, it will look for all the files in the directory that end in '.fastq.gz' and will add the basenames to "ONT_wildcards". These wildcards will be processed by the pipeline that will:\r +\r +- Concatenate all the files into a single file\r +\r +- Run filtlong with the default or specified parameters. \r +\r +- Use the resulting file for assembly, polishing and/or purging.\r +\r +You can also specify the basenames of the files that you want to use with the ``--ont-list `` option. 
In this case, the pipeline will use the wildcards that you're providing instead of merging all the files in the directory.\r +\r +1.2 Using the option ```--ont-reads {FILE}``` in create_config_assembly.py.\r +\r +If you do so, it will consider that you already have all the reads in one file and will: \r +\r +- Run filtlong with the default or specified parameters.\r +\r +- Use the resulting file for assembly, polishing and/or purging.\r +\r +1.3 Using the option ```--ont-filt {FILE}```. It will use this file as the output from filtlong. Hence, it will skip the preprocessing steps and directly use it for assembly, polishing and/or purging. \r +\r +\r +\r +### 2-Illumina 10X-linked data\r +\r +2.1 Using the ```--raw-10X {DIR:list}``` option. \r +\r +Dictionary with 10X raw read directories, it has to be the mkfastq dir. You must specify as well the sampleIDs from this run. Example: '{"mkfastq- dir":"sample1,sample2,sample3"}'...\r +\r +It will take each basename in the list to get the fastqs from the corresponding directory and run longranger on each sample. Afterwards, it will build meryldbs for each "barcoded" file. Finally, it will concatenate all the meryldbs and "barcoded" files. Resulting "barcoded" file will be used for polishing. \r +\r +2.2 Using the ``--processed-10X {DIR}`` parameter. \r +\r +This directory can already be there or be produced by the pipeline as described in step 2.1. Once all the "barcoded" fastq files are there, meryldbs will be built for each "barcoded" file. Finally, it will concatenate all the meryldbs and "barcoded" files. Resulting "barcoded" file will be used for polishing. \r +\r +2.3 Using the ``--10X`` option. \r +\r +The argument to this is the path to the concatenated ".barcoded" file that needs to be used for polishing. If the pre-concatenated files are not given, meryldbs will be directly generated with this file, but it may run out of memory. 
\r +\r +### 3- Illumina short-read data\r +\r +3.1 Using the ``--illumina-dir {DIR}`` option, that will look for all the files in the directory that end in '.1.fastq.gz' and will add the basenames to "illumina_wildcards". These wildcards will be processed by the pipeline that will: \r +\r +- Trim adaptors with Trimgalore\r +\r +- Concatenate all the trimmed *.1.fastq.gz and the *2.fastq.gz in one file per pair. \r +\r +- The resulting reads will be used for building meryldbs and polishing. \r +\r +3.2 Using the ``--processed-illumina`` option. If the directory exists and contains files, the pipeline will look for all the files in the directory that end in '.1.fastq.gz' and will add the basenames to "illumina_wildcards". These wildcards will be processed by the pipeline that will:\r +\r +- Concatenate all the trimmed *.1.fastq.gz and the *2.fastq.gz in one file per pair. \r +\r +- The resulting reads will be used for building meryldbs and polishing. \r +\r +3.3 Using the ``--pe1 {FILE} and --pe2 {FILE}`` options. That will consider that these are the paired files containing all the illumina reads ready to be used and will build meryldbs and polish with them.\r +\r +### 4- Input assemblies\r +\r +If you want to polish an already assembled assembly, you can give it to the pipeline by using the option ``--assembly-in ASSEMBLY_IN [ASSEMBLY_IN ...]\r + Dictionary with assemblies that need to be polished but not assembled and directory where they should\r + be polished. Example: '{"assembly1":"polishing_dir1"}' '{"assembly2"="polishing_dir2"}' ...``\r + \r +If you want to improve an already polished assembly, you can give it to the pipeline by using the option ``--postpolish-assemblies POSTPOLISH_ASSEMBLIES [POSTPOLISH_ASSEMBLIES ...]\r + Dictionary with assemblies for whic postpolishing steps need to be run but that are not assembled and\r + base step for the directory where the first postpolishing step should be run. 
Example:\r + '{"assembly1":"s04.1_p03.1"}' '{"assembly2"="s04.2_p03.2"}' ...``\r +\r +\r +\r +# Description of implemented rules\r +\r +1- Preprocessing:\r + \r +- **Read concatenation:**\r +\r +``zcat {input.fastqs} | pigz -p {threads} -c > {output.final_fastq}``\r + \r +- **Longranger for 10X reads**: it uses the Longranger version installed in the path specified in the configfile\r +\r +``longranger basic --id={params.sample} --sample={params.sample} --fastqs={input.mkfastq_dir} --localcores={threads}``\r +\r +- **Trimgalore:** By default it gives the ``--gzip -q 20 --paired --retain_unpaired`` options, but it can be changed with the ``--trim-galore-opts `` argument. \r +\r +``trim_galore -j {threads} {params.opts} {input.read1} {input.read2}``\r +\r +- **Filtlong:** it uses the Filtlong version installed in the path specified in the configfile. By default it gives the min_length and min_mean_q parameters, but extra parameters can be added with the ``--filtlong-opts`` option.\r +\r +``filtlong --min_length {params.minlen} --min_mean_q {params.min_mean_q} {params.opts} {input.reads} | pigz -p {threads} -c > {output.outreads}``\r + \r +- **Build meryldb** (with processed 10X reads or illumina reads): it uses the merqury conda environment specified in the configfile. It takes as argument the `--mery-k` value that needs to be estimated first for the genime size. \r +\r +``meryl k={params.kmer} count output {output.out_dir} {input.fastq}``\r + \r +- Concat meryldbs: with the merqury conda environment specified in the configfile\r +\r +``meryl union-sum output {output.meryl_all} {input.input_run}``\r + \r +- **Align ONT (Minimap2):** it aligns the reads using minimap2 and outputs the alignment either in bam or in paf.gz formats. 
It uses the minimap2 conda environment specified in the configfile\r +\r +``minimap2 -{params.align_opts} -t {threads} {input.genome} {input.reads} ``\r +\r +- **Align Illumina (BWA-MEM):** it aligns the reads with BWA-mem and outputs a bam file\r +\r +``bwa mem -Y {params.options} -t {threads} {input.genome} {input.reads} | samtools view -Sb - | samtools sort -@ {threads} -o {output.mapping} -``\r +\r +2- Assembly\r +\r +- **Flye (default)**. It is run by default, if you don't want the pipeline to run it, you can give `--no-flye` option when creating the config. It uses the conda environment specified in the config. By default it is set to 2 polishing iterations and gives the genome-size estimate that has been given when creating the config. Extra options can be provided with the `--flye-opts`.\r +\r +``flye --{params.readtype} {input.reads} -o {params.outdir}out -t {threads} -i {params.pol_iterations} {params.other_flye_opts} ``\r + \r +- **Nextdenovo (if ``run-nextdenovo``):** It uses the cluster module specified in the config. If nextdenovo option is turned on, the create_config script will also create the nextdenovo config file. Check the create_config help to see which options can be modified on it. \r +\r +``nextDenovo {input.config}``\r +\r +3- Polishing\r +\r +- **Hypo (default):** It is the polisher that the pipeline uses by default, it can be turned off specifying ``--no-hypo`` when creating the config. If selected, the reads will be aligned in previous rules and then hypo will be run, it requires illumina data. It uses the conda environment specified in the config. \r +\r +``hypo -r @short_reads.list.txt -d {input.genome} -b {input.sr_bam} -c {coverage} -s {params.genome_size} -B {input.lr_bam} -t {threads} -o {output.polished} -p {params.proc} {params.opts} ``\r + \r +- **Racon (if turned on):** to run racon, specify ``--racon-rounds `` and the number of rounds of it you want to run. It uses the conda environment specified in the config file. 
\r +\r +``{params.racon_env}/scripts/racon_wrapper.py -u {params.opts} -t {threads} reads4racon.fastq.gz {input.mapping} {input.assembly} > {output.polished} ``\r + \r +- **Medaka (if turned on):** to run medaka, specify ``--medaka-rounds`` and the number of rounds of it you want to run. It uses the conda environment specified in the config file. It'll run after racon and before pilon, if they are also selected. \r +\r +`` medaka consensus {input.mapping} {wildcards.directory}/rmp/{wildcards.base}.medaka{wildcards.param}.hdf --threads {medaka_threads} --model {params.model} {params.consensus_opts};\r +medaka stitch --threads {threads} {wildcards.directory}/rmp/{wildcards.base}.medaka{wildcards.param}.hdf {input.assembly} {output.polished}``\r + \r +- **Pilon (if turned on):** to run Pilon, specify ``--pilon-rounds`` and the number of rounds of it you want to run. If it's a big genome, the pipeline will split the consensus step in several jobs, each of them running on certain scaffolds. It uses the version installed in the path specified in the config. \r +\r +``{scripts_dir}split_bam.py assembly.len {input.mapping} {params.chunks} {threads};\r +java {params.java_opts} -jar {params.path} --genome {input.assembly} --frags {input.alignment} {params.opts} --threads {threads} --output {basename}; \r +{scripts_dir}/concat_pilon.py {params.splitdir} {params.chunks} > {output.polished}``\r +\r + \r +- **Nextpolish ont (if turned on):** to run nextpolish with ONT reads, specify ``--nextpolish-ont-rounds`` and the number of rounds you want to run of it. \r +\r +``python /apps/NEXTPOLISH/1.3.1/lib/nextpolish2.py -g {input.genome} -p {threads} -l lgs.fofn -r {params.lrtype} > {output.polished}``\r + \r +- **Nextpolish illumina (if turned on):** to run nextpolish with illumina reads, specify ``--nextpolish-ill-rounds`` and the number of rounds you want to run of it. 
\r +\r +``python /apps/NEXTPOLISH/1.3.1/lib/nextpolish1.py -g {input.genome} -p {threads} -s {input.bam} -t {params.task} > {output.polished}``\r +\r +4- Post-assembly\r +\r +- **Purge_dups (by default):** select ``--no-purgedups`` if you don't want to run it. If no manual cutoffs are given, it'll run purgedups with automatic cutoffs and then will rerun it selecting the mean cutoff as 0.75\\*cov. It uses the version installed in the cluster module specified in the config. \r +\r +5- Evaluations\r + \r +- **Merqury:** It runs on each 'terminal' assembly. That is, the base assembly and the resulting assembly from each branch of the pipeline. \r + \r +- **Busco:** It can be run only in the terminal assemblies or on all the assemblies produced by the pipeline. It uses the conda environment specified in the config as well as the parameters specified. \r + \r +- **Nseries:** This is run during the *finalize* on all the assemblies that are evaluated. After it, that rule combines the statistics produced by all the evaluation rules. 
\r +\r +# Description of all options\r +```\r +bin/create_config_assembly.py -h\r +usage: create_configuration_file [-h] [--configFile configFile]\r + [--specFile specFile]\r + [--ndconfFile ndconfFile]\r + [--concat-cores concat_cores]\r + [--genome-size genome_size]\r + [--lr-type lr_type] [--basename base_name]\r + [--species species] [--keep-intermediate]\r + [--preprocess-lr-step PREPROCESS_ONT_STEP]\r + [--preprocess-10X-step PREPROCESS_10X_STEP]\r + [--preprocess-illumina-step PREPROCESS_ILLUMINA_STEP]\r + [--preprocess-hic-step PREPROCESS_HIC_STEP]\r + [--flye-step FLYE_STEP] [--no-flye]\r + [--nextdenovo-step NEXTDENOVO_STEP]\r + [--run-nextdenovo]\r + [--nextpolish-cores nextpolish_cores]\r + [--minimap2-cores minimap2_cores]\r + [--bwa-cores bwa_cores]\r + [--hypo-cores hypo_cores]\r + [--pairtools-cores pairtools_cores]\r + [--busco-cores busco_cores]\r + [--nextpolish-ont-rounds nextpolish_ont_rounds]\r + [--nextpolish-ill-rounds nextpolish_ill_rounds]\r + [--hypo-rounds hypo_rounds]\r + [--longranger-cores longranger_cores]\r + [--longranger-path longranger_path]\r + [--genomescope-opts genomescope_additional]\r + [--no-purgedups] [--ploidy ploidy]\r + [--run-tigmint] [--run-kraken2] [--no-yahs]\r + [--scripts-dir SCRIPTS_DIR]\r + [--ont-reads ONT_READS] [--ont-dir ONT_DIR]\r + [--ont-filt ONT_FILTERED] [--pe1 PE1]\r + [--pe2 PE2]\r + [--processed-illumina PROCESSED_ILLUMINA]\r + [--raw-10X RAW_10X [RAW_10X ...]]\r + [--processed-10X PROCESSED_10X] [--10X R10X]\r + [--illumina-dir ILLUMINA_DIR]\r + [--assembly-in ASSEMBLY_IN [ASSEMBLY_IN ...]]\r + [--postpolish-assemblies POSTPOLISH_ASSEMBLIES [POSTPOLISH_ASSEMBLIES ...]]\r + [--hic-dir HIC_DIR]\r + [--pipeline-workdir PIPELINE_WORKDIR]\r + [--filtlong-dir FILTLONG_DIR]\r + [--concat-hic-dir CONCAT_HIC_DIR]\r + [--flye-dir FLYE_DIR]\r + [--nextdenovo-dir NEXTDENOVO_DIR]\r + [--flye-polishing-dir POLISH_FLYE_DIR]\r + [--nextdenovo-polishing-dir POLISH_NEXTDENOVO_DIR]\r + [--eval-dir eval_dir] 
[--stats-out stats_out]\r + [--hic-qc-dir hic_qc_dir]\r + [--filtlong-minlen filtlong_minlen]\r + [--filtlong-min-mean-q filtlong_min_mean_q]\r + [--filtlong-opts filtlong_opts]\r + [--kraken2-db kraken2_db]\r + [--kraken2-kmer kraken2_kmers]\r + [--kraken2-opts additional_kraken2_opts]\r + [--kraken2-cores kraken2_threads]\r + [--trim-galore-opts trim_galore_opts]\r + [--trim-Illumina-cores Trim_Illumina_cores]\r + [--flye-cores flye_cores]\r + [--flye-polishing-iterations flye_pol_it]\r + [--other-flye-opts other_flye_opts]\r + [--nextdenovo-cores nextdenovo_cores]\r + [--nextdenovo-jobtype nextdenovo_type]\r + [--nextdenovo-task nextdenovo_task]\r + [--nextdenovo-rewrite nextdenovo_rewrite]\r + [--nextdenovo-parallel_jobs nextdenovo_parallel_jobs]\r + [--nextdenovo-minreadlen nextdenovo_minreadlen]\r + [--nextdenovo-seeddepth nextdenovo_seeddepth]\r + [--nextdenovo-seedcutoff nextdenovo_seedcutoff]\r + [--nextdenovo-blocksize nextdenovo_blocksize]\r + [--nextdenovo-pa-correction nextdenovo_pa_correction]\r + [--nextdenovo-minimap_raw nextdenovo_minimap_raw]\r + [--nextdenovo-minimap_cns nextdenovo_minimap_cns]\r + [--nextdenovo-minimap_map nextdenovo_minimap_map]\r + [--nextdenovo-sort nextdenovo_sort]\r + [--nextdenovo-correction_opts nextdenovo_correction_opts]\r + [--nextdenovo-nextgraph_opt nextdenovo_nextgraph_opt]\r + [--sr-cov ill_cov]\r + [--hypo-proc hypo_processes] [--hypo-no-lr]\r + [--hypo-opts hypo_opts]\r + [--purgedups-cores purgedups_cores]\r + [--purgedups-calcuts-opts calcuts_opts]\r + [--tigmint-cores tigmint_cores]\r + [--tigmint-opts tigmint_opts] [--hic-qc]\r + [--no-pretext] [--assembly-qc assembly_qc]\r + [--yahs-cores yahs_cores] [--yahs-mq yahs_mq]\r + [--yahs-opts yahs_opts]\r + [--hic-map-opts hic_map_opts]\r + [--mq mq [mq ...]]\r + [--hic-qc-assemblylen hic_qc_assemblylen]\r + [--blast-cores blast_cores]\r + [--hic-blastdb blastdb]\r + [--hic-readsblast hic_readsblast]\r + [--no-final-evals]\r + [--busco-lin busco_lineage]\r + 
[--merqury-db merqury_db] [--meryl-k meryl_k]\r + [--meryl-threads meryl_threads]\r + [--ont-list ONT_wildcards]\r + [--illumina-list illumina_wildcards]\r + [--r10X-list r10X_wildcards]\r + [--hic-list hic_wildcards]\r +\r +Create a configuration json file for the assembly pipeline.\r +\r +optional arguments:\r + -h, --help show this help message and exit\r +\r +General Parameters:\r + --configFile configFile\r + Configuration JSON to be generated. Default\r + assembly.config\r + --specFile specFile Cluster specifications JSON fileto be generated.\r + Default assembly.spec\r + --ndconfFile ndconfFile\r + Name pf the nextdenovo config file. Default\r + nextdenovo.config\r + --concat-cores concat_cores\r + Number of threads to concatenate reads and to run\r + filtlong. Default 4\r + --genome-size genome_size\r + Approximate genome size. Example: 615m or 2.6g.\r + Default None\r + --lr-type lr_type Type of long reads (options are flye read-type\r + options). Default nano-raw\r + --basename base_name Base name for the project. Default None\r + --species species Name of the species to be assembled. Default None\r + --keep-intermediate Set this to True if you do not want intermediate files\r + to be removed. Default False\r + --preprocess-lr-step PREPROCESS_ONT_STEP\r + Step for preprocessing long-reads. Default 02.1\r + --preprocess-10X-step PREPROCESS_10X_STEP\r + Step for preprocessing 10X reads. Default 02.2\r + --preprocess-illumina-step PREPROCESS_ILLUMINA_STEP\r + Step for preprocessing illumina reads. Default 02.2\r + --preprocess-hic-step PREPROCESS_HIC_STEP\r + Step for preprocessing hic reads. Default 02.3\r + --flye-step FLYE_STEP\r + Step for running flye. Default 03.1\r + --no-flye Give this option if you do not want to run Flye.\r + --nextdenovo-step NEXTDENOVO_STEP\r + Step for running nextdenovo. 
Default 03.2\r + --run-nextdenovo Give this option if you do want to run Nextdenovo.\r + --nextpolish-cores nextpolish_cores\r + Number of threads to run the nextpolish step. Default\r + 24\r + --minimap2-cores minimap2_cores\r + Number of threads to run the alignment with minimap2.\r + Default 32\r + --bwa-cores bwa_cores\r + Number of threads to run the alignments with BWA-Mem2.\r + Default 16\r + --hypo-cores hypo_cores\r + Number of threads to run the hypo step. Default 24\r + --pairtools-cores pairtools_cores\r + Number of threads to run the pairtools step. Default\r + 64\r + --busco-cores busco_cores\r + Number of threads to run BUSCO. Default 32\r + --nextpolish-ont-rounds nextpolish_ont_rounds\r + Number of rounds to run the Nextpolish with ONT step.\r + Default 0\r + --nextpolish-ill-rounds nextpolish_ill_rounds\r + Number of rounds to run the Nextpolish with illumina\r + step. Default 0\r + --hypo-rounds hypo_rounds\r + Number of rounds to run the Hypostep. Default 1\r + --longranger-cores longranger_cores\r + Number of threads to run longranger. Default 16\r + --longranger-path longranger_path\r + Path to longranger executable. Default\r + /scratch/project/devel/aateam/src/10X/longranger-2.2.2\r + --genomescope-opts genomescope_additional\r + Additional options to run Genomescope2 with. Default\r + --no-purgedups Give this option if you do not want to run Purgedups.\r + --ploidy ploidy Expected ploidy. Default 2\r + --run-tigmint Give this option if you want to run the scaffolding\r + with 10X reads step.\r + --run-kraken2 Give this option if you want to run Kraken2 on the\r + input reads.\r + --no-yahs Give this option if you do not want to run yahs.\r +\r +Inputs:\r + --scripts-dir SCRIPTS_DIR\r + Directory with the different scripts for the pipeline.\r + Default bin/../scripts/\r + --ont-reads ONT_READS\r + File with all the ONT reads. Default None\r + --ont-dir ONT_DIR Directory where the ONT fastqs are stored. 
Default\r + None\r + --ont-filt ONT_FILTERED\r + File with the ONT reads after running filtlong on\r + them. Default None\r + --pe1 PE1 File with the illumina paired-end fastqs, already\r + trimmed, pair 1.\r + --pe2 PE2 File with the illumina paired-end fastqs, already\r + trimmed, pair 2.\r + --processed-illumina PROCESSED_ILLUMINA\r + Directory to Processed illumina reads. Already there\r + or to be produced by the pipeline.\r + --raw-10X RAW_10X [RAW_10X ...]\r + Dictionary with 10X raw read directories, it has to be\r + the mkfastq dir. You must specify as well the\r + sampleIDs from this run. Example: '{"mkfastq-\r + dir":"sample1,sample2,sample3"}'...\r + --processed-10X PROCESSED_10X\r + Directory to Processed 10X reads. Already there or to\r + be produced by the pipeline.\r + --10X R10X File with barcoded 10X reads in fastq.gz format,\r + concatenated.\r + --illumina-dir ILLUMINA_DIR\r + Directory where the raw illumina fastqs are stored.\r + Default None\r + --assembly-in ASSEMBLY_IN [ASSEMBLY_IN ...]\r + Dictionary with assemblies that need to be polished\r + but not assembled and directory where they should be\r + polished. Example: '{"assembly1":"polishing_dir1"}'\r + '{"assembly2"="polishing_dir2"}' ...\r + --postpolish-assemblies POSTPOLISH_ASSEMBLIES [POSTPOLISH_ASSEMBLIES ...]\r + Dictionary with assemblies for whic postpolishing\r + steps need to be run but that are not assembled and\r + base step for the directory where the first\r + postpolishing step should be run. Example:\r + '{"assembly1":"s04.1_p03.1"}'\r + '{"assembly2":"s04.2_p03.2"}' ...\r + --hic-dir HIC_DIR Directory where the HiC fastqs are stored. Default\r + None\r +\r +Outputs:\r + --pipeline-workdir PIPELINE_WORKDIR\r + Base directory for the pipeline run. 
Default /software\r + /assembly/pipelines/Assembly_pipeline/v2.1/GLAWS/\r + --filtlong-dir FILTLONG_DIR\r + Directory to process the ONT reads with filtlong.\r + Default s02.1_p01.1_Filtlong\r + --concat-hic-dir CONCAT_HIC_DIR\r + Directory to concatenate the HiC reads. Default\r + s02.3_p01.1_Concat_HiC\r + --flye-dir FLYE_DIR Directory to run flye. Default s03.1_p02.1_flye/\r + --nextdenovo-dir NEXTDENOVO_DIR\r + Directory to run nextdenovo. Default\r + s03.2_p02.1_nextdenovo/\r + --flye-polishing-dir POLISH_FLYE_DIR\r + Directory to polish the flye assembly. Default\r + s04.1_p03.1_polishing/\r + --nextdenovo-polishing-dir POLISH_NEXTDENOVO_DIR\r + Directory to run nextdenovo. Default\r + s04.2_p03.2_polishing/\r + --eval-dir eval_dir Base directory for the evaluations. Default\r + evaluations/\r + --stats-out stats_out\r + Path to the file with the final statistics.\r + --hic-qc-dir hic_qc_dir\r + Directory to run the hic_qc. Default hic_qc/\r +\r +Filtlong:\r + --filtlong-minlen filtlong_minlen\r + Minimum read length to use with Filtlong. Default 1000\r + --filtlong-min-mean-q filtlong_min_mean_q\r + Minimum mean quality to use with Filtlong. Default 80\r + --filtlong-opts filtlong_opts\r + Extra options to run Filtlong (eg. -t 4000000000)\r +\r +Kraken2:\r + --kraken2-db kraken2_db\r + Database to be used for running Kraken2. Default None\r + --kraken2-kmer kraken2_kmers\r + Database to be used for running Kraken2. Default None\r + --kraken2-opts additional_kraken2_opts\r + Optional parameters for the rule Kraken2. Default\r + --kraken2-cores kraken2_threads\r + Number of threads to run the Kraken2 step. Default 16\r +\r +Trim_Galore:\r + --trim-galore-opts trim_galore_opts\r + Optional parameters for the rule trim_galore. 
Default\r + --gzip -q 20 --paired --retain_unpaired\r + --trim-Illumina-cores Trim_Illumina_cores\r + Number of threads to run the Illumina trimming step.\r + Default 8\r +\r +Flye:\r + --flye-cores flye_cores\r + Number of threads to run FLYE. Default 128\r + --flye-polishing-iterations flye_pol_it\r + Number of polishing iterations to use with FLYE.\r + Default 2\r + --other-flye-opts other_flye_opts\r + Additional options to run Flye. Default --scaffold\r +\r +Nextdenovo:\r + --nextdenovo-cores nextdenovo_cores\r + Number of threads to run nextdenovo. Default 2\r + --nextdenovo-jobtype nextdenovo_type\r + Job_type for nextdenovo. Default slurm\r + --nextdenovo-task nextdenovo_task\r + Task need to run. Default all\r + --nextdenovo-rewrite nextdenovo_rewrite\r + Overwrite existing directory. Default yes\r + --nextdenovo-parallel_jobs nextdenovo_parallel_jobs\r + Number of tasks used to run in parallel. Default 50\r + --nextdenovo-minreadlen nextdenovo_minreadlen\r + Filter reads with length < minreadlen. Default 1k\r + --nextdenovo-seeddepth nextdenovo_seeddepth\r + Expected seed depth, used to calculate seed_cutoff,\r + co-use with genome_size, you can try to set it 30-45\r + to get a better assembly result. Default 45\r + --nextdenovo-seedcutoff nextdenovo_seedcutoff\r + Minimum seed length, <=0 means calculate it\r + automatically using bin/seq_stat. Default 0\r + --nextdenovo-blocksize nextdenovo_blocksize\r + Block size for parallel running, split non-seed reads\r + into small files, the maximum size of each file is\r + blocksize. Default 1g\r + --nextdenovo-pa-correction nextdenovo_pa_correction\r + number of corrected tasks used to run in parallel,\r + each corrected task requires ~TOTAL_INPUT_BASES/4\r + bytes of memory usage, overwrite parallel_jobs only\r + for this step. Default 100\r + --nextdenovo-minimap_raw nextdenovo_minimap_raw\r + minimap2 options, used to find overlaps between raw\r + reads, see minimap2-nd for details. 
Default -t 30\r + --nextdenovo-minimap_cns nextdenovo_minimap_cns\r + minimap2 options, used to find overlaps between\r + corrected reads. Default -t 30\r + --nextdenovo-minimap_map nextdenovo_minimap_map\r + minimap2 options, used to map reads back to the\r + assembly. Default -t 30 --no-kalloc\r + --nextdenovo-sort nextdenovo_sort\r + sort options, see ovl_sort for details. Default -m\r + 400g -t 20\r + --nextdenovo-correction_opts nextdenovo_correction_opts\r + Correction options. Default -p 30 -dbuf\r + --nextdenovo-nextgraph_opt nextdenovo_nextgraph_opt\r + nextgraph options, see nextgraph for details. Default\r + -a 1\r +\r +Hypo:\r + --sr-cov ill_cov Approximate short read coverage for hypo Default 0\r + --hypo-proc hypo_processes\r + Number of contigs to be processed in parallel by HyPo.\r + Default 6\r + --hypo-no-lr Set this to false if you don¡t want to run hypo with\r + long reads. Default True\r + --hypo-opts hypo_opts\r + Additional options to run Hypo. Default None\r +\r +Purge_dups:\r + --purgedups-cores purgedups_cores\r + Number of threads to run purgedups. Default 8\r + --purgedups-calcuts-opts calcuts_opts\r + Adjusted values to run calcuts for purgedups. Default\r + None\r +\r +Scaffold_with_10X:\r + --tigmint-cores tigmint_cores\r + Number of threads to run the 10X scaffolding step.\r + Default 12\r + --tigmint-opts tigmint_opts\r + Adjusted values to run the scaffolding with 10X reads.\r + Default None\r +\r +HiC:\r + --hic-qc Give this option if only QC of the HiC data needs to\r + be done.\r + --no-pretext Give this option if you do not want to generate the\r + pretext file\r + --assembly-qc assembly_qc\r + Path to the assembly to be used perfom the QC of the\r + HiC reads.\r + --yahs-cores yahs_cores\r + Number of threads to run YAHS. 
Default 48\r + --yahs-mq yahs_mq Mapping quality to use when running yahs.Default 40\r + --yahs-opts yahs_opts\r + Additional options to give to YAHS.Default\r + --hic-map-opts hic_map_opts\r + Options to use with bwa mem when aligning the HiC\r + reads. Deafault -5SP -T0\r + --mq mq [mq ...] Mapping qualities to use for processing the hic\r + mappings. Default [0, 40]\r + --hic-qc-assemblylen hic_qc_assemblylen\r + Lentgh of the assembly to be used for HiC QC\r + --blast-cores blast_cores\r + Number of threads to run blast with the HiC unmapped\r + reads.Default 8\r + --hic-blastdb blastdb\r + BLAST Database to use to classify the hic unmapped\r + reads. Default\r + /scratch_isilon/groups/assembly/data/blastdbs\r + --hic-readsblast hic_readsblast\r + Number of unmapped hic reads to classify with blast.\r + Default 100\r +\r +Finalize:\r + --no-final-evals If specified, do not run evaluations on final\r + assemblies. Default True\r + --busco-lin busco_lineage\r + Path to the lineage directory to run Busco with.\r + Default None\r + --merqury-db merqury_db\r + Meryl database. Default None\r + --meryl-k meryl_k Kmer length to build the meryl database. Default None\r + --meryl-threads meryl_threads\r + Number of threads to run meryl and merqury. Default 4\r +\r +Wildcards:\r + --ont-list ONT_wildcards\r + List with basename of the ONT fastqs that will be\r + used. Default None\r + --illumina-list illumina_wildcards\r + List with basename of the illumina fastqs. Default\r + None\r + --r10X-list r10X_wildcards\r + List with basename of the raw 10X fastqs. Default None\r + --hic-list hic_wildcards\r + List with basename of the raw hic fastqs. 
Default None\r +```\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.567.1" ; + schema1:isBasedOn "https://github.com/cnag-aat/assembly_pipeline.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CLAWS (CNAG's long-read assembly workflow in Snakemake)" ; + schema1:sdDatePublished "2024-08-05 10:25:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/567/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3888 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2023-09-12T13:23:11Z" ; + schema1:dateModified "2023-09-26T16:54:53Z" ; + schema1:description """# CLAWS (CNAG's Long-read Assembly Workflow in Snakemake)\r + Snakemake Pipeline used for de novo genome assembly @CNAG. It has been developed for Snakemake v6.0.5.\r +\r +It accepts Oxford Nanopore Technologies (ONT) reads, PacBio HiFi reads, illumina paired-end data, illumina 10X data and Hi-C reads. It does the preprocessing of the reads, assembly, polishing, purge_dups, scaffolding and different evaluation steps. By default it will preprocess the reads, run Flye + Hypo + purge_dups + yahs and evaluate the resulting assemblies with BUSCO, MERQURY, Nseries and assembly_stats. \r +It needs a config file and a spec file (json file with instructions on which resources should slurm use for each of the jobs). Both files are created by the script "create_config_assembly.py" that is located in the bin directory. 
To check all the options accepted by the script, do:\r +\r +```\r +bin/create_config_assembly.py -h\r +```\r +\r +Once the 2 config files are produced, the pipeline can be launched using snakemake like this:\r +\r +``snakemake --notemp -j 999 --snakefile assembly_pipeline.smk --configfile assembly.config --is --cluster-conf assembly.spec --use-conda --use-envmodules``\r +\r +If you are using an HPC cluster, please check how should you run snakemake to launch the jobs to the cluster. \r +\r +Most of the tools used will be installed via conda using the environments of the "envs" directory after providing the "--use-conda" option to snakemake. However, a few tools cannot be installed via conda and will have to be available in your PATH, or as a module in the cluster. Those tools are:\r +\r +- NextDenovo/2.5.0\r +- NextPolish/1.4.1\r +\r +# How to provide input data:\r +\r +There are several ways of providing the reads.\r +\r +### 1- ONT reads\r +\r +1.1 Using the option ``--ont-dir {DIR}`` in create_config_assembly.py.\r +\r +If you do so, it will look for all the files in the directory that end in '.fastq.gz' and will add the basenames to "ONT_wildcards". These wildcards will be processed by the pipeline that will:\r +\r +- Concatenate all the files into a single file\r +\r +- Run filtlong with the default or specified parameters. \r +\r +- Use the resulting file for assembly, polishing and/or purging.\r +\r +You can also specify the basenames of the files that you want to use with the ``--ont-list `` option. 
In this case, the pipeline will use the wildcards that you're providing instead of merging all the files in the directory.\r +\r +1.2 Using the option ```--ont-reads {FILE}``` in create_config_assembly.py.\r +\r +If you do so, it will consider that you already have all the reads in one file and will: \r +\r +- Run filtlong with the default or specified parameters.\r +\r +- Use the resulting file for assembly, polishing and/or purging.\r +\r +1.3 Using the option ```--ont-filt {FILE}```. It will use this file as the output from filtlong. Hence, it will skip the preprocessing steps and directly use it for assembly, polishing and/or purging. \r +\r +\r +\r +### 2-Illumina 10X-linked data\r +\r +2.1 Using the ```--raw-10X {DIR:list}``` option. \r +\r +Dictionary with 10X raw read directories, it has to be the mkfastq dir. You must specify as well the sampleIDs from this run. Example: '{"mkfastq- dir":"sample1,sample2,sample3"}'...\r +\r +It will take each basename in the list to get the fastqs from the corresponding directory and run longranger on each sample. Afterwards, it will build meryldbs for each "barcoded" file. Finally, it will concatenate all the meryldbs and "barcoded" files. Resulting "barcoded" file will be used for polishing. \r +\r +2.2 Using the ``--processed-10X {DIR}`` parameter. \r +\r +This directory can already be there or be produced by the pipeline as described in step 2.1. Once all the "barcoded" fastq files are there, meryldbs will be built for each "barcoded" file. Finally, it will concatenate all the meryldbs and "barcoded" files. Resulting "barcoded" file will be used for polishing. \r +\r +2.3 Using the ``--10X`` option. \r +\r +The argument to this is the path to the concatenated ".barcoded" file that needs to be used for polishing. If the pre-concatenated files are not given, meryldbs will be directly generated with this file, but it may run out of memory. 
\r +\r +### 3- Illumina short-read data\r +\r +3.1 Using the ``--illumina-dir {DIR}`` option, that will look for all the files in the directory that end in '.1.fastq.gz' and will add the basenames to "illumina_wildcards". These wildcards will be processed by the pipeline that will: \r +\r +- Trim adaptors with Trimgalore\r +\r +- Concatenate all the trimmed *.1.fastq.gz and the *2.fastq.gz in one file per pair. \r +\r +- The resulting reads will be used for building meryldbs and polishing. \r +\r +3.2 Using the ``--processed-illumina`` option. If the directory exists and contains files, the pipeline will look for all the files in the directory that end in '.1.fastq.gz' and will add the basenames to "illumina_wildcards". These wildcards will be processed by the pipeline that will:\r +\r +- Concatenate all the trimmed *.1.fastq.gz and the *2.fastq.gz in one file per pair. \r +\r +- The resulting reads will be used for building meryldbs and polishing. \r +\r +3.3 Using the ``--pe1 {FILE} and --pe2 {FILE}`` options. That will consider that these are the paired files containing all the illumina reads ready to be used and will build meryldbs and polish with them.\r +\r +### 4- Input assemblies\r +\r +If you want to polish an already assembled assembly, you can give it to the pipeline by using the option ``--assembly-in ASSEMBLY_IN [ASSEMBLY_IN ...]\r + Dictionary with assemblies that need to be polished but not assembled and directory where they should\r + be polished. Example: '{"assembly1":"polishing_dir1"}' '{"assembly2"="polishing_dir2"}' ...``\r + \r +If you want to improve an already polished assembly, you can give it to the pipeline by using the option ``--postpolish-assemblies POSTPOLISH_ASSEMBLIES [POSTPOLISH_ASSEMBLIES ...]\r + Dictionary with assemblies for whic postpolishing steps need to be run but that are not assembled and\r + base step for the directory where the first postpolishing step should be run. 
Example:\r + '{"assembly1":"s04.1_p03.1"}' '{"assembly2"="s04.2_p03.2"}' ...``\r +\r +\r +\r +# Description of implemented rules\r +\r +1- Preprocessing:\r + \r +- **Read concatenation:**\r +\r +``zcat {input.fastqs} | pigz -p {threads} -c > {output.final_fastq}``\r + \r +- **Longranger for 10X reads**: it uses the Longranger version installed in the path specified in the configfile\r +\r +``longranger basic --id={params.sample} --sample={params.sample} --fastqs={input.mkfastq_dir} --localcores={threads}``\r +\r +- **Trimgalore:** By default it gives the ``--gzip -q 20 --paired --retain_unpaired`` options, but it can be changed with the ``--trim-galore-opts `` argument. \r +\r +``trim_galore -j {threads} {params.opts} {input.read1} {input.read2}``\r +\r +- **Filtlong:** it uses the Filtlong version installed in the path specified in the configfile. By default it gives the min_length and min_mean_q parameters, but extra parameters can be added with the ``--filtlong-opts`` option.\r +\r +``filtlong --min_length {params.minlen} --min_mean_q {params.min_mean_q} {params.opts} {input.reads} | pigz -p {threads} -c > {output.outreads}``\r + \r +- **Build meryldb** (with processed 10X reads or illumina reads): it uses the merqury conda environment specified in the configfile. It takes as argument the `--meryl-k` value that needs to be estimated first for the genome size. \r +\r +``meryl k={params.kmer} count output {output.out_dir} {input.fastq}``\r + \r +- Concat meryldbs: with the merqury conda environment specified in the configfile\r +\r +``meryl union-sum output {output.meryl_all} {input.input_run}``\r + \r +- **Align ONT (Minimap2):** it aligns the reads using minimap2 and outputs the alignment either in bam or in paf.gz formats. 
It uses the minimap2 conda environment specified in the configfile\r +\r +``minimap2 -{params.align_opts} -t {threads} {input.genome} {input.reads} ``\r +\r +- **Align Illumina (BWA-MEM):** it aligns the reads with BWA-mem and outputs a bam file\r +\r +``bwa mem -Y {params.options} -t {threads} {input.genome} {input.reads} | samtools view -Sb - | samtools sort -@ {threads} -o {output.mapping} -``\r +\r +2- Assembly\r +\r +- **Flye (default)**. It is run by default, if you don't want the pipeline to run it, you can give `--no-flye` option when creating the config. It uses the conda environment specified in the config. By default it is set to 2 polishing iterations and gives the genome-size estimate that has been given when creating the config. Extra options can be provided with the `--flye-opts`.\r +\r +``flye --{params.readtype} {input.reads} -o {params.outdir}out -t {threads} -i {params.pol_iterations} {params.other_flye_opts} ``\r + \r +- **Nextdenovo (if ``run-nextdenovo``):** It uses the cluster module specified in the config. If nextdenovo option is turned on, the create_config script will also create the nextdenovo config file. Check the create_config help to see which options can be modified on it. \r +\r +``nextDenovo {input.config}``\r +\r +3- Polishing\r +\r +- **Hypo (default):** It is the polisher that the pipeline uses by default, it can be turned off specifying ``--no-hypo`` when creating the config. If selected, the reads will be aligned in previous rules and then hypo will be run, it requires illumina data. It uses the conda environment specified in the config. \r +\r +``hypo -r @short_reads.list.txt -d {input.genome} -b {input.sr_bam} -c {coverage} -s {params.genome_size} -B {input.lr_bam} -t {threads} -o {output.polished} -p {params.proc} {params.opts} ``\r + \r +- **Racon (if turned on):** to run racon, specify ``--racon-rounds `` and the number of rounds of it you want to run. It uses the conda environment specified in the config file. 
\r +\r +``{params.racon_env}/scripts/racon_wrapper.py -u {params.opts} -t {threads} reads4racon.fastq.gz {input.mapping} {input.assembly} > {output.polished} ``\r + \r +- **Medaka (if turned on):** to run medaka, specify ``--medaka-rounds`` and the number of rounds of it you want to run. It uses the conda environment specified in the config file. It'll run after racon and before pilon, if they are also selected. \r +\r +`` medaka consensus {input.mapping} {wildcards.directory}/rmp/{wildcards.base}.medaka{wildcards.param}.hdf --threads {medaka_threads} --model {params.model} {params.consensus_opts};\r +medaka stitch --threads {threads} {wildcards.directory}/rmp/{wildcards.base}.medaka{wildcards.param}.hdf {input.assembly} {output.polished}``\r + \r +- **Pilon (if turned on):** to run Pilon, specify ``--pilon-rounds`` and the number of rounds of it you want to run. If it's a big genome, the pipeline will split the consensus step in several jobs, each of them running on certain scaffolds. It uses the version installed in the path specified in the config. \r +\r +``{scripts_dir}split_bam.py assembly.len {input.mapping} {params.chunks} {threads};\r +java {params.java_opts} -jar {params.path} --genome {input.assembly} --frags {input.alignment} {params.opts} --threads {threads} --output {basename}; \r +{scripts_dir}/concat_pilon.py {params.splitdir} {params.chunks} > {output.polished}``\r +\r + \r +- **Nextpolish ont (if turned on):** to run nextpolish with ONT reads, specify ``--nextpolish-ont-rounds`` and the number of rounds you want to run of it. \r +\r +``"python /apps/NEXTPOLISH/1.3.1/lib/nextpolish2.py -g {input.genome} -p {threads} -l lgs.fofn -r {params.lrtype} > {output.polished}``\r + \r +- **Nextpolish illumina (if turned on):** to run nextpolish with illumina reads, specify ``--nextpolish-ill-rounds`` and the number of rounds you want to run of it. 
\r +\r +``"python /apps/NEXTPOLISH/1.3.1/lib/nextpolish1.py -g {input.genome} -p {threads} -s {input.bam} -t {params.task} > {output.polished}``\r +\r +4- Post-assembly\r +\r +- **Purge_dups (by default):** select ``--no-purgedups`` if you don't want to run it. If no manual cutoffs are given, it'll run purgedups with automatic cutoffs and then will rerun it selecting the mean cutoff as 0.75\\*cov. It uses the version installed in the cluster module specified in the config. \r +\r +5- Evaluations\r + \r +- **Merqury:** It runs on each 'terminal' assembly. This is, the base assembly and the resulting assembly from each branch of the pipeline. \r + \r +- **Busco:** It can be run only in the terminal assemblies or on all the assemblies produced by the pipeline. It uses the conda environment specified in the config as well as the parameters specified. \r + \r +- **Nseries:** This is run during the *finalize* on all the assemblies that are evaluated. After it, that rule combines the statistics produced by all the evaluation rules. 
\r +\r +# Description of all options\r +```\r +bin/create_config_assembly.py -h\r +usage: create_configuration_file [-h] [--configFile configFile]\r + [--specFile specFile]\r + [--ndconfFile ndconfFile]\r + [--concat-cores concat_cores]\r + [--genome-size genome_size]\r + [--lr-type lr_type] [--basename base_name]\r + [--species species] [--keep-intermediate]\r + [--preprocess-lr-step PREPROCESS_ONT_STEP]\r + [--preprocess-10X-step PREPROCESS_10X_STEP]\r + [--preprocess-illumina-step PREPROCESS_ILLUMINA_STEP]\r + [--preprocess-hic-step PREPROCESS_HIC_STEP]\r + [--flye-step FLYE_STEP] [--no-flye]\r + [--nextdenovo-step NEXTDENOVO_STEP]\r + [--run-nextdenovo]\r + [--nextpolish-cores nextpolish_cores]\r + [--minimap2-cores minimap2_cores]\r + [--bwa-cores bwa_cores]\r + [--hypo-cores hypo_cores]\r + [--pairtools-cores pairtools_cores]\r + [--busco-cores busco_cores]\r + [--nextpolish-ont-rounds nextpolish_ont_rounds]\r + [--nextpolish-ill-rounds nextpolish_ill_rounds]\r + [--hypo-rounds hypo_rounds]\r + [--longranger-cores longranger_cores]\r + [--longranger-path longranger_path]\r + [--genomescope-opts genomescope_additional]\r + [--no-purgedups] [--ploidy ploidy]\r + [--run-tigmint] [--run-kraken2] [--no-yahs]\r + [--scripts-dir SCRIPTS_DIR]\r + [--ont-reads ONT_READS] [--ont-dir ONT_DIR]\r + [--ont-filt ONT_FILTERED] [--pe1 PE1]\r + [--pe2 PE2]\r + [--processed-illumina PROCESSED_ILLUMINA]\r + [--raw-10X RAW_10X [RAW_10X ...]]\r + [--processed-10X PROCESSED_10X] [--10X R10X]\r + [--illumina-dir ILLUMINA_DIR]\r + [--assembly-in ASSEMBLY_IN [ASSEMBLY_IN ...]]\r + [--postpolish-assemblies POSTPOLISH_ASSEMBLIES [POSTPOLISH_ASSEMBLIES ...]]\r + [--hic-dir HIC_DIR]\r + [--pipeline-workdir PIPELINE_WORKDIR]\r + [--filtlong-dir FILTLONG_DIR]\r + [--concat-hic-dir CONCAT_HIC_DIR]\r + [--flye-dir FLYE_DIR]\r + [--nextdenovo-dir NEXTDENOVO_DIR]\r + [--flye-polishing-dir POLISH_FLYE_DIR]\r + [--nextdenovo-polishing-dir POLISH_NEXTDENOVO_DIR]\r + [--eval-dir eval_dir] 
[--stats-out stats_out]\r + [--hic-qc-dir hic_qc_dir]\r + [--filtlong-minlen filtlong_minlen]\r + [--filtlong-min-mean-q filtlong_min_mean_q]\r + [--filtlong-opts filtlong_opts]\r + [--kraken2-db kraken2_db]\r + [--kraken2-kmer kraken2_kmers]\r + [--kraken2-opts additional_kraken2_opts]\r + [--kraken2-cores kraken2_threads]\r + [--trim-galore-opts trim_galore_opts]\r + [--trim-Illumina-cores Trim_Illumina_cores]\r + [--flye-cores flye_cores]\r + [--flye-polishing-iterations flye_pol_it]\r + [--other-flye-opts other_flye_opts]\r + [--nextdenovo-cores nextdenovo_cores]\r + [--nextdenovo-jobtype nextdenovo_type]\r + [--nextdenovo-task nextdenovo_task]\r + [--nextdenovo-rewrite nextdenovo_rewrite]\r + [--nextdenovo-parallel_jobs nextdenovo_parallel_jobs]\r + [--nextdenovo-minreadlen nextdenovo_minreadlen]\r + [--nextdenovo-seeddepth nextdenovo_seeddepth]\r + [--nextdenovo-seedcutoff nextdenovo_seedcutoff]\r + [--nextdenovo-blocksize nextdenovo_blocksize]\r + [--nextdenovo-pa-correction nextdenovo_pa_correction]\r + [--nextdenovo-minimap_raw nextdenovo_minimap_raw]\r + [--nextdenovo-minimap_cns nextdenovo_minimap_cns]\r + [--nextdenovo-minimap_map nextdenovo_minimap_map]\r + [--nextdenovo-sort nextdenovo_sort]\r + [--nextdenovo-correction_opts nextdenovo_correction_opts]\r + [--nextdenovo-nextgraph_opt nextdenovo_nextgraph_opt]\r + [--sr-cov ill_cov]\r + [--hypo-proc hypo_processes] [--hypo-no-lr]\r + [--hypo-opts hypo_opts]\r + [--purgedups-cores purgedups_cores]\r + [--purgedups-calcuts-opts calcuts_opts]\r + [--tigmint-cores tigmint_cores]\r + [--tigmint-opts tigmint_opts] [--hic-qc]\r + [--no-pretext] [--assembly-qc assembly_qc]\r + [--yahs-cores yahs_cores] [--yahs-mq yahs_mq]\r + [--yahs-opts yahs_opts]\r + [--hic-map-opts hic_map_opts]\r + [--mq mq [mq ...]]\r + [--hic-qc-assemblylen hic_qc_assemblylen]\r + [--blast-cores blast_cores]\r + [--hic-blastdb blastdb]\r + [--hic-readsblast hic_readsblast]\r + [--no-final-evals]\r + [--busco-lin busco_lineage]\r + 
[--merqury-db merqury_db] [--meryl-k meryl_k]\r + [--meryl-threads meryl_threads]\r + [--ont-list ONT_wildcards]\r + [--illumina-list illumina_wildcards]\r + [--r10X-list r10X_wildcards]\r + [--hic-list hic_wildcards]\r +\r +Create a configuration json file for the assembly pipeline.\r +\r +optional arguments:\r + -h, --help show this help message and exit\r +\r +General Parameters:\r + --configFile configFile\r + Configuration JSON to be generated. Default\r + assembly.config\r + --specFile specFile Cluster specifications JSON fileto be generated.\r + Default assembly.spec\r + --ndconfFile ndconfFile\r + Name pf the nextdenovo config file. Default\r + nextdenovo.config\r + --concat-cores concat_cores\r + Number of threads to concatenate reads and to run\r + filtlong. Default 4\r + --genome-size genome_size\r + Approximate genome size. Example: 615m or 2.6g.\r + Default None\r + --lr-type lr_type Type of long reads (options are flye read-type\r + options). Default nano-raw\r + --basename base_name Base name for the project. Default None\r + --species species Name of the species to be assembled. Default None\r + --keep-intermediate Set this to True if you do not want intermediate files\r + to be removed. Default False\r + --preprocess-lr-step PREPROCESS_ONT_STEP\r + Step for preprocessing long-reads. Default 02.1\r + --preprocess-10X-step PREPROCESS_10X_STEP\r + Step for preprocessing 10X reads. Default 02.2\r + --preprocess-illumina-step PREPROCESS_ILLUMINA_STEP\r + Step for preprocessing illumina reads. Default 02.2\r + --preprocess-hic-step PREPROCESS_HIC_STEP\r + Step for preprocessing hic reads. Default 02.3\r + --flye-step FLYE_STEP\r + Step for running flye. Default 03.1\r + --no-flye Give this option if you do not want to run Flye.\r + --nextdenovo-step NEXTDENOVO_STEP\r + Step for running nextdenovo. 
Default 03.2\r + --run-nextdenovo Give this option if you do want to run Nextdenovo.\r + --nextpolish-cores nextpolish_cores\r + Number of threads to run the nextpolish step. Default\r + 24\r + --minimap2-cores minimap2_cores\r + Number of threads to run the alignment with minimap2.\r + Default 32\r + --bwa-cores bwa_cores\r + Number of threads to run the alignments with BWA-Mem2.\r + Default 16\r + --hypo-cores hypo_cores\r + Number of threads to run the hypo step. Default 24\r + --pairtools-cores pairtools_cores\r + Number of threads to run the pairtools step. Default\r + 64\r + --busco-cores busco_cores\r + Number of threads to run BUSCO. Default 32\r + --nextpolish-ont-rounds nextpolish_ont_rounds\r + Number of rounds to run the Nextpolish with ONT step.\r + Default 0\r + --nextpolish-ill-rounds nextpolish_ill_rounds\r + Number of rounds to run the Nextpolish with illumina\r + step. Default 0\r + --hypo-rounds hypo_rounds\r + Number of rounds to run the Hypostep. Default 1\r + --longranger-cores longranger_cores\r + Number of threads to run longranger. Default 16\r + --longranger-path longranger_path\r + Path to longranger executable. Default\r + /scratch/project/devel/aateam/src/10X/longranger-2.2.2\r + --genomescope-opts genomescope_additional\r + Additional options to run Genomescope2 with. Default\r + --no-purgedups Give this option if you do not want to run Purgedups.\r + --ploidy ploidy Expected ploidy. Default 2\r + --run-tigmint Give this option if you want to run the scaffolding\r + with 10X reads step.\r + --run-kraken2 Give this option if you want to run Kraken2 on the\r + input reads.\r + --no-yahs Give this option if you do not want to run yahs.\r +\r +Inputs:\r + --scripts-dir SCRIPTS_DIR\r + Directory with the different scripts for the pipeline.\r + Default bin/../scripts/\r + --ont-reads ONT_READS\r + File with all the ONT reads. Default None\r + --ont-dir ONT_DIR Directory where the ONT fastqs are stored. 
Default\r + None\r + --ont-filt ONT_FILTERED\r + File with the ONT reads after running filtlong on\r + them. Default None\r + --pe1 PE1 File with the illumina paired-end fastqs, already\r + trimmed, pair 1.\r + --pe2 PE2 File with the illumina paired-end fastqs, already\r + trimmed, pair 2.\r + --processed-illumina PROCESSED_ILLUMINA\r + Directory to Processed illumina reads. Already there\r + or to be produced by the pipeline.\r + --raw-10X RAW_10X [RAW_10X ...]\r + Dictionary with 10X raw read directories, it has to be\r + the mkfastq dir. You must specify as well the\r + sampleIDs from this run. Example: '{"mkfastq-\r + dir":"sample1,sample2,sample3"}'...\r + --processed-10X PROCESSED_10X\r + Directory to Processed 10X reads. Already there or to\r + be produced by the pipeline.\r + --10X R10X File with barcoded 10X reads in fastq.gz format,\r + concatenated.\r + --illumina-dir ILLUMINA_DIR\r + Directory where the raw illumina fastqs are stored.\r + Default None\r + --assembly-in ASSEMBLY_IN [ASSEMBLY_IN ...]\r + Dictionary with assemblies that need to be polished\r + but not assembled and directory where they should be\r + polished. Example: '{"assembly1":"polishing_dir1"}'\r + '{"assembly2"="polishing_dir2"}' ...\r + --postpolish-assemblies POSTPOLISH_ASSEMBLIES [POSTPOLISH_ASSEMBLIES ...]\r + Dictionary with assemblies for whic postpolishing\r + steps need to be run but that are not assembled and\r + base step for the directory where the first\r + postpolishing step should be run. Example:\r + '{"assembly1":"s04.1_p03.1"}'\r + '{"assembly2":"s04.2_p03.2"}' ...\r + --hic-dir HIC_DIR Directory where the HiC fastqs are stored. Default\r + None\r +\r +Outputs:\r + --pipeline-workdir PIPELINE_WORKDIR\r + Base directory for the pipeline run. 
Default /software\r + /assembly/pipelines/Assembly_pipeline/v2.1/GLAWS/\r + --filtlong-dir FILTLONG_DIR\r + Directory to process the ONT reads with filtlong.\r + Default s02.1_p01.1_Filtlong\r + --concat-hic-dir CONCAT_HIC_DIR\r + Directory to concatenate the HiC reads. Default\r + s02.3_p01.1_Concat_HiC\r + --flye-dir FLYE_DIR Directory to run flye. Default s03.1_p02.1_flye/\r + --nextdenovo-dir NEXTDENOVO_DIR\r + Directory to run nextdenovo. Default\r + s03.2_p02.1_nextdenovo/\r + --flye-polishing-dir POLISH_FLYE_DIR\r + Directory to polish the flye assembly. Default\r + s04.1_p03.1_polishing/\r + --nextdenovo-polishing-dir POLISH_NEXTDENOVO_DIR\r + Directory to run nextdenovo. Default\r + s04.2_p03.2_polishing/\r + --eval-dir eval_dir Base directory for the evaluations. Default\r + evaluations/\r + --stats-out stats_out\r + Path to the file with the final statistics.\r + --hic-qc-dir hic_qc_dir\r + Directory to run the hic_qc. Default hic_qc/\r +\r +Filtlong:\r + --filtlong-minlen filtlong_minlen\r + Minimum read length to use with Filtlong. Default 1000\r + --filtlong-min-mean-q filtlong_min_mean_q\r + Minimum mean quality to use with Filtlong. Default 80\r + --filtlong-opts filtlong_opts\r + Extra options to run Filtlong (eg. -t 4000000000)\r +\r +Kraken2:\r + --kraken2-db kraken2_db\r + Database to be used for running Kraken2. Default None\r + --kraken2-kmer kraken2_kmers\r + Database to be used for running Kraken2. Default None\r + --kraken2-opts additional_kraken2_opts\r + Optional parameters for the rule Kraken2. Default\r + --kraken2-cores kraken2_threads\r + Number of threads to run the Kraken2 step. Default 16\r +\r +Trim_Galore:\r + --trim-galore-opts trim_galore_opts\r + Optional parameters for the rule trim_galore. 
Default\r + --gzip -q 20 --paired --retain_unpaired\r + --trim-Illumina-cores Trim_Illumina_cores\r + Number of threads to run the Illumina trimming step.\r + Default 8\r +\r +Flye:\r + --flye-cores flye_cores\r + Number of threads to run FLYE. Default 128\r + --flye-polishing-iterations flye_pol_it\r + Number of polishing iterations to use with FLYE.\r + Default 2\r + --other-flye-opts other_flye_opts\r + Additional options to run Flye. Default --scaffold\r +\r +Nextdenovo:\r + --nextdenovo-cores nextdenovo_cores\r + Number of threads to run nextdenovo. Default 2\r + --nextdenovo-jobtype nextdenovo_type\r + Job_type for nextdenovo. Default slurm\r + --nextdenovo-task nextdenovo_task\r + Task need to run. Default all\r + --nextdenovo-rewrite nextdenovo_rewrite\r + Overwrite existing directory. Default yes\r + --nextdenovo-parallel_jobs nextdenovo_parallel_jobs\r + Number of tasks used to run in parallel. Default 50\r + --nextdenovo-minreadlen nextdenovo_minreadlen\r + Filter reads with length < minreadlen. Default 1k\r + --nextdenovo-seeddepth nextdenovo_seeddepth\r + Expected seed depth, used to calculate seed_cutoff,\r + co-use with genome_size, you can try to set it 30-45\r + to get a better assembly result. Default 45\r + --nextdenovo-seedcutoff nextdenovo_seedcutoff\r + Minimum seed length, <=0 means calculate it\r + automatically using bin/seq_stat. Default 0\r + --nextdenovo-blocksize nextdenovo_blocksize\r + Block size for parallel running, split non-seed reads\r + into small files, the maximum size of each file is\r + blocksize. Default 1g\r + --nextdenovo-pa-correction nextdenovo_pa_correction\r + number of corrected tasks used to run in parallel,\r + each corrected task requires ~TOTAL_INPUT_BASES/4\r + bytes of memory usage, overwrite parallel_jobs only\r + for this step. Default 100\r + --nextdenovo-minimap_raw nextdenovo_minimap_raw\r + minimap2 options, used to find overlaps between raw\r + reads, see minimap2-nd for details. 
Default -t 30\r + --nextdenovo-minimap_cns nextdenovo_minimap_cns\r + minimap2 options, used to find overlaps between\r + corrected reads. Default -t 30\r + --nextdenovo-minimap_map nextdenovo_minimap_map\r + minimap2 options, used to map reads back to the\r + assembly. Default -t 30 --no-kalloc\r + --nextdenovo-sort nextdenovo_sort\r + sort options, see ovl_sort for details. Default -m\r + 400g -t 20\r + --nextdenovo-correction_opts nextdenovo_correction_opts\r + Correction options. Default -p 30 -dbuf\r + --nextdenovo-nextgraph_opt nextdenovo_nextgraph_opt\r + nextgraph options, see nextgraph for details. Default\r + -a 1\r +\r +Hypo:\r + --sr-cov ill_cov Approximate short read coverage for hypo Default 0\r + --hypo-proc hypo_processes\r + Number of contigs to be processed in parallel by HyPo.\r + Default 6\r + --hypo-no-lr Set this to false if you don¡t want to run hypo with\r + long reads. Default True\r + --hypo-opts hypo_opts\r + Additional options to run Hypo. Default None\r +\r +Purge_dups:\r + --purgedups-cores purgedups_cores\r + Number of threads to run purgedups. Default 8\r + --purgedups-calcuts-opts calcuts_opts\r + Adjusted values to run calcuts for purgedups. Default\r + None\r +\r +Scaffold_with_10X:\r + --tigmint-cores tigmint_cores\r + Number of threads to run the 10X scaffolding step.\r + Default 12\r + --tigmint-opts tigmint_opts\r + Adjusted values to run the scaffolding with 10X reads.\r + Default None\r +\r +HiC:\r + --hic-qc Give this option if only QC of the HiC data needs to\r + be done.\r + --no-pretext Give this option if you do not want to generate the\r + pretext file\r + --assembly-qc assembly_qc\r + Path to the assembly to be used perfom the QC of the\r + HiC reads.\r + --yahs-cores yahs_cores\r + Number of threads to run YAHS. 
Default 48\r + --yahs-mq yahs_mq Mapping quality to use when running yahs.Default 40\r + --yahs-opts yahs_opts\r + Additional options to give to YAHS.Default\r + --hic-map-opts hic_map_opts\r + Options to use with bwa mem when aligning the HiC\r + reads. Deafault -5SP -T0\r + --mq mq [mq ...] Mapping qualities to use for processing the hic\r + mappings. Default [0, 40]\r + --hic-qc-assemblylen hic_qc_assemblylen\r + Lentgh of the assembly to be used for HiC QC\r + --blast-cores blast_cores\r + Number of threads to run blast with the HiC unmapped\r + reads.Default 8\r + --hic-blastdb blastdb\r + BLAST Database to use to classify the hic unmapped\r + reads. Default\r + /scratch_isilon/groups/assembly/data/blastdbs\r + --hic-readsblast hic_readsblast\r + Number of unmapped hic reads to classify with blast.\r + Default 100\r +\r +Finalize:\r + --no-final-evals If specified, do not run evaluations on final\r + assemblies. Default True\r + --busco-lin busco_lineage\r + Path to the lineage directory to run Busco with.\r + Default None\r + --merqury-db merqury_db\r + Meryl database. Default None\r + --meryl-k meryl_k Kmer length to build the meryl database. Default None\r + --meryl-threads meryl_threads\r + Number of threads to run meryl and merqury. Default 4\r +\r +Wildcards:\r + --ont-list ONT_wildcards\r + List with basename of the ONT fastqs that will be\r + used. Default None\r + --illumina-list illumina_wildcards\r + List with basename of the illumina fastqs. Default\r + None\r + --r10X-list r10X_wildcards\r + List with basename of the raw 10X fastqs. Default None\r + --hic-list hic_wildcards\r + List with basename of the raw hic fastqs. 
Default None\r +```\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/567?version=1" ; + schema1:keywords "Bioinformatics, Genomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "CLAWS (CNAG's long-read assembly workflow in Snakemake)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/567?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Complete workflow for TANGO as reported in Lecomte et al (2024),\r +"Revealing the dynamics and mechanisms of bacterial interactions in\r +cheese production with metabolic modelling", Metabolic Eng. 83:24-38\r +https://doi.org/10.1016/j.ymben.2024.02.014\r +\r +1. Parameters for individual models are obtained by optimization\r +2. Individual dynamics and community dynamics are simulated\r +3. Figures for the manuscript are assembled from the results.""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/873?version=1" ; + schema1:isBasedOn "https://forgemia.inra.fr/tango/tango_models.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Tango: Numerical reconciliation of bacterial fermentation in cheese production" ; + schema1:sdDatePublished "2024-08-05 10:24:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/873/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 58757 ; + schema1:dateCreated "2024-05-03T07:03:12Z" ; + schema1:dateModified "2024-05-03T07:08:20Z" ; + schema1:description """Complete workflow for TANGO as reported in Lecomte et al (2024),\r +"Revealing the dynamics and mechanisms of bacterial interactions in\r +cheese production with metabolic modelling", Metabolic Eng. 
83:24-38\r +https://doi.org/10.1016/j.ymben.2024.02.014\r +\r +1. Parameters for individual models are obtained by optimization\r +2. Individual dynamics and community dynamics are simulated\r +3. Figures for the manuscript are assembled from the results.""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Tango: Numerical reconciliation of bacterial fermentation in cheese production" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/873?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 11509 . + + a schema1:Dataset ; + schema1:datePublished "2024-04-22T11:45:22.529281" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "parallel-accession-download/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:Dataset ; + schema1:datePublished "2022-02-04T14:56:46.372304" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-variation-reporting" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-variation-reporting/COVID-19-VARIATION-REPORTING" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "The workflow runs the RetroSynthesis algorithm to generate a collection of heterologous pathways in a host organism of choice, converts them to SBML files, performs analysis on the pathways to then rank the theoretical best performing ones." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/25?version=1" ; + schema1:isBasedOn "https://galaxy-synbiocad.org/u/mdulac/w/rpranker-3" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Pathway Ranker" ; + schema1:sdDatePublished "2024-08-05 10:33:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/25/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14605 ; + schema1:creator ; + schema1:dateCreated "2020-05-29T11:19:05Z" ; + schema1:dateModified "2023-01-16T13:41:47Z" ; + schema1:description "The workflow runs the RetroSynthesis algorithm to generate a collection of heterologous pathways in a host organism of choice, converts them to SBML files, performs analysis on the pathways to then rank the theoretical best performing ones." ; + schema1:keywords "pathway prediction, pathway design, metabolic engineering, Synthetic Biology, Retrosynthesis" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Pathway Ranker" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/25?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-04-29T16:50:16.825885" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/972?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/circdna" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/circdna" ; + schema1:sdDatePublished "2024-08-05 10:24:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/972/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9237 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:45Z" ; + schema1:dateModified "2024-06-11T12:54:45Z" ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/972?version=7" ; + schema1:keywords "ampliconarchitect, ampliconsuite, circle-seq, circular, DNA, eccdna, ecdna, extrachromosomal-circular-dna, Genomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/circdna" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/972?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Simple bacterial assembly and annotation pipeline." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/966?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/bacass" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bacass" ; + schema1:sdDatePublished "2024-08-05 10:24:17 +0100" ; + schema1:url "https://workflowhub.eu/workflows/966/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3698 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:42Z" ; + schema1:dateModified "2024-06-11T12:54:42Z" ; + schema1:description "Simple bacterial assembly and annotation pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/966?version=7" ; + schema1:keywords "Assembly, bacterial-genomes, denovo, denovo-assembly, genome-assembly, hybrid-assembly, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bacass" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/966?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# The Polygenic Score Catalog Calculator (`pgsc_calc`)\r +\r +[![Documentation Status](https://readthedocs.org/projects/pgsc-calc/badge/?version=latest)](https://pgsc-calc.readthedocs.io/en/latest/?badge=latest)\r +[![pgscatalog/pgsc_calc CI](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml/badge.svg)](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A521.04.0-23aa62.svg?labelColor=000000)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +\r +## Introduction\r +\r +`pgsc_calc` is a bioinformatics best-practice analysis pipeline for calculating\r +polygenic [risk] scores on samples with imputed genotypes using existing scoring\r +files from the [Polygenic Score (PGS) Catalog](https://www.pgscatalog.org/)\r +and/or user-defined PGS/PRS.\r +\r +## Pipeline summary\r +\r +

\r + \r +

\r +\r +* Downloading scoring files using the PGS Catalog API in a specified genome build (GRCh37 and GRCh38).\r +* Reading custom scoring files (and performing a liftover if genotyping data is in a different build).\r +* Automatically combines and creates scoring files for efficient parallel computation of multiple PGS\r + - Matching variants in the scoring files against variants in the target dataset (in plink bfile/pfile or VCF format)\r +* Calculates PGS for all samples (linear sum of weights and dosages)\r +* Creates a summary report to visualize score distributions and pipeline metadata (variant matching QC)\r +\r +### Features in development\r +\r +- *Genetic Ancestry*: calculate similarity of target samples to populations in a\r + reference dataset (e.g. [1000 Genomes (1000G)](http://www.nature.com/nature/journal/v526/n7571/full/nature15393.html), \r + [Human Genome Diversity Project (HGDP)](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7115999/)) using principal components analysis (PCA).\r +- *PGS Normalization*: Using reference population data and/or PCA projections to report\r + individual-level PGS predictions (e.g. percentiles, z-scores) that account for genetic ancestry.\r +\r +## Quick start\r +\r +1. Install\r +[`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation)\r +(`>=21.04.0`)\r +\r +2. Install [`Docker`](https://docs.docker.com/engine/installation/) or\r +[`Singularity (v3.8.3 minimum)`](https://www.sylabs.io/guides/3.0/user-guide/)\r +(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort)\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile test,\r + ```\r +\r +4. 
Start running your own analysis!\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile --input samplesheet.csv --pgs_id PGS001229\r + ```\r +\r +See [getting\r +started](https://pgsc-calc.readthedocs.io/en/latest/getting-started.html) for more\r +details.\r +\r +## Documentation\r +\r +[Full documentation is available on Read the Docs](https://pgsc-calc.readthedocs.io/)\r +\r +## Credits\r +\r +pgscatalog/pgsc_calc is developed as part of the PGS Catalog project, a\r +collaboration between the University of Cambridge’s Department of Public Health\r +and Primary Care (Michael Inouye, Samuel Lambert) and the European\r +Bioinformatics Institute (Helen Parkinson, Laura Harris).\r +\r +The pipeline seeks to provide a standardized workflow for PGS calculation and\r +ancestry inference implemented in nextflow derived from an existing set of\r +tools/scripts developed by Inouye lab (Rodrigo Canovas, Scott Ritchie, Jingqin\r +Wu) and PGS Catalog teams (Samuel Lambert, Laurent Gil).\r +\r +The adaptation of the codebase, nextflow implementation, and PGS Catalog features\r +are written by Benjamin Wingfield, Samuel Lambert, Laurent Gil with additional input\r +from Aoife McMahon (EBI). Development of new features, testing, and code review\r +is ongoing including Inouye lab members (Rodrigo Canovas, Scott Ritchie) and others. A\r +manuscript describing the tool is in preparation. In the meantime if you use the\r +tool we ask you to cite the repo and the paper describing the PGS Catalog\r +resource:\r +\r +- >PGS Catalog Calculator _(in development)_. PGS Catalog\r + Team. [https://github.com/PGScatalog/pgsc_calc](https://github.com/PGScatalog/pgsc_calc)\r +- >Lambert _et al._ (2021) The Polygenic Score Catalog as an open database for\r +reproducibility and systematic evaluation. Nature Genetics. 
53:420–425\r +doi:[10.1038/s41588-021-00783-5](https://doi.org/10.1038/s41588-021-00783-5).\r +\r +This pipeline is distributed under an [Apache License](LICENSE) and uses code and \r +infrastructure developed and maintained by the [nf-core](https://nf-co.re) community \r +(Ewels *et al. Nature Biotech* (2020) doi:[10.1038/s41587-020-0439-x](https://doi.org/10.1038/s41587-020-0439-x)), \r +reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +Additional references of open-source tools and data used in this pipeline are described in\r +[`CITATIONS.md`](CITATIONS.md).\r +\r +This work has received funding from EMBL-EBI core funds, the Baker Institute,\r +the University of Cambridge, Health Data Research UK (HDRUK), and the European\r +Union’s Horizon 2020 research and innovation programme under grant agreement No\r +101016775 INTERVENE.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/556?version=3" ; + schema1:isBasedOn "https://github.com/PGScatalog/pgsc_calc" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for The Polygenic Score Catalog Calculator" ; + schema1:sdDatePublished "2024-08-05 10:28:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/556/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1673 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-08-10T09:10:39Z" ; + schema1:dateModified "2023-08-10T09:10:39Z" ; + schema1:description """# The Polygenic Score Catalog Calculator (`pgsc_calc`)\r +\r +[![Documentation Status](https://readthedocs.org/projects/pgsc-calc/badge/?version=latest)](https://pgsc-calc.readthedocs.io/en/latest/?badge=latest)\r +[![pgscatalog/pgsc_calc CI](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml/badge.svg)](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A521.04.0-23aa62.svg?labelColor=000000)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +\r +## Introduction\r +\r +`pgsc_calc` is a bioinformatics best-practice analysis pipeline for calculating\r +polygenic [risk] scores on samples with imputed genotypes using existing scoring\r +files from the [Polygenic Score (PGS) Catalog](https://www.pgscatalog.org/)\r +and/or user-defined PGS/PRS.\r +\r +## Pipeline summary\r +\r +

\r + \r +

\r +\r +* Downloading scoring files using the PGS Catalog API in a specified genome build (GRCh37 and GRCh38).\r +* Reading custom scoring files (and performing a liftover if genotyping data is in a different build).\r +* Automatically combines and creates scoring files for efficient parallel computation of multiple PGS\r + - Matching variants in the scoring files against variants in the target dataset (in plink bfile/pfile or VCF format)\r +* Calculates PGS for all samples (linear sum of weights and dosages)\r +* Creates a summary report to visualize score distributions and pipeline metadata (variant matching QC)\r +\r +### Features in development\r +\r +- *Genetic Ancestry*: calculate similarity of target samples to populations in a\r + reference dataset (e.g. [1000 Genomes (1000G)](http://www.nature.com/nature/journal/v526/n7571/full/nature15393.html), \r + [Human Genome Diversity Project (HGDP)](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7115999/)) using principal components analysis (PCA).\r +- *PGS Normalization*: Using reference population data and/or PCA projections to report\r + individual-level PGS predictions (e.g. percentiles, z-scores) that account for genetic ancestry.\r +\r +## Quick start\r +\r +1. Install\r +[`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation)\r +(`>=21.04.0`)\r +\r +2. Install [`Docker`](https://docs.docker.com/engine/installation/) or\r +[`Singularity (v3.8.3 minimum)`](https://www.sylabs.io/guides/3.0/user-guide/)\r +(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort)\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile test,\r + ```\r +\r +4. 
Start running your own analysis!\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile --input samplesheet.csv --pgs_id PGS001229\r + ```\r +\r +See [getting\r +started](https://pgsc-calc.readthedocs.io/en/latest/getting-started.html) for more\r +details.\r +\r +## Documentation\r +\r +[Full documentation is available on Read the Docs](https://pgsc-calc.readthedocs.io/)\r +\r +## Credits\r +\r +pgscatalog/pgsc_calc is developed as part of the PGS Catalog project, a\r +collaboration between the University of Cambridge’s Department of Public Health\r +and Primary Care (Michael Inouye, Samuel Lambert) and the European\r +Bioinformatics Institute (Helen Parkinson, Laura Harris).\r +\r +The pipeline seeks to provide a standardized workflow for PGS calculation and\r +ancestry inference implemented in nextflow derived from an existing set of\r +tools/scripts developed by Inouye lab (Rodrigo Canovas, Scott Ritchie, Jingqin\r +Wu) and PGS Catalog teams (Samuel Lambert, Laurent Gil).\r +\r +The adaptation of the codebase, nextflow implementation, and PGS Catalog features\r +are written by Benjamin Wingfield, Samuel Lambert, Laurent Gil with additional input\r +from Aoife McMahon (EBI). Development of new features, testing, and code review\r +is ongoing including Inouye lab members (Rodrigo Canovas, Scott Ritchie) and others. A\r +manuscript describing the tool is in preparation. In the meantime if you use the\r +tool we ask you to cite the repo and the paper describing the PGS Catalog\r +resource:\r +\r +- >PGS Catalog Calculator _(in development)_. PGS Catalog\r + Team. [https://github.com/PGScatalog/pgsc_calc](https://github.com/PGScatalog/pgsc_calc)\r +- >Lambert _et al._ (2021) The Polygenic Score Catalog as an open database for\r +reproducibility and systematic evaluation. Nature Genetics. 
53:420–425\r +doi:[10.1038/s41588-021-00783-5](https://doi.org/10.1038/s41588-021-00783-5).\r +\r +This pipeline is distributed under an [Apache License](LICENSE) and uses code and \r +infrastructure developed and maintained by the [nf-core](https://nf-co.re) community \r +(Ewels *et al. Nature Biotech* (2020) doi:[10.1038/s41587-020-0439-x](https://doi.org/10.1038/s41587-020-0439-x)), \r +reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +Additional references of open-source tools and data used in this pipeline are described in\r +[`CITATIONS.md`](CITATIONS.md).\r +\r +This work has received funding from EMBL-EBI core funds, the Baker Institute,\r +the University of Cambridge, Health Data Research UK (HDRUK), and the European\r +Union’s Horizon 2020 research and innovation programme under grant agreement No\r +101016775 INTERVENE.\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/556?version=3" ; + schema1:keywords "Nextflow, Workflows, polygenic risk score, polygenic score, prediction, GWAS, genomic ancestry" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "The Polygenic Score Catalog Calculator" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/556?version=3" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# metaGOflow: A workflow for marine Genomic Observatories' data analysis\r +\r +![logo](https://raw.githubusercontent.com/hariszaf/metaGOflow-use-case/gh-pages/assets/img/metaGOflow_logo_italics.png)\r +\r +\r +## An EOSC-Life project\r +\r +The workflows developed in the framework of this project are based on `pipeline-v5` of the MGnify resource.\r +\r +> This branch is a child of the [`pipeline_5.1`](https://github.com/hariszaf/pipeline-v5/tree/pipeline_5.1) branch\r +> that contains all CWL descriptions of the MGnify pipeline version 5.1.\r +\r +## Dependencies\r +\r +To run metaGOflow you need to make sure you have the following set on your computing environmnet first:\r +\r +- python3 [v 3.8+]\r +- [Docker](https://www.docker.com) [v 19.+] or [Singularity](https://apptainer.org) [v 3.7.+]/[Apptainer](https://apptainer.org) [v 1.+]\r +- [cwltool](https://github.com/common-workflow-language/cwltool) [v 3.+]\r +- [rdflib](https://rdflib.readthedocs.io/en/stable/) [v 6.+]\r +- [rdflib-jsonld](https://pypi.org/project/rdflib-jsonld/) [v 0.6.2]\r +- [ro-crate-py](https://github.com/ResearchObject/ro-crate-py) [v 0.7.0]\r +- [pyyaml](https://pypi.org/project/PyYAML/) [v 6.0]\r +- [Node.js](https://nodejs.org/) [v 10.24.0+]\r +- Available storage ~235GB for databases\r +\r +### Storage while running\r +\r +Depending on the analysis you are about to run, disk requirements vary.\r +Indicatively, you may have a look at the metaGOflow publication for computing resources used in various cases.\r +\r +## Installation\r +\r +### Get the EOSC-Life marine GOs workflow\r +\r +```bash\r +git clone https://github.com/emo-bon/MetaGOflow\r +cd MetaGOflow\r +```\r +\r +### Download necessary databases (~235GB)\r +\r +You can download databases for the EOSC-Life GOs workflow by running the\r +`download_dbs.sh` script under the `Installation` folder.\r 
+\r +```bash\r +bash Installation/download_dbs.sh -f [Output Directory e.g. ref-dbs] \r +```\r +If you have one or more already in your system, then create a symbolic link pointing\r +at the `ref-dbs` folder or at one of its subfolders/files.\r +\r +The final structure of the DB directory should be like the following:\r +\r +````bash\r +user@server:~/MetaGOflow: ls ref-dbs/\r +db_kofam/ diamond/ eggnog/ GO-slim/ interproscan-5.57-90.0/ kegg_pathways/ kofam_ko_desc.tsv Rfam/ silva_lsu/ silva_ssu/\r +````\r +\r +## How to run\r +\r +### Ensure that `Node.js` is installed on your system before running metaGOflow\r +\r +If you have root access on your system, you can run the commands below to install it:\r +\r +##### DEBIAN/UBUNTU\r +```bash\r +sudo apt-get update -y\r +sudo apt-get install -y nodejs\r +```\r +\r +##### RH/CentOS\r +```bash\r +sudo yum install rh-nodejs (e.g. rh-nodejs10)\r +```\r +\r +### Set up the environment\r +\r +#### Run once - Setup environment\r +\r +- ```bash\r + conda create -n EOSC-CWL python=3.8\r + ```\r +\r +- ```bash\r + conda activate EOSC-CWL\r + ```\r +\r +- ```bash\r + pip install cwlref-runner cwltool[all] rdflib-jsonld rocrate pyyaml\r +\r + ```\r +\r +#### Run every time\r +\r +```bash\r +conda activate EOSC-CWL\r +``` \r +\r +### Run the workflow\r +\r +- Edit the `config.yml` file to set the parameter values of your choice. For selecting all the steps, then set to `true` the variables in lines [2-6].\r +\r +#### Using Singularity\r +\r +##### Standalone\r +- run:\r + ```bash\r + ./run_wf.sh -s -n osd-short -d short-test-case -f test_input/wgs-paired-SRR1620013_1.fastq.gz -r test_input/wgs-paired-SRR1620013_2.fastq.gz\r + ``\r +\r +##### Using a cluster with a queueing system (e.g. SLURM)\r +\r +- Create a job file (e.g., SBATCH file)\r +\r +- Enable Singularity, e.g. 
module load Singularity & all other dependencies \r +\r +- Add the run line to the job file\r +\r +\r +#### Using Docker\r +\r +##### Standalone\r +- run:\r + ``` bash\r + ./run_wf.sh -n osd-short -d short-test-case -f test_input/wgs-paired-SRR1620013_1.fastq.gz -r test_input/wgs-paired-SRR1620013_2.fastq.gz\r + ```\r + HINT: If you are using Docker, you may need to run the above command without the `-s' flag.\r +\r +## Testing samples\r +The samples are available in the `test_input` folder.\r +\r +We provide metaGOflow with partial samples from the Human Metagenome Project ([SRR1620013](https://www.ebi.ac.uk/ena/browser/view/SRR1620013) and [SRR1620014](https://www.ebi.ac.uk/ena/browser/view/SRR1620014))\r +They are partial as only a small part of their sequences have been kept, in terms for the pipeline to test in a fast way. \r +\r +\r +## Hints and tips\r +\r +1. In case you are using Docker, it is strongly recommended to **avoid** installing it through `snap`.\r +\r +2. `RuntimeError`: slurm currently does not support shared caching, because it does not support cleaning up a worker\r + after the last job finishes.\r + Set the `--disableCaching` flag if you want to use this batch system.\r +\r +3. In case you are having errors like:\r +\r +```\r +cwltool.errors.WorkflowException: Singularity is not available for this tool\r +```\r +\r +You may run the following command:\r +\r +```\r +singularity pull --force --name debian:stable-slim.sif docker://debian:stable-sli\r +```\r +\r +## Contribution\r +\r +To make contribution to the project a bit easier, all the MGnify `conditionals` and `subworkflows` under\r +the `workflows/` directory that are not used in the metaGOflow framework, have been removed. 
\r +However, all the MGnify `tools/` and `utils/` are available in this repo, even if they are not invoked in the current\r +version of metaGOflow.\r +This way, we hope we encourage people to implement their own `conditionals` and/or `subworkflows` by exploiting the\r +currently supported `tools` and `utils` as well as by developing new `tools` and/or `utils`.\r +\r +\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/384?version=2" ; + schema1:isBasedOn "https://github.com/emo-bon/MetaGOflow.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for A workflow for marine Genomic Observatories data analysis" ; + schema1:sdDatePublished "2024-08-05 10:30:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/384/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7006 ; + schema1:creator , + ; + schema1:dateCreated "2023-05-16T20:38:36Z" ; + schema1:dateModified "2023-05-16T20:38:36Z" ; + schema1:description """# metaGOflow: A workflow for marine Genomic Observatories' data analysis\r +\r +![logo](https://raw.githubusercontent.com/hariszaf/metaGOflow-use-case/gh-pages/assets/img/metaGOflow_logo_italics.png)\r +\r +\r +## An EOSC-Life project\r +\r +The workflows developed in the framework of this project are based on `pipeline-v5` of the MGnify resource.\r +\r +> This branch is a child of the [`pipeline_5.1`](https://github.com/hariszaf/pipeline-v5/tree/pipeline_5.1) branch\r +> that contains all CWL descriptions of the MGnify pipeline version 5.1.\r +\r +## Dependencies\r +\r +To run metaGOflow you need to make sure you have the following set on your computing environmnet first:\r +\r +- python3 [v 3.8+]\r +- [Docker](https://www.docker.com) [v 19.+] or [Singularity](https://apptainer.org) [v 3.7.+]/[Apptainer](https://apptainer.org) [v 1.+]\r 
+- [cwltool](https://github.com/common-workflow-language/cwltool) [v 3.+]\r +- [rdflib](https://rdflib.readthedocs.io/en/stable/) [v 6.+]\r +- [rdflib-jsonld](https://pypi.org/project/rdflib-jsonld/) [v 0.6.2]\r +- [ro-crate-py](https://github.com/ResearchObject/ro-crate-py) [v 0.7.0]\r +- [pyyaml](https://pypi.org/project/PyYAML/) [v 6.0]\r +- [Node.js](https://nodejs.org/) [v 10.24.0+]\r +- Available storage ~235GB for databases\r +\r +### Storage while running\r +\r +Depending on the analysis you are about to run, disk requirements vary.\r +Indicatively, you may have a look at the metaGOflow publication for computing resources used in various cases.\r +\r +## Installation\r +\r +### Get the EOSC-Life marine GOs workflow\r +\r +```bash\r +git clone https://github.com/emo-bon/MetaGOflow\r +cd MetaGOflow\r +```\r +\r +### Download necessary databases (~235GB)\r +\r +You can download databases for the EOSC-Life GOs workflow by running the\r +`download_dbs.sh` script under the `Installation` folder.\r +\r +```bash\r +bash Installation/download_dbs.sh -f [Output Directory e.g. ref-dbs] \r +```\r +If you have one or more already in your system, then create a symbolic link pointing\r +at the `ref-dbs` folder or at one of its subfolders/files.\r +\r +The final structure of the DB directory should be like the following:\r +\r +````bash\r +user@server:~/MetaGOflow: ls ref-dbs/\r +db_kofam/ diamond/ eggnog/ GO-slim/ interproscan-5.57-90.0/ kegg_pathways/ kofam_ko_desc.tsv Rfam/ silva_lsu/ silva_ssu/\r +````\r +\r +## How to run\r +\r +### Ensure that `Node.js` is installed on your system before running metaGOflow\r +\r +If you have root access on your system, you can run the commands below to install it:\r +\r +##### DEBIAN/UBUNTU\r +```bash\r +sudo apt-get update -y\r +sudo apt-get install -y nodejs\r +```\r +\r +##### RH/CentOS\r +```bash\r +sudo yum install rh-nodejs (e.g. 
rh-nodejs10)\r +```\r +\r +### Set up the environment\r +\r +#### Run once - Setup environment\r +\r +- ```bash\r + conda create -n EOSC-CWL python=3.8\r + ```\r +\r +- ```bash\r + conda activate EOSC-CWL\r + ```\r +\r +- ```bash\r + pip install cwlref-runner cwltool[all] rdflib-jsonld rocrate pyyaml\r +\r + ```\r +\r +#### Run every time\r +\r +```bash\r +conda activate EOSC-CWL\r +``` \r +\r +### Run the workflow\r +\r +- Edit the `config.yml` file to set the parameter values of your choice. For selecting all the steps, then set to `true` the variables in lines [2-6].\r +\r +#### Using Singularity\r +\r +##### Standalone\r +- run:\r + ```bash\r + ./run_wf.sh -s -n osd-short -d short-test-case -f test_input/wgs-paired-SRR1620013_1.fastq.gz -r test_input/wgs-paired-SRR1620013_2.fastq.gz\r + ``\r +\r +##### Using a cluster with a queueing system (e.g. SLURM)\r +\r +- Create a job file (e.g., SBATCH file)\r +\r +- Enable Singularity, e.g. module load Singularity & all other dependencies \r +\r +- Add the run line to the job file\r +\r +\r +#### Using Docker\r +\r +##### Standalone\r +- run:\r + ``` bash\r + ./run_wf.sh -n osd-short -d short-test-case -f test_input/wgs-paired-SRR1620013_1.fastq.gz -r test_input/wgs-paired-SRR1620013_2.fastq.gz\r + ```\r + HINT: If you are using Docker, you may need to run the above command without the `-s' flag.\r +\r +## Testing samples\r +The samples are available in the `test_input` folder.\r +\r +We provide metaGOflow with partial samples from the Human Metagenome Project ([SRR1620013](https://www.ebi.ac.uk/ena/browser/view/SRR1620013) and [SRR1620014](https://www.ebi.ac.uk/ena/browser/view/SRR1620014))\r +They are partial as only a small part of their sequences have been kept, in terms for the pipeline to test in a fast way. \r +\r +\r +## Hints and tips\r +\r +1. In case you are using Docker, it is strongly recommended to **avoid** installing it through `snap`.\r +\r +2. 
`RuntimeError`: slurm currently does not support shared caching, because it does not support cleaning up a worker\r + after the last job finishes.\r + Set the `--disableCaching` flag if you want to use this batch system.\r +\r +3. In case you are having errors like:\r +\r +```\r +cwltool.errors.WorkflowException: Singularity is not available for this tool\r +```\r +\r +You may run the following command:\r +\r +```\r +singularity pull --force --name debian:stable-slim.sif docker://debian:stable-sli\r +```\r +\r +## Contribution\r +\r +To make contribution to the project a bit easier, all the MGnify `conditionals` and `subworkflows` under\r +the `workflows/` directory that are not used in the metaGOflow framework, have been removed. \r +However, all the MGnify `tools/` and `utils/` are available in this repo, even if they are not invoked in the current\r +version of metaGOflow.\r +This way, we hope we encourage people to implement their own `conditionals` and/or `subworkflows` by exploiting the\r +currently supported `tools` and `utils` as well as by developing new `tools` and/or `utils`.\r +\r +\r +\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/384?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "A workflow for marine Genomic Observatories data analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/384?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# RNA-Seq pipeline\r +Here we provide the tools to perform paired end or single read RNA-Seq analysis including raw data quality control, differential expression (DE) analysis and functional annotation. As input files you may use either zipped fastq-files (.fastq.gz) or mapped read data (.bam files). 
In case of paired end reads, corresponding fastq files should be named using *.R1.fastq.gz* and *.R2.fastq.gz* suffixes.\r +\r +\r +## Pipeline Workflow\r +All analysis steps are illustrated in the pipeline [flowchart](https://www.draw.io/?lightbox=1&highlight=0000ff&edit=_blank&layers=1&nav=1&title=NGSpipe2go_RNAseq_pipeline.html#R7R1Zk5s489e4Knmwi%2Ft4nDOz2WSyO5OtbPYlJZCw%2BYLBATzXr%2F90AOYQNtgY8Ex2UjuDACG1Wn13ayJfLJ8%2BhGC1%2BBxA5E0kAT5N5MuJJImKJE3IPwE%2BsxZd11nDPHRh8tCm4d59QUmjkLSuXYiiwoNxEHixuyo22oHvIzsutIEwDB6LjzmBV%2FzqCsxRpeHeBl619ZsL40XSKmrm5sYNcueL5NOGlMzPAvbPeRis%2FeR7fuAjdmcJ0m6SOUYLAIPHXJN8NZEvwiCI2V%2FLpwvkEbCmEGPvXdfczYYcIj9u8sLiRdX%2F%2FXT3%2FOR9Xj7NbwX7r3%2FtaTa4%2BDmFBYIYNMllEMaLYB74wLvatJ7T%2BSLSrYCv%2FrdertLn52CFWzZvfQoC3HApksdQHD8nKw%2FWcYCbFvHSS%2B6iJzf%2Bl3Q4U5Or77k7l0%2FJt%2BjFc3rhx%2BFz7iVy%2BT1%2Fb%2FMavUrfi%2BIw%2BJmtM16H8yooE%2BhGwTq0E7g82Ld%2F339bf1xfvvz39Y%2Bv19%2Bjh%2F%2BmcoIgMQjnKN7yoJIAmkA394lkqT6gYInwGPEDIfJA7D4UERMk%2BD3PntssNP4jWWv%2But8IH53FrRF7D4s76%2Fbm%2FvrHy8003XgPwFsnn5pImodncA7dB%2FJFz5379Ib2a01Q9Dyk6J9d4r%2FmyW%2F6mhWWW%2FDAaF9pawnPcqvvAQt559lWugi8IKQPydf0P%2B6iOYEfJ%2FgkamTcIFpQpBQLKEquHNfz8p1K5CfrNL1D9658Pg8BdDEilJrtYOnaCQbNPRBFKTal21qgn43x4gUEcFNT2IZZDyiM0dNWTEjuGmpCaxLaOjU1dv2Yo1RGQoAXOSKlpIt8CPZwkVl80%2BhzpZGfY6BPB9giGUIBWzIWm8MWRTCr2KJ1gS0v3%2BLLr8K%2F%2F328vrsKlyL8%2BuH5j6l8MIt5SwylIT%2BRlaH4CXeNVfG1LzLy4RmRNPGlTfYv2c2k8dr10uFU8aBAVHpACnkopBDlx9sb9zE4%2B3O%2B%2BvLjW%2FDxXBGnEodJ9I8kXJAcdx0S%2FtgD3LcNk8OcCdMrLEDKjsmNaURBeIYfEJXVE4dXX0iTs%2FMlcH0CP3eFPBfzNtx4vp2Ns68Wm5mYsO9AuplO0ku0An5FHCk3ALLublz6TuiDCP2apbCYYQEkeHjOfypdqbQFqMCWFc1GpiVA3QKCaiqm4uiOo5qaIAlT2wRQRbbqiFDSZR0auqBrjgUdVRUECymKAnUgI6vwkUWInMJnFnFMVOczgkjS9dyNF2trhmUQfOEuLSfACIz%2FvP1wT4YuzQN8YXmBRdANRDHCq3idTirCf9%2FdnuF54j92T5iBrfa5FCcAB08qQOdiE2%2B5apCsLEKOBvWmlCK5MbDw27m9JAkQOa7vUh1CEt5Z5M771ptr1PP2AgAjSgujFbIpNZQEOlFiNQng2iMod%2FrzQ1GEGYwLCLkGPswmNyXTdh0sP1AmBDDNJ%2Fttjwl3gINPyF7HBN45HKwZSb3%2B1YXaYooFtcWoai2yztFaROlYOq6yW3jButuK%2FInp6gpreWTyTBZJRUAjbc
hshwoXXFu5eGMYihWQpcDJQyxtayyKJN%2F7K3AptiUfm0pqYcH0YgeB40RYRCovQjboA2wP5pCaRvr394LWsUvTKOgZG7WjB3VSa6o5DKU4bBt1rXWpSvGs4IkQPNefM5JnBSFE4RQ34wsmIyX7ghJEAW8CNbuTN%2BKT2xNJRgr5yZ5YAQizvqVt%2FKMiw%2FDEFb4waWNyG2LpK5rFTwVLWVmGhKYGRV2Fmg5EYAiKaIgSFiNFxZIMgGQw1R1bMETTAVARJB0CSVJkU9ZsQVRtLGealiqogoNFxkFkyNp5MiCVb2%2BTGGtkwcoi8PlnKn5V0akeV8qsFS0LQLQxduNB2RRZch0KlacylMo9JdXjVWwF8LnSWBEIY1hucdMGvGD405dXVBwB3nPk4lXB%2B1OYMxGMeq6EFPoE6ckSCkwmwM9pYEmcO74VrdgssFQ%2FY1D488tD9O0rnsq7P79Mv319P8vB3eWsRXWU1RausFt9jDRWodAGLh%2Fwzl%2FhSfhYEosYNcM7zs%2BoZhkPxzqhhv2%2FkYGRRt6WIe1s29fItUTecm3gnSUejJhICOepP8NDDnkrwE85HjUGOtT%2Bl5cVHhdujO4xESA9PoZgtY2DtxCRi4Z9WZQ48p5YFfiMo3mB1N%2BSWHNJLF2u0Yhi%2F0Qo%2FGL9j4RTYCpPXGhvR9QqUvZaQUu1DV3XHMlxREUxVUeVbRGopmTqjgMdxZqKGJUtyTItLFxZQFZMXVRUJBkOhIajiwKybSQpUB1G0KqZJQMQn7n9FrJaCVkRlom8zGSyN6Nzy%2F1RYaS1zNGaQ2Pe1eHgQwSgjbc2oSekZzYHISd3vlv77q81xYa1NcXUlawNIReU2cDg%2FbYpH0tMmTP5rysozHPiJJGqHTLbApr0ObkQrTwszcSdLnPWJaGXR5pUO%2Fltu5wi5RhoIticoMCXBSPuEvjEPYQBfJmTB1oIgZLxWwhsLgSmRuSTEQJXXM69xMPHGjJlzsJqI%2Fex9indQeSelruHkT6eJrvqjLJnn%2Fk9dkl3f22cYzx3SUUM4VKdVbltUXlqr6ky8kBuGnxR9ZwKuW3l08x3NHsAYdTArWxZqmBAKIuyqEhYaIVAFIGlSw6CyNEgmiITmhDaClINBe9QoBu6ZGL6AQSkmo4sQQgVU5Hso0uqmYdvI6funC0DWd1jjWXXRONogybHQYoSR12FwYMLCYLnfYY8ZN%2FKYukIKHRv8YsFIBVmzvkue43MleubFN4Fq3jWXjrLgXMnl5a2c%2Bnx8eMs6WEXPzb11mR5f3bMi%2F3p1W2ZfnC0fkuluIrFDo7nt5R%2B%2By0369W%2F%2FMPHBimNhE6wQUv3T9oFk%2BiStzZIUe3IKMYviErJ6spmXOnoLAzBc%2B6xFXkg2jJgVeMOuHZcpefTcW2Qm42gW1TnxU6MUZ60A99x5%2BuQ5mzkg454tq1RiJCgkVSGBflZ6K1mPiFltZLZdCOdQfRAMvquGUBIO3lsxi5rBTL%2BQ7XiWI0dkRt%2BGfKnXydB9wGSVYjA0sKiay08OE%2FsYVftSQbtEav4xukg8OpBWbpbC8aq2NkCywYT%2BCsNNOqTUKTHIPxJAghp3z6YU%2Bl7m9zNazukIUSM5UUtvvuuuwFiQIC1RxiYm4wggWXkrcOCF4BELrjEuOoH5HEfISIo4cXb%2FRGCv0noY5LTlXbrBTbRvXIL2Ghe7ydpiCfvLkfNyihF9iJBeGKCwQNImBFWusji%2F0SI4OQGMFg%2BjV1%2FHu2MsihpX6NWplRZ52gBHDVA1EoSWmfu7DSgcUDVSWkLxVenOnGT1MXqQgyapD6MMtVVLlnjZDLtKDpYW41HF4wC1kmatlXjKT8vG%2BohGs8uA3mmKVyDKP77gkoBTWzTv9aYCMfPkyTijhJ%2F6isMwSOFKU0iIPPAktPPZHN0aX3Fj%2BH%2FO%2F
jGL3vGmjN5a6exjiFRrbGORyZLPCfx33%2BiV5eyVsNpaJZ20rEw4eT1HcSE1DJuyZzUeo1DUTWhHssPMOnxU2sPJn49kbrjkSylIclSjmM2akuylFIOvpxYmepIVuV5xZwcQLKaodWOPArkWcHjb5Saqp3XbekEpRRVOj6K7CgO9BtFEqpjjhNFxD5QZHvdhzeCIsd2U6hiaWlTZ1LH3oUKI9J6YESpQjL%2B0iGd1nlpTF06V8MOK%2BazPd7rJDZ8ppNnF6xTaZdS3sdy68OQFE3rh6QoRrckhWsw0oekKCNztXeHmnLncs6eQYmZWeX%2B69ldY4vLHUrcKqsVDTs%2Fom0lc3JFGKjtjStKDqzHNK4ct4SDlNZrGMiYwodt1ciPF2t6T1ZL2EQGlKgHiTXIQTkJNShapwgUy6HkSxdCSlxKC7JpuUvAIE9qygnSipI04J3t%2FITe4mGp5%2FgfBt0FIRfqJfGIqefi5hr%2FI4%2BH8UXg466BS1cWYWx9RFE8qVZA7AAHZIN8vIAFSgUJuH4dWWmNBA1XvL6YwG5fLy2cknh8hT2CT76s49Wa5sMEHiQoc1ahYfjmL3ucoSeZuzLIpuEynyXL6dmMu0n05%2BEpF3mETUt4XoOl6xFcu0HeAyK9TvpwX%2BpKsSirrKrN8HwPYtcQz%2FWR4blQQfUQRWsvHmmcVT5zLdo2nGN8HLqOg8Is9Bs9rTCsIlYErAq1Xob04QsZiB%2B69mKJ%2FM33x7vVj7CtVaEqw3C39dGS7FN9e8Tb2gvmI93TeGTTlGXZgedhYZHGCDH2hYC9mGyCcl49MityjzwqejGMp2vBVeX%2FxPnl8i5a3vijO22gnaq%2Bs%2F4vvko%2BScTpxICCQpdm0dxnCetdK%2FBpqPlOBd4czJS4ddyjIW1VyTxERIEaKXXbN7nw8opNa3a3hPleKzmF0BINZDuSbkuOYQFoObZkAmSqOhJMQZyKliWZJoIahrJsAMG0LUMVBc2EDrB1UVQUAxi6WvzIMXIKabQw%2Fp0t13W0cP3nH6wK7Q%2FWjDffDxZleV0HAQa90t22IdxtY9d5CzSj48fDmC2Qt0JhNLvbtlJANGwk6tAwdFs2bVGWbVWWoQmRIUBoy1PbkgQJiQCJoi7aUHdMVdaBaMqGZTqKSRZMEnTdGuFK7QBFumT8x8Yafv8XHiim9BmFIXQoJIDMlwKmksssh0FZkO%2FnACsMhFBTDBVYGQ%2FM1mMiz9jrKKZdbaq8bXI5cwHHeHmjBSZ%2BWc2zNIR6z6Di16fUa5wqtPygZFk6nEfyg78OL6%2BQl5gSo2L%2FIlPy4Y28dFS%2F%2FJHiwDixpsX0wTRwa5cTrcOwU9cnhoOnFRUz%2FbixNwQSarBkhIZYhylNSLO%2FWWAqDUf1XCsECSIf3V1Smkxrz4nSV1jqccNQZXFYzwmfEh2evzwcJSq5%2F9uSpQMokToMJWoaIdQhJao0RGuLhLFLWLaLn1eIr0W19N5SKy2rGsbK02LaQYSZ5xXqh0SVJ9WeRqmvg0aZI6RRqRNx%2FKFkYzhqTGlaHF4Z7KwxbnSPNuQin1p0T%2BNFlo8TeNY6XiylF1n61o7cidLz6mG5E425XcIHfjC34aFczEEgXof9srD2nEs7Sc5VjkuS1RFyrh3VgcYRVju6XNbUXrOTuKXVecYTulhpsMDygljHyEnlhwjKNt7btDar5c6%2FufM%2Bkkjx0O1k6O2Jiv4qiIpqGrOqd3VwsiKfAlnpVNwxBhN3DqUIH778uMrCbRpv902ETtHin6u6PUe09E4fokVhDu1pgfE6aMHA5rvzG1G8mL%2B4yvJGhnfXcPnpCU5PpvrgcKa646Rx7jTVZTFm%2FZnq4Hp1ByAI9%2FIW4Jdp%2FfckOpEKG33QFvzdkAy6PVkxT5KslC1uysD5FPzSPIcnb55caZ4CheqSAqlNA7lGWppH
3WGrGao0DxGAzgP4jPUrqTHJC1NTTaqTCeSvRJwiH0T%2BnMRb9EH6yCfJYRh4MFJr8qfmz6I%2FXfKnjtFsIw9anOfkHA6Nk6CNnpSzZkzuZCowjcLh0DideJwOB0XbzsSGcjhcXv24vLpHv5pzsMstSUUbS0EvojvCv9tzLuV1JEIr8rCca6tFfDTB99W8ojgE9s%2BRxt4z4zqZcjLIxOD24EaksGOUqMdvMWWOawk%2FWp4RP1bt1YcINMhGasHUj37MRqnas14%2BHaPxMRumVOior2M29OKxGeM4ZsMY9TEbd4gKHzbRlT8gP1jy3ZoNSWT%2FeVW0tgw5CB6iJ5o0Oi9Mopzlk0VwCLSWv0vpP%2F3GPHb6Ti%2BpJIxsStAnuSL5OvP0rPtdh6vtdeDXqEvO62K15LyYRjUWsjuEDvgUfwcPXnI%2BoyFvt%2BQ8P1N5e13L3u3aRVNPJkb0LVMMlOFscoIYucsmjivD2ayvPURPSucQfit4IiefUzgS%2Bm4FIdaQprg54wH156ZbWBeZUyxMVSoZKeQneyI7EJ3clrbxjpzhIUmHpWi5XRt7a%2Be%2FM5AU6yPhX%2B8wiuNVY7UmWF5nMak4zdX8iQWF8P3mGksO1FaT1XxhSViAohrvyACWt5WL%2B4hitIryJ8M0Odv7WKePt%2Bz%2FjQxs0upo81M4qdxMi4emibMm5xyF1OLUuWjFNwEMWgN0P89M5%2By6Sx4simqVCQ96dnkzHpyNexBL5x9%2Bkyoj4BGCGIzT0klPiJlsSv%2B9e3FXKwTfbxtZLzqtD5YshQJzwweiu1LHBjPDroDLuC9KRKGmNQ16HH8ElisPibM7cUZhPJu%2FjHuc0h7jPGnTdslgkF7urAZxNHtB6kgasdOGlFcuFBEZESmzwHJSKvD4plwzvIrM%2FRaAO%2FnC7HqfoRSpH3l3vEznhxQdmsxAqzHdJUWemgYsJAAhHJ3Zqikvx3v7JwweWSlUViyJjProWZJJPSkyg9aRC2Ye%2FqcbuaArVYLRZ%2BTCjfDRWdwasfewuLNub%2B6vf7zcTOsDFyqoRegsl4%2BUDUwKj118QnMiPm5Ql3VXYRdWLbtI6sbnFolb451wiVJTpdR8bWl5HlYUSWYnnEQuchK5ghdKGq%2FJcwMcwki4SCBXkIAeRj%2FZlHNt4EbZveW6AJ1Rio8wqluK50Epe7Y7A90hXmOtLEPm7K0pNDeNFK48BQNrFESBsyLyK1fxbrM4g8uKewqIbYxxR0CuzNiRxy6Fh13qkbCrT4%2FmVvR%2Buw5NLlhO0BOVGMOvmVNly3Z8a16nrcB4s26V0Q5sQH9PByym7NvhyS88DtPFgT5cSjbkOScuM%2BdfByVb2Cij2N6AhauMnKJYPW6KL%2F9oR8LO%2BuM6Mkv5HzEGp50PbN8Y0el6riOaaJggW1aX%2BdUql2UhVpOaLeLRtMuqmf2S5fxSsxNiQZOQJMETGYMXl0HDMN7QkvEsy%2BnBvgXfiHn4knEtyydT36K7HL1qgGHtyuZNzd9N7%2BNX948r6%2F7%2BKXgOnj9%2B%2F%2FLPtKmlOT2OaOjM83Isr1zIJN%2F5vKL1k3meHDpEbRyfyDFJjat4Batnuh7RT%2FIJesKSgNEvF2uWNKYSyBFt4ks3ImoN8FGwJtfJrKjFLbONS1zreD2ynZxxvJyQrgvDFvzipipzzC0Dxy23oo34IgsnLtPLtod17%2BmJ42Js05zm1H8yksR1aWTVWTrEhhL3lPtHhqHz1o3iQbspi%2BOytOrx72ViVpal9y5C1UxcS%2FW0V4iZJTo1MJniQ38oKrVt1I1cBuvQez4nKdco3s30N%2FjCrmKWlS1fTs2WkG8uJSi6XDoBO9uoBd2oKibIegc%2BGf5yH17A5IhFuqS21UFbVQhstJn62SQ1qkm5HKB5nJRmXSyqQJrZjwr0ee3F7t
8XzYOA1sslCAkKSQJ%2BLYvX26Hf4CHMQm8184n7rFbHmW70HIgekFdQc1gx4yUZ7y97W8hPPbqcnFJTjvjhHXnWp1KzNauv1nlZDT0I6UjbGcULbsQSvcxBnSL7eebfTA11E0m%2Bpv9xaVbePq6VaViRT%2BWtf7jTK4381JkK5yGALl7%2BUjNGM5IOQingnCSHpMR0ASCllQKPIXaAT6YszLRiLQRFUbKmPAtUjZnICRkSDXV2LEYo1aeC1CFSbQwLfWvtlVu8zJb%2Fd9ksTA9i9AKW1sfCGWmJQY9Roi2Y6VWi7DctyZESJO6apUNEZFWjuOhh2NbDPxf3hM6mRRoEjBiPzANRrMCz79B%2BrYEfk2PldhfF2dbbp%2FQkOhom5BEhgfG%2FCEXRshg0tHVU9MQomjJF14BUxcJvFwa3o4sPrAQkc%2BrmSkQ2evme5lOkggOCPqIDwbt%2BTQ7iYyeUW6Tjz5f3OZRhqZ4LBGK81vsvyraqYO8ur95POGXEk3MB6cgoMoMo3n8EV7tLlmf5r3t%2B4gxCWoEDeMVPuL7trSHbJ5TRp%2BkKdC%2B2%2FR5urBKAHYzkcBdQIi4M6wAySwGhilgNC5BEns9uj7hGfImlsDgvmuJpLT4HkEidV%2F8H). Specify the desired analysis details for your data in the *essential.vars.groovy* file (see below) and run the pipeline *rnaseq.pipeline.groovy* as described [here](https://gitlab.rlp.net/imbforge/NGSpipe2go/-/blob/master/README.md). A markdown file *DEreport.Rmd* will be generated in the output reports folder after running the pipeline. 
Subsequently, the *DEreport.Rmd* file can be converted to a final html report using the *knitr* R-package.\r +\r +\r +### The pipelines includes\r +- quality control of rawdata with FastQC and MultiQC\r +- Read mapping to the reference genome using STAR\r +- generation of bigWig tracks for visualisation of alignment with deeptools\r +- Characterization of insert size for paired-end libraries\r +- Read quantification with featureCounts (Subread) \r +- Library complexity assessment with dupRadar\r +- RNA class representation\r +- Check for strand specificity\r +- Visualization of gene body coverage\r +- Illustration of sample relatedness with MDS plots and heatmaps\r +- Differential Expression Analysis for depicted group comparisons with DESeq2\r +- Enrichment analysis for DE results with clusterProfiler and ReactomePA\r +- Additional DE analysis including multimapped reads\r +\r +\r +### Pipeline parameter settings\r +- targets.txt: tab-separated txt-file giving information about the analysed samples. The following columns are required \r + - sample: sample identifier for use in plots and and tables\r + - file: read counts file name (a unique sub-string of the file name is sufficient, this sub-string is grebbed against the count file names produced by the pipeline) \r + - group: variable for sample grouping (e.g. by condition)\r + - replicate: replicate number of samples belonging to the same group\r +- contrasts.txt: indicate intended group comparisions for differential expression analysis, e.g. *KOvsWT=(KO-WT)* if targets.txt contains the groups *KO* and *WT*. Give 1 contrast per line. 
\r +- essential.vars.groovy: essential parameter describing the experiment including: \r + - ESSENTIAL_PROJECT: your project folder name\r + - ESSENTIAL_STAR_REF: path to STAR indexed reference genome\r + - ESSENTIAL_GENESGTF: genome annotation file in gtf-format\r + - ESSENTIAL_PAIRED: either paired end ("yes") or single read ("no") design\r + - ESSENTIAL_STRANDED: strandness of library (no|yes|reverse)\r + - ESSENTIAL_ORG: UCSC organism name\r + - ESSENTIAL_READLENGTH: read length of library\r + - ESSENTIAL_THREADS: number of threads for parallel tasks\r +- additional (more specialized) parameter can be given in the var.groovy-files of the individual pipeline modules \r +\r +\r +## Programs required\r +- Bedtools\r +- DEseq2\r +- deeptools\r +- dupRadar (provided by another project from imbforge)\r +- FastQC\r +- MultiQC\r +- Picard\r +- R packages DESeq2, clusterProfiler, ReactomePA\r +- RSeQC\r +- Samtools\r +- STAR\r +- Subread\r +- UCSC utilities\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/58?version=1" ; + schema1:isBasedOn "https://gitlab.rlp.net/imbforge/NGSpipe2go/-/tree/master/pipelines/RNAseq" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for RNA-Seq" ; + schema1:sdDatePublished "2024-08-05 10:32:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/58/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2519 ; + schema1:creator ; + schema1:dateCreated "2020-10-07T07:38:07Z" ; + schema1:dateModified "2023-01-16T13:44:52Z" ; + schema1:description """# RNA-Seq pipeline\r +Here we provide the tools to perform paired end or single read RNA-Seq analysis including raw data quality control, differential expression (DE) analysis and functional annotation. 
As input files you may use either zipped fastq-files (.fastq.gz) or mapped read data (.bam files). In case of paired end reads, corresponding fastq files should be named using *.R1.fastq.gz* and *.R2.fastq.gz* suffixes.\r +\r +\r +## Pipeline Workflow\r +All analysis steps are illustrated in the pipeline [flowchart](https://www.draw.io/?lightbox=1&highlight=0000ff&edit=_blank&layers=1&nav=1&title=NGSpipe2go_RNAseq_pipeline.html#R7R1Zk5s489e4Knmwi%2Ft4nDOz2WSyO5OtbPYlJZCw%2BYLBATzXr%2F90AOYQNtgY8Ex2UjuDACG1Wn13ayJfLJ8%2BhGC1%2BBxA5E0kAT5N5MuJJImKJE3IPwE%2BsxZd11nDPHRh8tCm4d59QUmjkLSuXYiiwoNxEHixuyo22oHvIzsutIEwDB6LjzmBV%2FzqCsxRpeHeBl619ZsL40XSKmrm5sYNcueL5NOGlMzPAvbPeRis%2FeR7fuAjdmcJ0m6SOUYLAIPHXJN8NZEvwiCI2V%2FLpwvkEbCmEGPvXdfczYYcIj9u8sLiRdX%2F%2FXT3%2FOR9Xj7NbwX7r3%2FtaTa4%2BDmFBYIYNMllEMaLYB74wLvatJ7T%2BSLSrYCv%2FrdertLn52CFWzZvfQoC3HApksdQHD8nKw%2FWcYCbFvHSS%2B6iJzf%2Bl3Q4U5Or77k7l0%2FJt%2BjFc3rhx%2BFz7iVy%2BT1%2Fb%2FMavUrfi%2BIw%2BJmtM16H8yooE%2BhGwTq0E7g82Ld%2F339bf1xfvvz39Y%2Bv19%2Bjh%2F%2BmcoIgMQjnKN7yoJIAmkA394lkqT6gYInwGPEDIfJA7D4UERMk%2BD3PntssNP4jWWv%2But8IH53FrRF7D4s76%2Fbm%2FvrHy8003XgPwFsnn5pImodncA7dB%2FJFz5379Ib2a01Q9Dyk6J9d4r%2FmyW%2F6mhWWW%2FDAaF9pawnPcqvvAQt559lWugi8IKQPydf0P%2B6iOYEfJ%2FgkamTcIFpQpBQLKEquHNfz8p1K5CfrNL1D9658Pg8BdDEilJrtYOnaCQbNPRBFKTal21qgn43x4gUEcFNT2IZZDyiM0dNWTEjuGmpCaxLaOjU1dv2Yo1RGQoAXOSKlpIt8CPZwkVl80%2BhzpZGfY6BPB9giGUIBWzIWm8MWRTCr2KJ1gS0v3%2BLLr8K%2F%2F328vrsKlyL8%2BuH5j6l8MIt5SwylIT%2BRlaH4CXeNVfG1LzLy4RmRNPGlTfYv2c2k8dr10uFU8aBAVHpACnkopBDlx9sb9zE4%2B3O%2B%2BvLjW%2FDxXBGnEodJ9I8kXJAcdx0S%2FtgD3LcNk8OcCdMrLEDKjsmNaURBeIYfEJXVE4dXX0iTs%2FMlcH0CP3eFPBfzNtx4vp2Ns68Wm5mYsO9AuplO0ku0An5FHCk3ALLublz6TuiDCP2apbCYYQEkeHjOfypdqbQFqMCWFc1GpiVA3QKCaiqm4uiOo5qaIAlT2wRQRbbqiFDSZR0auqBrjgUdVRUECymKAnUgI6vwkUWInMJnFnFMVOczgkjS9dyNF2trhmUQfOEuLSfACIz%2FvP1wT4YuzQN8YXmBRdANRDHCq3idTirCf9%2FdnuF54j92T5iBrfa5FCcAB08qQOdiE2%2B5apCsLEKOBvWmlCK5MbDw27m9JAkQOa7vUh1CEt5Z5M771ptr1PP2AgAjSgujFbIpNZQEOlFiNQng2iMod%2FrzQ1GEGYwLCLkGPswmNyXTdh0sP
1AmBDDNJ%2Fttjwl3gINPyF7HBN45HKwZSb3%2B1YXaYooFtcWoai2yztFaROlYOq6yW3jButuK%2FInp6gpreWTyTBZJRUAjbchshwoXXFu5eGMYihWQpcDJQyxtayyKJN%2F7K3AptiUfm0pqYcH0YgeB40RYRCovQjboA2wP5pCaRvr394LWsUvTKOgZG7WjB3VSa6o5DKU4bBt1rXWpSvGs4IkQPNefM5JnBSFE4RQ34wsmIyX7ghJEAW8CNbuTN%2BKT2xNJRgr5yZ5YAQizvqVt%2FKMiw%2FDEFb4waWNyG2LpK5rFTwVLWVmGhKYGRV2Fmg5EYAiKaIgSFiNFxZIMgGQw1R1bMETTAVARJB0CSVJkU9ZsQVRtLGealiqogoNFxkFkyNp5MiCVb2%2BTGGtkwcoi8PlnKn5V0akeV8qsFS0LQLQxduNB2RRZch0KlacylMo9JdXjVWwF8LnSWBEIY1hucdMGvGD405dXVBwB3nPk4lXB%2B1OYMxGMeq6EFPoE6ckSCkwmwM9pYEmcO74VrdgssFQ%2FY1D488tD9O0rnsq7P79Mv319P8vB3eWsRXWU1RausFt9jDRWodAGLh%2Fwzl%2FhSfhYEosYNcM7zs%2BoZhkPxzqhhv2%2FkYGRRt6WIe1s29fItUTecm3gnSUejJhICOepP8NDDnkrwE85HjUGOtT%2Bl5cVHhdujO4xESA9PoZgtY2DtxCRi4Z9WZQ48p5YFfiMo3mB1N%2BSWHNJLF2u0Yhi%2F0Qo%2FGL9j4RTYCpPXGhvR9QqUvZaQUu1DV3XHMlxREUxVUeVbRGopmTqjgMdxZqKGJUtyTItLFxZQFZMXVRUJBkOhIajiwKybSQpUB1G0KqZJQMQn7n9FrJaCVkRlom8zGSyN6Nzy%2F1RYaS1zNGaQ2Pe1eHgQwSgjbc2oSekZzYHISd3vlv77q81xYa1NcXUlawNIReU2cDg%2FbYpH0tMmTP5rysozHPiJJGqHTLbApr0ObkQrTwszcSdLnPWJaGXR5pUO%2Fltu5wi5RhoIticoMCXBSPuEvjEPYQBfJmTB1oIgZLxWwhsLgSmRuSTEQJXXM69xMPHGjJlzsJqI%2Fex9indQeSelruHkT6eJrvqjLJnn%2Fk9dkl3f22cYzx3SUUM4VKdVbltUXlqr6ky8kBuGnxR9ZwKuW3l08x3NHsAYdTArWxZqmBAKIuyqEhYaIVAFIGlSw6CyNEgmiITmhDaClINBe9QoBu6ZGL6AQSkmo4sQQgVU5Hso0uqmYdvI6funC0DWd1jjWXXRONogybHQYoSR12FwYMLCYLnfYY8ZN%2FKYukIKHRv8YsFIBVmzvkue43MleubFN4Fq3jWXjrLgXMnl5a2c%2Bnx8eMs6WEXPzb11mR5f3bMi%2F3p1W2ZfnC0fkuluIrFDo7nt5R%2B%2By0369W%2F%2FMPHBimNhE6wQUv3T9oFk%2BiStzZIUe3IKMYviErJ6spmXOnoLAzBc%2B6xFXkg2jJgVeMOuHZcpefTcW2Qm42gW1TnxU6MUZ60A99x5%2BuQ5mzkg454tq1RiJCgkVSGBflZ6K1mPiFltZLZdCOdQfRAMvquGUBIO3lsxi5rBTL%2BQ7XiWI0dkRt%2BGfKnXydB9wGSVYjA0sKiay08OE%2FsYVftSQbtEav4xukg8OpBWbpbC8aq2NkCywYT%2BCsNNOqTUKTHIPxJAghp3z6YU%2Bl7m9zNazukIUSM5UUtvvuuuwFiQIC1RxiYm4wggWXkrcOCF4BELrjEuOoH5HEfISIo4cXb%2FRGCv0noY5LTlXbrBTbRvXIL2Ghe7ydpiCfvLkfNyihF9iJBeGKCwQNImBFWusji%2F0SI4OQGMFg%2BjV1%2FHu2MsihpX6NWplRZ52gBHDVA1EoSWmfu7DSgcUDVSWkLxVenOnGT1MXqQgyapD6MMtVVLlnjZDLtKDpYW41HF4wC1
kmatlXjKT8vG%2BohGs8uA3mmKVyDKP77gkoBTWzTv9aYCMfPkyTijhJ%2F6isMwSOFKU0iIPPAktPPZHN0aX3Fj%2BH%2FO%2FjGL3vGmjN5a6exjiFRrbGORyZLPCfx33%2BiV5eyVsNpaJZ20rEw4eT1HcSE1DJuyZzUeo1DUTWhHssPMOnxU2sPJn49kbrjkSylIclSjmM2akuylFIOvpxYmepIVuV5xZwcQLKaodWOPArkWcHjb5Saqp3XbekEpRRVOj6K7CgO9BtFEqpjjhNFxD5QZHvdhzeCIsd2U6hiaWlTZ1LH3oUKI9J6YESpQjL%2B0iGd1nlpTF06V8MOK%2BazPd7rJDZ8ppNnF6xTaZdS3sdy68OQFE3rh6QoRrckhWsw0oekKCNztXeHmnLncs6eQYmZWeX%2B69ldY4vLHUrcKqsVDTs%2Fom0lc3JFGKjtjStKDqzHNK4ct4SDlNZrGMiYwodt1ciPF2t6T1ZL2EQGlKgHiTXIQTkJNShapwgUy6HkSxdCSlxKC7JpuUvAIE9qygnSipI04J3t%2FITe4mGp5%2FgfBt0FIRfqJfGIqefi5hr%2FI4%2BH8UXg466BS1cWYWx9RFE8qVZA7AAHZIN8vIAFSgUJuH4dWWmNBA1XvL6YwG5fLy2cknh8hT2CT76s49Wa5sMEHiQoc1ahYfjmL3ucoSeZuzLIpuEynyXL6dmMu0n05%2BEpF3mETUt4XoOl6xFcu0HeAyK9TvpwX%2BpKsSirrKrN8HwPYtcQz%2FWR4blQQfUQRWsvHmmcVT5zLdo2nGN8HLqOg8Is9Bs9rTCsIlYErAq1Xob04QsZiB%2B69mKJ%2FM33x7vVj7CtVaEqw3C39dGS7FN9e8Tb2gvmI93TeGTTlGXZgedhYZHGCDH2hYC9mGyCcl49MityjzwqejGMp2vBVeX%2FxPnl8i5a3vijO22gnaq%2Bs%2F4vvko%2BScTpxICCQpdm0dxnCetdK%2FBpqPlOBd4czJS4ddyjIW1VyTxERIEaKXXbN7nw8opNa3a3hPleKzmF0BINZDuSbkuOYQFoObZkAmSqOhJMQZyKliWZJoIahrJsAMG0LUMVBc2EDrB1UVQUAxi6WvzIMXIKabQw%2Fp0t13W0cP3nH6wK7Q%2FWjDffDxZleV0HAQa90t22IdxtY9d5CzSj48fDmC2Qt0JhNLvbtlJANGwk6tAwdFs2bVGWbVWWoQmRIUBoy1PbkgQJiQCJoi7aUHdMVdaBaMqGZTqKSRZMEnTdGuFK7QBFumT8x8Yafv8XHiim9BmFIXQoJIDMlwKmksssh0FZkO%2FnACsMhFBTDBVYGQ%2FM1mMiz9jrKKZdbaq8bXI5cwHHeHmjBSZ%2BWc2zNIR6z6Di16fUa5wqtPygZFk6nEfyg78OL6%2BQl5gSo2L%2FIlPy4Y28dFS%2F%2FJHiwDixpsX0wTRwa5cTrcOwU9cnhoOnFRUz%2FbixNwQSarBkhIZYhylNSLO%2FWWAqDUf1XCsECSIf3V1Smkxrz4nSV1jqccNQZXFYzwmfEh2evzwcJSq5%2F9uSpQMokToMJWoaIdQhJao0RGuLhLFLWLaLn1eIr0W19N5SKy2rGsbK02LaQYSZ5xXqh0SVJ9WeRqmvg0aZI6RRqRNx%2FKFkYzhqTGlaHF4Z7KwxbnSPNuQin1p0T%2BNFlo8TeNY6XiylF1n61o7cidLz6mG5E425XcIHfjC34aFczEEgXof9srD2nEs7Sc5VjkuS1RFyrh3VgcYRVju6XNbUXrOTuKXVecYTulhpsMDygljHyEnlhwjKNt7btDar5c6%2FufM%2Bkkjx0O1k6O2Jiv4qiIpqGrOqd3VwsiKfAlnpVNwxBhN3DqUIH778uMrCbRpv902ETtHin6u6PUe09E4fokVhDu1pgfE6aMHA5rvzG1G8mL%2B4yvJGhnfXcPnpCU5PpvrgcKa646Rx7jT
VZTFm%2FZnq4Hp1ByAI9%2FIW4Jdp%2FfckOpEKG33QFvzdkAy6PVkxT5KslC1uysD5FPzSPIcnb55caZ4CheqSAqlNA7lGWppH3WGrGao0DxGAzgP4jPUrqTHJC1NTTaqTCeSvRJwiH0T%2BnMRb9EH6yCfJYRh4MFJr8qfmz6I%2FXfKnjtFsIw9anOfkHA6Nk6CNnpSzZkzuZCowjcLh0DideJwOB0XbzsSGcjhcXv24vLpHv5pzsMstSUUbS0EvojvCv9tzLuV1JEIr8rCca6tFfDTB99W8ojgE9s%2BRxt4z4zqZcjLIxOD24EaksGOUqMdvMWWOawk%2FWp4RP1bt1YcINMhGasHUj37MRqnas14%2BHaPxMRumVOior2M29OKxGeM4ZsMY9TEbd4gKHzbRlT8gP1jy3ZoNSWT%2FeVW0tgw5CB6iJ5o0Oi9Mopzlk0VwCLSWv0vpP%2F3GPHb6Ti%2BpJIxsStAnuSL5OvP0rPtdh6vtdeDXqEvO62K15LyYRjUWsjuEDvgUfwcPXnI%2BoyFvt%2BQ8P1N5e13L3u3aRVNPJkb0LVMMlOFscoIYucsmjivD2ayvPURPSucQfit4IiefUzgS%2Bm4FIdaQprg54wH156ZbWBeZUyxMVSoZKeQneyI7EJ3clrbxjpzhIUmHpWi5XRt7a%2Be%2FM5AU6yPhX%2B8wiuNVY7UmWF5nMak4zdX8iQWF8P3mGksO1FaT1XxhSViAohrvyACWt5WL%2B4hitIryJ8M0Odv7WKePt%2Bz%2FjQxs0upo81M4qdxMi4emibMm5xyF1OLUuWjFNwEMWgN0P89M5%2By6Sx4simqVCQ96dnkzHpyNexBL5x9%2Bkyoj4BGCGIzT0klPiJlsSv%2B9e3FXKwTfbxtZLzqtD5YshQJzwweiu1LHBjPDroDLuC9KRKGmNQ16HH8ElisPibM7cUZhPJu%2FjHuc0h7jPGnTdslgkF7urAZxNHtB6kgasdOGlFcuFBEZESmzwHJSKvD4plwzvIrM%2FRaAO%2FnC7HqfoRSpH3l3vEznhxQdmsxAqzHdJUWemgYsJAAhHJ3Zqikvx3v7JwweWSlUViyJjProWZJJPSkyg9aRC2Ye%2FqcbuaArVYLRZ%2BTCjfDRWdwasfewuLNub%2B6vf7zcTOsDFyqoRegsl4%2BUDUwKj118QnMiPm5Ql3VXYRdWLbtI6sbnFolb451wiVJTpdR8bWl5HlYUSWYnnEQuchK5ghdKGq%2FJcwMcwki4SCBXkIAeRj%2FZlHNt4EbZveW6AJ1Rio8wqluK50Epe7Y7A90hXmOtLEPm7K0pNDeNFK48BQNrFESBsyLyK1fxbrM4g8uKewqIbYxxR0CuzNiRxy6Fh13qkbCrT4%2FmVvR%2Buw5NLlhO0BOVGMOvmVNly3Z8a16nrcB4s26V0Q5sQH9PByym7NvhyS88DtPFgT5cSjbkOScuM%2BdfByVb2Cij2N6AhauMnKJYPW6KL%2F9oR8LO%2BuM6Mkv5HzEGp50PbN8Y0el6riOaaJggW1aX%2BdUql2UhVpOaLeLRtMuqmf2S5fxSsxNiQZOQJMETGYMXl0HDMN7QkvEsy%2BnBvgXfiHn4knEtyydT36K7HL1qgGHtyuZNzd9N7%2BNX948r6%2F7%2BKXgOnj9%2B%2F%2FLPtKmlOT2OaOjM83Isr1zIJN%2F5vKL1k3meHDpEbRyfyDFJjat4Batnuh7RT%2FIJesKSgNEvF2uWNKYSyBFt4ks3ImoN8FGwJtfJrKjFLbONS1zreD2ynZxxvJyQrgvDFvzipipzzC0Dxy23oo34IgsnLtPLtod17%2BmJ42Js05zm1H8yksR1aWTVWTrEhhL3lPtHhqHz1o3iQbspi%2BOytOrx72ViVpal9y5C1UxcS%2FW0V4iZJTo1MJniQ38oKrVt1I1cBuvQez4nKdco3s30N%2FjCrmKWl
S1fTs2WkG8uJSi6XDoBO9uoBd2oKibIegc%2BGf5yH17A5IhFuqS21UFbVQhstJn62SQ1qkm5HKB5nJRmXSyqQJrZjwr0ee3F7t8XzYOA1sslCAkKSQJ%2BLYvX26Hf4CHMQm8184n7rFbHmW70HIgekFdQc1gx4yUZ7y97W8hPPbqcnFJTjvjhHXnWp1KzNauv1nlZDT0I6UjbGcULbsQSvcxBnSL7eebfTA11E0m%2Bpv9xaVbePq6VaViRT%2BWtf7jTK4381JkK5yGALl7%2BUjNGM5IOQingnCSHpMR0ASCllQKPIXaAT6YszLRiLQRFUbKmPAtUjZnICRkSDXV2LEYo1aeC1CFSbQwLfWvtlVu8zJb%2Fd9ksTA9i9AKW1sfCGWmJQY9Roi2Y6VWi7DctyZESJO6apUNEZFWjuOhh2NbDPxf3hM6mRRoEjBiPzANRrMCz79B%2BrYEfk2PldhfF2dbbp%2FQkOhom5BEhgfG%2FCEXRshg0tHVU9MQomjJF14BUxcJvFwa3o4sPrAQkc%2BrmSkQ2evme5lOkggOCPqIDwbt%2BTQ7iYyeUW6Tjz5f3OZRhqZ4LBGK81vsvyraqYO8ur95POGXEk3MB6cgoMoMo3n8EV7tLlmf5r3t%2B4gxCWoEDeMVPuL7trSHbJ5TRp%2BkKdC%2B2%2FR5urBKAHYzkcBdQIi4M6wAySwGhilgNC5BEns9uj7hGfImlsDgvmuJpLT4HkEidV%2F8H). Specify the desired analysis details for your data in the *essential.vars.groovy* file (see below) and run the pipeline *rnaseq.pipeline.groovy* as described [here](https://gitlab.rlp.net/imbforge/NGSpipe2go/-/blob/master/README.md). A markdown file *DEreport.Rmd* will be generated in the output reports folder after running the pipeline. 
Subsequently, the *DEreport.Rmd* file can be converted to a final html report using the *knitr* R-package.\r +\r +\r +### The pipelines includes\r +- quality control of rawdata with FastQC and MultiQC\r +- Read mapping to the reference genome using STAR\r +- generation of bigWig tracks for visualisation of alignment with deeptools\r +- Characterization of insert size for paired-end libraries\r +- Read quantification with featureCounts (Subread) \r +- Library complexity assessment with dupRadar\r +- RNA class representation\r +- Check for strand specificity\r +- Visualization of gene body coverage\r +- Illustration of sample relatedness with MDS plots and heatmaps\r +- Differential Expression Analysis for depicted group comparisons with DESeq2\r +- Enrichment analysis for DE results with clusterProfiler and ReactomePA\r +- Additional DE analysis including multimapped reads\r +\r +\r +### Pipeline parameter settings\r +- targets.txt: tab-separated txt-file giving information about the analysed samples. The following columns are required \r + - sample: sample identifier for use in plots and and tables\r + - file: read counts file name (a unique sub-string of the file name is sufficient, this sub-string is grebbed against the count file names produced by the pipeline) \r + - group: variable for sample grouping (e.g. by condition)\r + - replicate: replicate number of samples belonging to the same group\r +- contrasts.txt: indicate intended group comparisions for differential expression analysis, e.g. *KOvsWT=(KO-WT)* if targets.txt contains the groups *KO* and *WT*. Give 1 contrast per line. 
\r +- essential.vars.groovy: essential parameter describing the experiment including: \r + - ESSENTIAL_PROJECT: your project folder name\r + - ESSENTIAL_STAR_REF: path to STAR indexed reference genome\r + - ESSENTIAL_GENESGTF: genome annotation file in gtf-format\r + - ESSENTIAL_PAIRED: either paired end ("yes") or single read ("no") design\r + - ESSENTIAL_STRANDED: strandness of library (no|yes|reverse)\r + - ESSENTIAL_ORG: UCSC organism name\r + - ESSENTIAL_READLENGTH: read length of library\r + - ESSENTIAL_THREADS: number of threads for parallel tasks\r +- additional (more specialized) parameter can be given in the var.groovy-files of the individual pipeline modules \r +\r +\r +## Programs required\r +- Bedtools\r +- DEseq2\r +- deeptools\r +- dupRadar (provided by another project from imbforge)\r +- FastQC\r +- MultiQC\r +- Picard\r +- R packages DESeq2, clusterProfiler, ReactomePA\r +- RSeQC\r +- Samtools\r +- STAR\r +- Subread\r +- UCSC utilities\r +""" ; + schema1:keywords "rna-seq, bpipe, groovy" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "RNA-Seq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/58?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "This workflow correspond to the Genome-wide alternative splicing analysis training. It allows to analyze isoform switching by making use of IsoformSwitchAnalyzeR." ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/472?version=2" ; + schema1:license "CC-BY-NC-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genome-wide alternative splicing analysis" ; + schema1:sdDatePublished "2024-08-05 10:30:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/472/ro_crate?version=2" . 
+ + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 17028 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 71945 ; + schema1:creator ; + schema1:dateCreated "2023-05-24T23:22:29Z" ; + schema1:dateModified "2023-05-24T23:23:44Z" ; + schema1:description "This workflow correspond to the Genome-wide alternative splicing analysis training. It allows to analyze isoform switching by making use of IsoformSwitchAnalyzeR." ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/472?version=3" ; + schema1:keywords "Transcriptomics, alternative-splicing, GTN" ; + schema1:license "https://spdx.org/licenses/CC-BY-NC-4.0" ; + schema1:name "Genome-wide alternative splicing analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/472?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 46064 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/991?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/hlatyping" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hlatyping" ; + schema1:sdDatePublished "2024-08-05 10:22:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/991/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3422 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:55Z" ; + schema1:dateModified "2024-06-11T12:54:55Z" ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/991?version=9" ; + schema1:keywords "DNA, hla, hla-typing, immunology, optitype, personalized-medicine, rna" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hlatyping" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/991?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 
2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.283.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Amber Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/283/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6933 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T08:32:45Z" ; + schema1:dateModified "2023-04-14T08:34:33Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in 
Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/283?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Amber Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_md_setup/python/workflow.py" ; + schema1:version 3 . + + a schema1:Dataset ; + schema1:datePublished "2024-02-07T16:26:57.747008" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Mitogenome-assembly-VGP0" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Mitogenome-assembly-VGP0/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.20" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/991?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/hlatyping" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hlatyping" ; + schema1:sdDatePublished "2024-08-05 10:22:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/991/ro_crate?version=8" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4297 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:56Z" ; + schema1:dateModified "2024-06-11T12:54:56Z" ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/991?version=9" ; + schema1:keywords "DNA, hla, hla-typing, immunology, optitype, personalized-medicine, rna" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hlatyping" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/991?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/965?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/atacseq" ; + schema1:sdDatePublished "2024-08-05 10:24:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/965/ro_crate?version=8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11439 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:42Z" ; + schema1:dateModified "2024-06-11T12:54:42Z" ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/965?version=8" ; + schema1:keywords "ATAC-seq, chromatin-accessibiity" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/atacseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/965?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Automated quantitative analysis of DIA proteomics mass spectrometry measurements." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/980?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/diaproteomics" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/diaproteomics" ; + schema1:sdDatePublished "2024-08-05 10:24:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/980/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6543 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "Automated quantitative analysis of DIA proteomics mass spectrometry measurements." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/980?version=6" ; + schema1:keywords "data-independent-proteomics, dia-proteomics, openms, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/diaproteomics" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/980?version=5" ; + schema1:version 5 . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-05-01T16:03:28.967443" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + , + ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-atac-cutandrun" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "1.0" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-pe" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "1.0" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:Dataset ; + schema1:datePublished "2023-11-16T14:32:18.217599" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/kmer-profiling-hifi-VGP1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "kmer-profiling-hifi-VGP1/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "kmer-profiling-hifi-VGP1/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/kmer-profiling-hifi-VGP1" ; + schema1:version "0.1.4" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Evaluation of Swin Transformer and knowledge transfer for denoising of super-resolution structured illumination microscopy data\r +\r +In recent years, convolutional neural network (CNN)-based methods have shown remarkable performance in the denoising and reconstruction of super-resolved structured illumination microscopy (SR-SIM) data. Therefore, CNN-based architectures have been the main focus of existing studies. Recently, however, an alternative and highly\r +competitive deep learning architecture, Swin Transformer, has been proposed for image restoration tasks. In this work, we present SwinT-fairSIM, a novel method for restoring SR-SIM images with low signal-to-noise ratio (SNR) based on Swin Transformer. The experimental results show that SwinT-fairSIM outperforms previous CNN-based denoising methods. Furthermore, the generalization capabilities of deep learning methods for image restoration tasks on real fluorescence microscopy data have not been fully explored yet, i.e., the extent to which trained artificial neural networks are limited to specific types of cell structures and noise. Therefore, as a second contribution, we benchmark two types of transfer learning, i.e., direct transfer and fine-tuning, in combination with SwinT-fairSIM and two CNN-based methods for denoising SR-SIM data. Direct transfer does not prove to be a viable strategy, but fine-tuning achieves results comparable to conventional training from scratch while saving computational time and potentially reducing the amount of required training data. As a third contribution, we published four datasets of raw SIM images and already reconstructed SR-SIM images. These datasets cover two types of cell structures, tubulin filaments and vesicle structures. Different noise levels are available for the tubulin filaments. 
These datasets are structured in such a way that they can be easily used by the research community for research on denoising, super-resolution, and transfer learning strategies.\r +\r +The SIM microscopy datasets that were used during this work can be downloaded through this link: http://dx.doi.org/10.5524/102461\r +\r +\r +## Installation:\r +\r +This implementation requires the Tensorflow-GPU2.5 version. To avoid package conflicts, we recommend you create a new environment by using our provided environment.yml file. To create a new environment please run the following script:\r +\r +> conda env create -f environment.yml\r +\r +## How to use this code:\r +\r +This code can be used to train a denoising model from scratch or to fine-tune a pretrained model. After the installation of the Python environment from the yml file, the next step is to set the input parameters in the JSON parameter file (i.e., ParameterFile.json). Most of the input parameters are self-explanatory but below we will discuss some of the important input parameters from the JSON file:\r +\r +- TrainNetworkfromScratch: This input parameter will train the model from scratch If set to True, otherwise, for fine-tuning, It should be False.\r +- ActivateTrainandTestModel: This parameter will be set to False If you want to use this code for evaluation of the trained model or the reproducibility of the results by using pretrained models.\r +- PretrainedmodelPath: This parameter is mandatory in case of fine-tuning or evaluation of a pretrained model.\r +- FineTuneStartingpoint and FineTuneEndingpoint: These two input parameters are essential in the fine-tuning of a pretrained model. All the layers between the starting and ending points will be frozen during the fine-tuning of the pretrained model.\r +\r +After the assignment of the input parameters. 
You can run the following script from the command line to start training the model:\r +\r +> python MainModule.py 'ParameterFile.json'\r +\r +## Reproducibility and evaluation:\r +\r +To reproduce the results of the paper all the trained models used in this work are available in the 'Models' directory at [zenodo](https://doi.org/10.5281/zenodo.7626173). This code is capable of performing all the necessary steps for the training and test phases. It will automatically evaluate the model and generate a result directory to write all the results. Similarly, during the training process, It will also create a model directory and save the trained model along with the best checkpoints in the model directory. \r +\r +## Important Note:\r +\r +This code will work with at least one GPU.\r +\r +## Reference:\r +\r +Please cite our paper in case you use this code for any scientific publication. We will soon upload the citation index!\r +\r +\r +\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.675.1" ; + schema1:isBasedOn "https://github.com/ZafranShah/SwinT-fairSIM-and-knowledge-transfer" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Evaluation of Swin Transformer and knowledge transfer for denoising of super-resolution structured illumination microscopy data" ; + schema1:sdDatePublished "2024-08-05 10:27:08 +0100" ; + schema1:url "https://workflowhub.eu/workflows/675/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 35149 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7370 ; + schema1:creator ; + schema1:dateCreated "2023-11-21T09:02:46Z" ; + schema1:dateModified "2023-11-21T10:28:24Z" ; + schema1:description """# Evaluation of Swin Transformer and knowledge transfer for denoising of super-resolution structured illumination microscopy data\r +\r +In recent years, convolutional neural network (CNN)-based methods have shown remarkable performance in the denoising and reconstruction of super-resolved structured illumination microscopy (SR-SIM) data. Therefore, CNN-based architectures have been the main focus of existing studies. Recently, however, an alternative and highly\r +competitive deep learning architecture, Swin Transformer, has been proposed for image restoration tasks. In this work, we present SwinT-fairSIM, a novel method for restoring SR-SIM images with low signal-to-noise ratio (SNR) based on Swin Transformer. The experimental results show that SwinT-fairSIM outperforms previous CNN-based denoising methods. Furthermore, the generalization capabilities of deep learning methods for image restoration tasks on real fluorescence microscopy data have not been fully explored yet, i.e., the extent to which trained artificial neural networks are limited to specific types of cell structures and noise. Therefore, as a second contribution, we benchmark two types of transfer learning, i.e., direct transfer and fine-tuning, in combination with SwinT-fairSIM and two CNN-based methods for denoising SR-SIM data. Direct transfer does not prove to be a viable strategy, but fine-tuning achieves results comparable to conventional training from scratch while saving computational time and potentially reducing the amount of required training data. As a third contribution, we published four datasets of raw SIM images and already reconstructed SR-SIM images. 
These datasets cover two types of cell structures, tubulin filaments and vesicle structures. Different noise levels are available for the tubulin filaments. These datasets are structured in such a way that they can be easily used by the research community for research on denoising, super-resolution, and transfer learning strategies.\r +\r +The SIM microscopy datasets that were used during this work can be downloaded through this link: http://dx.doi.org/10.5524/102461\r +\r +\r +## Installation:\r +\r +This implementation requires the Tensorflow-GPU2.5 version. To avoid package conflicts, we recommend you create a new environment by using our provided environment.yml file. To create a new environment please run the following script:\r +\r +> conda env create -f environment.yml\r +\r +## How to use this code:\r +\r +This code can be used to train a denoising model from scratch or to fine-tune a pretrained model. After the installation of the Python environment from the yml file, the next step is to set the input parameters in the JSON parameter file (i.e., ParameterFile.json). Most of the input parameters are self-explanatory but below we will discuss some of the important input parameters from the JSON file:\r +\r +- TrainNetworkfromScratch: This input parameter will train the model from scratch If set to True, otherwise, for fine-tuning, It should be False.\r +- ActivateTrainandTestModel: This parameter will be set to False If you want to use this code for evaluation of the trained model or the reproducibility of the results by using pretrained models.\r +- PretrainedmodelPath: This parameter is mandatory in case of fine-tuning or evaluation of a pretrained model.\r +- FineTuneStartingpoint and FineTuneEndingpoint: These two input parameters are essential in the fine-tuning of a pretrained model. All the layers between the starting and ending points will be frozen during the fine-tuning of the pretrained model.\r +\r +After the assignment of the input parameters. 
You can run the following script from the command line to start training the model:\r +\r +> python MainModule.py 'ParameterFile.json'\r +\r +## Reproducibility and evaluation:\r +\r +To reproduce the results of the paper all the trained models used in this work are available in the 'Models' directory at [zenodo](https://doi.org/10.5281/zenodo.7626173). This code is capable of performing all the necessary steps for the training and test phases. It will automatically evaluate the model and generate a result directory to write all the results. Similarly, during the training process, It will also create a model directory and save the trained model along with the best checkpoints in the model directory. \r +\r +## Important Note:\r +\r +This code will work with at least one GPU.\r +\r +## Reference:\r +\r +Please cite our paper in case you use this code for any scientific publication. We will soon upload the citation index!\r +\r +\r +\r +\r +""" ; + schema1:keywords "Machine Learning, Python, image processing, SIM, microscopy, Deep learning" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Evaluation of Swin Transformer and knowledge transfer for denoising of super-resolution structured illumination microscopy data" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/675?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2021-07-26T10:22:23.311704" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-wgs-variant-calling" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:sdDatePublished "2021-10-27 15:45:09 +0100" ; + schema1:softwareVersion "v0.2.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.6" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/991?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/hlatyping" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hlatyping" ; + schema1:sdDatePublished "2024-08-05 10:22:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/991/ro_crate?version=9" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4780 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:57Z" ; + schema1:dateModified "2024-06-11T12:54:57Z" ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/991?version=9" ; + schema1:keywords "DNA, hla, hla-typing, immunology, optitype, personalized-medicine, rna" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hlatyping" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/991?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Coprolite Identification" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/974?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/coproid" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/coproid" ; + schema1:sdDatePublished "2024-08-05 10:24:09 +0100" ; + schema1:url "https://workflowhub.eu/workflows/974/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4232 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:45Z" ; + schema1:dateModified "2024-06-11T12:54:45Z" ; + schema1:description "Coprolite Identification" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/974?version=2" ; + schema1:keywords "adna, ancient-dna, coprolite, microbiome" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/coproid" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/974?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-04-22T15:13:48.408861" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Purge-duplicate-contigs-VGP6/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicate-contigs-VGP6/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:version "0.3.7" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 1908023 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "MARS-seq v2 preprocessing pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/996?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/marsseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/marsseq" ; + schema1:sdDatePublished "2024-08-05 10:23:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/996/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9527 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:00Z" ; + schema1:dateModified "2024-06-11T12:55:00Z" ; + schema1:description "MARS-seq v2 preprocessing pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/996?version=4" ; + schema1:keywords "facs-sorting, MARS-seq, single-cell, single-cell-rna-seq, star-solo, transcriptional-dynamics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/marsseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/996?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-08-05 10:22:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5461 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:09Z" ; + schema1:dateModified "2024-06-11T12:55:09Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-07T21:34:48.828460" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/kmer-profiling-hifi-VGP1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "kmer-profiling-hifi-VGP1/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "kmer-profiling-hifi-VGP1/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/kmer-profiling-hifi-VGP1" ; + schema1:version "0.1.2" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Deprecated" ; + schema1:description "A workflow for mapping and consensus generation of SARS-CoV2 whole genome amplicon nanopore data implemented in the Nextflow framework. Reads are mapped to a reference genome using Minimap2 after trimming the amplicon primers with a fixed length at both ends of the amplicons using Cutadapt. The consensus is called using Pysam based on a majority read support threshold per position of the Minimap2 alignment and positions with less than 30x coverage are masked using ‘N’." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/104?version=1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ENA SARS-CoV-2 Nanopore Amplicon Sequencing Analysis Workflow" ; + schema1:sdDatePublished "2024-08-05 10:27:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/104/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2826 ; + schema1:creator , + ; + schema1:dateCreated "2021-02-12T11:46:24Z" ; + schema1:dateModified "2023-11-24T10:02:56Z" ; + schema1:description "A workflow for mapping and consensus generation of SARS-CoV2 whole genome amplicon nanopore data implemented in the Nextflow framework. Reads are mapped to a reference genome using Minimap2 after trimming the amplicon primers with a fixed length at both ends of the amplicons using Cutadapt. The consensus is called using Pysam based on a majority read support threshold per position of the Minimap2 alignment and positions with less than 30x coverage are masked using ‘N’." ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "ENA SARS-CoV-2 Nanopore Amplicon Sequencing Analysis Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/104?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=17" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-08-05 10:23:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=17" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17563 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:16Z" ; + schema1:dateModified "2024-06-11T12:55:16Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=17" ; + schema1:version 17 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=22" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-08-05 10:23:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=22" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12338 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:04Z" ; + schema1:dateModified "2024-06-11T12:55:04Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=22" ; + schema1:version 22 . + + a schema1:Dataset ; + schema1:datePublished "2024-04-22T15:45:17.824364" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Purge-duplicates-one-haplotype-VGP6b/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "This portion of the workflow produces sets of feature Counts ready for analysis by limma/etc." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/688?version=1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for mRNA-Seq BY-COVID Pipeline: Counts" ; + schema1:sdDatePublished "2024-08-05 10:26:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/688/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 26083 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2023-12-19T10:10:00Z" ; + schema1:dateModified "2024-01-24T09:42:47Z" ; + schema1:description "This portion of the workflow produces sets of feature Counts ready for analysis by limma/etc." ; + schema1:image ; + schema1:keywords "BY-COVID, covid-19" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "mRNA-Seq BY-COVID Pipeline: Counts" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/688?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 772694 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Continuous flexibility analysis of SARS-CoV-2 Spike prefusion structures" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/73?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Scipion workflow for Cryo electron microscopy of SARS-CoV-2 stabilized spike in prefusion state" ; + schema1:sdDatePublished "2024-08-05 10:33:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/73/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 38059 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1387470 ; + schema1:creator , + , + ; + schema1:dateCreated "2020-11-19T13:34:09Z" ; + schema1:dateModified "2023-01-16T13:46:12Z" ; + schema1:description "Continuous flexibility analysis of SARS-CoV-2 Spike prefusion structures" ; + schema1:image ; + schema1:keywords "covid-19, image processing, bioimaging" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Scipion workflow for Cryo electron microscopy of SARS-CoV-2 stabilized spike in prefusion state" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/73?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + ; + schema1:description "This workflow is used for the virtual screening of the SARS-CoV-2 main protease (de.NBI-cloud, STFC). It includes Charge enumeration, Generation of 3D conformations, Preparation of active site for docking using rDock, Docking, Scoring and Selection of compounds available. 
More info can be found at https://covid19.galaxyproject.org/cheminformatics/" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/18?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Cheminformatics - XChem combined" ; + schema1:sdDatePublished "2024-08-05 10:33:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/18/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1908 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 38448 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T15:02:39Z" ; + schema1:dateModified "2023-01-16T13:41:21Z" ; + schema1:description "This workflow is used for the virtual screening of the SARS-CoV-2 main protease (de.NBI-cloud, STFC). It includes Charge enumeration, Generation of 3D conformations, Preparation of active site for docking using rDock, Docking, Scoring and Selection of compounds available. More info can be found at https://covid19.galaxyproject.org/cheminformatics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Cheminformatics - XChem combined" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/18?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 5969 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/278?version=4" ; + schema1:isBasedOn "https://gitlab.bsc.es/lrodrig1/structuralvariants_poc/-/tree/1.1.3/structuralvariants/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNV_pipeline" ; + schema1:sdDatePublished "2024-08-05 10:31:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/278/ro_crate?version=4" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 8992 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8794 ; + schema1:creator , + ; + schema1:dateCreated "2022-04-25T07:30:13Z" ; + schema1:dateModified "2022-04-25T07:37:40Z" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/278?version=10" ; + schema1:keywords "cancer, CODEX2, ExomeDepth, manta, TransBioNet, variant calling, GRIDSS, structural variants" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CNV_pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/278?version=4" ; + schema1:version 4 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 65152 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# DNA-Seq pipeline\r +Here we provide the tools to perform paired end or single read DNA-Seq analysis including raw data quality control, read mapping, variant calling and variant filtering. \r +\r +\r +## Pipeline Workflow\r +All analysis steps are illustrated in the pipeline [flowchart](https://www.draw.io/?lightbox=1&highlight=0000ff&edit=_blank&layers=1&nav=1&title=NGSpipe2go_DNAseq_pipeline.html#R7R1bd5s489f4nObBPoDvj3ESO%2BnXZpuk3Wz3pUcG2WaDgQJ2Lr%2F%2B0wgJIxAY29jGabZ7TowAIc2MRnNXrXkxfxl5yJ19dQxs1TTFeKk1L2uaprY0rQb%2FK8Zr2NLtdsOGqWca7KFVw4P5hlmjwloXpoF94cHAcazAdMVG3bFtrAdCG%2FI851l8bOJY4lddNMWphgcdWenWR9MIZqxV7fRXN66xOZ2xT%2Fc0Nr8x0p%2BmnrOw2fdsx8bhnTni3bA5%2BjNkOM%2BxpuZVrXnhOU4Q%2Fpq%2FXGALwMohFr43zLgbDdnDdlDkhaV%2Be%2FfwuPi8uHz79%2FvN9%2BFPf%2FlvXeUYWCJrwYBR0zoW6XFgmEsAr2VObXqj83sBYx14FA7RJfk1ZX%2Fpa2Mv2ULGRPvirRQawSsH%2FiyYW%2BSXSu5ZaIytQQTTC8dyPPpQc0j%2FI4%2F4gec8RVgiUBxMHDtgJKV2YNzIn2GD9Uj7ia4mpmXFOr3qwL%2BoU36HIrE5mHrIMAlwE826Mzd1cqnAIxbyffY7wq8STTKOHIavJfYC%2FBJrYsgaYWeOA%2B%2BVPMLuaj32CltTmsJQ9byi0JbCCHQWI84OfxCxVTGN%2Bl6RB%2FnBKEROLf9i21w%2BXM8nj8355L63%2FPEjqNc7Kdxhg6wjdul4wcyZOjayrlatMQQAXP5bzF3%2B%2FBS5pGX11hfHcRme%2FsNB8MpwihaBQ5piVIJfzOAf6LDRZlc%2FWffw%2B%2FIlfvHKL2wCgNhLcPmT9wcXq9foFX8vTW4hDGDimQuRNfnOwtPZU%2F3P6rBl%2FfwyuhtMHr%2BPTHXx2Ku3GatD3hQHOXBvygnFwxYKzKU4jl2Qfq18nsxue4G1nN2Pb68fhr%2FerusfHGIvHMJzAoI8BwBX75fEMnrtrsAy6n0txTLUnpZmGS1F2516rkZfR%2BdB%2F9v8WRuN0C%2F%2F4n%2Bvn%2BvdnVlGaQyC35EziKyFvmZdS3fVZj%2B9sKUPthQpNznAUn97DC6%2FK%2F%2F8%2B3l4f%2BXNVeP76PUm5DOngKwsbr7i35tx862QXBDHzValcNxW3zuSsW2cg05ALnXgvsCLoXFoWnw4aToQtoQDEEXzWESRN2zJFg9bp0AtfFOHG3Wf4vucPKC23JecHT9quNBq54PL2%2FP6A%2F4NMDZdbJlk1yTtg7g4MC4kNISjE5tDoWTHAe827TrFrBmgMXk7NkmiIOOJaZt03ycosddMpRzg%2By6Cr%2BlMElk9qPt%2BPfAWto4CGFv8ss4oWNKpEht02HUhtFRyLmGnCKQhD0%2BEDmdBA
MaHc1hY2nBqBkTybHiW27Chq6E5H08c0i35eTt6AAxrU4dc1IF2LWcMEi1egplkyNHvk9%2BE8n0g%2FKFhI%2FKjwe81iDjrLF%2FTY8x8jsMaSeC%2FAVqSknel1pDlIMMHKwtMH2wqjrGwAI4Ksg1YY75POLSJrJBbI8IMsXfKE8YvWF8EdIYxpkEIDbo17SllJ598fEb%2B0EYXmR6moKAA%2BeTSWwb2QSXbBhDlLNPUJ7MVulIsJYqg9nQ6aUMJ12LjWk9k%2FCp9P21J9tMECIgy6MJPoiy6RG2EyYcyFJdKerwhMjy2pODK3dALw1BNgUyTQIy3FZZK2Pe%2BOSYlK%2FaxutYWENYVO3AmEx%2BYbAIJ0aC3x4vWO6bwy3%2F%2FFAThdcKvIPquJOEDaDicatdLswcSZn%2F42Ptr%2FB94AMjmAJYgUVh1pZxrToZPRC3KthTgWqG1RQnb6wFgEe51YvfIqgnqzLAF93QCPLq1rBNwv63YtmxHKibWusm2WeqpraZq4UkQ3uzJuXdcvJMJEceQlKJNvrFEnp8pKGU9toWclAL%2FfoAd9m3yBtdzlqaBC4g1Zh4RDSI56Za8KACDDibnu%2BFrdd%2FFujkhWrPwdeVT3giyGxw3aMjePCs6IRlatLWMXotxILYzxJg%2BN1mHOBo4ZJ%2BcWNRiMKFGgjj3fp6ZAX5wEWWVzx51WpQhtCS9O7IdWCa0KK2N%2BSa5jLHOTfZLmWHgoIIM%2F2BlJZmWgMYEbkqRZPqfvd7T01B%2Fsr4P6uhypGL1S11tv3dJRrDIlWq4LWyk61REromkhiHyg7sLysiLiBS%2FF4TRBTBCnWhjngNU5UxgwOiZDpuq1zBYsus%2BcdWsrN080tbhSdAIyeB%2F642wOdqg17LyZtz7mWblMvaTYO9km9eJ8vyFXl02OxlMnXoJWceKSPcl8Ps2d9Vzht9qphiV2pFwqs7mSmoRfi%2FlKmnu%2Fs58A6XykVZBPtKqHB8ZPJ4XZiKCiSxsuieMgwq6rkvWVUyoXO%2FZT%2FX1yceRMQufZb22fx4V6RvjZ7Q5j2odiEeVbDjj8QGcJ%2FW6R%2BVJubuwxDm1Xi%2BjVkqmnSlb6Pd%2FLQJ3AasJQgrJA6tFE1sxym89bykdT6EPyRiWVzQNM7SfhyZlGLfyiQkV3Kg%2BOv%2F%2BPwpuz0RU1MUAeRqrcpa7zhP6WYz%2BgSJNHVnnjOapySW5AtJKWDxsh4fxDNHctIBcr7G1xNDrQbS1bqsnrJRmP21jlqpr3BZdvsu2W7FloaRWhof9hRX41VwbS30SWw%2FPJsFiRPTQQsjVop6V8etqUVwj13KCVxdfwF1qFbm%2BOIuWzg%2FbnJjYGGGbPkTv%2Fxid5U12H1P7%2B%2B7hy1%2BX0LHueNj%2FWLRcOe92Gu1iy7YfPVn%2Bwu1VfuFazrSiq5aMrM7XrO6QNagHdJGG%2BxlGOqxivu1Vi8j3QNDtfkFy3kJeKxbeyT2o1TdDlan58bDNtZpf%2F1CaX0G3dHYod1VEaQ%2B7hCAqyn1EB9menWGQAAWKaQSRoT8z7ddfYUTQr7CZqIy%2F4EFyl8kuYXvjfm5IZB7JI5n%2BsaRTqdrTbtCnyN3GDFtE9PIb95nzlz%2Bb5yg8iNj2jYyIMKNoEcBS8Zx5TYwFgs2vUZP48b46hjkBXkLxSh9cCbb6wg9oV4jw2Vff9Gsx594qoCogiPVnZH36IfvRFBtjYNU5vrqq7K8HEiI73X6xPVfVSth0pVbajzD9TfbqTnqvlnvUDrRVFzbS3s8vF4ypFrHTfkXeE%2BWbc7KeSNfGwrVMiH81agd39Xhz8nV%2FYzNqiKuTN6M2O12Jnnl05456VL5x0i7jdlGBv3ou479DmeeKyPwbuHzawEDGD7ffahpYhqfMokW1blt3PAPZBGD0Hg70xiE4S%2BSgY
VIcmKU35jBtpXaKHCbpPG42laNzGEk%2B88lH1zY3FkzSCe07iCqFHcq9Y5kVpGjvf6C9bLTnlQs4PNalRQlaO2O91GIQIh1o2xGCtm14%2FablIHZb%2F2F%2B%2BQ6Ip6%2Beex56jT3gQmijH%2Bs5EevY4lsJtz4z1WpY8Pmm2k0QXTiCgnGRxYNbkI%2FvsU727bHHSiwUlHtWb4GkA%2F2QP3c0iM6kLgfI2FQG519rkefwgHKPF5%2FT5rrVqYaoiHHS7WY6vveQcs%2B4uzBGzu0%2FLp7X7%2B77QV8fdervPha3TF2qV5jLHWu7yx32Vr6Tvacd3eMJJqgAnUyB8IM5LqrpVcOnAgGJwF4N%2FELtVVNhDklXBIvGuMeM2DRlTBZTmBxLWGKDKq%2B%2B7TaW%2BuQs%2FPaT7TzbtXhYE2Pgm0DkwHlAzIRvOTpl%2BdEGtC7Vaat0moplx4gWtbYkq0PtS2qfqWp%2FX%2Bu%2FLVn%2FCcDtNxUm4kB%2FbiqMVBFR04j4KEu3ix7Sl7hMpJDfVQ3ZaUH2s%2BOCs6o5jJ0XKGEAMeuUIY8dz8BenTRHTJutFVriALx67ehOvNRnGDXVxC34Fz3hIsOI%2BtYK1cMRPfKUmPIjHyTB9LzMTHq%2B2ZNJVnPA8%2FhgidxhWUzcFzpUUk9Fc449pWVPPBg7xmuqMbU9BQZvCUEiBjDDNk%2BomqAujP0KneSS4Abu%2FX6yzcA7W11PTJsm10YBqmFWFKJEJ0uaCr3jMd%2B5H2DXj%2Be1rkacnkPuNpx%2BDBrTEMmB0Yb9%2FyEDg0YZtUF7uGLKjVnYv1DU7ybqnPRbaamoJ9nh%2ByVEIsiLyZ1MecdtNd%2Btisltv%2FWqqqQm7Alox9G4jxJaeGMXiSxEzwYK0Jr99UhaMM2NXSmlyqc303WxcfDUgZQ2aqN5mOJHNsMlaJ20ct0kXXxKsLGuCRI74Ph9NHctrDbu1QaFcWP6Vu1xaluMs5wN7EhB7alCGBKlVVoHY1%2FFu3j13spESacTNCDzVojKrBArG6N5jJFRwZrlZYDQDLwtyhuG8JEoTI3uVXMH4lTojRs7PGXiHlOwzYHC6A3mlxHcIR%2BZidH6KZoT0uvtvn7k9phjyoSiN6R7WHdIOdYXfgBKseLhRwwto2H09zkJGzFBLFqWkcled4xVcUvCWp6M0ELPg98P6Fnl%2BQAwkY0dq%2F34%2Bjwlx6poYpcqkwd0rMrPnsjciVMb3m6FS7%2FgKa2gGhFxRsXUceZOAhu8iCTpkRHx8yVYE6cGJhI0B8lNaW4aBuWSMqoQOWcpG4p4iIQmqaYq20%2FKSHeQEsExE93NULMcOgmxrJKO1KpLQCUQZ9IEpvLA2DjXakmos1VCHQYpdWZnc0dK201AwKnHt8iVPkfxufBprQBGbFHO1bvlMD0xmllt94ohcW8sJq3xXYapBFQEYaFfBldTZB4C6hB4zygTnbtqpy8rpSBFWgkeeWnU1e4BqO8z6koKrKJKxUcp50jEi5VyJut8Yk4XHo%2FDqaLd5aBZ2iFAoB0ea4SXmbWZ5Q8dPeH6OBBzPYzmY8jizgKX5IkKwCoh1IjAk9moj1EsnFYPyARs4m62uJwCc3Z9a3qiD820d7wnOLCEgtRGUyrSrbHhb1BHe32Dx8Ih%2FQ2%2Bm1vUe7MBEkCghQXs3GQjYND3rYUnhJeAEdcEu7DtxMVdxV%2F%2FEUwL0tFTUtgRjbxbiJK0BJQXmteqeJfsbrI2Omy%2BfHFGL4aFKIQwzU%2FUqPWEqb17BRifiBamPfXPpBXRs9W1XDEo3%2FxUgbCFhIOnyesvr%2FXwdDYXK4pYmqRQ1CRqXEK6LC26M09A%2BwjuFJ0JaTScmJDf3VzKz0ou3NS9IKWzXlFFoHccwT%2BSqFl5twHs81%2BgIF3RLC7dcV%2FpDP0nypahlh1h0rV
YFCFr5Ma8%2Fbsa5qYP0ZLIxs7Cp9IgnRwVjCOXgyZ1OmRj8eR8Dm1VtN51Osf1OUhL2h25us5GPIdcfMOeGR7RkuBDqsiHtk5t3craUDSKjTs7Dx%2FGJsX97gfcVwP3O%2Be3l4v6vI2oPMxvl9ScyC%2FtsOPRt0xSLiZsSoMls6TNhWe9DjykPwE01zFp8TD71BnzGyC6OFdv9UQQttqSKvKSXK0mP4m%2B9AJpR7UMV7hAWl7ds%2FXhJkc7x9xEw7vl893NTXPYt97uH2%2FsN62EnOs9lp84cPWJfaKdR0Mem08nikm0WJ5D1YpP0KjFeMhi8doToYSeqC%2BhIPgij4V0Hd9kNqb96yyxwhPRdPKio7LJ7OQUlWRwVIsXgKtStS21QoGWlXKA5hVPWs%2Fvugfa5jat4AdHTvwYFeYnF2E2Y%2BxEi4UfZnKkT6k4IC9h49HpZPK5SRYST46bJKv3taXBE8fnJ39mLf%2FCBtEqM4br4odCZjOG1Ok2x%2BEL1xebF7c6UXtogjG0%2Bq0jMwZpdFz6EGBqt66tvKJZtouN4F0CQLs9MRmmIxHbZF6y1ubQLAa6E6xRwZLjh2GlhZwgiT%2BtFEUuMP7YMguVHVjtePUfSuBk%2FY64NXTSWRjSEOlma3dOJj%2FpKTsXZ328WSeZhhFbuhywsVAeALFs3aI5YMIe%2B%2FDHjYdn8n0m8Uz6WsklLWno5gYjTFBSBYJvkjuiPGFCk2yJJSRMyAlJkxBSAQFiq1CbfFL%2Bc2NtpNVctHzXN7bGzvPRdb8SjedlFvEufBh4Re3pzbaSIKnd%2FJ5Sn83u7vU4ylimzOnR3fZUJjvM6KSorN%2BLU9m%2BvTbFQgx3r2H1DspHCkfnSGlnLcVKgdssSLDCwSwHP8tg94NLKudMPlQNi52wHtpUD29CZcUCJkgXdZZV4vL69DDeENaxkBYj2KR%2FZsx9gCO7a1D1Jn6Awdq6BMIgsizByiYHg4sfOrwl%2BBcd46%2FChx5k02HJZuFCOltKtZCso%2BJmYVU7slk4u%2Fpvjm0zrTZ7dKCbVS4QrIwJJh1DBV3kg8j8ybOpa1pzSP%2BTcrx4EYNOchMUQ%2BniKdqk06sO%2FKtl5HNPPWSYBNmJZkJnUD6Sst8pFJPkrHiGDGoVUGpCzJ5SDjklaybw0BmxbKjEBKB223vab3OMSVm0k2kUom8trGSLtcqLQ8%2Bkx7D2ozR3H7xe8aReK5WSFesMsww%2FVsKMkCNBlx%2BI9R3yevg7OoEAnF20k5z38ibGjkFI%2B%2FMLjYO9nXL65bxMGvPGQ3rQF1ZYIyG11bBCT5MFEYtoaiRZUQH2QiDSKnErj2QOOygwo%2FiJh%2FEDH9KYKvgF6bTXsKbdKz%2BwHWjnug%2B78Y5WU%2BQd%2FXRsr9QWpm7hUoNQM8cJ4todmdbsqwPBb82r%2FwM%3D). In case of paired end reads, corresponding fastq files should be named using *.R1.fastq.gz* and *.R2.fastq.gz* suffixes. Specify the desired analysis details for your data in the *essential.vars.groovy* file (see below) and run the pipeline *dnaseq.pipeline.groovy* as described [here](https://gitlab.rlp.net/imbforge/NGSpipe2go/-/blob/master/README.md). A markdown file *variantreport.Rmd* will be generated in the output reports folder after running the pipeline. 
Subsequently, the *variantreport.Rmd* file can be converted to a final html report using the *knitr* R-package.\r +GATK requires chromosomes in bam files to be karyotypically ordered. Best you use an ordered genome fasta file as reference for the pipeline (assigned in *essential.vars.groovy*, see below).\r +\r +\r +### The pipelines includes\r +- quality control of rawdata with FastQC\r +- Read mapping to the reference genome using BWA\r +- identify and remove duplicate reads with Picard MarkDuplicates\r +- Realign BAM files at Indel positions using GATK\r +- Recalibrate Base Qualities in BAM files using GATK\r +- Variant calling using GATK UnifiedGenotyper and GATK HaplotypeCaller\r +- Calculate VQSLOD scores for further filtering variants using GATK VariantRecalibrator and ApplyRecalibration\r +- Calculate the basic properties of variants as triplets for "all", "known" ,"novel" variants in comparison to dbSNP using GATK VariantEval\r +\r +\r +### Pipeline parameter settings\r +- essential.vars.groovy: essential parameter describing the experiment including: \r + - ESSENTIAL_PROJECT: your project folder name\r + - ESSENTIAL_BWA_REF: path to BWA indexed reference genome\r + - ESSENTIAL_CALL_REGION: bath to bed file containing region s to limit variant calling to (optional)\r + - ESSENTIAL_PAIRED: either paired end ("yes") or single end ("no") design\r + - ESSENTIAL_KNOWN_VARIANTS: dbSNP from GATK resource bundle (crucial for BaseQualityRecalibration step)\r + - ESSENTIAL_HAPMAP_VARIANTS: variants provided by the GATK bundle (essential for Variant Score Recalibration)\r + - ESSENTIAL_OMNI_VARIANTS: variants provided by the GATK bundle (essential for Variant Score Recalibration)\r + - ESSENTIAL_MILLS_VARIANTS: variants provided by the GATK bundle (essential for Variant Score Recalibration)\r + - ESSENTIAL_THOUSAND_GENOMES_VARIANTS: variants provided by the GATK bundle (essential for Variant Score Recalibration)\r + - ESSENTIAL_THREADS: number of threads for parallel 
tasks\r +- additional (more specialized) parameter can be given in the var.groovy-files of the individual pipeline modules \r +\r +\r +## Programs required\r +- Bedtools\r +- BWA\r +- FastQC\r +- GATK\r +- Picard\r +- Samtools\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/60?version=1" ; + schema1:isBasedOn "https://gitlab.rlp.net/imbforge/NGSpipe2go/-/tree/master/pipelines/DNAseq" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for DNA-seq" ; + schema1:sdDatePublished "2024-08-05 10:32:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/60/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1793 ; + schema1:creator ; + schema1:dateCreated "2020-10-07T07:43:50Z" ; + schema1:dateModified "2023-01-16T13:44:52Z" ; + schema1:description """# DNA-Seq pipeline\r +Here we provide the tools to perform paired end or single read DNA-Seq analysis including raw data quality control, read mapping, variant calling and variant filtering. 
\r +\r +\r +## Pipeline Workflow\r +All analysis steps are illustrated in the pipeline [flowchart](https://www.draw.io/?lightbox=1&highlight=0000ff&edit=_blank&layers=1&nav=1&title=NGSpipe2go_DNAseq_pipeline.html#R7R1bd5s489f4nObBPoDvj3ESO%2BnXZpuk3Wz3pUcG2WaDgQJ2Lr%2F%2B0wgJIxAY29jGabZ7TowAIc2MRnNXrXkxfxl5yJ19dQxs1TTFeKk1L2uaprY0rQb%2FK8Zr2NLtdsOGqWca7KFVw4P5hlmjwloXpoF94cHAcazAdMVG3bFtrAdCG%2FI851l8bOJY4lddNMWphgcdWenWR9MIZqxV7fRXN66xOZ2xT%2Fc0Nr8x0p%2BmnrOw2fdsx8bhnTni3bA5%2BjNkOM%2BxpuZVrXnhOU4Q%2Fpq%2FXGALwMohFr43zLgbDdnDdlDkhaV%2Be%2FfwuPi8uHz79%2FvN9%2BFPf%2FlvXeUYWCJrwYBR0zoW6XFgmEsAr2VObXqj83sBYx14FA7RJfk1ZX%2Fpa2Mv2ULGRPvirRQawSsH%2FiyYW%2BSXSu5ZaIytQQTTC8dyPPpQc0j%2FI4%2F4gec8RVgiUBxMHDtgJKV2YNzIn2GD9Uj7ia4mpmXFOr3qwL%2BoU36HIrE5mHrIMAlwE826Mzd1cqnAIxbyffY7wq8STTKOHIavJfYC%2FBJrYsgaYWeOA%2B%2BVPMLuaj32CltTmsJQ9byi0JbCCHQWI84OfxCxVTGN%2Bl6RB%2FnBKEROLf9i21w%2BXM8nj8355L63%2FPEjqNc7Kdxhg6wjdul4wcyZOjayrlatMQQAXP5bzF3%2B%2FBS5pGX11hfHcRme%2FsNB8MpwihaBQ5piVIJfzOAf6LDRZlc%2FWffw%2B%2FIlfvHKL2wCgNhLcPmT9wcXq9foFX8vTW4hDGDimQuRNfnOwtPZU%2F3P6rBl%2FfwyuhtMHr%2BPTHXx2Ku3GatD3hQHOXBvygnFwxYKzKU4jl2Qfq18nsxue4G1nN2Pb68fhr%2FerusfHGIvHMJzAoI8BwBX75fEMnrtrsAy6n0txTLUnpZmGS1F2516rkZfR%2BdB%2F9v8WRuN0C%2F%2F4n%2Bvn%2BvdnVlGaQyC35EziKyFvmZdS3fVZj%2B9sKUPthQpNznAUn97DC6%2FK%2F%2F8%2B3l4f%2BXNVeP76PUm5DOngKwsbr7i35tx862QXBDHzValcNxW3zuSsW2cg05ALnXgvsCLoXFoWnw4aToQtoQDEEXzWESRN2zJFg9bp0AtfFOHG3Wf4vucPKC23JecHT9quNBq54PL2%2FP6A%2F4NMDZdbJlk1yTtg7g4MC4kNISjE5tDoWTHAe827TrFrBmgMXk7NkmiIOOJaZt03ycosddMpRzg%2By6Cr%2BlMElk9qPt%2BPfAWto4CGFv8ss4oWNKpEht02HUhtFRyLmGnCKQhD0%2BEDmdBAMaHc1hY2nBqBkTybHiW27Chq6E5H08c0i35eTt6AAxrU4dc1IF2LWcMEi1egplkyNHvk9%2BE8n0g%2FKFhI%2FKjwe81iDjrLF%2FTY8x8jsMaSeC%2FAVqSknel1pDlIMMHKwtMH2wqjrGwAI4Ksg1YY75POLSJrJBbI8IMsXfKE8YvWF8EdIYxpkEIDbo17SllJ598fEb%2B0EYXmR6moKAA%2BeTSWwb2QSXbBhDlLNPUJ7MVulIsJYqg9nQ6aUMJ12LjWk9k%2FCp9P21J9tMECIgy6MJPoiy6RG2EyYcyFJdKerwhMjy2pODK3dALw1BNgUyTQIy3FZZK2Pe%2BOSYlK%2FaxutYWENYVO3AmEx%2BYbAIJ0aC3x4vWO6bwy3%2F%2FFAThdcKvIP
quJOEDaDicatdLswcSZn%2F42Ptr%2FB94AMjmAJYgUVh1pZxrToZPRC3KthTgWqG1RQnb6wFgEe51YvfIqgnqzLAF93QCPLq1rBNwv63YtmxHKibWusm2WeqpraZq4UkQ3uzJuXdcvJMJEceQlKJNvrFEnp8pKGU9toWclAL%2FfoAd9m3yBtdzlqaBC4g1Zh4RDSI56Za8KACDDibnu%2BFrdd%2FFujkhWrPwdeVT3giyGxw3aMjePCs6IRlatLWMXotxILYzxJg%2BN1mHOBo4ZJ%2BcWNRiMKFGgjj3fp6ZAX5wEWWVzx51WpQhtCS9O7IdWCa0KK2N%2BSa5jLHOTfZLmWHgoIIM%2F2BlJZmWgMYEbkqRZPqfvd7T01B%2Fsr4P6uhypGL1S11tv3dJRrDIlWq4LWyk61REromkhiHyg7sLysiLiBS%2FF4TRBTBCnWhjngNU5UxgwOiZDpuq1zBYsus%2BcdWsrN080tbhSdAIyeB%2F642wOdqg17LyZtz7mWblMvaTYO9km9eJ8vyFXl02OxlMnXoJWceKSPcl8Ps2d9Vzht9qphiV2pFwqs7mSmoRfi%2FlKmnu%2Fs58A6XykVZBPtKqHB8ZPJ4XZiKCiSxsuieMgwq6rkvWVUyoXO%2FZT%2FX1yceRMQufZb22fx4V6RvjZ7Q5j2odiEeVbDjj8QGcJ%2FW6R%2BVJubuwxDm1Xi%2BjVkqmnSlb6Pd%2FLQJ3AasJQgrJA6tFE1sxym89bykdT6EPyRiWVzQNM7SfhyZlGLfyiQkV3Kg%2BOv%2F%2BPwpuz0RU1MUAeRqrcpa7zhP6WYz%2BgSJNHVnnjOapySW5AtJKWDxsh4fxDNHctIBcr7G1xNDrQbS1bqsnrJRmP21jlqpr3BZdvsu2W7FloaRWhof9hRX41VwbS30SWw%2FPJsFiRPTQQsjVop6V8etqUVwj13KCVxdfwF1qFbm%2BOIuWzg%2FbnJjYGGGbPkTv%2Fxid5U12H1P7%2B%2B7hy1%2BX0LHueNj%2FWLRcOe92Gu1iy7YfPVn%2Bwu1VfuFazrSiq5aMrM7XrO6QNagHdJGG%2BxlGOqxivu1Vi8j3QNDtfkFy3kJeKxbeyT2o1TdDlan58bDNtZpf%2F1CaX0G3dHYod1VEaQ%2B7hCAqyn1EB9menWGQAAWKaQSRoT8z7ddfYUTQr7CZqIy%2F4EFyl8kuYXvjfm5IZB7JI5n%2BsaRTqdrTbtCnyN3GDFtE9PIb95nzlz%2Bb5yg8iNj2jYyIMKNoEcBS8Zx5TYwFgs2vUZP48b46hjkBXkLxSh9cCbb6wg9oV4jw2Vff9Gsx594qoCogiPVnZH36IfvRFBtjYNU5vrqq7K8HEiI73X6xPVfVSth0pVbajzD9TfbqTnqvlnvUDrRVFzbS3s8vF4ypFrHTfkXeE%2BWbc7KeSNfGwrVMiH81agd39Xhz8nV%2FYzNqiKuTN6M2O12Jnnl05456VL5x0i7jdlGBv3ou479DmeeKyPwbuHzawEDGD7ffahpYhqfMokW1blt3PAPZBGD0Hg70xiE4S%2BSgYVIcmKU35jBtpXaKHCbpPG42laNzGEk%2B88lH1zY3FkzSCe07iCqFHcq9Y5kVpGjvf6C9bLTnlQs4PNalRQlaO2O91GIQIh1o2xGCtm14%2FablIHZb%2F2F%2B%2BQ6Ip6%2Beex56jT3gQmijH%2Bs5EevY4lsJtz4z1WpY8Pmm2k0QXTiCgnGRxYNbkI%2FvsU727bHHSiwUlHtWb4GkA%2F2QP3c0iM6kLgfI2FQG519rkefwgHKPF5%2FT5rrVqYaoiHHS7WY6vveQcs%2B4uzBGzu0%2FLp7X7%2B77QV8fdervPha3TF2qV5jLHWu7yx32Vr6Tvacd3eMJJqgAnUyB8IM5LqrpVcOnAgGJwF4N%2FELtVVNhDklXBIvGuMeM2DRlTBZTmBxLWGKD
Kq%2B%2B7TaW%2BuQs%2FPaT7TzbtXhYE2Pgm0DkwHlAzIRvOTpl%2BdEGtC7Vaat0moplx4gWtbYkq0PtS2qfqWp%2FX%2Bu%2FLVn%2FCcDtNxUm4kB%2FbiqMVBFR04j4KEu3ix7Sl7hMpJDfVQ3ZaUH2s%2BOCs6o5jJ0XKGEAMeuUIY8dz8BenTRHTJutFVriALx67ehOvNRnGDXVxC34Fz3hIsOI%2BtYK1cMRPfKUmPIjHyTB9LzMTHq%2B2ZNJVnPA8%2FhgidxhWUzcFzpUUk9Fc449pWVPPBg7xmuqMbU9BQZvCUEiBjDDNk%2BomqAujP0KneSS4Abu%2FX6yzcA7W11PTJsm10YBqmFWFKJEJ0uaCr3jMd%2B5H2DXj%2Be1rkacnkPuNpx%2BDBrTEMmB0Yb9%2FyEDg0YZtUF7uGLKjVnYv1DU7ybqnPRbaamoJ9nh%2ByVEIsiLyZ1MecdtNd%2Btisltv%2FWqqqQm7Alox9G4jxJaeGMXiSxEzwYK0Jr99UhaMM2NXSmlyqc303WxcfDUgZQ2aqN5mOJHNsMlaJ20ct0kXXxKsLGuCRI74Ph9NHctrDbu1QaFcWP6Vu1xaluMs5wN7EhB7alCGBKlVVoHY1%2FFu3j13spESacTNCDzVojKrBArG6N5jJFRwZrlZYDQDLwtyhuG8JEoTI3uVXMH4lTojRs7PGXiHlOwzYHC6A3mlxHcIR%2BZidH6KZoT0uvtvn7k9phjyoSiN6R7WHdIOdYXfgBKseLhRwwto2H09zkJGzFBLFqWkcled4xVcUvCWp6M0ELPg98P6Fnl%2BQAwkY0dq%2F34%2Bjwlx6poYpcqkwd0rMrPnsjciVMb3m6FS7%2FgKa2gGhFxRsXUceZOAhu8iCTpkRHx8yVYE6cGJhI0B8lNaW4aBuWSMqoQOWcpG4p4iIQmqaYq20%2FKSHeQEsExE93NULMcOgmxrJKO1KpLQCUQZ9IEpvLA2DjXakmos1VCHQYpdWZnc0dK201AwKnHt8iVPkfxufBprQBGbFHO1bvlMD0xmllt94ohcW8sJq3xXYapBFQEYaFfBldTZB4C6hB4zygTnbtqpy8rpSBFWgkeeWnU1e4BqO8z6koKrKJKxUcp50jEi5VyJut8Yk4XHo%2FDqaLd5aBZ2iFAoB0ea4SXmbWZ5Q8dPeH6OBBzPYzmY8jizgKX5IkKwCoh1IjAk9moj1EsnFYPyARs4m62uJwCc3Z9a3qiD820d7wnOLCEgtRGUyrSrbHhb1BHe32Dx8Ih%2FQ2%2Bm1vUe7MBEkCghQXs3GQjYND3rYUnhJeAEdcEu7DtxMVdxV%2F%2FEUwL0tFTUtgRjbxbiJK0BJQXmteqeJfsbrI2Omy%2BfHFGL4aFKIQwzU%2FUqPWEqb17BRifiBamPfXPpBXRs9W1XDEo3%2FxUgbCFhIOnyesvr%2FXwdDYXK4pYmqRQ1CRqXEK6LC26M09A%2BwjuFJ0JaTScmJDf3VzKz0ou3NS9IKWzXlFFoHccwT%2BSqFl5twHs81%2BgIF3RLC7dcV%2FpDP0nypahlh1h0rVYFCFr5Ma8%2Fbsa5qYP0ZLIxs7Cp9IgnRwVjCOXgyZ1OmRj8eR8Dm1VtN51Osf1OUhL2h25us5GPIdcfMOeGR7RkuBDqsiHtk5t3craUDSKjTs7Dx%2FGJsX97gfcVwP3O%2Be3l4v6vI2oPMxvl9ScyC%2FtsOPRt0xSLiZsSoMls6TNhWe9DjykPwE01zFp8TD71BnzGyC6OFdv9UQQttqSKvKSXK0mP4m%2B9AJpR7UMV7hAWl7ds%2FXhJkc7x9xEw7vl893NTXPYt97uH2%2FsN62EnOs9lp84cPWJfaKdR0Mem08nikm0WJ5D1YpP0KjFeMhi8doToYSeqC%2BhIPgij4V0Hd9kNqb96yyxwhPRdPKio7LJ7OQUlWRwVIsXgKtStS21QoGWlXKA5h
VPWs%2Fvugfa5jat4AdHTvwYFeYnF2E2Y%2BxEi4UfZnKkT6k4IC9h49HpZPK5SRYST46bJKv3taXBE8fnJ39mLf%2FCBtEqM4br4odCZjOG1Ok2x%2BEL1xebF7c6UXtogjG0%2Bq0jMwZpdFz6EGBqt66tvKJZtouN4F0CQLs9MRmmIxHbZF6y1ubQLAa6E6xRwZLjh2GlhZwgiT%2BtFEUuMP7YMguVHVjtePUfSuBk%2FY64NXTSWRjSEOlma3dOJj%2FpKTsXZ328WSeZhhFbuhywsVAeALFs3aI5YMIe%2B%2FDHjYdn8n0m8Uz6WsklLWno5gYjTFBSBYJvkjuiPGFCk2yJJSRMyAlJkxBSAQFiq1CbfFL%2Bc2NtpNVctHzXN7bGzvPRdb8SjedlFvEufBh4Re3pzbaSIKnd%2FJ5Sn83u7vU4ylimzOnR3fZUJjvM6KSorN%2BLU9m%2BvTbFQgx3r2H1DspHCkfnSGlnLcVKgdssSLDCwSwHP8tg94NLKudMPlQNi52wHtpUD29CZcUCJkgXdZZV4vL69DDeENaxkBYj2KR%2FZsx9gCO7a1D1Jn6Awdq6BMIgsizByiYHg4sfOrwl%2BBcd46%2FChx5k02HJZuFCOltKtZCso%2BJmYVU7slk4u%2Fpvjm0zrTZ7dKCbVS4QrIwJJh1DBV3kg8j8ybOpa1pzSP%2BTcrx4EYNOchMUQ%2BniKdqk06sO%2FKtl5HNPPWSYBNmJZkJnUD6Sst8pFJPkrHiGDGoVUGpCzJ5SDjklaybw0BmxbKjEBKB223vab3OMSVm0k2kUom8trGSLtcqLQ8%2Bkx7D2ozR3H7xe8aReK5WSFesMsww%2FVsKMkCNBlx%2BI9R3yevg7OoEAnF20k5z38ibGjkFI%2B%2FMLjYO9nXL65bxMGvPGQ3rQF1ZYIyG11bBCT5MFEYtoaiRZUQH2QiDSKnErj2QOOygwo%2FiJh%2FEDH9KYKvgF6bTXsKbdKz%2BwHWjnug%2B78Y5WU%2BQd%2FXRsr9QWpm7hUoNQM8cJ4todmdbsqwPBb82r%2FwM%3D). In case of paired end reads, corresponding fastq files should be named using *.R1.fastq.gz* and *.R2.fastq.gz* suffixes. Specify the desired analysis details for your data in the *essential.vars.groovy* file (see below) and run the pipeline *dnaseq.pipeline.groovy* as described [here](https://gitlab.rlp.net/imbforge/NGSpipe2go/-/blob/master/README.md). A markdown file *variantreport.Rmd* will be generated in the output reports folder after running the pipeline. Subsequently, the *variantreport.Rmd* file can be converted to a final html report using the *knitr* R-package.\r +GATK requires chromosomes in bam files to be karyotypically ordered. 
Best you use an ordered genome fasta file as reference for the pipeline (assigned in *essential.vars.groovy*, see below).\r +\r +\r +### The pipelines includes\r +- quality control of rawdata with FastQC\r +- Read mapping to the reference genome using BWA\r +- identify and remove duplicate reads with Picard MarkDuplicates\r +- Realign BAM files at Indel positions using GATK\r +- Recalibrate Base Qualities in BAM files using GATK\r +- Variant calling using GATK UnifiedGenotyper and GATK HaplotypeCaller\r +- Calculate VQSLOD scores for further filtering variants using GATK VariantRecalibrator and ApplyRecalibration\r +- Calculate the basic properties of variants as triplets for "all", "known" ,"novel" variants in comparison to dbSNP using GATK VariantEval\r +\r +\r +### Pipeline parameter settings\r +- essential.vars.groovy: essential parameter describing the experiment including: \r + - ESSENTIAL_PROJECT: your project folder name\r + - ESSENTIAL_BWA_REF: path to BWA indexed reference genome\r + - ESSENTIAL_CALL_REGION: bath to bed file containing region s to limit variant calling to (optional)\r + - ESSENTIAL_PAIRED: either paired end ("yes") or single end ("no") design\r + - ESSENTIAL_KNOWN_VARIANTS: dbSNP from GATK resource bundle (crucial for BaseQualityRecalibration step)\r + - ESSENTIAL_HAPMAP_VARIANTS: variants provided by the GATK bundle (essential for Variant Score Recalibration)\r + - ESSENTIAL_OMNI_VARIANTS: variants provided by the GATK bundle (essential for Variant Score Recalibration)\r + - ESSENTIAL_MILLS_VARIANTS: variants provided by the GATK bundle (essential for Variant Score Recalibration)\r + - ESSENTIAL_THOUSAND_GENOMES_VARIANTS: variants provided by the GATK bundle (essential for Variant Score Recalibration)\r + - ESSENTIAL_THREADS: number of threads for parallel tasks\r +- additional (more specialized) parameter can be given in the var.groovy-files of the individual pipeline modules \r +\r +\r +## Programs required\r +- Bedtools\r +- BWA\r +- 
FastQC\r +- GATK\r +- Picard\r +- Samtools\r +""" ; + schema1:keywords "DNA-seq, GATK3, bpipe, groovy" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "DNA-seq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/60?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:description "Analysis of S-protein polymorphism. This workflow includes: obtaining coding sequences of S proteins from a diverse group of coronaviruses and generating amino acid alignments to assess conservation of the polymorphic location. More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/9?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genomics - Analysis of S-protein polymorphism" ; + schema1:sdDatePublished "2024-08-05 10:33:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/9/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1619 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6982 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T13:00:36Z" ; + schema1:dateModified "2023-01-16T13:40:19Z" ; + schema1:description "Analysis of S-protein polymorphism. This workflow includes: obtaining coding sequences of S proteins from a diverse group of coronaviruses and generating amino acid alignments to assess conservation of the polymorphic location. 
More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Genomics - Analysis of S-protein polymorphism" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/9?version=1" ; + schema1:version 1 ; + ns1:input . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 3531 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Analysis of variation within individual COVID-19 samples \r +using bowtie2, bwa, fastp, multiqc , picard ,samtools, snpEff \r +Workflow, tools and data are available on https://github.com/fjrmoreews/cwl-workflow-SARS-CoV-2/tree/master/Variation\r +This worklow was ported into CWL from a Galaxy Workflow \r + ( https://github.com/galaxyproject/SARS-CoV-2/tree/master/genomics/4-Variation migrated to CWL).\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/28?version=1" ; + schema1:isBasedOn "https://github.com/fjrmoreews/cwl-workflow-SARS-CoV-2/tree/master/Variation" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for var-PE" ; + schema1:sdDatePublished "2024-08-05 10:33:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/28/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15597 ; + schema1:creator ; + schema1:dateCreated "2020-06-08T14:57:04Z" ; + schema1:dateModified "2023-01-16T13:41:56Z" ; + schema1:description """Analysis of variation within individual COVID-19 samples \r +using bowtie2, bwa, fastp, multiqc , picard ,samtools, snpEff \r +Workflow, tools and data are available on https://github.com/fjrmoreews/cwl-workflow-SARS-CoV-2/tree/master/Variation\r +This worklow was ported into CWL from a Galaxy Workflow \r + ( https://github.com/galaxyproject/SARS-CoV-2/tree/master/genomics/4-Variation migrated to CWL).\r +\r +""" ; + schema1:image ; + schema1:keywords "variation, CWL, covid-19" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "var-PE" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/28?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 117071 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/963?version=13" ; + schema1:isBasedOn "https://github.com/nf-core/airrflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/airrflow" ; + schema1:sdDatePublished "2024-08-05 10:24:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/963/ro_crate?version=13" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14171 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:37Z" ; + schema1:dateModified "2024-06-11T12:54:37Z" ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/963?version=13" ; + schema1:keywords "airr, b-cell, immcantation, immunorepertoire, repseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/airrflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/963?version=13" ; + schema1:version 13 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "This pipeline analyses data for HiCAR data, a robust and sensitive multi-omic co-assay for simultaneous measurement of transcriptome, chromatin accessibility and cis-regulatory chromatin contacts." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/990?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/hicar" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hicar" ; + schema1:sdDatePublished "2024-08-05 10:23:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/990/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9925 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:55Z" ; + schema1:dateModified "2024-06-11T12:54:55Z" ; + schema1:description "This pipeline analyses data for HiCAR data, a robust and sensitive multi-omic co-assay for simultaneous measurement of transcriptome, chromatin accessibility and cis-regulatory chromatin contacts." ; + schema1:keywords "atac, ATAC-seq, HiC, hicar, Multi-omics, transcriptome" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hicar" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/990?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + ; + schema1:description """# ![IMPaCT program](impact_qc/docs/png/impact_data_logo_pink_horitzontal.png)\r +\r +[![IMPaCT](https://img.shields.io/badge/Web%20-IMPaCT-blue)](https://impact.isciii.es/)\r +[![IMPaCT-isciii](https://img.shields.io/badge/Web%20-IMPaCT--isciii-red)](https://www.isciii.es/QueHacemos/Financiacion/IMPaCT/Paginas/default.aspx)\r +[![IMPaCT-Data](https://img.shields.io/badge/Web%20-IMPaCT--Data-1d355c.svg?labelColor=000000)](https://impact-data.bsc.es/)\r +\r +## Introduction of the project\r +\r +IMPaCT-Data is the IMPaCT program that aims to support the development of a common, interoperable and integrated system for the collection and analysis of clinical and molecular data by providing the knowledge and resources available in the Spanish Science and Technology System. This development will make it possible to answer research questions based on the different clinical and molecular information systems available. 
Fundamentally, it aims to provide researchers with a population perspective based on individual data.\r +\r +The IMPaCT-Data project is divided into different work packages (WP). In the context of IMPaCT-Data WP3 (Genomics), a working group of experts worked on the generation of a specific quality control (QC) workflow for germline exome samples.\r +\r +To achieve this, a set of metrics related to human genomic data was decided upon, and the toolset or software to extract these metrics was implemented in an existing variant calling workflow called Sarek, part of the nf-core community. The final outcome is a Nextflow subworkflow, called IMPaCT-QC implemented in the Sarek pipeline.\r +\r +Below you can find the explanation of this workflow (raw pipeline), the link to the documentation of the IMPaCT QC subworkflow and a linked documentation associated to the QC metrics added in the mentioned workflow.\r +\r +- [IMPaCT-data subworkflow documentation](impact_qc/README.md)\r +\r +- [Metrics documentation](https://docs.google.com/document/d/12OWCcNKatkdJelYyiovyil-bIXDESO_K2zeIB3vncW4/edit#heading=h.cvdlfn10wodq)\r +\r +

\r + \r + \r + nf-core/sarek\r + \r +

\r +\r +[![GitHub Actions CI Status](https://github.com/nf-core/sarek/actions/workflows/ci.yml/badge.svg)](https://github.com/nf-core/sarek/actions/workflows/ci.yml)\r +[![GitHub Actions Linting Status](https://github.com/nf-core/sarek/actions/workflows/linting.yml/badge.svg)](https://github.com/nf-core/sarek/actions/workflows/linting.yml)\r +[![AWS CI](https://img.shields.io/badge/CI%20tests-full%20size-FF9900?labelColor=000000&logo=Amazon%20AWS)](https://nf-co.re/sarek/results)\r +[![nf-test](https://img.shields.io/badge/unit_tests-nf--test-337ab7.svg)](https://www.nf-test.com)\r +[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.3476425-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.3476425)\r +[![nf-test](https://img.shields.io/badge/unit_tests-nf--test-337ab7.svg)](https://www.nf-test.com)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A523.04.0-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![Launch on Seqera Platform](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Seqera%20Platform-%234256e7)](https://tower.nf/launch?pipeline=https://github.com/nf-core/sarek)\r +\r +[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23sarek-4A154B?labelColor=000000&logo=slack)](https://nfcore.slack.com/channels/sarek)\r +[![Follow on Twitter](http://img.shields.io/badge/twitter-%40nf__core-1DA1F2?labelColor=000000&logo=twitter)](https://twitter.com/nf_core)\r +[![Follow on 
Mastodon](https://img.shields.io/badge/mastodon-nf__core-6364ff?labelColor=FFFFFF&logo=mastodon)](https://mstdn.science/@nf_core)\r +[![Watch on YouTube](http://img.shields.io/badge/youtube-nf--core-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/c/nf-core)\r +\r +## Introduction\r +\r +**nf-core/sarek** is a workflow designed to detect variants on whole genome or targeted sequencing data. Initially designed for Human, and Mouse, it can work on any species with a reference genome. Sarek can also handle tumour / normal pairs and could include additional relapses.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +On release, automated continuous integration tests run the pipeline on a full-sized dataset on the AWS cloud infrastructure. This ensures that the pipeline runs on AWS, has sensible resource allocation defaults set to run on real-world datasets, and permits the persistent storage of results to benchmark between pipeline releases and other analysis sources. The results obtained from the full-sized test can be viewed on the [nf-core website](https://nf-co.re/sarek/results).\r +\r +It's listed on [Elixir - Tools and Data Services Registry](https://bio.tools/nf-core-sarek) and [Dockstore](https://dockstore.org/workflows/github.com/nf-core/sarek).\r +\r +

\r + \r +

\r +\r +## Pipeline summary\r +\r +Depending on the options and samples provided, the pipeline can currently perform the following:\r +\r +- Form consensus reads from UMI sequences (`fgbio`)\r +- Sequencing quality control and trimming (enabled by `--trim_fastq`) (`FastQC`, `fastp`)\r +- Map Reads to Reference (`BWA-mem`, `BWA-mem2`, `dragmap` or `Sentieon BWA-mem`)\r +- Process BAM file (`GATK MarkDuplicates`, `GATK BaseRecalibrator` and `GATK ApplyBQSR` or `Sentieon LocusCollector` and `Sentieon Dedup`)\r +- Summarise alignment statistics (`samtools stats`, `mosdepth`)\r +- Variant calling (enabled by `--tools`, see [compatibility](https://nf-co.re/sarek/latest/docs/usage#which-variant-calling-tool-is-implemented-for-which-data-type)):\r + - `ASCAT`\r + - `CNVkit`\r + - `Control-FREEC`\r + - `DeepVariant`\r + - `freebayes`\r + - `GATK HaplotypeCaller`\r + - `Manta`\r + - `mpileup`\r + - `MSIsensor-pro`\r + - `Mutect2`\r + - `Sentieon Haplotyper`\r + - `Strelka2`\r + - `TIDDIT`\r +- Variant filtering and annotation (`SnpEff`, `Ensembl VEP`, `BCFtools annotate`)\r +- Summarise and represent QC (`MultiQC`)\r +\r +

\r + \r +

\r +\r +## Usage\r +\r +> [!NOTE]\r +> If you are new to Nextflow and nf-core, please refer to [this page](https://nf-co.re/docs/usage/installation) on how to set-up Nextflow. Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline) with `-profile test` before running the workflow on actual data.\r +\r +First, prepare a samplesheet with your input data that looks as follows:\r +\r +`samplesheet.csv`:\r +\r +```csv\r +patient,sample,lane,fastq_1,fastq_2\r +ID1,S1,L002,ID1_S1_L002_R1_001.fastq.gz,ID1_S1_L002_R2_001.fastq.gz\r +```\r +\r +Each row represents a pair of fastq files (paired end).\r +\r +Now, you can run the pipeline using:\r +\r +```bash\r +nextflow run nf-core/sarek \\\r + -profile \\\r + --input samplesheet.csv \\\r + --outdir \r +```\r +\r +> [!WARNING]\r +> Please provide pipeline parameters via the CLI or Nextflow `-params-file` option. Custom config files including those provided by the `-c` Nextflow option can be used to provide any configuration _**except for parameters**_;\r +> see [docs](https://nf-co.re/usage/configuration#custom-configuration-files).\r +\r +For more details and further functionality, please refer to the [usage documentation](https://nf-co.re/sarek/usage) and the [parameter documentation](https://nf-co.re/sarek/parameters).\r +\r +## Pipeline output\r +\r +To see the results of an example test run with a full size dataset refer to the [results](https://nf-co.re/sarek/results) tab on the nf-core website pipeline page.\r +For more details about the output files and reports, please refer to the\r +[output documentation](https://nf-co.re/sarek/output).\r +\r +## Benchmarking\r +\r +On each release, the pipeline is run on 3 full size tests:\r +\r +- `test_full` runs tumor-normal data for one patient from the SEQ2C consortium\r +- `test_full_germline` runs a WGS 30X Genome-in-a-Bottle(NA12878) dataset\r +- `test_full_germline_ncbench_agilent` runs two WES samples with 75M and 200M reads (data 
available [here](https://github.com/ncbench/ncbench-workflow#contributing-callsets)). The results are uploaded to Zenodo, evaluated against a truth dataset, and results are made available via the [NCBench dashboard](https://ncbench.github.io/report/report.html#).\r +\r +## Credits\r +\r +Sarek was originally written by Maxime U Garcia and Szilveszter Juhos at the [National Genomics Infastructure](https://ngisweden.scilifelab.se) and [National Bioinformatics Infastructure Sweden](https://nbis.se) which are both platforms at [SciLifeLab](https://scilifelab.se), with the support of [The Swedish Childhood Tumor Biobank (Barntumörbanken)](https://ki.se/forskning/barntumorbanken).\r +Friederike Hanssen and Gisela Gabernet at [QBiC](https://www.qbic.uni-tuebingen.de/) later joined and helped with further development.\r +\r +The Nextflow DSL2 conversion of the pipeline was lead by Friederike Hanssen and Maxime U Garcia.\r +\r +Maintenance is now lead by Friederike Hanssen and Maxime U Garcia (now at [Seqera Labs](https://seqera/io))\r +\r +Main developers:\r +\r +- [Maxime U Garcia](https://github.com/maxulysse)\r +- [Friederike Hanssen](https://github.com/FriederikeHanssen)\r +\r +We thank the following people for their extensive assistance in the development of this pipeline:\r +\r +- [Abhinav Sharma](https://github.com/abhi18av)\r +- [Adam Talbot](https://github.com/adamrtalbot)\r +- [Adrian Lärkeryd](https://github.com/adrlar)\r +- [Alexander Peltzer](https://github.com/apeltzer)\r +- [Alison Meynert](https://github.com/ameynert)\r +- [Anders Sune Pedersen](https://github.com/asp8200)\r +- [arontommi](https://github.com/arontommi)\r +- [BarryDigby](https://github.com/BarryDigby)\r +- [Bekir Ergüner](https://github.com/berguner)\r +- [bjornnystedt](https://github.com/bjornnystedt)\r +- [cgpu](https://github.com/cgpu)\r +- [Chela James](https://github.com/chelauk)\r +- [David Mas-Ponte](https://github.com/davidmasp)\r +- [Edmund Miller](https://github.com/edmundmiller)\r 
+- [Francesco Lescai](https://github.com/lescai)\r +- [Gavin Mackenzie](https://github.com/GCJMackenzie)\r +- [Gisela Gabernet](https://github.com/ggabernet)\r +- [Grant Neilson](https://github.com/grantn5)\r +- [gulfshores](https://github.com/gulfshores)\r +- [Harshil Patel](https://github.com/drpatelh)\r +- [James A. Fellows Yates](https://github.com/jfy133)\r +- [Jesper Eisfeldt](https://github.com/J35P312)\r +- [Johannes Alneberg](https://github.com/alneberg)\r +- [José Fernández Navarro](https://github.com/jfnavarro)\r +- [Júlia Mir Pedrol](https://github.com/mirpedrol)\r +- [Ken Brewer](https://github.com/kenibrewer)\r +- [Lasse Westergaard Folkersen](https://github.com/lassefolkersen)\r +- [Lucia Conde](https://github.com/lconde-ucl)\r +- [Malin Larsson](https://github.com/malinlarsson)\r +- [Marcel Martin](https://github.com/marcelm)\r +- [Nick Smith](https://github.com/nickhsmith)\r +- [Nicolas Schcolnicov](https://github.com/nschcolnicov)\r +- [Nilesh Tawari](https://github.com/nilesh-tawari)\r +- [Nils Homer](https://github.com/nh13)\r +- [Olga Botvinnik](https://github.com/olgabot)\r +- [Oskar Wacker](https://github.com/WackerO)\r +- [pallolason](https://github.com/pallolason)\r +- [Paul Cantalupo](https://github.com/pcantalupo)\r +- [Phil Ewels](https://github.com/ewels)\r +- [Sabrina Krakau](https://github.com/skrakau)\r +- [Sam Minot](https://github.com/sminot)\r +- [Sebastian-D](https://github.com/Sebastian-D)\r +- [Silvia Morini](https://github.com/silviamorins)\r +- [Simon Pearce](https://github.com/SPPearce)\r +- [Solenne Correard](https://github.com/scorreard)\r +- [Susanne Jodoin](https://github.com/SusiJo)\r +- [Szilveszter Juhos](https://github.com/szilvajuhos)\r +- [Tobias Koch](https://github.com/KochTobi)\r +- [Winni Kretzschmar](https://github.com/winni2k)\r +\r +## Acknowledgements\r +\r +| [![Barntumörbanken](docs/images/BTB_logo.png)](https://ki.se/forskning/barntumorbanken) | 
[![SciLifeLab](docs/images/SciLifeLab_logo.png)](https://scilifelab.se) |\r +| :-----------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------: |\r +| [![National Genomics Infrastructure](docs/images/NGI_logo.png)](https://ngisweden.scilifelab.se/) | [![National Bioinformatics Infrastructure Sweden](docs/images/NBIS_logo.png)](https://nbis.se) |\r +| [![QBiC](docs/images/QBiC_logo.png)](https://www.qbic.uni-tuebingen.de) | [![GHGA](docs/images/GHGA_logo.png)](https://www.ghga.de/) |\r +| [![DNGC](docs/images/DNGC_logo.png)](https://eng.ngc.dk/) | |\r +\r +## Contributions & Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +For further information or help, don't hesitate to get in touch on the [Slack `#sarek` channel](https://nfcore.slack.com/channels/sarek) (you can join with [this invite](https://nf-co.re/join/slack)), or contact us: [Maxime U Garcia](mailto:maxime.garcia@seqera.io?subject=[GitHub]%20nf-core/sarek), [Friederike Hanssen](mailto:friederike.hanssen@qbic.uni-tuebingen.de?subject=[GitHub]%20nf-core/sarek)\r +\r +## Citations\r +\r +If you use `nf-core/sarek` for your analysis, please cite the `Sarek` article as follows:\r +\r +> Friederike Hanssen, Maxime U Garcia, Lasse Folkersen, Anders Sune Pedersen, Francesco Lescai, Susanne Jodoin, Edmund Miller, Oskar Wacker, Nicholas Smith, nf-core community, Gisela Gabernet, Sven Nahnsen **Scalable and efficient DNA sequencing analysis on different compute infrastructures aiding variant discovery** _NAR Genomics and Bioinformatics_ Volume 6, Issue 2, June 2024, lqae031, [doi: 10.1093/nargab/lqae031](https://doi.org/10.1093/nargab/lqae031).\r +\r +> Garcia M, Juhos S, Larsson M et al. 
**Sarek: A portable workflow for whole-genome sequencing analysis of germline and somatic variants [version 2; peer review: 2 approved]** _F1000Research_ 2020, 9:63 [doi: 10.12688/f1000research.16665.2](http://dx.doi.org/10.12688/f1000research.16665.2).\r +\r +You can cite the sarek zenodo record for a specific version using the following [doi: 10.5281/zenodo.3476425](https://doi.org/10.5281/zenodo.3476425)\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +You can cite the `nf-core` publication as follows:\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +\r +## CHANGELOG\r +\r +- [CHANGELOG](CHANGELOG.md)\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.1030.2" ; + schema1:isBasedOn "https://github.com/EGA-archive/sarek-IMPaCT-data-QC.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for IMPaCT-Data quality control workflow implementation in nf-core/Sarek" ; + schema1:sdDatePublished "2024-08-05 10:22:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1030/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 19394 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-12T13:11:24Z" ; + schema1:dateModified "2024-06-12T13:19:08Z" ; + schema1:description """# ![IMPaCT program](impact_qc/docs/png/impact_data_logo_pink_horitzontal.png)\r +\r +[![IMPaCT](https://img.shields.io/badge/Web%20-IMPaCT-blue)](https://impact.isciii.es/)\r +[![IMPaCT-isciii](https://img.shields.io/badge/Web%20-IMPaCT--isciii-red)](https://www.isciii.es/QueHacemos/Financiacion/IMPaCT/Paginas/default.aspx)\r +[![IMPaCT-Data](https://img.shields.io/badge/Web%20-IMPaCT--Data-1d355c.svg?labelColor=000000)](https://impact-data.bsc.es/)\r +\r +## Introduction of the project\r +\r +IMPaCT-Data is the IMPaCT program that aims to support the development of a common, interoperable and integrated system for the collection and analysis of clinical and molecular data by providing the knowledge and resources available in the Spanish Science and Technology System. This development will make it possible to answer research questions based on the different clinical and molecular information systems available. Fundamentally, it aims to provide researchers with a population perspective based on individual data.\r +\r +The IMPaCT-Data project is divided into different work packages (WP). In the context of IMPaCT-Data WP3 (Genomics), a working group of experts worked on the generation of a specific quality control (QC) workflow for germline exome samples.\r +\r +To achieve this, a set of metrics related to human genomic data was decided upon, and the toolset or software to extract these metrics was implemented in an existing variant calling workflow called Sarek, part of the nf-core community. 
The final outcome is a Nextflow subworkflow, called IMPaCT-QC implemented in the Sarek pipeline.\r +\r +Below you can find the explanation of this workflow (raw pipeline), the link to the documentation of the IMPaCT QC subworkflow and a linked documentation associated to the QC metrics added in the mentioned workflow.\r +\r +- [IMPaCT-data subworkflow documentation](impact_qc/README.md)\r +\r +- [Metrics documentation](https://docs.google.com/document/d/12OWCcNKatkdJelYyiovyil-bIXDESO_K2zeIB3vncW4/edit#heading=h.cvdlfn10wodq)\r +\r +

\r + \r + \r + nf-core/sarek\r + \r +

\r +\r +[![GitHub Actions CI Status](https://github.com/nf-core/sarek/actions/workflows/ci.yml/badge.svg)](https://github.com/nf-core/sarek/actions/workflows/ci.yml)\r +[![GitHub Actions Linting Status](https://github.com/nf-core/sarek/actions/workflows/linting.yml/badge.svg)](https://github.com/nf-core/sarek/actions/workflows/linting.yml)\r +[![AWS CI](https://img.shields.io/badge/CI%20tests-full%20size-FF9900?labelColor=000000&logo=Amazon%20AWS)](https://nf-co.re/sarek/results)\r +[![nf-test](https://img.shields.io/badge/unit_tests-nf--test-337ab7.svg)](https://www.nf-test.com)\r +[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.3476425-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.3476425)\r +[![nf-test](https://img.shields.io/badge/unit_tests-nf--test-337ab7.svg)](https://www.nf-test.com)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A523.04.0-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![Launch on Seqera Platform](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Seqera%20Platform-%234256e7)](https://tower.nf/launch?pipeline=https://github.com/nf-core/sarek)\r +\r +[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23sarek-4A154B?labelColor=000000&logo=slack)](https://nfcore.slack.com/channels/sarek)\r +[![Follow on Twitter](http://img.shields.io/badge/twitter-%40nf__core-1DA1F2?labelColor=000000&logo=twitter)](https://twitter.com/nf_core)\r +[![Follow on 
Mastodon](https://img.shields.io/badge/mastodon-nf__core-6364ff?labelColor=FFFFFF&logo=mastodon)](https://mstdn.science/@nf_core)\r +[![Watch on YouTube](http://img.shields.io/badge/youtube-nf--core-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/c/nf-core)\r +\r +## Introduction\r +\r +**nf-core/sarek** is a workflow designed to detect variants on whole genome or targeted sequencing data. Initially designed for Human, and Mouse, it can work on any species with a reference genome. Sarek can also handle tumour / normal pairs and could include additional relapses.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +On release, automated continuous integration tests run the pipeline on a full-sized dataset on the AWS cloud infrastructure. This ensures that the pipeline runs on AWS, has sensible resource allocation defaults set to run on real-world datasets, and permits the persistent storage of results to benchmark between pipeline releases and other analysis sources. The results obtained from the full-sized test can be viewed on the [nf-core website](https://nf-co.re/sarek/results).\r +\r +It's listed on [Elixir - Tools and Data Services Registry](https://bio.tools/nf-core-sarek) and [Dockstore](https://dockstore.org/workflows/github.com/nf-core/sarek).\r +\r +

\r + \r +

\r +\r +## Pipeline summary\r +\r +Depending on the options and samples provided, the pipeline can currently perform the following:\r +\r +- Form consensus reads from UMI sequences (`fgbio`)\r +- Sequencing quality control and trimming (enabled by `--trim_fastq`) (`FastQC`, `fastp`)\r +- Map Reads to Reference (`BWA-mem`, `BWA-mem2`, `dragmap` or `Sentieon BWA-mem`)\r +- Process BAM file (`GATK MarkDuplicates`, `GATK BaseRecalibrator` and `GATK ApplyBQSR` or `Sentieon LocusCollector` and `Sentieon Dedup`)\r +- Summarise alignment statistics (`samtools stats`, `mosdepth`)\r +- Variant calling (enabled by `--tools`, see [compatibility](https://nf-co.re/sarek/latest/docs/usage#which-variant-calling-tool-is-implemented-for-which-data-type)):\r + - `ASCAT`\r + - `CNVkit`\r + - `Control-FREEC`\r + - `DeepVariant`\r + - `freebayes`\r + - `GATK HaplotypeCaller`\r + - `Manta`\r + - `mpileup`\r + - `MSIsensor-pro`\r + - `Mutect2`\r + - `Sentieon Haplotyper`\r + - `Strelka2`\r + - `TIDDIT`\r +- Variant filtering and annotation (`SnpEff`, `Ensembl VEP`, `BCFtools annotate`)\r +- Summarise and represent QC (`MultiQC`)\r +\r +

\r + \r +

\r +\r +## Usage\r +\r +> [!NOTE]\r +> If you are new to Nextflow and nf-core, please refer to [this page](https://nf-co.re/docs/usage/installation) on how to set-up Nextflow. Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline) with `-profile test` before running the workflow on actual data.\r +\r +First, prepare a samplesheet with your input data that looks as follows:\r +\r +`samplesheet.csv`:\r +\r +```csv\r +patient,sample,lane,fastq_1,fastq_2\r +ID1,S1,L002,ID1_S1_L002_R1_001.fastq.gz,ID1_S1_L002_R2_001.fastq.gz\r +```\r +\r +Each row represents a pair of fastq files (paired end).\r +\r +Now, you can run the pipeline using:\r +\r +```bash\r +nextflow run nf-core/sarek \\\r + -profile \\\r + --input samplesheet.csv \\\r + --outdir \r +```\r +\r +> [!WARNING]\r +> Please provide pipeline parameters via the CLI or Nextflow `-params-file` option. Custom config files including those provided by the `-c` Nextflow option can be used to provide any configuration _**except for parameters**_;\r +> see [docs](https://nf-co.re/usage/configuration#custom-configuration-files).\r +\r +For more details and further functionality, please refer to the [usage documentation](https://nf-co.re/sarek/usage) and the [parameter documentation](https://nf-co.re/sarek/parameters).\r +\r +## Pipeline output\r +\r +To see the results of an example test run with a full size dataset refer to the [results](https://nf-co.re/sarek/results) tab on the nf-core website pipeline page.\r +For more details about the output files and reports, please refer to the\r +[output documentation](https://nf-co.re/sarek/output).\r +\r +## Benchmarking\r +\r +On each release, the pipeline is run on 3 full size tests:\r +\r +- `test_full` runs tumor-normal data for one patient from the SEQ2C consortium\r +- `test_full_germline` runs a WGS 30X Genome-in-a-Bottle(NA12878) dataset\r +- `test_full_germline_ncbench_agilent` runs two WES samples with 75M and 200M reads (data 
available [here](https://github.com/ncbench/ncbench-workflow#contributing-callsets)). The results are uploaded to Zenodo, evaluated against a truth dataset, and results are made available via the [NCBench dashboard](https://ncbench.github.io/report/report.html#).\r +\r +## Credits\r +\r +Sarek was originally written by Maxime U Garcia and Szilveszter Juhos at the [National Genomics Infastructure](https://ngisweden.scilifelab.se) and [National Bioinformatics Infastructure Sweden](https://nbis.se) which are both platforms at [SciLifeLab](https://scilifelab.se), with the support of [The Swedish Childhood Tumor Biobank (Barntumörbanken)](https://ki.se/forskning/barntumorbanken).\r +Friederike Hanssen and Gisela Gabernet at [QBiC](https://www.qbic.uni-tuebingen.de/) later joined and helped with further development.\r +\r +The Nextflow DSL2 conversion of the pipeline was lead by Friederike Hanssen and Maxime U Garcia.\r +\r +Maintenance is now lead by Friederike Hanssen and Maxime U Garcia (now at [Seqera Labs](https://seqera/io))\r +\r +Main developers:\r +\r +- [Maxime U Garcia](https://github.com/maxulysse)\r +- [Friederike Hanssen](https://github.com/FriederikeHanssen)\r +\r +We thank the following people for their extensive assistance in the development of this pipeline:\r +\r +- [Abhinav Sharma](https://github.com/abhi18av)\r +- [Adam Talbot](https://github.com/adamrtalbot)\r +- [Adrian Lärkeryd](https://github.com/adrlar)\r +- [Alexander Peltzer](https://github.com/apeltzer)\r +- [Alison Meynert](https://github.com/ameynert)\r +- [Anders Sune Pedersen](https://github.com/asp8200)\r +- [arontommi](https://github.com/arontommi)\r +- [BarryDigby](https://github.com/BarryDigby)\r +- [Bekir Ergüner](https://github.com/berguner)\r +- [bjornnystedt](https://github.com/bjornnystedt)\r +- [cgpu](https://github.com/cgpu)\r +- [Chela James](https://github.com/chelauk)\r +- [David Mas-Ponte](https://github.com/davidmasp)\r +- [Edmund Miller](https://github.com/edmundmiller)\r 
+- [Francesco Lescai](https://github.com/lescai)\r +- [Gavin Mackenzie](https://github.com/GCJMackenzie)\r +- [Gisela Gabernet](https://github.com/ggabernet)\r +- [Grant Neilson](https://github.com/grantn5)\r +- [gulfshores](https://github.com/gulfshores)\r +- [Harshil Patel](https://github.com/drpatelh)\r +- [James A. Fellows Yates](https://github.com/jfy133)\r +- [Jesper Eisfeldt](https://github.com/J35P312)\r +- [Johannes Alneberg](https://github.com/alneberg)\r +- [José Fernández Navarro](https://github.com/jfnavarro)\r +- [Júlia Mir Pedrol](https://github.com/mirpedrol)\r +- [Ken Brewer](https://github.com/kenibrewer)\r +- [Lasse Westergaard Folkersen](https://github.com/lassefolkersen)\r +- [Lucia Conde](https://github.com/lconde-ucl)\r +- [Malin Larsson](https://github.com/malinlarsson)\r +- [Marcel Martin](https://github.com/marcelm)\r +- [Nick Smith](https://github.com/nickhsmith)\r +- [Nicolas Schcolnicov](https://github.com/nschcolnicov)\r +- [Nilesh Tawari](https://github.com/nilesh-tawari)\r +- [Nils Homer](https://github.com/nh13)\r +- [Olga Botvinnik](https://github.com/olgabot)\r +- [Oskar Wacker](https://github.com/WackerO)\r +- [pallolason](https://github.com/pallolason)\r +- [Paul Cantalupo](https://github.com/pcantalupo)\r +- [Phil Ewels](https://github.com/ewels)\r +- [Sabrina Krakau](https://github.com/skrakau)\r +- [Sam Minot](https://github.com/sminot)\r +- [Sebastian-D](https://github.com/Sebastian-D)\r +- [Silvia Morini](https://github.com/silviamorins)\r +- [Simon Pearce](https://github.com/SPPearce)\r +- [Solenne Correard](https://github.com/scorreard)\r +- [Susanne Jodoin](https://github.com/SusiJo)\r +- [Szilveszter Juhos](https://github.com/szilvajuhos)\r +- [Tobias Koch](https://github.com/KochTobi)\r +- [Winni Kretzschmar](https://github.com/winni2k)\r +\r +## Acknowledgements\r +\r +| [![Barntumörbanken](docs/images/BTB_logo.png)](https://ki.se/forskning/barntumorbanken) | 
[![SciLifeLab](docs/images/SciLifeLab_logo.png)](https://scilifelab.se) |\r +| :-----------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------: |\r +| [![National Genomics Infrastructure](docs/images/NGI_logo.png)](https://ngisweden.scilifelab.se/) | [![National Bioinformatics Infrastructure Sweden](docs/images/NBIS_logo.png)](https://nbis.se) |\r +| [![QBiC](docs/images/QBiC_logo.png)](https://www.qbic.uni-tuebingen.de) | [![GHGA](docs/images/GHGA_logo.png)](https://www.ghga.de/) |\r +| [![DNGC](docs/images/DNGC_logo.png)](https://eng.ngc.dk/) | |\r +\r +## Contributions & Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +For further information or help, don't hesitate to get in touch on the [Slack `#sarek` channel](https://nfcore.slack.com/channels/sarek) (you can join with [this invite](https://nf-co.re/join/slack)), or contact us: [Maxime U Garcia](mailto:maxime.garcia@seqera.io?subject=[GitHub]%20nf-core/sarek), [Friederike Hanssen](mailto:friederike.hanssen@qbic.uni-tuebingen.de?subject=[GitHub]%20nf-core/sarek)\r +\r +## Citations\r +\r +If you use `nf-core/sarek` for your analysis, please cite the `Sarek` article as follows:\r +\r +> Friederike Hanssen, Maxime U Garcia, Lasse Folkersen, Anders Sune Pedersen, Francesco Lescai, Susanne Jodoin, Edmund Miller, Oskar Wacker, Nicholas Smith, nf-core community, Gisela Gabernet, Sven Nahnsen **Scalable and efficient DNA sequencing analysis on different compute infrastructures aiding variant discovery** _NAR Genomics and Bioinformatics_ Volume 6, Issue 2, June 2024, lqae031, [doi: 10.1093/nargab/lqae031](https://doi.org/10.1093/nargab/lqae031).\r +\r +> Garcia M, Juhos S, Larsson M et al. 
**Sarek: A portable workflow for whole-genome sequencing analysis of germline and somatic variants [version 2; peer review: 2 approved]** _F1000Research_ 2020, 9:63 [doi: 10.12688/f1000research.16665.2](http://dx.doi.org/10.12688/f1000research.16665.2).\r +\r +You can cite the sarek zenodo record for a specific version using the following [doi: 10.5281/zenodo.3476425](https://doi.org/10.5281/zenodo.3476425)\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +You can cite the `nf-core` publication as follows:\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +\r +## CHANGELOG\r +\r +- [CHANGELOG](CHANGELOG.md)\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1030?version=2" ; + schema1:keywords "Bioinformatics, Nextflow, variant calling, wes, WGS, NGS, EGA-archive, quality control" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "IMPaCT-Data quality control workflow implementation in nf-core/Sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1030?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# CMIP tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of computing **classical molecular interaction potentials** from **protein structures**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
Examples shown are **Molecular Interaction Potentials (MIPs) grids, protein-protein/ligand interaction potentials, and protein titration**. The particular structures used are the **Lysozyme** protein (PDB code [1AKI](https://www.rcsb.org/structure/1aki)), and a MD simulation of the complex formed by the **SARS-CoV-2 Receptor Binding Domain and the human Angiotensin Converting Enzyme 2** (PDB code [6VW1](https://www.rcsb.org/structure/6vw1)).\r +\r +The code wrapped is the ***Classical Molecular Interaction Potentials (CMIP)*** code:\r +\r +**Classical molecular interaction potentials: Improved setup procedure in molecular dynamics simulations of proteins.**\r +*Gelpí, J.L., Kalko, S.G., Barril, X., Cirera, J., de la Cruz, X., Luque, F.J. and Orozco, M. (2001)*\r +*Proteins, 45: 428-437. [https://doi.org/10.1002/prot.1159](https://doi.org/10.1002/prot.1159)*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.773.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_cmip" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Classical Molecular 
Interaction Potentials" ; + schema1:sdDatePublished "2024-08-05 10:25:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/773/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 78658 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-04T14:54:36Z" ; + schema1:dateModified "2024-03-04T15:08:10Z" ; + schema1:description """# CMIP tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of computing **classical molecular interaction potentials** from **protein structures**, step by step, using the **BioExcel Building Blocks library (biobb)**. Examples shown are **Molecular Interaction Potentials (MIPs) grids, protein-protein/ligand interaction potentials, and protein titration**. The particular structures used are the **Lysozyme** protein (PDB code [1AKI](https://www.rcsb.org/structure/1aki)), and a MD simulation of the complex formed by the **SARS-CoV-2 Receptor Binding Domain and the human Angiotensin Converting Enzyme 2** (PDB code [6VW1](https://www.rcsb.org/structure/6vw1)).\r +\r +The code wrapped is the ***Classical Molecular Interaction Potentials (CMIP)*** code:\r +\r +**Classical molecular interaction potentials: Improved setup procedure in molecular dynamics simulations of proteins.**\r +*Gelpí, J.L., Kalko, S.G., Barril, X., Cirera, J., de la Cruz, X., Luque, F.J. and Orozco, M. (2001)*\r +*Proteins, 45: 428-437. 
[https://doi.org/10.1002/prot.1159](https://doi.org/10.1002/prot.1159)*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/773?version=1" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Classical Molecular Interaction Potentials" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_cmip/blob/main/biobb_wf_cmip/notebooks/biobb_wf_cmip.ipynb" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/963?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/airrflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/airrflow" ; + schema1:sdDatePublished "2024-08-05 10:24:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/963/ro_crate?version=9" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10787 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:37Z" ; + schema1:dateModified "2024-06-11T12:54:37Z" ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/963?version=13" ; + schema1:keywords "airr, b-cell, immcantation, immunorepertoire, repseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/airrflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/963?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + schema1:datePublished "2023-10-03T13:42:23.090753" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "parallel-accession-download/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.12" . + + a schema1:Dataset ; + schema1:datePublished "2024-04-15T14:40:07.941038" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + , + ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-atac-cutandrun" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.7" . 
+ + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-pe" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.7" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 
[823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.128.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (PDBe REST API)" ; + schema1:sdDatePublished "2024-08-05 10:30:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/128/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 37193 ; + schema1:creator , + , + ; + schema1:dateCreated "2021-06-29T09:19:51Z" ; + schema1:dateModified "2022-09-15T11:17:49Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/128?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (PDBe REST API)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://raw.githubusercontent.com/bioexcel/biobb_wf_virtual-screening/master/biobb_wf_virtual-screening/notebooks/ebi_api/wf_vs_ebi_api.ipynb" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# Snakemake workflow: dna-seq-varlociraptor\r +\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥6.3.0-brightgreen.svg)](https://snakemake.github.io)\r +[![GitHub actions status](https://github.com/snakemake-workflows/dna-seq-varlociraptor/workflows/Tests/badge.svg?branch=master)](https://github.com/snakemake-workflows/dna-seq-varlociraptor/actions?query=branch%3Amaster+workflow%3ATests)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.4675661.svg)](https://doi.org/10.5281/zenodo.4675661)\r +\r +\r +A Snakemake workflow for calling small and structural variants under any kind of scenario (tumor/normal, tumor/normal/relapse, germline, pedigree, populations) via the unified statistical model of [Varlociraptor](https://varlociraptor.github.io).\r +\r +\r +## Usage\r +\r +The usage of this workflow is described in the [Snakemake Workflow Catalog](https://snakemake.github.io/snakemake-workflow-catalog/?usage=snakemake-workflows%2Fdna-seq-varlociraptor).\r +\r +If you use this workflow in a paper, don't forget to give credits to the authors by citing the URL of this (original) repository and its DOI (see above).\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/382?version=1" ; + schema1:isBasedOn "https://github.com/snakemake-workflows/dna-seq-varlociraptor.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for dna-seq-varlociraptor workflow" ; + schema1:sdDatePublished "2024-08-05 10:31:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/382/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1143 ; + schema1:creator ; + schema1:dateCreated "2022-09-02T10:48:32Z" ; + schema1:dateModified "2023-01-16T14:02:24Z" ; + schema1:description """# Snakemake workflow: dna-seq-varlociraptor\r +\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥6.3.0-brightgreen.svg)](https://snakemake.github.io)\r +[![GitHub actions status](https://github.com/snakemake-workflows/dna-seq-varlociraptor/workflows/Tests/badge.svg?branch=master)](https://github.com/snakemake-workflows/dna-seq-varlociraptor/actions?query=branch%3Amaster+workflow%3ATests)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.4675661.svg)](https://doi.org/10.5281/zenodo.4675661)\r +\r +\r +A Snakemake workflow for calling small and structural variants under any kind of scenario (tumor/normal, tumor/normal/relapse, germline, pedigree, populations) via the unified statistical model of [Varlociraptor](https://varlociraptor.github.io).\r +\r +\r +## Usage\r +\r +The usage of this workflow is described in the [Snakemake Workflow Catalog](https://snakemake.github.io/snakemake-workflow-catalog/?usage=snakemake-workflows%2Fdna-seq-varlociraptor).\r +\r +If you use this workflow in a paper, don't forget to give credits to the authors by citing the URL of this (original) repository and its DOI (see above).\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "dna-seq-varlociraptor workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/382?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A Nanostring nCounter analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1003?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/nanostring" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nanostring" ; + schema1:sdDatePublished "2024-08-05 10:23:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1003/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8462 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:05Z" ; + schema1:dateModified "2024-06-11T12:55:05Z" ; + schema1:description "A Nanostring nCounter analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1003?version=4" ; + schema1:keywords "nanostring, nanostringnorm" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nanostring" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1003?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=12" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=12" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8929 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:12Z" ; + schema1:dateModified "2024-06-11T12:55:12Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=12" ; + schema1:version 12 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.120.4" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:31:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/120/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 57824 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-13T15:35:54Z" ; + schema1:dateModified "2023-04-14T07:20:30Z" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/120?version=5" ; + schema1:isPartOf , + , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_wf_md_setup/blob/master/biobb_wf_md_setup/notebooks/biobb_MDsetup_tutorial.ipynb" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and modern ancient DNA pipeline in Nextflow and with cloud support." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-08-05 10:23:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3515 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:49Z" ; + schema1:dateModified "2024-06-11T12:54:49Z" ; + schema1:description "A fully reproducible and modern ancient DNA pipeline in Nextflow and with cloud support." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=6" ; + schema1:version 6 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Workflow converts one or multiple bam files back to the fastq format" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/968?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/bamtofastq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for qbic-pipelines/bamtofastq" ; + schema1:sdDatePublished "2024-08-05 10:24:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/968/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4042 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:43Z" ; + schema1:dateModified "2024-06-11T12:54:43Z" ; + schema1:description "Workflow converts one or multiple bam files back to the fastq format" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/968?version=5" ; + schema1:keywords "bamtofastq, Conversion, cramtofastq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "qbic-pipelines/bamtofastq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/968?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-08-05 10:23:17 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7941 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:15Z" ; + schema1:dateModified "2024-06-11T12:55:15Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/986?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/fetchngs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fetchngs" ; + schema1:sdDatePublished "2024-08-05 10:23:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/986/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6418 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:53Z" ; + schema1:dateModified "2024-06-11T12:54:53Z" ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/986?version=13" ; + schema1:keywords "ddbj, download, ena, FASTQ, geo, sra, synapse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fetchngs" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/986?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for analysis of Molecular Pixelation assays" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1010?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/pixelator" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/pixelator" ; + schema1:sdDatePublished "2024-08-05 10:23:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1010/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10580 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:07Z" ; + schema1:dateModified "2024-06-11T12:55:07Z" ; + schema1:description "Pipeline for analysis of Molecular Pixelation assays" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1010?version=7" ; + schema1:keywords "molecular-pixelation, pixelator, pixelgen-technologies, proteins, single-cell, single-cell-omics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/pixelator" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1010?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """This WF is based on the official Covid19-Galaxy assembly workflow as available from https://covid19.galaxyproject.org/genomics/2-assembly/ . It has been adapted to suit the needs of the analysis of metagenomics sequencing data. Prior to be submitted to INDSC databases, these data need to be cleaned from contaminant reads, including reads of possible human origin. \r +\r +The assembly of the SARS-CoV-2 genome is performed using both the Unicycler and the SPAdes assemblers, similar to the original WV.\r +\r +To facilitate the deposition of raw sequencing reads in INDSC databases, different fastq files are saved during the different steps of the WV. Which reflect different levels of stringency/filtration:\r +\r +(1) Initially fastq are filtered to remove human reads. \r +(2) Subsequently, a similarity search is performed against the reference assembly of the SARS-CoV-2 genome, to retain only SARS-CoV-2 like reads. 
\r +(3) Finally, SARS-CoV-2 reads are assembled, and the bowtie2 program is used to identify (and save in the corresponding fastq files) only reads that are completely identical to the final assembly of the genome.\r +\r +Any of the fastq files produced in (1), (2) or (3) are suitable for being submitted in raw reads repositories. While the files filtered according to (1) are richer and contain more data, including for example genomic sequences of different microbes living in the oral cavity; files filtered according to (3) contain only the reads that are completely identical to the final assembly. This should guarantee that any re-analysis/re-assembly of these always produce consistent and identical results. File obtained at (2) include all the reads in the sequencing reaction that had some degree of similarity with the reference SARS-CoV-2 genome, these may include subgenomic RNAs, but also polymorphic regions/variants in the case of a coinfection by multiple SARS-CoV-2 strains. Consequently, reanalysis of these data is not guarateed to produce identical and consistent results, depending on the parameters used during the assembly. However, these data contain more information.\r +\r +Please feel free to comment, ask questions and/or add suggestions\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/68?version=1" ; + schema1:isBasedOn "https://raw.githubusercontent.com/matteo14c/Galaxy_wfs/main/Galaxy-Workflow-MC_COVID19like_Assembly_Reads.ga" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for MC_COVID19like_Assembly_Reads" ; + schema1:sdDatePublished "2024-08-05 10:33:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/68/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 26705 ; + schema1:dateCreated "2020-11-04T18:35:15Z" ; + schema1:dateModified "2023-02-13T14:06:45Z" ; + schema1:description """This WF is based on the official Covid19-Galaxy assembly workflow as available from https://covid19.galaxyproject.org/genomics/2-assembly/ . It has been adapted to suit the needs of the analysis of metagenomics sequencing data. Prior to be submitted to INDSC databases, these data need to be cleaned from contaminant reads, including reads of possible human origin. \r +\r +The assembly of the SARS-CoV-2 genome is performed using both the Unicycler and the SPAdes assemblers, similar to the original WV.\r +\r +To facilitate the deposition of raw sequencing reads in INDSC databases, different fastq files are saved during the different steps of the WV. Which reflect different levels of stringency/filtration:\r +\r +(1) Initially fastq are filtered to remove human reads. \r +(2) Subsequently, a similarity search is performed against the reference assembly of the SARS-CoV-2 genome, to retain only SARS-CoV-2 like reads. \r +(3) Finally, SARS-CoV-2 reads are assembled, and the bowtie2 program is used to identify (and save in the corresponding fastq files) only reads that are completely identical to the final assembly of the genome.\r +\r +Any of the fastq files produced in (1), (2) or (3) are suitable for being submitted in raw reads repositories. While the files filtered according to (1) are richer and contain more data, including for example genomic sequences of different microbes living in the oral cavity; files filtered according to (3) contain only the reads that are completely identical to the final assembly. This should guarantee that any re-analysis/re-assembly of these always produce consistent and identical results. 
File obtained at (2) include all the reads in the sequencing reaction that had some degree of similarity with the reference SARS-CoV-2 genome, these may include subgenomic RNAs, but also polymorphic regions/variants in the case of a coinfection by multiple SARS-CoV-2 strains. Consequently, reanalysis of these data is not guarateed to produce identical and consistent results, depending on the parameters used during the assembly. However, these data contain more information.\r +\r +Please feel free to comment, ask questions and/or add suggestions\r +\r +""" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "MC_COVID19like_Assembly_Reads" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/68?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 369188 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-07T20:20:41.921844" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Purge-duplicate-contigs-VGP6/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicate-contigs-VGP6/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:version "0.2.0" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description "Finding potential muon stopping sites in crystalline copper" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/757?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Finding the Muon Stopping Site using PyMuonSuite" ; + schema1:sdDatePublished "2024-08-05 10:25:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/757/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8353 ; + schema1:creator , + ; + schema1:dateCreated "2024-02-15T11:52:27Z" ; + schema1:dateModified "2024-02-15T11:56:02Z" ; + schema1:description "Finding potential muon stopping sites in crystalline copper" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Finding the Muon Stopping Site using PyMuonSuite" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/757?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nascent Transcription Processing Pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1004?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/nascent" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nascent" ; + schema1:sdDatePublished "2024-08-05 10:23:30 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1004/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4457 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:06Z" ; + schema1:dateModified "2024-06-11T12:55:06Z" ; + schema1:description "Nascent Transcription Processing Pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1004?version=4" ; + schema1:keywords "gro-seq, nascent, pro-seq, rna, transcription, tss" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nascent" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1004?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "A workflow querying on an endpoint of a graph database by a file containing a SPARQL query." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/122?version=2" ; + schema1:isBasedOn "https://github.com/longmanplus/EOSC-Life_demos" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for SPARQL query (in a file) on graph database" ; + schema1:sdDatePublished "2024-08-05 10:33:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/122/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 282 ; + schema1:dateCreated "2021-05-26T10:51:51Z" ; + schema1:dateModified "2023-01-16T13:49:55Z" ; + schema1:description "A workflow querying on an endpoint of a graph database by a file containing a SPARQL query." 
; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/122?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "SPARQL query (in a file) on graph database" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/122?version=2" ; + schema1:version 2 ; + ns1:input , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 1556 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.276.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:31:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/276/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6791 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-14T10:06:01Z" ; + schema1:dateModified "2022-04-11T09:29:31Z" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/276?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_md_setup/python/workflow.py" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Mutations Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.289.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_md_setup_mutations/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Protein MD Setup tutorial with mutations" ; + schema1:sdDatePublished "2024-08-05 10:32:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/289/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 74473 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16985 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-17T13:22:24Z" ; + schema1:dateModified "2023-01-16T13:58:33Z" ; + schema1:description """# Mutations Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/289?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Protein MD Setup tutorial with mutations" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_md_setup_mutations/cwl/workflow.cwl" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Takes fastqs and reference data, to produce a single cell counts matrix into and save in annData format - adding a column called sample with the sample name. \r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/646?version=2" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq: Count and Load with Cell Ranger" ; + schema1:sdDatePublished "2024-08-05 10:24:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/646/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16848 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-05-30T05:49:49Z" ; + schema1:dateModified "2024-05-30T05:56:18Z" ; + schema1:description """Takes fastqs and reference data, to produce a single cell counts matrix into and save in annData format - adding a column called sample with the sample name. 
\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/646?version=1" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "scRNAseq: Count and Load with Cell Ranger" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/646?version=2" ; + schema1:version 2 ; + ns1:input , + , + ; + ns1:output . + + a schema1:Dataset ; + schema1:datePublished "2024-06-10T13:24:06.656544" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """From the R1 and R2 fastq files of a single samples, make a scRNAseq counts matrix, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. Produces a processed AnnData \r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/465?version=3" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq Single Sample Processing STARSolo" ; + schema1:sdDatePublished "2024-08-05 10:24:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/465/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 128461 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-11-09T01:58:04Z" ; + schema1:dateModified "2023-11-09T03:52:15Z" ; + schema1:description """From the R1 and R2 fastq files of a single samples, make a scRNAseq counts matrix, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. Produces a processed AnnData \r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/465?version=3" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "scRNAseq Single Sample Processing STARSolo" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/465?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# ![sanger-tol/readmapping](docs/images/sanger-tol-readmapping_logo.png)\r +\r +[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.6563577-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.6563577)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.10.1-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![Launch on Nextflow 
Tower](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Nextflow%20Tower-%234256e7)](https://tower.nf/launch?pipeline=https://github.com/sanger-tol/readmapping)\r +\r +## Introduction\r +\r +**sanger-tol/readmapping** is a bioinformatics best-practice analysis pipeline for mapping reads generated using Illumina, HiC, PacBio and Nanopore technologies against a genome assembly.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +On merge to `dev` and `main` branch, automated continuous integration tests run the pipeline on a full-sized dataset on the Wellcome Sanger Institute HPC farm using the Nextflow Tower infrastructure. This ensures that the pipeline runs on full sized datasets, has sensible resource allocation defaults set to run on real-world datasets, and permits the persistent storage of results to benchmark between pipeline releases and other analysis sources.\r +\r +## Pipeline summary\r +\r +\r +\r +## Quick Start\r +\r +1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=22.10.1`)\r +\r +2. 
Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) (you can follow [this tutorial](https://singularity-tutorial.github.io/01-installation/)), [`Podman`](https://podman.io/), [`Shifter`](https://nersc.gitlab.io/development/shifter/how-to-use/) or [`Charliecloud`](https://hpc.github.io/charliecloud/) for full pipeline reproducibility _(you can use [`Conda`](https://conda.io/miniconda.html) both to install Nextflow itself and also to manage software within pipelines. Please only use it within pipelines as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_.\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```bash\r + nextflow run sanger-tol/readmapping -profile test,YOURPROFILE --outdir \r + ```\r +\r + Note that some form of configuration will be needed so that Nextflow knows how to fetch the required software. This is usually done in the form of a config profile (`YOURPROFILE` in the example command above). You can chain multiple config profiles in a comma-separated string.\r +\r + > - The pipeline comes with config profiles called `docker`, `singularity`, `podman`, `shifter`, `charliecloud` and `conda` which instruct the pipeline to use the named tool for software management. For example, `-profile test,docker`.\r + > - Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment.\r + > - If you are using `singularity`, please use the [`nf-core download`](https://nf-co.re/tools/#downloading-pipelines-for-offline-use) command to download images first, before running the pipeline. 
Setting the [`NXF_SINGULARITY_CACHEDIR` or `singularity.cacheDir`](https://www.nextflow.io/docs/latest/singularity.html?#singularity-docker-hub) Nextflow options enables you to store and re-use the images from a central location for future pipeline runs.\r + > - If you are using `conda`, it is highly recommended to use the [`NXF_CONDA_CACHEDIR` or `conda.cacheDir`](https://www.nextflow.io/docs/latest/conda.html) settings to store the environments in a central location for future pipeline runs.\r +\r +4. Start running your own analysis!\r +\r + ```bash\r + nextflow run sanger-tol/readmapping --input samplesheet.csv --fasta genome.fa.gz --outdir -profile \r + ```\r +\r +## Credits\r +\r +sanger-tol/readmapping was originally written by [Priyanka Surana](https://github.com/priyanka-surana).\r +\r +We thank the following people for their extensive assistance in the development of this pipeline:\r +\r +- [Matthieu Muffato](https://github.com/muffato) for the text logo\r +- [Guoying Qi](https://github.com/gq1) for being able to run tests using Nf-Tower and the Sanger HPC farm\r +\r +## Contributions and Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +For further information or help, don't hesitate to get in touch on the [Slack `#pipelines` channel](https://sangertreeoflife.slack.com/channels/pipelines). 
Please [create an issue](https://github.com/sanger-tol/readmapping/issues/new/choose) on GitHub if you are not on the Sanger slack channel.\r +\r +## Citations\r +\r +If you use sanger-tol/readmapping for your analysis, please cite it using the following doi: [10.5281/zenodo.6563577](https://doi.org/10.5281/zenodo.6563577)\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +This pipeline uses code and infrastructure developed and maintained by the [nf-core](https://nf-co.re) community, reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/665?version=1" ; + schema1:isBasedOn "https://github.com/sanger-tol/readmapping" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for sanger-tol/readmapping v1.1.0 - Hebridean Black" ; + schema1:sdDatePublished "2024-08-05 10:27:12 +0100" ; + schema1:url "https://workflowhub.eu/workflows/665/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1641 ; + schema1:creator ; + schema1:dateCreated "2023-11-14T11:55:56Z" ; + schema1:dateModified "2023-11-14T12:00:50Z" ; + schema1:description """# ![sanger-tol/readmapping](docs/images/sanger-tol-readmapping_logo.png)\r +\r +[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.6563577-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.6563577)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.10.1-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![Launch on Nextflow Tower](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Nextflow%20Tower-%234256e7)](https://tower.nf/launch?pipeline=https://github.com/sanger-tol/readmapping)\r +\r +## Introduction\r +\r +**sanger-tol/readmapping** is a bioinformatics best-practice analysis pipeline for mapping reads generated using Illumina, HiC, PacBio and Nanopore technologies against a genome assembly.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. 
Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +On merge to `dev` and `main` branch, automated continuous integration tests run the pipeline on a full-sized dataset on the Wellcome Sanger Institute HPC farm using the Nextflow Tower infrastructure. This ensures that the pipeline runs on full sized datasets, has sensible resource allocation defaults set to run on real-world datasets, and permits the persistent storage of results to benchmark between pipeline releases and other analysis sources.\r +\r +## Pipeline summary\r +\r +\r +\r +## Quick Start\r +\r +1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=22.10.1`)\r +\r +2. Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) (you can follow [this tutorial](https://singularity-tutorial.github.io/01-installation/)), [`Podman`](https://podman.io/), [`Shifter`](https://nersc.gitlab.io/development/shifter/how-to-use/) or [`Charliecloud`](https://hpc.github.io/charliecloud/) for full pipeline reproducibility _(you can use [`Conda`](https://conda.io/miniconda.html) both to install Nextflow itself and also to manage software within pipelines. Please only use it within pipelines as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_.\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```bash\r + nextflow run sanger-tol/readmapping -profile test,YOURPROFILE --outdir \r + ```\r +\r + Note that some form of configuration will be needed so that Nextflow knows how to fetch the required software. This is usually done in the form of a config profile (`YOURPROFILE` in the example command above). 
You can chain multiple config profiles in a comma-separated string.\r +\r + > - The pipeline comes with config profiles called `docker`, `singularity`, `podman`, `shifter`, `charliecloud` and `conda` which instruct the pipeline to use the named tool for software management. For example, `-profile test,docker`.\r + > - Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment.\r + > - If you are using `singularity`, please use the [`nf-core download`](https://nf-co.re/tools/#downloading-pipelines-for-offline-use) command to download images first, before running the pipeline. Setting the [`NXF_SINGULARITY_CACHEDIR` or `singularity.cacheDir`](https://www.nextflow.io/docs/latest/singularity.html?#singularity-docker-hub) Nextflow options enables you to store and re-use the images from a central location for future pipeline runs.\r + > - If you are using `conda`, it is highly recommended to use the [`NXF_CONDA_CACHEDIR` or `conda.cacheDir`](https://www.nextflow.io/docs/latest/conda.html) settings to store the environments in a central location for future pipeline runs.\r +\r +4. 
Start running your own analysis!\r +\r + ```bash\r + nextflow run sanger-tol/readmapping --input samplesheet.csv --fasta genome.fa.gz --outdir -profile \r + ```\r +\r +## Credits\r +\r +sanger-tol/readmapping was originally written by [Priyanka Surana](https://github.com/priyanka-surana).\r +\r +We thank the following people for their extensive assistance in the development of this pipeline:\r +\r +- [Matthieu Muffato](https://github.com/muffato) for the text logo\r +- [Guoying Qi](https://github.com/gq1) for being able to run tests using Nf-Tower and the Sanger HPC farm\r +\r +## Contributions and Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +For further information or help, don't hesitate to get in touch on the [Slack `#pipelines` channel](https://sangertreeoflife.slack.com/channels/pipelines). Please [create an issue](https://github.com/sanger-tol/readmapping/issues/new/choose) on GitHub if you are not on the Sanger slack channel.\r +\r +## Citations\r +\r +If you use sanger-tol/readmapping for your analysis, please cite it using the following doi: [10.5281/zenodo.6563577](https://doi.org/10.5281/zenodo.6563577)\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +This pipeline uses code and infrastructure developed and maintained by the [nf-core](https://nf-co.re) community, reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. 
doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "sanger-tol/readmapping v1.1.0 - Hebridean Black" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/665?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description "Genome assembly: Unicycler-based WF for Klebsiella pneumoniae [Wick et al. Microbial genomics 2017]" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/52?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ONT - Workflow-Wick-et.al." ; + schema1:sdDatePublished "2024-08-05 10:33:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/52/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 22259 ; + schema1:creator ; + schema1:dateCreated "2020-08-05T13:03:51Z" ; + schema1:dateModified "2023-02-13T14:06:46Z" ; + schema1:description "Genome assembly: Unicycler-based WF for Klebsiella pneumoniae [Wick et al. Microbial genomics 2017]" ; + schema1:keywords "ONT" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ONT - Workflow-Wick-et.al." ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/52?version=1" ; + schema1:version 1 ; + ns1:input <#ont___workflow_wick_et_al_-inputs-https://ndownloader.figshare.com/files/8811145>, + <#ont___workflow_wick_et_al_-inputs-https://ndownloader.figshare.com/files/8811148>, + <#ont___workflow_wick_et_al_-inputs-https://ndownloader.figshare.com/files/8812159> . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description "Metagenomics: taxa classification" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/53?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ONT -- Metagenomics-Kraken2-Krona" ; + schema1:sdDatePublished "2024-08-05 10:33:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/53/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7756 ; + schema1:creator ; + schema1:dateCreated "2020-08-05T13:05:34Z" ; + schema1:dateModified "2023-02-13T14:06:46Z" ; + schema1:description "Metagenomics: taxa classification" ; + schema1:keywords "ONT" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ONT -- Metagenomics-Kraken2-Krona" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/53?version=1" ; + schema1:version 1 ; + ns1:input . + + a schema1:Dataset ; + schema1:datePublished "2023-02-16T09:30:08.832459" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/fragment-based-docking-scoring" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "fragment-based-docking-scoring/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.5" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Galaxy Workflow Documentation: In-Silico Mass Spectra Prediction Using Semi-Empirical Quantum Chemistry\r +\r +\r +## Overview\r +\r +This workflow predicts in-silico mass spectra using a semi-empirical quantum chemistry method. 
It involves generating and optimizing molecular conformers and simulating their mass spectra with computational chemistry tools. The workflow receives an SDF file as input and outputs the mass spectrum in MSP file format.\r +\r +\r +\r +\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.897.1" ; + schema1:isBasedOn "https://github.com/RECETOX/workflow-testing/tree/main/QCxMS" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for End-to-end spectra predictions: multi atoms dataset" ; + schema1:sdDatePublished "2024-08-05 10:23:04 +0100" ; + schema1:url "https://workflowhub.eu/workflows/897/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 19226 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-03T13:52:34Z" ; + schema1:dateModified "2024-06-06T10:42:41Z" ; + schema1:description """# Galaxy Workflow Documentation: In-Silico Mass Spectra Prediction Using Semi-Empirical Quantum Chemistry\r +\r +\r +## Overview\r +\r +This workflow predicts in-silico mass spectra using a semi-empirical quantum chemistry method. It involves generating and optimizing molecular conformers and simulating their mass spectra with computational chemistry tools. The workflow receives an SDF file as input and outputs the mass spectrum in MSP file format.\r +\r +\r +\r +\r +\r +""" ; + schema1:image ; + schema1:keywords "Exposomics, QCxMS, GC-MS, Metabolomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "End-to-end spectra predictions: multi atoms dataset" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/897?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + ; + ns1:output , + , + , + , + , + , + , + . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 634182 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "This workflow correspond to the Genome-wide alternative splicing analysis training. It allows to analyze isoform switching by making use of IsoformSwitchAnalyzeR." ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/472?version=4" ; + schema1:license "CC-BY-NC-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genome-wide alternative splicing analysis" ; + schema1:sdDatePublished "2024-08-05 10:30:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/472/ro_crate?version=4" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 21107 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 81080 ; + schema1:creator ; + schema1:dateCreated "2023-05-25T21:57:53Z" ; + schema1:dateModified "2023-05-25T21:58:58Z" ; + schema1:description "This workflow correspond to the Genome-wide alternative splicing analysis training. It allows to analyze isoform switching by making use of IsoformSwitchAnalyzeR." ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/472?version=3" ; + schema1:keywords "Transcriptomics, alternative-splicing, GTN" ; + schema1:license "https://spdx.org/licenses/CC-BY-NC-4.0" ; + schema1:name "Genome-wide alternative splicing analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/472?version=4" ; + schema1:version 4 ; + ns1:input , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 46064 . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 54463 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5189 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:11Z" ; + schema1:dateModified "2024-06-11T12:55:11Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=3" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Exome SAMtools Workflow" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/240?version=1" ; + schema1:isBasedOn "https://github.com/inab/ipc_workflows/tree/main/exome/samtools" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for exome-samtools" ; + schema1:sdDatePublished "2024-08-05 10:32:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/240/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1464 ; + schema1:creator ; + schema1:dateCreated "2021-11-19T10:11:36Z" ; + schema1:dateModified "2023-01-16T13:55:08Z" ; + schema1:description "Exome SAMtools Workflow" ; + schema1:keywords "cancer, pediatric, SAMTools" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "exome-samtools" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/240?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-03-13T17:18:04.078159" ; + schema1:hasPart , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/velocyto" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + ; + schema1:name "velocyto/Velocyto-on10X-from-bundled" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "velocyto/Velocyto-on10X-filtered-barcodes" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/velocyto" ; + schema1:version "0.2" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Conformational Transitions calculations tutorial using BioExcel Building Blocks (biobb) and GOdMD\r +\r +This tutorial aims to illustrate the process of computing a conformational transition between two known structural conformations of a protein, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.548.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_godmd" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein Conformational Transitions calculations tutorial" ; + 
schema1:sdDatePublished "2024-08-05 10:29:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/548/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 19968 ; + schema1:creator , + ; + schema1:dateCreated "2023-08-02T09:35:32Z" ; + schema1:dateModified "2023-08-02T09:43:19Z" ; + schema1:description """# Protein Conformational Transitions calculations tutorial using BioExcel Building Blocks (biobb) and GOdMD\r +\r +This tutorial aims to illustrate the process of computing a conformational transition between two known structural conformations of a protein, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/548?version=1" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein Conformational Transitions calculations tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_wf_godmd/blob/a5e8e564a4fb8d90abc84ecac0a5322f17b2b562/biobb_wf_godmd/notebooks/biobb_wf_godmd.ipynb" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 6311 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "A.0.0" ; + schema1:sdDatePublished "2024-04-30T10:49:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "A.0.1" ; + schema1:sdDatePublished "2024-04-30T10:49:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "A.1.0" ; + schema1:sdDatePublished "2024-04-30T10:49:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "A.1.1" ; + schema1:sdDatePublished "2024-04-30T10:49:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "B.0.0" ; + schema1:sdDatePublished "2024-04-30T10:49:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "B.0.1" ; + schema1:sdDatePublished "2024-04-30T10:49:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "B.1.0" ; + schema1:sdDatePublished "2024-04-30T10:49:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "B.1.1" ; + schema1:sdDatePublished "2024-04-30T10:49:33+00:00" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/976?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/cutandrun" ; + schema1:sdDatePublished "2024-08-05 10:24:08 +0100" ; + schema1:url "https://workflowhub.eu/workflows/976/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10767 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:46Z" ; + schema1:dateModified "2024-06-11T12:54:46Z" ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/976?version=7" ; + schema1:keywords "cutandrun, cutandrun-seq, cutandtag, cutandtag-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/cutandrun" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/976?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.286.4" ; + 
schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_dna_helparms/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Structural DNA helical parameters tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/286/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 22900 ; + schema1:creator , + ; + schema1:dateCreated "2023-07-26T15:48:55Z" ; + schema1:dateModified "2023-07-26T15:53:47Z" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/286?version=3" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Structural DNA helical parameters tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_dna_helparms/python/workflow.py" ; + schema1:version 4 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """![Perl CI](https://github.com/FabianDeister/Library_curation_BOLD/actions/workflows/ci.yml/badge.svg)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.10975576.svg)](https://doi.org/10.5281/zenodo.10975576)\r +\r +# Library curation BOLD\r +\r +![alt text](https://github.com/FabianDeister/Library_curation_BOLD/blob/main/doc/IBOL_LOGO_TRANSPARENT.png?raw=true)\r +\r +This repository contains scripts and synonymy data for pipelining the \r +automated curation of [BOLD](https://boldsystems.org) data dumps in \r +BCDM TSV format. The goal is to implement the classification of barcode \r +reference sequences as is being developed by the \r +[BGE](https://biodiversitygenomics.eu) consortium. A living document\r +in which these criteria are being developed is located\r +[here](https://docs.google.com/document/d/18m-7UnoJTG49TbvTsq_VncKMYZbYVbau98LE_q4rQvA/edit).\r +\r +A further goal of this project is to develop the code in this repository\r +according to the standards developed by the community in terms of automation,\r +reproducibility, and provenance. In practice, this means including the\r +scripts in a pipeline system such as [snakemake](https://snakemake.readthedocs.io/),\r +adopting an environment configuration system such as\r +[conda](https://docs.conda.io/), and organizing the folder structure\r +in compliance with the requirements of\r +[WorkFlowHub](https://workflowhub.eu/). 
The latter will provide it with \r +a DOI and will help generate [RO-crate](https://www.researchobject.org/ro-crate/)\r +documents, which means the entire tool chain is FAIR compliant according\r +to the current state of the art.\r +\r +## Install\r +Clone the repo:\r +```{shell}\r +git clone https://github.com/FabianDeister/Library_curation_BOLD.git\r +```\r +Change directory: \r +```{shell}\r +cd Library_curation_BOLD\r +```\r +The code in this repo depends on various tools. These are managed using\r +the `mamba` program (a drop-in replacement of `conda`). The following\r +sets up an environment in which all needed tools are installed:\r +\r +```{shell}\r +mamba env create -f environment.yml\r +```\r +\r +Once set up, this is activated like so:\r +\r +```{shell}\r +mamba activate bold-curation\r +```\r +\r +## How to run\r +### Bash\r +Although the aim of this project is to integrate all steps of the process\r +in a simple snakemake pipeline, at present this is not implemented. Instead,\r +the steps are executed individually on the command line as perl scripts\r +within the conda/mamba environment. Because the current project has its own\r +perl modules in the `lib` folder, every script needs to be run with the \r +additional include flag to add the module folder to the search path. 
Hence,\r +the invocation looks like the following inside the scripts folder:\r +\r +```{shell}\r +perl -I../../lib scriptname.pl -arg1 val1 -arg2 val2\r +```\r +### snakemake\r +\r +Follow the installation instructions above.\r +\r +Update config/config.yml to define your input data.\r +\r +Navigate to the directory "workflow" and type:\r +```{shell}\r +snakemake -p -c {number of cores} target\r +```\r +\r +If running on an HPC cluster with a SLURM scheduler you could use a bash script like this one:\r +```{shell}\r +#!/bin/bash\r +#SBATCH --partition=hour\r +#SBATCH --output=job_curate_bold_%j.out\r +#SBATCH --error=job_curate_bold_%j.err\r +#SBATCH --mem=24G\r +#SBATCH --cpus-per-task=2\r +\r +source activate bold-curation\r +\r +snakemake -p -c 2 target\r +\r +echo Complete!\r +```\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.833.1" ; + schema1:isBasedOn "https://github.com/FabianDeister/Library_curation_BOLD" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Library curation BOLD" ; + schema1:sdDatePublished "2024-08-05 10:24:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/833/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 27275 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13728 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-04-24T08:51:29Z" ; + schema1:dateModified "2024-04-24T09:11:55Z" ; + schema1:description """![Perl CI](https://github.com/FabianDeister/Library_curation_BOLD/actions/workflows/ci.yml/badge.svg)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.10975576.svg)](https://doi.org/10.5281/zenodo.10975576)\r +\r +# Library curation BOLD\r +\r +![alt text](https://github.com/FabianDeister/Library_curation_BOLD/blob/main/doc/IBOL_LOGO_TRANSPARENT.png?raw=true)\r +\r +This repository contains scripts and synonymy data for pipelining the \r +automated curation of [BOLD](https://boldsystems.org) data dumps in \r +BCDM TSV format. The goal is to implement the classification of barcode \r +reference sequences as is being developed by the \r +[BGE](https://biodiversitygenomics.eu) consortium. A living document\r +in which these criteria are being developed is located\r +[here](https://docs.google.com/document/d/18m-7UnoJTG49TbvTsq_VncKMYZbYVbau98LE_q4rQvA/edit).\r +\r +A further goal of this project is to develop the code in this repository\r +according to the standards developed by the community in terms of automation,\r +reproducibility, and provenance. In practice, this means including the\r +scripts in a pipeline system such as [snakemake](https://snakemake.readthedocs.io/),\r +adopting an environment configuration system such as\r +[conda](https://docs.conda.io/), and organizing the folder structure\r +in compliance with the requirements of\r +[WorkFlowHub](https://workflowhub.eu/). 
The latter will provide it with \r +a DOI and will help generate [RO-crate](https://www.researchobject.org/ro-crate/)\r +documents, which means the entire tool chain is FAIR compliant according\r +to the current state of the art.\r +\r +## Install\r +Clone the repo:\r +```{shell}\r +git clone https://github.com/FabianDeister/Library_curation_BOLD.git\r +```\r +Change directory: \r +```{shell}\r +cd Library_curation_BOLD\r +```\r +The code in this repo depends on various tools. These are managed using\r +the `mamba` program (a drop-in replacement of `conda`). The following\r +sets up an environment in which all needed tools are installed:\r +\r +```{shell}\r +mamba env create -f environment.yml\r +```\r +\r +Once set up, this is activated like so:\r +\r +```{shell}\r +mamba activate bold-curation\r +```\r +\r +## How to run\r +### Bash\r +Although the aim of this project is to integrate all steps of the process\r +in a simple snakemake pipeline, at present this is not implemented. Instead,\r +the steps are executed individually on the command line as perl scripts\r +within the conda/mamba environment. Because the current project has its own\r +perl modules in the `lib` folder, every script needs to be run with the \r +additional include flag to add the module folder to the search path. 
Hence,\r +the invocation looks like the following inside the scripts folder:\r +\r +```{shell}\r +perl -I../../lib scriptname.pl -arg1 val1 -arg2 val2\r +```\r +### snakemake\r +\r +Follow the installation instructions above.\r +\r +Update config/config.yml to define your input data.\r +\r +Navigate to the directory "workflow" and type:\r +```{shell}\r +snakemake -p -c {number of cores} target\r +```\r +\r +If running on an HPC cluster with a SLURM scheduler you could use a bash script like this one:\r +```{shell}\r +#!/bin/bash\r +#SBATCH --partition=hour\r +#SBATCH --output=job_curate_bold_%j.out\r +#SBATCH --error=job_curate_bold_%j.err\r +#SBATCH --mem=24G\r +#SBATCH --cpus-per-task=2\r +\r +source activate bold-curation\r +\r +snakemake -p -c 2 target\r +\r +echo Complete!\r +```\r +""" ; + schema1:image ; + schema1:keywords "dna barcoding" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Library curation BOLD" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/833?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Automated quantitative analysis of DIA proteomics mass spectrometry measurements." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/980?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/diaproteomics" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/diaproteomics" ; + schema1:sdDatePublished "2024-08-05 10:24:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/980/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6507 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "Automated quantitative analysis of DIA proteomics mass spectrometry measurements." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/980?version=6" ; + schema1:keywords "data-independent-proteomics, dia-proteomics, openms, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/diaproteomics" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/980?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/991?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/hlatyping" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hlatyping" ; + schema1:sdDatePublished "2024-08-05 10:22:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/991/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3272 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:55Z" ; + schema1:dateModified "2024-06-11T12:54:55Z" ; + schema1:description "Precision HLA typing from next-generation sequencing data." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/991?version=9" ; + schema1:keywords "DNA, hla, hla-typing, immunology, optitype, personalized-medicine, rna" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hlatyping" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/991?version=2" ; + schema1:version 2 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 13430 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 2868 ; + schema1:dateModified "2020-09-21T10:34:27" ; + schema1:name "file0.txt" ; + schema1:sdDatePublished "2023-11-02T10:55:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2811 ; + schema1:dateModified "2020-09-21T10:34:27" ; + schema1:name "file1.txt" ; + schema1:sdDatePublished "2023-11-02T10:55:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2961 ; + schema1:dateModified "2020-09-21T10:34:27" ; + schema1:name "file2.txt" ; + schema1:sdDatePublished "2023-11-02T10:55:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2808 ; + schema1:dateModified "2020-09-21T10:34:27" ; + schema1:name "file3.txt" ; + schema1:sdDatePublished "2023-11-02T10:55:02+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=18" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-08-05 10:22:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=18" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12290 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:40Z" ; + schema1:dateModified "2024-06-11T12:54:40Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=18" ; + schema1:version 18 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "extract 1 Id from SRA and assume it is PE as input to viralRNASpades." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/434?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for extract SRA + viralRNAspades (PE)" ; + schema1:sdDatePublished "2024-08-05 10:31:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/434/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6165 ; + schema1:dateCreated "2023-02-10T10:05:10Z" ; + schema1:dateModified "2023-02-10T10:05:10Z" ; + schema1:description "extract 1 Id from SRA and assume it is PE as input to viralRNASpades." 
; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "extract SRA + viralRNAspades (PE)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/434?version=1" ; + schema1:version 1 ; + ns1:output , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "The workflow takes a trimmed HiFi reads collection, Forward/Reverse HiC reads, and the max coverage depth (calculated from WF1) to run Hifiasm in HiC phasing mode. It produces both Pri/Alt and Hap1/Hap2 assemblies, and runs all the QC analysis (gfastats, BUSCO, and Merqury). The default Hifiasm purge level is Light (l1)." ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.605.1" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/galaxy" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ERGA HiFi+HiC Assembly+QC Hifiasm v2309 (WF2)" ; + schema1:sdDatePublished "2024-08-05 10:27:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/605/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 284296 ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/2.Contigging/pics/Cont_hifiasm_hic_2309.png" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 70426 ; + schema1:creator , + ; + schema1:dateCreated "2023-10-09T12:47:49Z" ; + schema1:dateModified "2024-03-13T09:52:13Z" ; + schema1:description "The workflow takes a trimmed HiFi reads collection, Forward/Reverse HiC reads, and the max coverage depth (calculated from WF1) to run Hifiasm in HiC phasing mode. 
It produces both Pri/Alt and Hap1/Hap2 assemblies, and runs all the QC analysis (gfastats, BUSCO, and Merqury). The default Hifiasm purge level is Light (l1)." ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "ERGA, Assembly+QC, Hi-C, HiFi" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ERGA HiFi+HiC Assembly+QC Hifiasm v2309 (WF2)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/2.Contigging/Galaxy-Workflow-ERGA_HiFi_HiC_Assembly_QC_Hifiasm_v2309_(WF2).ga" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """CWL version of the md_list.cwl workflow for HPC.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/98?version=2" ; + schema1:isBasedOn "https://github.com/douglowe/biobb_hpc_cwl_md_list" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Example of setting up a simulation system" ; + schema1:sdDatePublished "2024-08-05 10:33:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/98/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 33936 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6609 ; + schema1:dateCreated "2021-01-29T16:56:33Z" ; + schema1:dateModified "2023-01-16T13:46:36Z" ; + schema1:description """CWL version of the md_list.cwl workflow for HPC.\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/98?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Example of setting up a simulation system" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/98?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + ; + ns1:output . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Summary\r +This notebook shows how to integrate genomic and image data resources. 
This notebook looks at the question **Which diabetes related genes are expressed in the pancreas?** \r +\r +Steps:\r +\r +* Query humanmine.org, an integrated database of Homo sapiens genomic data using the intermine API to find the genes.\r +* Using the list of found genes, search in the Image Data Resource (IDR) for images linked to the genes, tissue and disease.\r +* \r +We use the intermine API and the IDR API\r +\r +The notebook can be launched in [My Binder](https://mybinder.org/v2/gh/IDR/idr-notebooks/master?urlpath=notebooks%2Fhumanmine.ipynb)\r +\r +# Inputs\r +Parameters needed to configure the workflow:\r +\r +* TISSUE = "Pancreas" \r +* DISEASE = "diabetes"\r +\r +# Ouputs\r +* List of genes found using HumanMine\r +* List of images from IDR for one of the gene found""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.242.1" ; + schema1:isBasedOn "https://github.com/IDR/idr-notebooks" ; + schema1:license "BSD-2-Clause" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Diabetes related genes expressed in pancreas" ; + schema1:sdDatePublished "2024-08-05 10:32:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/242/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 1135634 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 0 ; + schema1:creator ; + schema1:dateCreated "2021-11-23T11:28:07Z" ; + schema1:dateModified "2023-01-16T13:55:09Z" ; + schema1:description """# Summary\r +This notebook shows how to integrate genomic and image data resources. 
This notebook looks at the question **Which diabetes related genes are expressed in the pancreas?** \r +\r +Steps:\r +\r +* Query humanmine.org, an integrated database of Homo sapiens genomic data using the intermine API to find the genes.\r +* Using the list of found genes, search in the Image Data Resource (IDR) for images linked to the genes, tissue and disease.\r +* \r +We use the intermine API and the IDR API\r +\r +The notebook can be launched in [My Binder](https://mybinder.org/v2/gh/IDR/idr-notebooks/master?urlpath=notebooks%2Fhumanmine.ipynb)\r +\r +# Inputs\r +Parameters needed to configure the workflow:\r +\r +* TISSUE = "Pancreas" \r +* DISEASE = "diabetes"\r +\r +# Ouputs\r +* List of genes found using HumanMine\r +* List of images from IDR for one of the gene found""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/BSD-2-Clause" ; + schema1:name "Diabetes related genes expressed in pancreas" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/IDR/idr-notebooks/blob/master/humanmine.ipynb" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """From the R1 and R2 fastq files of a single samples, make a scRNAseq counts matrix, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. Produces a processed AnnData \r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/465?version=1" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq_SingleSampleProcessing_STARSolo" ; + schema1:sdDatePublished "2024-08-05 10:24:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/465/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 121325 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-05-05T06:29:11Z" ; + schema1:dateModified "2023-05-05T06:32:22Z" ; + schema1:description """From the R1 and R2 fastq files of a single samples, make a scRNAseq counts matrix, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. Produces a processed AnnData \r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/465?version=3" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "scRNAseq_SingleSampleProcessing_STARSolo" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/465?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/989?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/hic" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hic" ; + schema1:sdDatePublished "2024-08-05 10:23:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/989/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4842 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:54Z" ; + schema1:dateModified "2024-06-11T12:54:54Z" ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/989?version=7" ; + schema1:keywords "chromosome-conformation-capture, Hi-C" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hic" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/989?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and modern ancient DNA pipeline in Nextflow and with cloud support." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-08-05 10:23:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3643 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:49Z" ; + schema1:dateModified "2024-06-11T12:54:49Z" ; + schema1:description "A fully reproducible and modern ancient DNA pipeline in Nextflow and with cloud support." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-wgs-variant-calling" ; + schema1:mainEntity ; + schema1:name "COVID-19-PE-WGS-ILLUMINA (v0.1)" ; + schema1:sdDatePublished "2021-03-12 13:41:30 +0000" ; + schema1:softwareVersion "v0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 34917 ; + schema1:name "COVID-19-PE-WGS-ILLUMINA" ; + schema1:programmingLanguage . + + a schema1:Dataset ; + schema1:datePublished "2024-07-31T14:58:52.308058" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . + + a schema1:Dataset ; + schema1:datePublished "2023-09-01T09:38:56.153751" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "cutandrun/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.10" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """\r +\r +![Logo](https://cbg-ethz.github.io/V-pipe/img/logo.svg)\r +\r +[![bio.tools](https://img.shields.io/badge/bio-tools-blue.svg)](https://bio.tools/V-Pipe)\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥7.11.0-blue.svg)](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe)\r +[![Deploy Docker image](https://github.com/cbg-ethz/V-pipe/actions/workflows/deploy-docker.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe)\r +[![Tests](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml)\r +[![Mega-Linter](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml)\r +[![License: Apache-2.0](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\r +\r +V-pipe is a workflow designed for the analysis of next generation sequencing (NGS) data from viral pathogens. It produces a number of results in a curated format (e.g., consensus sequences, SNV calls, local/global haplotypes).\r +V-pipe is written using the Snakemake workflow management system.\r +\r +## Usage\r +\r +Different ways of initializing V-pipe are presented below. We strongly encourage you to deploy it [using the quick install script](#using-quick-install-script), as this is our preferred method.\r +\r +To configure V-pipe refer to the documentation present in [config/README.md](config/README.md).\r +\r +V-pipe expects the input samples to be organized in a [two-level](config/README.md#samples) directory hierarchy,\r +and the sequencing reads must be provided in a sub-folder named `raw_data`. 
Further details can be found on the [website](https://cbg-ethz.github.io/V-pipe/usage/).\r +Check the utils subdirectory for [mass-importers tools](utils/README.md#samples-mass-importers) that can assist you in generating this hierarchy.\r +\r +We provide [virus-specific base configuration files](config/README.md#virus-base-config) which contain handy defaults for, e.g., HIV and SARS-CoV-2. Set the virus in the general section of the configuration file:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +```\r +\r +Also see [snakemake's documentation](https://snakemake.readthedocs.io/en/stable/executing/cli.html) to learn more about the command-line options available when executing the workflow.\r +\r +\r +### Tutorials\r +\r +Tutorials for your first steps with V-pipe for different scenarios are available in the [docs/](docs/README.md) subdirectory.\r +\r +\r +### Using quick install script\r +\r +To deploy V-pipe, use the [installation script](utils/README.md#quick-installer) with the following parameters:\r +\r +```bash\r +curl -O 'https://raw.githubusercontent.com/cbg-ethz/V-pipe/master/utils/quick_install.sh'\r +./quick_install.sh -w work\r +```\r +\r +This script will download and install miniconda, checkout the V-pipe git repository (use `-b` to specify which branch/tag) and setup a work directory (specified with `-w`) with an executable script that will execute the workflow:\r +\r +```bash\r +cd work\r +# edit config.yaml and provide samples/ directory\r +./vpipe --jobs 4 --printshellcmds --dry-run\r +```\r +\r +Test data to test your installation is available with the tutorials provided in the [docs/](docs/README.md) subdirectory.\r +\r +### Using Docker\r +\r +Note: the [docker image](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe) is only setup with components to run the workflow for HIV and SARS-CoV-2 virus base configurations.\r +Using V-pipe with other viruses or configurations might require internet connectivity for additional software 
components.\r +\r +Create `config.yaml` or `vpipe.config` and then populate the `samples/` directory.\r +For example, the following config file could be used:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +\r +output:\r + snv: true\r + local: true\r + global: false\r + visualization: true\r + QA: true\r +```\r +\r +Then execute:\r +\r +```bash\r +docker run --rm -it -v $PWD:/work ghcr.io/cbg-ethz/v-pipe:master --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Snakedeploy\r +\r +First install [mamba](https://github.com/conda-forge/miniforge#mambaforge), then create and activate an environment with Snakemake and Snakedeploy:\r +\r +```bash\r +mamba create -c conda-forge -c bioconda --name snakemake snakemake snakedeploy\r +conda activate snakemake\r +```\r +\r +Snakemake's [official workflow installer Snakedeploy](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe) can now be used:\r +\r +```bash\r +snakedeploy deploy-workflow https://github.com/cbg-ethz/V-pipe --tag master .\r +# edit config/config.yaml and provide samples/ directory\r +snakemake --use-conda --jobs 4 --printshellcmds --dry-run\r +```\r +\r +## Dependencies\r +\r +- **[Conda](https://conda.io/docs/index.html)**\r +\r + Conda is a cross-platform package management system and an environment manager application. Snakemake uses mamba as a package manager.\r +\r +- **[Snakemake](https://snakemake.readthedocs.io/)**\r +\r + Snakemake is the central workflow and dependency manager of V-pipe. It determines the order in which individual tools are invoked and checks that programs do not exit unexpectedly.\r +\r +- **[VICUNA](https://www.broadinstitute.org/viral-genomics/vicuna)**\r +\r + VICUNA is a _de novo_ assembly software designed for populations with high mutation rates. It is used to build an initial reference for mapping reads with ngshmmalign aligner when a `references/cohort_consensus.fasta` file is not provided. 
Further details can be found in the [wiki](https://github.com/cbg-ethz/V-pipe/wiki/getting-started#input-files) pages.\r +\r +### Computational tools\r +\r +Other dependencies are managed by using isolated conda environments per rule, and below we list some of the computational tools integrated in V-pipe:\r +\r +- **[FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)**\r +\r + FastQC gives an overview of the raw sequencing data. Flowcells that have been overloaded or otherwise fail during sequencing can easily be determined with FastQC.\r +\r +- **[PRINSEQ](http://prinseq.sourceforge.net/)**\r +\r + Trimming and clipping of reads is performed by PRINSEQ. It is currently the most versatile raw read processor with many customization options.\r +\r +- **[ngshmmalign](https://github.com/cbg-ethz/ngshmmalign)**\r +\r + We perform the alignment of the curated NGS data using our custom ngshmmalign that takes structural variants into account. It produces multiple consensus sequences that include either majority bases or ambiguous bases.\r +\r +- **[bwa](https://github.com/lh3/bwa)**\r +\r + In order to detect specific cross-contaminations with other probes, the Burrows-Wheeler aligner is used. It quickly yields estimates for foreign genomic material in an experiment.\r + Additionally, It can be used as an alternative aligner to ngshmmalign.\r +\r +- **[MAFFT](http://mafft.cbrc.jp/alignment/software/)**\r +\r + To standardise multiple samples to the same reference genome (say HXB2 for HIV-1), the multiple sequence aligner MAFFT is employed. The multiple sequence alignment helps in determining regions of low conservation and thus makes standardisation of alignments more robust.\r +\r +- **[Samtools and bcftools](https://www.htslib.org/)**\r +\r + The Swiss Army knife of alignment postprocessing and diagnostics. 
bcftools is also used to generate consensus sequence with indels.\r +\r +- **[SmallGenomeUtilities](https://github.com/cbg-ethz/smallgenomeutilities)**\r +\r + We perform genomic liftovers to standardised reference genomes using our in-house developed python library of utilities for rewriting alignments.\r +\r +- **[ShoRAH](https://github.com/cbg-ethz/shorah)**\r +\r + ShoRAh performs SNV calling and local haplotype reconstruction by using bayesian clustering.\r +\r +- **[LoFreq](https://csb5.github.io/lofreq/)**\r +\r + LoFreq (version 2) is SNVs and indels caller from next-generation sequencing data, and can be used as an alternative engine for SNV calling.\r +\r +- **[SAVAGE](https://bitbucket.org/jbaaijens/savage) and [Haploclique](https://github.com/cbg-ethz/haploclique)**\r +\r + We use HaploClique or SAVAGE to perform global haplotype reconstruction for heterogeneous viral populations by using an overlap graph.\r +\r +## Citation\r +\r +If you use this software in your research, please cite:\r +\r +Fuhrmann, L., Jablonski, K. P., Topolsky, I., Batavia, A. A., Borgsmueller, N., Icer Baykal, P., Carrara, M. ... 
& Beerenwinkel, (2023).\r +"V-Pipe 3.0: A Sustainable Pipeline for Within-Sample Viral Genetic Diversity Estimation."\r +_bioRxiv_, doi:[10.1101/2023.10.16.562462](https://doi.org/10.1101/2023.10.16.562462).\r +\r +## Contributions\r +\r +- [Ivan Topolsky\\* ![orcid]](https://orcid.org/0000-0002-7561-0810), [![github]](https://github.com/dryak)\r +- [Pelin Icer Baykal ![orcid]](https://orcid.org/0000-0002-9542-5292), [![github]](https://github.com/picerbaykal)\r +- [Kim Philipp Jablonski ![orcid]](https://orcid.org/0000-0002-4166-4343), [![github]](https://github.com/kpj)\r +- [Lara Fuhrmann ![orcid]](https://orcid.org/0000-0001-6405-0654), [![github]](https://github.com/LaraFuhrmann)\r +- [Uwe Schmitt ![orcid]](https://orcid.org/0000-0002-4658-0616), [![github]](https://github.com/uweschmitt)\r +- [Michal Okoniewski ![orcid]](https://orcid.org/0000-0003-4722-4506), [![github]](https://github.com/michalogit)\r +- [Monica Dragan ![orcid]](https://orcid.org/0000-0002-7719-5892), [![github]](https://github.com/monicadragan)\r +- [Susana Posada Céspedes ![orcid]](https://orcid.org/0000-0002-7459-8186), [![github]](https://github.com/sposadac)\r +- [David Seifert ![orcid]](https://orcid.org/0000-0003-4739-5110), [![github]](https://github.com/SoapZA)\r +- Tobias Marschall\r +- [Niko Beerenwinkel\\*\\* ![orcid]](https://orcid.org/0000-0002-0573-6119)\r +\r +\\* software maintainer ;\r +\\** group leader\r +\r +[github]: https://cbg-ethz.github.io/V-pipe/img/mark-github.svg\r +[orcid]: https://cbg-ethz.github.io/V-pipe/img/ORCIDiD_iconvector.svg\r +\r +## Contact\r +\r +We encourage users to use the [issue tracker](https://github.com/cbg-ethz/V-pipe/issues). 
For further enquiries, you can also contact the V-pipe Dev Team .\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.301.5" ; + schema1:isBasedOn "https://github.com/cbg-ethz/V-pipe.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for V-pipe (main multi-virus version)" ; + schema1:sdDatePublished "2024-08-05 10:23:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/301/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1634 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-10T18:38:49Z" ; + schema1:dateModified "2024-06-10T18:39:57Z" ; + schema1:description """\r +\r +![Logo](https://cbg-ethz.github.io/V-pipe/img/logo.svg)\r +\r +[![bio.tools](https://img.shields.io/badge/bio-tools-blue.svg)](https://bio.tools/V-Pipe)\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥7.11.0-blue.svg)](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe)\r +[![Deploy Docker image](https://github.com/cbg-ethz/V-pipe/actions/workflows/deploy-docker.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe)\r +[![Tests](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml)\r +[![Mega-Linter](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml)\r +[![License: Apache-2.0](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\r +\r +V-pipe is a workflow designed for the analysis of next generation sequencing (NGS) data from viral pathogens. 
It produces a number of results in a curated format (e.g., consensus sequences, SNV calls, local/global haplotypes).\r +V-pipe is written using the Snakemake workflow management system.\r +\r +## Usage\r +\r +Different ways of initializing V-pipe are presented below. We strongly encourage you to deploy it [using the quick install script](#using-quick-install-script), as this is our preferred method.\r +\r +To configure V-pipe refer to the documentation present in [config/README.md](config/README.md).\r +\r +V-pipe expects the input samples to be organized in a [two-level](config/README.md#samples) directory hierarchy,\r +and the sequencing reads must be provided in a sub-folder named `raw_data`. Further details can be found on the [website](https://cbg-ethz.github.io/V-pipe/usage/).\r +Check the utils subdirectory for [mass-importers tools](utils/README.md#samples-mass-importers) that can assist you in generating this hierarchy.\r +\r +We provide [virus-specific base configuration files](config/README.md#virus-base-config) which contain handy defaults for, e.g., HIV and SARS-CoV-2. 
Set the virus in the general section of the configuration file:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +```\r +\r +Also see [snakemake's documentation](https://snakemake.readthedocs.io/en/stable/executing/cli.html) to learn more about the command-line options available when executing the workflow.\r +\r +\r +### Tutorials\r +\r +Tutorials for your first steps with V-pipe for different scenarios are available in the [docs/](docs/README.md) subdirectory.\r +\r +\r +### Using quick install script\r +\r +To deploy V-pipe, use the [installation script](utils/README.md#quick-installer) with the following parameters:\r +\r +```bash\r +curl -O 'https://raw.githubusercontent.com/cbg-ethz/V-pipe/master/utils/quick_install.sh'\r +./quick_install.sh -w work\r +```\r +\r +This script will download and install miniconda, checkout the V-pipe git repository (use `-b` to specify which branch/tag) and setup a work directory (specified with `-w`) with an executable script that will execute the workflow:\r +\r +```bash\r +cd work\r +# edit config.yaml and provide samples/ directory\r +./vpipe --jobs 4 --printshellcmds --dry-run\r +```\r +\r +Test data to test your installation is available with the tutorials provided in the [docs/](docs/README.md) subdirectory.\r +\r +### Using Docker\r +\r +Note: the [docker image](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe) is only setup with components to run the workflow for HIV and SARS-CoV-2 virus base configurations.\r +Using V-pipe with other viruses or configurations might require internet connectivity for additional software components.\r +\r +Create `config.yaml` or `vpipe.config` and then populate the `samples/` directory.\r +For example, the following config file could be used:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +\r +output:\r + snv: true\r + local: true\r + global: false\r + visualization: true\r + QA: true\r +```\r +\r +Then execute:\r +\r +```bash\r +docker run --rm -it -v $PWD:/work 
ghcr.io/cbg-ethz/v-pipe:master --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Snakedeploy\r +\r +First install [mamba](https://github.com/conda-forge/miniforge#mambaforge), then create and activate an environment with Snakemake and Snakedeploy:\r +\r +```bash\r +mamba create -c conda-forge -c bioconda --name snakemake snakemake snakedeploy\r +conda activate snakemake\r +```\r +\r +Snakemake's [official workflow installer Snakedeploy](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe) can now be used:\r +\r +```bash\r +snakedeploy deploy-workflow https://github.com/cbg-ethz/V-pipe --tag master .\r +# edit config/config.yaml and provide samples/ directory\r +snakemake --use-conda --jobs 4 --printshellcmds --dry-run\r +```\r +\r +## Dependencies\r +\r +- **[Conda](https://conda.io/docs/index.html)**\r +\r + Conda is a cross-platform package management system and an environment manager application. Snakemake uses mamba as a package manager.\r +\r +- **[Snakemake](https://snakemake.readthedocs.io/)**\r +\r + Snakemake is the central workflow and dependency manager of V-pipe. It determines the order in which individual tools are invoked and checks that programs do not exit unexpectedly.\r +\r +- **[VICUNA](https://www.broadinstitute.org/viral-genomics/vicuna)**\r +\r + VICUNA is a _de novo_ assembly software designed for populations with high mutation rates. It is used to build an initial reference for mapping reads with ngshmmalign aligner when a `references/cohort_consensus.fasta` file is not provided. 
Further details can be found in the [wiki](https://github.com/cbg-ethz/V-pipe/wiki/getting-started#input-files) pages.\r +\r +### Computational tools\r +\r +Other dependencies are managed by using isolated conda environments per rule, and below we list some of the computational tools integrated in V-pipe:\r +\r +- **[FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)**\r +\r + FastQC gives an overview of the raw sequencing data. Flowcells that have been overloaded or otherwise fail during sequencing can easily be determined with FastQC.\r +\r +- **[PRINSEQ](http://prinseq.sourceforge.net/)**\r +\r + Trimming and clipping of reads is performed by PRINSEQ. It is currently the most versatile raw read processor with many customization options.\r +\r +- **[ngshmmalign](https://github.com/cbg-ethz/ngshmmalign)**\r +\r + We perform the alignment of the curated NGS data using our custom ngshmmalign that takes structural variants into account. It produces multiple consensus sequences that include either majority bases or ambiguous bases.\r +\r +- **[bwa](https://github.com/lh3/bwa)**\r +\r + In order to detect specific cross-contaminations with other probes, the Burrows-Wheeler aligner is used. It quickly yields estimates for foreign genomic material in an experiment.\r + Additionally, It can be used as an alternative aligner to ngshmmalign.\r +\r +- **[MAFFT](http://mafft.cbrc.jp/alignment/software/)**\r +\r + To standardise multiple samples to the same reference genome (say HXB2 for HIV-1), the multiple sequence aligner MAFFT is employed. The multiple sequence alignment helps in determining regions of low conservation and thus makes standardisation of alignments more robust.\r +\r +- **[Samtools and bcftools](https://www.htslib.org/)**\r +\r + The Swiss Army knife of alignment postprocessing and diagnostics. 
bcftools is also used to generate consensus sequence with indels.\r +\r +- **[SmallGenomeUtilities](https://github.com/cbg-ethz/smallgenomeutilities)**\r +\r + We perform genomic liftovers to standardised reference genomes using our in-house developed python library of utilities for rewriting alignments.\r +\r +- **[ShoRAH](https://github.com/cbg-ethz/shorah)**\r +\r + ShoRAh performs SNV calling and local haplotype reconstruction by using bayesian clustering.\r +\r +- **[LoFreq](https://csb5.github.io/lofreq/)**\r +\r + LoFreq (version 2) is SNVs and indels caller from next-generation sequencing data, and can be used as an alternative engine for SNV calling.\r +\r +- **[SAVAGE](https://bitbucket.org/jbaaijens/savage) and [Haploclique](https://github.com/cbg-ethz/haploclique)**\r +\r + We use HaploClique or SAVAGE to perform global haplotype reconstruction for heterogeneous viral populations by using an overlap graph.\r +\r +## Citation\r +\r +If you use this software in your research, please cite:\r +\r +Fuhrmann, L., Jablonski, K. P., Topolsky, I., Batavia, A. A., Borgsmueller, N., Icer Baykal, P., Carrara, M. ... 
& Beerenwinkel, (2023).\r +"V-Pipe 3.0: A Sustainable Pipeline for Within-Sample Viral Genetic Diversity Estimation."\r +_bioRxiv_, doi:[10.1101/2023.10.16.562462](https://doi.org/10.1101/2023.10.16.562462).\r +\r +## Contributions\r +\r +- [Ivan Topolsky\\* ![orcid]](https://orcid.org/0000-0002-7561-0810), [![github]](https://github.com/dryak)\r +- [Pelin Icer Baykal ![orcid]](https://orcid.org/0000-0002-9542-5292), [![github]](https://github.com/picerbaykal)\r +- [Kim Philipp Jablonski ![orcid]](https://orcid.org/0000-0002-4166-4343), [![github]](https://github.com/kpj)\r +- [Lara Fuhrmann ![orcid]](https://orcid.org/0000-0001-6405-0654), [![github]](https://github.com/LaraFuhrmann)\r +- [Uwe Schmitt ![orcid]](https://orcid.org/0000-0002-4658-0616), [![github]](https://github.com/uweschmitt)\r +- [Michal Okoniewski ![orcid]](https://orcid.org/0000-0003-4722-4506), [![github]](https://github.com/michalogit)\r +- [Monica Dragan ![orcid]](https://orcid.org/0000-0002-7719-5892), [![github]](https://github.com/monicadragan)\r +- [Susana Posada Céspedes ![orcid]](https://orcid.org/0000-0002-7459-8186), [![github]](https://github.com/sposadac)\r +- [David Seifert ![orcid]](https://orcid.org/0000-0003-4739-5110), [![github]](https://github.com/SoapZA)\r +- Tobias Marschall\r +- [Niko Beerenwinkel\\*\\* ![orcid]](https://orcid.org/0000-0002-0573-6119)\r +\r +\\* software maintainer ;\r +\\** group leader\r +\r +[github]: https://cbg-ethz.github.io/V-pipe/img/mark-github.svg\r +[orcid]: https://cbg-ethz.github.io/V-pipe/img/ORCIDiD_iconvector.svg\r +\r +## Contact\r +\r +We encourage users to use the [issue tracker](https://github.com/cbg-ethz/V-pipe/issues). 
For further enquiries, you can also contact the V-pipe Dev Team .\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/301?version=4" ; + schema1:keywords "Alignment, Assembly, covid-19, Genomics, INDELs, rna, SNPs, variant_calling, workflow" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "V-pipe (main multi-virus version)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/301?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). 
\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.56.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_protein-complex_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:31:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/56/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 68545 ; + schema1:creator , + ; + schema1:dateCreated "2021-07-02T08:43:37Z" ; + schema1:dateModified "2022-06-10T09:41:40Z" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/56?version=6" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + 
schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/56?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/972?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/circdna" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/circdna" ; + schema1:sdDatePublished "2024-08-05 10:24:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/972/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9220 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:45Z" ; + schema1:dateModified "2024-06-11T12:54:45Z" ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/972?version=7" ; + schema1:keywords "ampliconarchitect, ampliconsuite, circle-seq, circular, DNA, eccdna, ecdna, extrachromosomal-circular-dna, Genomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/circdna" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/972?version=3" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Flashlite-Juicer is a PBS implementation of [Juicer](https://github.com/aidenlab/juicer) for University of Queensland's Flashlite HPC.\r +\r +Infrastructure\\_deployment\\_metadata: FlashLite (QRISCloud)""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.150.1" ; + schema1:isBasedOn "https://github.com/natbutter/juicer" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Flashlite-Juicer" ; + schema1:sdDatePublished "2024-08-05 10:33:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/150/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6741 ; + schema1:creator , + , + ; + schema1:dateCreated "2021-08-18T23:19:40Z" ; + schema1:dateModified "2023-01-16T13:51:45Z" ; + schema1:description """Flashlite-Juicer is a PBS implementation of [Juicer](https://github.com/aidenlab/juicer) for University of Queensland's Flashlite HPC.\r +\r +Infrastructure\\_deployment\\_metadata: FlashLite (QRISCloud)""" ; + schema1:isPartOf ; + schema1:keywords "Juicer, Flashlite, Hi-C, PBS, TAD, scalable, map, FASTQ, BWA, topologically associating domains" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Flashlite-Juicer" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/150?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Proteomics label-free quantification (LFQ) analysis pipeline using OpenMS and MSstats, with feature quantification, feature summarization, quality control and group-based statistical analysis." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1012?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/proteomicslfq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/proteomicslfq" ; + schema1:sdDatePublished "2024-08-05 10:23:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1012/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7027 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:08Z" ; + schema1:dateModified "2024-06-11T12:55:08Z" ; + schema1:description "Proteomics label-free quantification (LFQ) analysis pipeline using OpenMS and MSstats, with feature quantification, feature summarization, quality control and group-based statistical analysis." ; + schema1:keywords "label-free-quantification, openms, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/proteomicslfq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1012?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=13" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-08-05 10:22:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=13" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9496 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:39Z" ; + schema1:dateModified "2024-06-11T12:54:39Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=13" ; + schema1:version 13 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/987?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/funcscan" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/funcscan" ; + schema1:sdDatePublished "2024-08-05 10:23:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/987/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16629 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:54Z" ; + schema1:dateModified "2024-06-11T12:54:54Z" ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/987?version=8" ; + schema1:keywords "amp, AMR, antibiotic-resistance, antimicrobial-peptides, antimicrobial-resistance-genes, arg, Assembly, bgc, biosynthetic-gene-clusters, contigs, function, Metagenomics, natural-products, screening, secondary-metabolites" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/funcscan" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/987?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r 
+* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.261.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_complex_md_setup/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:32:30 +0100" ; + schema1:url "https://workflowhub.eu/workflows/261/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 166381 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 27124 ; + schema1:creator , + ; + schema1:dateCreated "2022-01-10T15:27:53Z" ; + schema1:dateModified "2023-06-09T06:44:35Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 
[823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/261?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/261?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "This workflow performs the process of protein-ligand docking, step by step, using the BioExcel Building Blocks library (biobb)." 
; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.257.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Protein-ligand docking (fpocket)" ; + schema1:sdDatePublished "2024-08-05 10:32:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/257/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 29165 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5518 ; + schema1:creator , + ; + schema1:dateCreated "2022-01-10T11:48:32Z" ; + schema1:dateModified "2023-01-16T13:56:53Z" ; + schema1:description "This workflow performs the process of protein-ligand docking, step by step, using the BioExcel Building Blocks library (biobb)." ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Protein-ligand docking (fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/257?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Amplicon analysis workflow using NG-Tax\r +\r +**Steps:**\r +\r +* Quality control on the reads\r +* Execute NGTax for ASV detection and classification\r +\r +For more information about NG-Tax 2.0 have a look at https://doi.org/10.3389/fgene.2019.01366""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/45?version=7" ; + schema1:isBasedOn "https://gitlab.com/m-unlock/cwl" ; + schema1:license "CC0-1.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for NGTax" ; + schema1:sdDatePublished "2024-08-05 10:33:13 +0100" ; + schema1:url "https://workflowhub.eu/workflows/45/ro_crate?version=7" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 15649 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3837 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2020-10-28T11:07:26Z" ; + schema1:dateModified "2023-01-16T13:43:45Z" ; + schema1:description """Amplicon analysis workflow using NG-Tax\r +\r +**Steps:**\r +\r +* Quality control on the reads\r +* Execute NGTax for ASV detection and classification\r +\r +For more information about NG-Tax 2.0 have a look at https://doi.org/10.3389/fgene.2019.01366""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/45?version=6" ; + schema1:keywords "Amplicon, 16S, ITS" ; + schema1:license "https://spdx.org/licenses/CC0-1.0" ; + schema1:name "NGTax" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/45?version=7" ; + schema1:version 7 ; + ns1:input , + , + , + , + , + , + , + ; + ns1:output , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Protein 3D structure prediction pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1011?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/proteinfold" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/proteinfold" ; + schema1:sdDatePublished "2024-08-05 10:23:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1011/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11104 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:07Z" ; + schema1:dateModified "2024-06-11T12:55:07Z" ; + schema1:description "Protein 3D structure prediction pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1011?version=2" ; + schema1:keywords "alphafold2, protein-fold-prediction, protein-folding, protein-sequences, protein-structure" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/proteinfold" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1011?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/986?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/fetchngs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fetchngs" ; + schema1:sdDatePublished "2024-08-05 10:23:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/986/ro_crate?version=9" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7847 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:53Z" ; + schema1:dateModified "2024-06-11T12:54:53Z" ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/986?version=13" ; + schema1:keywords "ddbj, download, ena, FASTQ, geo, sra, synapse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fetchngs" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/986?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """### Workflow for LongRead Quality Control and Filtering\r +\r +- NanoPlot (read quality control) before and after filtering\r +- Filtlong (read trimming)\r +- Kraken2 taxonomic read classification before and after filtering\r +- Minimap2 read filtering based on given references

\r +\r +Other UNLOCK workflows on WorkflowHub: https://workflowhub.eu/projects/16/workflows?view=default

\r +\r +**All tool CWL files and other workflows can be found here:**
\r +https://gitlab.com/m-unlock/cwl/workflows\r +\r +**How to setup and use an UNLOCK workflow:**
\r +https://m-unlock.gitlab.io/docs/setup/setup.html
\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/337?version=1" ; + schema1:isBasedOn "https://gitlab.com/m-unlock/cwl/-/blob/master/cwl/workflows/workflow_nanopore_quality.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for LongRead Quality Control and Filtering" ; + schema1:sdDatePublished "2024-08-05 10:31:08 +0100" ; + schema1:url "https://workflowhub.eu/workflows/337/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 95508 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 18185 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-04-21T16:19:59Z" ; + schema1:dateModified "2023-04-07T14:07:55Z" ; + schema1:description """### Workflow for LongRead Quality Control and Filtering\r +\r +- NanoPlot (read quality control) before and after filtering\r +- Filtlong (read trimming)\r +- Kraken2 taxonomic read classification before and after filtering\r +- Minimap2 read filtering based on given references

\r +\r +Other UNLOCK workflows on WorkflowHub: https://workflowhub.eu/projects/16/workflows?view=default

\r +\r +**All tool CWL files and other workflows can be found here:**
\r +https://gitlab.com/m-unlock/cwl/workflows\r +\r +**How to setup and use an UNLOCK workflow:**
\r +https://m-unlock.gitlab.io/docs/setup/setup.html
\r +""" ; + schema1:image ; + schema1:keywords "Genomics, nanopore, CWL, Assembly" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "LongRead Quality Control and Filtering" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/337?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + ; + ns1:output , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """From the R1 and R2 fastq files of a single samples, make a scRNAseq counts matrix, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. Produces a processed AnnData \r +Depreciated: use individual workflows insead for multiple samples""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/647?version=1" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "other-closed" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq Single Sample Processing Cell Ranger" ; + schema1:sdDatePublished "2024-08-05 10:24:30 +0100" ; + schema1:url "https://workflowhub.eu/workflows/647/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 108091 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-11-09T01:30:26Z" ; + schema1:dateModified "2024-05-30T05:56:52Z" ; + schema1:description """From the R1 and R2 fastq files of a single samples, make a scRNAseq counts matrix, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. 
Produces a processed AnnData \r +Depreciated: use individual workflows insead for multiple samples""" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "" ; + schema1:name "scRNAseq Single Sample Processing Cell Ranger" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/647?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Automated quantitative analysis of DIA proteomics mass spectrometry measurements." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/980?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/diaproteomics" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/diaproteomics" ; + schema1:sdDatePublished "2024-08-05 10:24:04 +0100" ; + schema1:url "https://workflowhub.eu/workflows/980/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6375 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "Automated quantitative analysis of DIA proteomics mass spectrometry measurements." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/980?version=6" ; + schema1:keywords "data-independent-proteomics, dia-proteomics, openms, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/diaproteomics" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/980?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-08-05 10:22:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3868 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:01Z" ; + schema1:dateModified "2024-06-11T12:55:01Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=3" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# Drug Synergies Screening Workflow\r +\r +## Table of Contents\r +\r +- [Drug Synergies Screening Workflow](#drug-synergies-screening-workflow)\r + - [Table of Contents](#table-of-contents)\r + - [Description](#description)\r + - [Contents](#contents)\r + - [Building Blocks](#building-blocks)\r + - [Workflows](#workflows)\r + - [Resources](#resources)\r + - [Tests](#tests)\r + - [Instructions](#instructions)\r + - [Local machine](#local-machine)\r + - [Requirements](#requirements)\r + - [Usage steps](#usage-steps)\r + - [MareNostrum 4](#marenostrum-4)\r + - [Requirements in MN4](#requirements-in-mn4)\r + - [Usage steps in MN4](#usage-steps-in-mn4)\r + - [License](#license)\r + - [Contact](#contact)\r +\r +## Description\r +\r +This pipeline simulates a drug screening on personalised cell line models. It automatically builds Boolean models of interest, then uses cell lines data (expression, mutations, copy number variations) to personalise them as MaBoSS models. Finally, this pipeline simulates multiple drug intervention on these MaBoSS models, and lists drug synergies of interest.\r +\r +The workflow uses the following building blocks, described in order of execution:\r +\r +1. Build model from species\r +2. Personalise patient\r +3. MaBoSS\r +4. 
Print drug results\r +\r +For details on individual workflow steps, see the user documentation for each building block.\r +\r +[`GitHub repository`](https://github.com/PerMedCoE/drug-synergies-workflow>)\r +\r +## Contents\r +\r +### Building Blocks\r +\r +The ``BuildingBlocks`` folder contains the script to install the\r +Building Blocks used in the Drug Synergies Workflow.\r +\r +### Workflows\r +\r +The ``Workflow`` folder contains the workflows implementations.\r +\r +Currently contains the implementation using PyCOMPSs.\r +\r +### Resources\r +\r +The ``Resources`` folder contains a small dataset for testing purposes.\r +\r +### Tests\r +\r +The ``Tests`` folder contains the scripts that run each Building Block\r +used in the workflow for a small dataset.\r +They can be executed individually *without PyCOMPSs installed* for testing\r +purposes.\r +\r +## Instructions\r +\r +### Local machine\r +\r +This section explains the requirements and usage for the Drug Synergies Workflow in a laptop or desktop computer.\r +\r +#### Requirements\r +\r +- [`permedcoe`](https://github.com/PerMedCoE/permedcoe) package\r +- [PyCOMPSs](https://pycompss.readthedocs.io/en/stable/Sections/00_Quickstart.html)\r +- [Singularity](https://sylabs.io/guides/3.0/user-guide/installation.html)\r +\r +#### Usage steps\r +\r +1. Clone this repository:\r +\r + ```bash\r + git clone https://github.com/PerMedCoE/drug-synergies-workflow.git\r + ```\r +\r +2. Install the Building Blocks required for the COVID19 Workflow:\r +\r + ```bash\r + drug-synergies-workflow/BuildingBlocks/./install_BBs.sh\r + ```\r +\r +3. 
Get the required Building Block images from the project [B2DROP](https://b2drop.bsc.es/index.php/f/444350):\r +\r + - Required images:\r + - PhysiCell-COVID19.singularity\r + - printResults.singularity\r + - MaBoSS_sensitivity.singularity\r + - FromSpeciesToMaBoSSModel.singularity\r +\r + The path where these files are stored **MUST be exported in the `PERMEDCOE_IMAGES`** environment variable.\r +\r + > :warning: **TIP**: These containers can be built manually as follows (be patient since some of them may take some time):\r + 1. Clone the `BuildingBlocks` repository\r + ```bash\r + git clone https://github.com/PerMedCoE/BuildingBlocks.git\r + ```\r + 2. Build the required Building Block images\r + ```bash\r + cd BuildingBlocks/Resources/images\r + sudo singularity build PhysiCell-COVID19.sif PhysiCell-COVID19.singularity\r + sudo singularity build printResults.sif printResults.singularity\r + sudo singularity build MaBoSS_sensitivity.sif MaBoSS_sensitivity.singularity\r + sudo singularity build FromSpeciesToMaBoSSModel.sif FromSpeciesToMaBoSSModel.singularity\r + cd ../../..\r + ```\r +\r +**If using PyCOMPSs in local PC** (make sure that PyCOMPSs in installed):\r +\r +4. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflows/PyCOMPSs\r + ```\r +\r +5. Execute `./run.sh`\r + > **TIP**: If you want to run the workflow with a different dataset, please update the `run.sh` script setting the `dataset` variable to the new dataset folder and their file names.\r +\r +### MareNostrum 4\r +\r +This section explains the requirements and usage for the Drug Synergies Workflow in the MareNostrum 4 supercomputer.\r +\r +#### Requirements in MN4\r +\r +- Access to MN4\r +\r +All Building Blocks are already installed in MN4, and the Drug Synergies Workflow available.\r +\r +#### Usage steps in MN4\r +\r +1. 
Load the `COMPSs`, `Singularity` and `permedcoe` modules\r +\r + ```bash\r + export COMPSS_PYTHON_VERSION=3\r + module load COMPSs/3.1\r + module load singularity/3.5.2\r + module use /apps/modules/modulefiles/tools/COMPSs/libraries\r + module load permedcoe\r + ```\r +\r + > **TIP**: Include the loading into your `${HOME}/.bashrc` file to load it automatically on the session start.\r +\r + This commands will load COMPSs and the permedcoe package which provides all necessary dependencies, as well as the path to the singularity container images (`PERMEDCOE_IMAGES` environment variable) and testing dataset (`DRUG_SYNERGIES_WORKFLOW_DATASET` environment variable).\r +\r +2. Get a copy of the pilot workflow into your desired folder\r +\r + ```bash\r + mkdir desired_folder\r + cd desired_folder\r + get_drug_synergies_workflow\r + ```\r +\r +3. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflow/PyCOMPSs\r + ```\r +\r +4. Execute `./launch.sh`\r +\r + This command will launch a job into the job queuing system (SLURM) requesting 2 nodes (one node acting half master and half worker, and other full worker node) for 20 minutes, and is prepared to use the singularity images that are already deployed in MN4 (located into the `PERMEDCOE_IMAGES` environment variable). 
It uses the dataset located into `../../Resources/data` folder.\r +\r + > :warning: **TIP**: If you want to run the workflow with a different dataset, please edit the `launch.sh` script and define the appropriate dataset path.\r +\r + After the execution, a `results` folder will be available with with Drug Synergies Workflow results.\r +\r +## License\r +\r +[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\r +\r +## Contact\r +\r +\r +\r +This software has been developed for the [PerMedCoE project](https://permedcoe.eu/), funded by the European Commission (EU H2020 [951773](https://cordis.europa.eu/project/id/951773)).\r +\r +![](https://permedcoe.eu/wp-content/uploads/2020/11/logo_1.png "PerMedCoE")\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/479?version=1" ; + schema1:isBasedOn "https://github.com/PerMedCoE/drug-synergies-workflow" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PerMedCoE Drug Synergy" ; + schema1:sdDatePublished "2024-08-05 10:30:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/479/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1083 ; + schema1:dateCreated "2023-05-23T12:36:45Z" ; + schema1:dateModified "2023-05-23T12:36:45Z" ; + schema1:description """# Drug Synergies Screening Workflow\r +\r +## Table of Contents\r +\r +- [Drug Synergies Screening Workflow](#drug-synergies-screening-workflow)\r + - [Table of Contents](#table-of-contents)\r + - [Description](#description)\r + - [Contents](#contents)\r + - [Building Blocks](#building-blocks)\r + - [Workflows](#workflows)\r + - [Resources](#resources)\r + - [Tests](#tests)\r + - [Instructions](#instructions)\r + - [Local machine](#local-machine)\r + - [Requirements](#requirements)\r + - [Usage steps](#usage-steps)\r + - [MareNostrum 4](#marenostrum-4)\r + - [Requirements in MN4](#requirements-in-mn4)\r + - [Usage steps in MN4](#usage-steps-in-mn4)\r + - [License](#license)\r + - [Contact](#contact)\r +\r +## Description\r +\r +This pipeline simulates a drug screening on personalised cell line models. It automatically builds Boolean models of interest, then uses cell lines data (expression, mutations, copy number variations) to personalise them as MaBoSS models. Finally, this pipeline simulates multiple drug intervention on these MaBoSS models, and lists drug synergies of interest.\r +\r +The workflow uses the following building blocks, described in order of execution:\r +\r +1. Build model from species\r +2. Personalise patient\r +3. MaBoSS\r +4. 
Print drug results\r +\r +For details on individual workflow steps, see the user documentation for each building block.\r +\r +[`GitHub repository`](https://github.com/PerMedCoE/drug-synergies-workflow>)\r +\r +## Contents\r +\r +### Building Blocks\r +\r +The ``BuildingBlocks`` folder contains the script to install the\r +Building Blocks used in the Drug Synergies Workflow.\r +\r +### Workflows\r +\r +The ``Workflow`` folder contains the workflows implementations.\r +\r +Currently contains the implementation using PyCOMPSs.\r +\r +### Resources\r +\r +The ``Resources`` folder contains a small dataset for testing purposes.\r +\r +### Tests\r +\r +The ``Tests`` folder contains the scripts that run each Building Block\r +used in the workflow for a small dataset.\r +They can be executed individually *without PyCOMPSs installed* for testing\r +purposes.\r +\r +## Instructions\r +\r +### Local machine\r +\r +This section explains the requirements and usage for the Drug Synergies Workflow in a laptop or desktop computer.\r +\r +#### Requirements\r +\r +- [`permedcoe`](https://github.com/PerMedCoE/permedcoe) package\r +- [PyCOMPSs](https://pycompss.readthedocs.io/en/stable/Sections/00_Quickstart.html)\r +- [Singularity](https://sylabs.io/guides/3.0/user-guide/installation.html)\r +\r +#### Usage steps\r +\r +1. Clone this repository:\r +\r + ```bash\r + git clone https://github.com/PerMedCoE/drug-synergies-workflow.git\r + ```\r +\r +2. Install the Building Blocks required for the COVID19 Workflow:\r +\r + ```bash\r + drug-synergies-workflow/BuildingBlocks/./install_BBs.sh\r + ```\r +\r +3. 
Get the required Building Block images from the project [B2DROP](https://b2drop.bsc.es/index.php/f/444350):\r +\r + - Required images:\r + - PhysiCell-COVID19.singularity\r + - printResults.singularity\r + - MaBoSS_sensitivity.singularity\r + - FromSpeciesToMaBoSSModel.singularity\r +\r + The path where these files are stored **MUST be exported in the `PERMEDCOE_IMAGES`** environment variable.\r +\r + > :warning: **TIP**: These containers can be built manually as follows (be patient since some of them may take some time):\r + 1. Clone the `BuildingBlocks` repository\r + ```bash\r + git clone https://github.com/PerMedCoE/BuildingBlocks.git\r + ```\r + 2. Build the required Building Block images\r + ```bash\r + cd BuildingBlocks/Resources/images\r + sudo singularity build PhysiCell-COVID19.sif PhysiCell-COVID19.singularity\r + sudo singularity build printResults.sif printResults.singularity\r + sudo singularity build MaBoSS_sensitivity.sif MaBoSS_sensitivity.singularity\r + sudo singularity build FromSpeciesToMaBoSSModel.sif FromSpeciesToMaBoSSModel.singularity\r + cd ../../..\r + ```\r +\r +**If using PyCOMPSs in local PC** (make sure that PyCOMPSs in installed):\r +\r +4. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflows/PyCOMPSs\r + ```\r +\r +5. Execute `./run.sh`\r + > **TIP**: If you want to run the workflow with a different dataset, please update the `run.sh` script setting the `dataset` variable to the new dataset folder and their file names.\r +\r +### MareNostrum 4\r +\r +This section explains the requirements and usage for the Drug Synergies Workflow in the MareNostrum 4 supercomputer.\r +\r +#### Requirements in MN4\r +\r +- Access to MN4\r +\r +All Building Blocks are already installed in MN4, and the Drug Synergies Workflow available.\r +\r +#### Usage steps in MN4\r +\r +1. 
Load the `COMPSs`, `Singularity` and `permedcoe` modules\r +\r + ```bash\r + export COMPSS_PYTHON_VERSION=3\r + module load COMPSs/3.1\r + module load singularity/3.5.2\r + module use /apps/modules/modulefiles/tools/COMPSs/libraries\r + module load permedcoe\r + ```\r +\r + > **TIP**: Include the loading into your `${HOME}/.bashrc` file to load it automatically on the session start.\r +\r + This commands will load COMPSs and the permedcoe package which provides all necessary dependencies, as well as the path to the singularity container images (`PERMEDCOE_IMAGES` environment variable) and testing dataset (`DRUG_SYNERGIES_WORKFLOW_DATASET` environment variable).\r +\r +2. Get a copy of the pilot workflow into your desired folder\r +\r + ```bash\r + mkdir desired_folder\r + cd desired_folder\r + get_drug_synergies_workflow\r + ```\r +\r +3. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflow/PyCOMPSs\r + ```\r +\r +4. Execute `./launch.sh`\r +\r + This command will launch a job into the job queuing system (SLURM) requesting 2 nodes (one node acting half master and half worker, and other full worker node) for 20 minutes, and is prepared to use the singularity images that are already deployed in MN4 (located into the `PERMEDCOE_IMAGES` environment variable). 
It uses the dataset located into `../../Resources/data` folder.\r +\r + > :warning: **TIP**: If you want to run the workflow with a different dataset, please edit the `launch.sh` script and define the appropriate dataset path.\r +\r + After the execution, a `results` folder will be available with with Drug Synergies Workflow results.\r +\r +## License\r +\r +[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\r +\r +## Contact\r +\r +\r +\r +This software has been developed for the [PerMedCoE project](https://permedcoe.eu/), funded by the European Commission (EU H2020 [951773](https://cordis.europa.eu/project/id/951773)).\r +\r +![](https://permedcoe.eu/wp-content/uploads/2020/11/logo_1.png "PerMedCoE")\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "PerMedCoE Drug Synergy" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/479?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. 
Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/amber-protein-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.297.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Amber Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/297/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 83828 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-23T13:28:34Z" ; + schema1:dateModified "2023-01-16T13:58:48Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/amber-protein-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/297?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Amber Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + 
schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_md_setup/galaxy/biobb_wf_amber_md_setup.ga" ; + schema1:version 2 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """The workflows in this collection are from the '16S Microbial Analysis with mothur' tutorial for analysis of 16S data (Saskia Hiltemann, Bérénice Batut, Dave Clements), adapted for pipeline use on galaxy australia (Ahmed Mehdi). The workflows developed in galaxy use mothur software package developed by Schloss et al https://pubmed.ncbi.nlm.nih.gov/19801464/. \r +\r +Please also refer to the 16S tutorials available at Galaxy https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html and [https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop/tutorial.html\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/653?version=1" ; + schema1:isBasedOn "https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Workflow 7 : Beta Diversity [16S Microbial Analysis With Mothur]" ; + schema1:sdDatePublished "2024-08-05 10:27:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/653/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9429 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2023-11-09T05:26:53Z" ; + schema1:dateModified "2023-11-09T05:26:53Z" ; + schema1:description """The workflows in this collection are from the '16S Microbial Analysis with mothur' tutorial for analysis of 16S data (Saskia Hiltemann, Bérénice Batut, Dave Clements), adapted for pipeline use on galaxy australia (Ahmed Mehdi). The workflows developed in galaxy use mothur software package developed by Schloss et al https://pubmed.ncbi.nlm.nih.gov/19801464/. \r +\r +Please also refer to the 16S tutorials available at Galaxy https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html and [https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop/tutorial.html\r +\r +""" ; + schema1:isPartOf ; + schema1:keywords "Metagenomics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Workflow 7 : Beta Diversity [16S Microbial Analysis With Mothur]" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/653?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/972?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/circdna" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/circdna" ; + schema1:sdDatePublished "2024-08-05 10:24:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/972/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9212 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:45Z" ; + schema1:dateModified "2024-06-11T12:54:45Z" ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/972?version=7" ; + schema1:keywords "ampliconarchitect, ampliconsuite, circle-seq, circular, DNA, eccdna, ecdna, extrachromosomal-circular-dna, Genomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/circdna" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/972?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Global Run-On sequencing analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1004?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/nascent" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nascent" ; + schema1:sdDatePublished "2024-08-05 10:23:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1004/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8446 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:06Z" ; + schema1:dateModified "2024-06-11T12:55:06Z" ; + schema1:description "Global Run-On sequencing analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1004?version=4" ; + schema1:keywords "gro-seq, nascent, pro-seq, rna, transcription, tss" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nascent" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1004?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/521?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for SARS-CoV-2 ONT Amplicon Sequencing SANBI 1.0" ; + schema1:sdDatePublished "2024-08-05 10:30:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/521/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 19807 ; + schema1:dateCreated "2023-06-29T12:07:19Z" ; + schema1:dateModified "2023-06-29T12:07:19Z" ; + schema1:description "" ; + schema1:keywords "SARS-CoV-2, SANBI, nanopore" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "SARS-CoV-2 ONT Amplicon Sequencing SANBI 1.0" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/521?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# GERONIMO\r +\r +## Introduction\r +GERONIMO is a bioinformatics pipeline designed to conduct high-throughput homology searches of structural genes using covariance models. These models are based on the alignment of sequences and the consensus of secondary structures. The pipeline is built using Snakemake, a workflow management tool that allows for the reproducible execution of analyses on various computational platforms. \r +\r +The idea for developing GERONIMO emerged from a comprehensive search for [telomerase RNA in lower plants] and was subsequently refined through an [expanded search of telomerase RNA across Insecta]. GERONIMO can test hundreds of genomes and ensures the stability and reproducibility of the analyses performed.\r +\r +\r +[telomerase RNA in lower plants]: https://doi.org/10.1093/nar/gkab545\r +[expanded search of telomerase RNA across Insecta]: https://doi.org/10.1093/nar/gkac1202\r +\r +## Scope\r +The GERONIMO tool utilises covariance models (CMs) to conduct homology searches of RNA sequences across a wide range of gene families in a broad evolutionary context. Specifically, it can be utilised to:\r +\r +* Detect RNA sequences that share a common evolutionary ancestor\r +* Identify and align orthologous RNA sequences among closely related species, as well as paralogous sequences within a single species\r +* Identify conserved non-coding RNAs in a genome, and extract upstream genomic regions to characterise potential promoter regions. \r +It is important to note that GERONIMO is a computational tool, and as such, it is intended to be run on a computer with a small amount of data. 
Appropriate computational infrastructure is necessary for analysing hundreds of genomes.\r +\r +Although GERONIMO was primarily designed for Telomerase RNA identification, its functionality extends to include the detection and alignment of other RNA gene families, including **rRNA**, **tRNA**, **snRNA**, **miRNA**, and **lncRNA**. This can aid in identifying paralogs and orthologs across different species that may carry specific functions, making it useful for phylogenetic analyses. \r +\r +It is crucial to remember that some gene families may exhibit similar characteristics but different functions. Therefore, analysing the data and functional annotation after conducting the search is essential to characterise the sequences properly.\r +\r +## Pipeline overview\r +\r +\r +By default, the GERONIMO pipeline conducts high-throughput searches of homology sequences in downloaded genomes utilizing covariance models. If a significant similarity is detected between the model and genome sequence, the pipeline extracts the upstream region, making it convenient to identify the promoter of the discovered gene. In brief, the pipeline:\r +- Compiles a list of genomes using the NCBI's [Entrez] database based on a specified query, *e.g. 
"Rhodophyta"[Organism]*\r +- Downloads and decompresses the requested genomes using *rsync* and *gunzip*, respectively\r +- *Optionally*, generates a covariance model based on a provided alignment using [Infernal]\r +- Conducts searches among the genomes using the covariance model [Infernal]\r +- Supplements genome information with taxonomy data using [rentrez]\r +- Expands the significant hits sequence by extracting upstream genomic regions using [*blastcmd*]\r +- Compiles the results, organizes them into a tabular format, and generates a visual summary of the performed analysis.\r +\r +[Entrez]: https://www.ncbi.nlm.nih.gov/books/NBK179288/\r +[Infernal]: http://eddylab.org/infernal/\r +[rentrez]: https://github.com/ropensci/rentrez\r +[*blastcmd*]: https://www.ncbi.nlm.nih.gov/books/NBK569853/\r +\r +## Quick start\r +The GERONIMO is available as a `snakemake pipeline` running on Linux and Windows operating systems.\r +\r +### Windows 10\r +Instal Linux on Windows 10 (WSL) according to [instructions], which bottling down to opening PowerShell or Windows Command Prompt in *administrator mode* and pasting the following:\r +```shell\r +wsl --install\r +wsl.exe --install UBUNTU\r +```\r +Then restart the machine and follow the instructions for setting up the Linux environment.\r +\r +[instructions]: https://learn.microsoft.com/en-us/windows/wsl/install\r +\r +### Linux:\r +#### Check whether the conda is installed:\r +```shell\r +conda -V\r +```\r +> GERONIMO was tested on conda 23.3.1\r +#### 1) If you do not have installed `conda`, please install `miniconda`\r +Please follow the instructions for installing [miniconda]\r +\r +[miniconda]: https://conda.io/projects/conda/en/stable/user-guide/install/linux.html\r +\r +#### 2) Continue with installing `mamba` (recommended but optional)\r +```shell\r +conda install -n base -c conda-forge mamba\r +```\r +#### 3) Install `snakemake`\r +```shell\r +conda activate base\r +mamba create -p env_snakemake -c conda-forge -c 
bioconda snakemake\r +mamba activate env_snakemake\r +snakemake --help\r +```\r +In case of complications, please check the section `Questions & Answers` below or follow the [official documentation] for troubleshooting.\r +\r +[official documentation]: https://snakemake.readthedocs.io/en/stable/getting_started/installation.html\r +\r +### Clone the GERONIMO repository\r +Go to the path in which you want to run the analysis and clone the repository:\r +```shell\r +cd \r +git clone https://github.com/amkilar/GERONIMO.git\r +```\r +\r +### Run sample analysis to ensure GERONIMO installation was successful\r +All files are prepared for the sample analysis as a default. Please execute the line below:\r +```shell\r +snakemake -s GERONIMO.sm --cores 1 --use-conda results/summary_table.xlsx\r +```\r +\r +This will prompt GERONIMO to quickly scan all modules, verifying the correct setup of the pipeline without executing any analysis.\r +You should see the message `Building DAG of jobs...`, followed by `Nothing to be done (all requested files are present and up to date).`, when successfully completed.\r +\r +If you want to run the sample analysis fully, please remove the folder `results` from the GERONIMO directory and execute GERONIMO again with:\r +\r +`snakemake -s GERONIMO.sm --cores 1 --use-conda results/summary_table.xlsx`\r +\r +> You might consider allowing more cores to speed up the analysis, which might take up to several hours.\r +\r +#### You might want to clean `GERONIMO/` directory from the files produced by the example analysis. 
You can safely remove the following:\r +- `GERONIMO/results`\r +- `GERONIMO/database`\r +- `GERONIMO/taxonomy`\r +- `GERONIMO/temp`\r +- `.create_genome_list.touch`\r +- `list_of_genomes.txt`\r +\r +## Setup the inputs\r +\r +### 1) Prepare the `covariance models`:\r +\r +#### Browse the collection of available `covariance models` at [Rfam] (*You can find the covariance model in the tab `Curation`.*) \r +Paste the covariance model to the folder `GERONIMO/models` and ensure its name follows the convention: `cov_model_`\r +\r +[Rfam]: https://rfam.org/\r +\r +#### **OR**\r +\r +#### Prepare your own `covariance model` using [LocARNA]\r +1. Paste or upload your sequences to the web server and download the `.stk` file with the alignment result. \r + \r + > *Please note that the `.stk` file format is crucial for the analysis, containing sequence alignment and secondary structure consensus.*\r + \r + > The LocARNA web service allows you to align 30 sequences at once - if you need to align more sequences, please use the standalone version available [here] \r + > After installation run: \r + ```shell\r + mlocarna my_fasta_sequences.fasta\r + ```\r + \r +2. 
Paste the `.stk` alignment file to the folder `GERONIMO/model_to_build` and ensure its name follows the convention: `.stk`\r +\r + > Please check the example `heterotrichea.stk` format in `GERONIMO/models_to_built` for reference\r + \r +\r +[LocARNA]: http://rna.informatik.uni-freiburg.de/LocARNA/Input.jsp\r +[here]: http://www.bioinf.uni-freiburg.de/Software/LocARNA/\r +\r +\r +### 2) Adjust the `config.yaml` file\r +Please adjust the analysis specifications, as in the following example:\r +\r +> - database: ' [Organism]' (in case of difficulties with defining the database query, please follow the instructions below)\r +> - extract_genomic_region-length: (here you can determine how long the upstream genomic region should be extracted; tested for 200)\r +> - models: ["", ""] (here specify the names of models that should be used to perform analysis)\r +> \r +> *Here you can also insert the name of the covariance model you want to build with GERONIMO - just be sure you placed `.stk` file in `GERONIMO/models_to_build` before starting analysis*\r +> - CPU_for_model_building: (specify the number of available CPUs devoted to the process of building model (cannot exceed the CPU number allowed to snakemake with `--cores`)\r +>\r +> *You might ignore this parameter when you do not need to create a new covariance model*\r +\r +\r +Keep in mind that the covariance models and alignments must be present in the respective GERONIMO folders.\r + \r +### 3) Remove folder `results`, which contains example analysis output\r +### 4) **Please ensure you have enough storage capacity to download all the requested genomes (in the `GERONIMO/` directory)**\r +\r +## Run GERONIMO\r +```shell\r +mamba activate env_snakemake\r +cd ~/GERONIMO\r +snakemake -s GERONIMO.sm --cores --use-conda results/summary_table.xlsx\r +```\r + \r +## Example results\r +\r +### Outputs characterisation\r +\r +#### A) Summary table\r +The Excel table contains the results arranged by taxonomy information and hit 
significance. The specific columns include:\r +* family, organism_name, class, order, phylum (taxonomy context)\r +* GCA_id - corresponds to the genome assembly in the *NCBI database*\r +* model - describes which covariance model identified the result\r +* label - follows the *Infernal* convention of categorizing hits\r +* number - the counter of the result\r +* e_value - indicates the significance level of the hit\r +* HIT_sequence - the exact HIT sequence found by *Infernal*, which corresponds to the covariance model\r +* HIT_ID - describes in which part of the genome assembly the hit was found, which may help publish novel sequences\r +* extended_genomic_region - upstream sequence, which may contain a possible promoter sequence\r +* secondary_structure - the secondary structure consensus of the covariance model\r +\r +\r +#### B) Significant Hits Distribution Across Taxonomy Families\r +The plot provides an overview of the number of genomes in which at least one significant hit was identified, grouped by family. The bold black line corresponds to the number of genomes present in each family, helping to minimize bias regarding unequal data representation across the taxonomy.\r +\r +\r +#### C) Hits Distribution in Genomes Across Families\r +The heatmap provides information about the most significant hits from the genome, identified by a specific covariance model. Genomes are grouped by families (on the right). Hits are classified into three categories based on their e-values. Generally, these categories correspond to hit classifications ("HIT," "MAYBE," "NO HIT"). The "HIT" category is further divided to distinguish between highly significant hits and moderately significant ones.\r +\r +\r +\r +### GERONIMO directory structure\r +\r +The GERONIMO directory structure is designed to produce files in a highly structured manner, ensuring clear insight and facilitating the analysis of results. 
During a successful run, GERONIMO produces the following folders:\r +* `/database` - which contains genome assemblies that were downloaded from the *NCBI database* and grouped in subfolders\r +* `/taxonomy` - where taxonomy information is gathered and stored in the form of tables\r +* `/results` - the main folder containing all produced results:\r + * `/infernal_raw` - contains the raw results produced by *Infernal*\r + * `/infernal` - contains restructured results of *Infernal* in table format\r + * `/cmdBLAST` - contains results of *cmdblast*, which extracts the extended genomic region\r + * `/summary` - contains summary files that join results from *Infernal*, *cmdblast*, and attach taxonomy context\r + * `/plots` - contains two types of summary plots\r +* `/temp` - folder contains the information necessary to download genome assemblies from *NCBI database*\r +\r +* `/env` - stores instructions for dependency installation\r +* `/models` - where calibrated covariance models can be pasted, *for example, from the Rfam database*\r +* `/modes_to_built` - where multiple alignments in *.stk* format can be pasted\r +* `/scripts` - contains developed scripts that perform results structurization\r +\r +#### The example GERONIMO directory structure:\r +\r +```shell\r +GERONIMO\r +├── database\r +│   ├── GCA_000091205.1_ASM9120v1_genomic\r +│   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   └── ...\r +├── env\r +├── models\r +├── model_to_build\r +├── results\r +│   ├── cmdBLAST\r +│   │   ├── MRP\r +│   │   │   ├── GCA_000091205.1_ASM9120v1_genomic\r +│   │   │   │   ├── extended\r +│   │   │   │   └── filtered\r +│   │   │   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   │   │   │   ├── extended\r +│   │   │   │   └── filtered\r +│   │   │   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   │   │   │   ├── extended\r +│   │   │   │   └── filtered\r +│   │   │   └── ...\r +│   │   ├── SRP\r +│   │   │   ├── 
GCA_000091205.1_ASM9120v1_genomic\r +│   │   │   │   ├── extended\r +│   │   │   │   └── filtered\r +│   │   │   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   │   │   │   ├── extended\r +│   │   │   │   └── filtered\r +│   │   │   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   │   │   │   ├── extended\r +│   │   │   │   └── filtered\r +│   │   │   └── ...\r +│   │   ├── ...\r +│   ├── infernal\r +│   │   ├── MRP\r +│   │   │   ├── GCA_000091205.1_ASM9120v1_genomic\r +│   │   │   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   │   │   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   │   │   ├── ...\r +│   │   ├── SRP\r +│   │   │   ├── GCA_000091205.1_ASM9120v1_genomic\r +│   │   │   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   │   │   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   │   │   ├── ...\r +│   ├── plots\r +│   ├── raw_infernal\r +│   │   ├── MRP\r +│   │   │   ├── GCA_000091205.1_ASM9120v1_genomic\r +│   │   │   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   │   │   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   │   │   ├── ...\r +│   │   ├── SRP\r +│   │   │   ├── GCA_000091205.1_ASM9120v1_genomic\r +│   │   │   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   │   │   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   │   │   ├── ...\r +│   └── summary\r +│   ├── GCA_000091205.1_ASM9120v1_genomic\r +│   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   ├── ...\r +├── scripts\r +├── taxonomy\r +└── temp\r +```\r +\r +## GERONIMO applicability\r +\r +### Expanding the evolutionary context\r +To add new genomes or database queries to an existing analysis, please follow the instructions:\r +1) Rename the `list_of_genomes.txt` file to `previous_list_of_genomes.txt` or any other preferred name.\r +2) Modify the `config.yaml` file by replacing the previous database query with the new one.\r +3) Delete:\r + - `summary_table.xlsx`, `part_summary_table.csv`, `summary_table_models.xlsx` files located in the `GERONIMO\\results` directory\r 
+ - `.create_genome_list.touch` file\r +5) Run GERONIMO to calculate new results using the command:\r + ```shell\r + snakemake -s GERONIMO.sm --cores --use-conda results/summary_table.xlsx\r + ```\r +7) Once the new results are generated, reviewing them before merging them with the original results is recommended.\r +8) Copy the contents of the `previous_list_of_genomes.txt` file and paste them into the current `list_of_genomes.txt`.\r +9) Delete:\r + - `summary_table.xlsx` located in the `GERONIMO\\results` directory\r + - `.create_genome_list.touch` file\r +10) Run GERONIMO to merge the results from both analyses using the command:\r + ```shell\r + snakemake -s GERONIMO.sm --cores 1 --use-conda results/summary_table.xlsx\r + ```\r +\r +### Incorporating new covariance models into existing analysis\r +1) Copy the new covariance model to `GERONIMO/models`\r +2) Modify the `config.yaml` file by adding the name of the new model to the line `models: [...]`\r +3) Run GERONIMO to see the updated analysis outcome\r +\r +### Building a new covariance model\r +With GERONIMO, building a new covariance model from multiple sequence alignment in the `.stk` format is possible. \r +\r +To do so, simply paste `.stk` file to `GERONIMO/models_to_build` and paste the name of the new covariance model to `config.yaml` file to the line `models: [""]`\r +\r +and run GERONIMO.\r +\r +\r +## Questions & Answers\r +\r +### How to specify the database query?\r +- Visit the [NCBI Assemblies] website. \r +- Follow the instruction on the graphic below:\r +\r +[NCBI Assemblies]: https://www.ncbi.nlm.nih.gov/assembly/?term=\r +\r +### WSL: problem with creating `snakemake_env`\r +In the case of an error similar to the one below:\r +> CondaError: Unable to create prefix directory '/mnt/c/Windows/system32/env_snakemake'.\r +> Check that you have sufficient permissions. 
\r + \r +You might try to delete the cache with: `rm -r ~/.cache/` and try again.\r +\r +### When `snakemake` does not seem to be installed properly\r +In the case of the following error:\r +> Command 'snakemake' not found ...\r +\r +Check whether the `env_snakemake` is activated.\r +> It should result in a change from (base) to (env_snakemake) before your login name in the command line window.\r +\r +If you still see `(base)` before your login name, please try to activate the environment with conda:\r +`conda activate env_snakemake`\r +\r +\r +Please note that you might need to specify the full path to the `env_snakemake`, like /home/your user name/env_snakemake\r +\r +### How to browse GERONIMO results obtained in WSL?\r +You can easily access the results obtained on WSL from your Windows environment by opening `File Explorer` and pasting the following line into the search bar: `\\\\wsl.localhost\\Ubuntu\\home\\`. This will reveal a folder with your username, as specified during the configuration of your Ubuntu system. To locate the GERONIMO results, simply navigate to the folder with your username and then to the `home` folder. (`\\\\wsl.localhost\\Ubuntu\\home\\\\home\\GERONIMO`)\r +\r +### GERONIMO occupies a lot of storage space\r +Through genome downloads, GERONIMO can potentially consume storage space, rapidly leading to a shortage. Currently, downloading genomes is an essential step for optimal GERONIMO performance.\r +\r +Regrettably, if the analysis is rerun without the `/database` folder, it will result in the need to redownload genomes, which is a highly time-consuming process.\r +\r +Nevertheless, if you do not intend to repeat the analysis and have no requirement for additional genomes or models, you are welcome to retain your results tables and plots while removing the remaining files.\r +\r +It is strongly advised against using local machines for extensive analyses. 
If you lack access to external storage space, it is recommended to divide the analysis into smaller segments, which can be later merged, as explained in the section titled `Expanding the evolutionary context`.\r +\r +Considering this limitation, I am currently working on implementing a solution that will help circumvent the need for redundant genome downloads without compromising GERONIMO performance in the future.\r +\r +You might consider deleting the `.snakemake` folder to free up storage space. However, please note that deleting this folder will require the reinstallation of GERONIMO dependencies when the analysis is rerun.\r +\r +## License\r +Copyright (c) 2023 Agata M. Kilar\r +\r +Permission is hereby granted, free of charge, to any person obtaining a copy\r +of this software and associated documentation files (the "Software"), to deal\r +in the Software without restriction, including without limitation the rights\r +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r +copies of the Software, and to permit persons to whom the Software is\r +furnished to do so, subject to the following conditions:\r +\r +The above copyright notice and this permission notice shall be included in all\r +copies or substantial portions of the Software.\r +\r +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r +SOFTWARE.\r +\r +## Contact\r +mgr inż. 
Agata Magdalena Kilar, PhD (agata.kilar@ceitec.muni.cz)\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.547.1" ; + schema1:isBasedOn "https://github.com/amkilar/GERONIMO.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for GERONIMO" ; + schema1:sdDatePublished "2024-08-05 10:29:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/547/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6740 ; + schema1:creator ; + schema1:dateCreated "2023-08-01T01:34:42Z" ; + schema1:dateModified "2023-08-03T18:15:31Z" ; + schema1:description """# GERONIMO\r +\r +## Introduction\r +GERONIMO is a bioinformatics pipeline designed to conduct high-throughput homology searches of structural genes using covariance models. These models are based on the alignment of sequences and the consensus of secondary structures. The pipeline is built using Snakemake, a workflow management tool that allows for the reproducible execution of analyses on various computational platforms. \r +\r +The idea for developing GERONIMO emerged from a comprehensive search for [telomerase RNA in lower plants] and was subsequently refined through an [expanded search of telomerase RNA across Insecta]. GERONIMO can test hundreds of genomes and ensures the stability and reproducibility of the analyses performed.\r +\r +\r +[telomerase RNA in lower plants]: https://doi.org/10.1093/nar/gkab545\r +[expanded search of telomerase RNA across Insecta]: https://doi.org/10.1093/nar/gkac1202\r +\r +## Scope\r +The GERONIMO tool utilises covariance models (CMs) to conduct homology searches of RNA sequences across a wide range of gene families in a broad evolutionary context. 
Specifically, it can be utilised to:\r +\r +* Detect RNA sequences that share a common evolutionary ancestor\r +* Identify and align orthologous RNA sequences among closely related species, as well as paralogous sequences within a single species\r +* Identify conserved non-coding RNAs in a genome, and extract upstream genomic regions to characterise potential promoter regions. \r +It is important to note that GERONIMO is a computational tool, and as such, it is intended to be run on a computer with a small amount of data. Appropriate computational infrastructure is necessary for analysing hundreds of genomes.\r +\r +Although GERONIMO was primarily designed for Telomerase RNA identification, its functionality extends to include the detection and alignment of other RNA gene families, including **rRNA**, **tRNA**, **snRNA**, **miRNA**, and **lncRNA**. This can aid in identifying paralogs and orthologs across different species that may carry specific functions, making it useful for phylogenetic analyses. \r +\r +It is crucial to remember that some gene families may exhibit similar characteristics but different functions. Therefore, analysing the data and functional annotation after conducting the search is essential to characterise the sequences properly.\r +\r +## Pipeline overview\r +\r +\r +By default, the GERONIMO pipeline conducts high-throughput searches of homology sequences in downloaded genomes utilizing covariance models. If a significant similarity is detected between the model and genome sequence, the pipeline extracts the upstream region, making it convenient to identify the promoter of the discovered gene. In brief, the pipeline:\r +- Compiles a list of genomes using the NCBI's [Entrez] database based on a specified query, *e.g. 
"Rhodophyta"[Organism]*\r +- Downloads and decompresses the requested genomes using *rsync* and *gunzip*, respectively\r +- *Optionally*, generates a covariance model based on a provided alignment using [Infernal]\r +- Conducts searches among the genomes using the covariance model [Infernal]\r +- Supplements genome information with taxonomy data using [rentrez]\r +- Expands the significant hits sequence by extracting upstream genomic regions using [*blastcmd*]\r +- Compiles the results, organizes them into a tabular format, and generates a visual summary of the performed analysis.\r +\r +[Entrez]: https://www.ncbi.nlm.nih.gov/books/NBK179288/\r +[Infernal]: http://eddylab.org/infernal/\r +[rentrez]: https://github.com/ropensci/rentrez\r +[*blastcmd*]: https://www.ncbi.nlm.nih.gov/books/NBK569853/\r +\r +## Quick start\r +The GERONIMO is available as a `snakemake pipeline` running on Linux and Windows operating systems.\r +\r +### Windows 10\r +Instal Linux on Windows 10 (WSL) according to [instructions], which bottling down to opening PowerShell or Windows Command Prompt in *administrator mode* and pasting the following:\r +```shell\r +wsl --install\r +wsl.exe --install UBUNTU\r +```\r +Then restart the machine and follow the instructions for setting up the Linux environment.\r +\r +[instructions]: https://learn.microsoft.com/en-us/windows/wsl/install\r +\r +### Linux:\r +#### Check whether the conda is installed:\r +```shell\r +conda -V\r +```\r +> GERONIMO was tested on conda 23.3.1\r +#### 1) If you do not have installed `conda`, please install `miniconda`\r +Please follow the instructions for installing [miniconda]\r +\r +[miniconda]: https://conda.io/projects/conda/en/stable/user-guide/install/linux.html\r +\r +#### 2) Continue with installing `mamba` (recommended but optional)\r +```shell\r +conda install -n base -c conda-forge mamba\r +```\r +#### 3) Install `snakemake`\r +```shell\r +conda activate base\r +mamba create -p env_snakemake -c conda-forge -c 
bioconda snakemake\r +mamba activate env_snakemake\r +snakemake --help\r +```\r +In case of complications, please check the section `Questions & Answers` below or follow the [official documentation] for troubleshooting.\r +\r +[official documentation]: https://snakemake.readthedocs.io/en/stable/getting_started/installation.html\r +\r +### Clone the GERONIMO repository\r +Go to the path in which you want to run the analysis and clone the repository:\r +```shell\r +cd \r +git clone https://github.com/amkilar/GERONIMO.git\r +```\r +\r +### Run sample analysis to ensure GERONIMO installation was successful\r +All files are prepared for the sample analysis as a default. Please execute the line below:\r +```shell\r +snakemake -s GERONIMO.sm --cores 1 --use-conda results/summary_table.xlsx\r +```\r +\r +This will prompt GERONIMO to quickly scan all modules, verifying the correct setup of the pipeline without executing any analysis.\r +You should see the message `Building DAG of jobs...`, followed by `Nothing to be done (all requested files are present and up to date).`, when successfully completed.\r +\r +If you want to run the sample analysis fully, please remove the folder `results` from the GERONIMO directory and execute GERONIMO again with:\r +\r +`snakemake -s GERONIMO.sm --cores 1 --use-conda results/summary_table.xlsx`\r +\r +> You might consider allowing more cores to speed up the analysis, which might take up to several hours.\r +\r +#### You might want to clean `GERONIMO/` directory from the files produced by the example analysis. 
You can safely remove the following:\r +- `GERONIMO/results`\r +- `GERONIMO/database`\r +- `GERONIMO/taxonomy`\r +- `GERONIMO/temp`\r +- `.create_genome_list.touch`\r +- `list_of_genomes.txt`\r +\r +## Setup the inputs\r +\r +### 1) Prepare the `covariance models`:\r +\r +#### Browse the collection of available `covariance models` at [Rfam] (*You can find the covariance model in the tab `Curation`.*) \r +Paste the covariance model to the folder `GERONIMO/models` and ensure its name follows the convention: `cov_model_`\r +\r +[Rfam]: https://rfam.org/\r +\r +#### **OR**\r +\r +#### Prepare your own `covariance model` using [LocARNA]\r +1. Paste or upload your sequences to the web server and download the `.stk` file with the alignment result. \r + \r + > *Please note that the `.stk` file format is crucial for the analysis, containing sequence alignment and secondary structure consensus.*\r + \r + > The LocARNA web service allows you to align 30 sequences at once - if you need to align more sequences, please use the standalone version available [here] \r + > After installation run: \r + ```shell\r + mlocarna my_fasta_sequences.fasta\r + ```\r + \r +2. 
Paste the `.stk` alignment file to the folder `GERONIMO/model_to_build` and ensure its name follows the convention: `.stk`\r +\r + > Please check the example `heterotrichea.stk` format in `GERONIMO/models_to_built` for reference\r + \r +\r +[LocARNA]: http://rna.informatik.uni-freiburg.de/LocARNA/Input.jsp\r +[here]: http://www.bioinf.uni-freiburg.de/Software/LocARNA/\r +\r +\r +### 2) Adjust the `config.yaml` file\r +Please adjust the analysis specifications, as in the following example:\r +\r +> - database: ' [Organism]' (in case of difficulties with defining the database query, please follow the instructions below)\r +> - extract_genomic_region-length: (here you can determine how long the upstream genomic region should be extracted; tested for 200)\r +> - models: ["", ""] (here specify the names of models that should be used to perform analysis)\r +> \r +> *Here you can also insert the name of the covariance model you want to build with GERONIMO - just be sure you placed `.stk` file in `GERONIMO/models_to_build` before starting analysis*\r +> - CPU_for_model_building: (specify the number of available CPUs devoted to the process of building model (cannot exceed the CPU number allowed to snakemake with `--cores`)\r +>\r +> *You might ignore this parameter when you do not need to create a new covariance model*\r +\r +\r +Keep in mind that the covariance models and alignments must be present in the respective GERONIMO folders.\r + \r +### 3) Remove folder `results`, which contains example analysis output\r +### 4) **Please ensure you have enough storage capacity to download all the requested genomes (in the `GERONIMO/` directory)**\r +\r +## Run GERONIMO\r +```shell\r +mamba activate env_snakemake\r +cd ~/GERONIMO\r +snakemake -s GERONIMO.sm --cores --use-conda results/summary_table.xlsx\r +```\r + \r +## Example results\r +\r +### Outputs characterisation\r +\r +#### A) Summary table\r +The Excel table contains the results arranged by taxonomy information and hit 
significance. The specific columns include:\r +* family, organism_name, class, order, phylum (taxonomy context)\r +* GCA_id - corresponds to the genome assembly in the *NCBI database*\r +* model - describes which covariance model identified the result\r +* label - follows the *Infernal* convention of categorizing hits\r +* number - the counter of the result\r +* e_value - indicates the significance level of the hit\r +* HIT_sequence - the exact HIT sequence found by *Infernal*, which corresponds to the covariance model\r +* HIT_ID - describes in which part of the genome assembly the hit was found, which may help publish novel sequences\r +* extended_genomic_region - upstream sequence, which may contain a possible promoter sequence\r +* secondary_structure - the secondary structure consensus of the covariance model\r +\r +\r +#### B) Significant Hits Distribution Across Taxonomy Families\r +The plot provides an overview of the number of genomes in which at least one significant hit was identified, grouped by family. The bold black line corresponds to the number of genomes present in each family, helping to minimize bias regarding unequal data representation across the taxonomy.\r +\r +\r +#### C) Hits Distribution in Genomes Across Families\r +The heatmap provides information about the most significant hits from the genome, identified by a specific covariance model. Genomes are grouped by families (on the right). Hits are classified into three categories based on their e-values. Generally, these categories correspond to hit classifications ("HIT," "MAYBE," "NO HIT"). The "HIT" category is further divided to distinguish between highly significant hits and moderately significant ones.\r +\r +\r +\r +### GERONIMO directory structure\r +\r +The GERONIMO directory structure is designed to produce files in a highly structured manner, ensuring clear insight and facilitating the analysis of results. 
During a successful run, GERONIMO produces the following folders:\r +* `/database` - which contains genome assemblies that were downloaded from the *NCBI database* and grouped in subfolders\r +* `/taxonomy` - where taxonomy information is gathered and stored in the form of tables\r +* `/results` - the main folder containing all produced results:\r + * `/infernal_raw` - contains the raw results produced by *Infernal*\r + * `/infernal` - contains restructured results of *Infernal* in table format\r + * `/cmdBLAST` - contains results of *cmdblast*, which extracts the extended genomic region\r + * `/summary` - contains summary files that join results from *Infernal*, *cmdblast*, and attach taxonomy context\r + * `/plots` - contains two types of summary plots\r +* `/temp` - folder contains the information necessary to download genome assemblies from *NCBI database*\r +\r +* `/env` - stores instructions for dependency installation\r +* `/models` - where calibrated covariance models can be pasted, *for example, from the Rfam database*\r +* `/modes_to_built` - where multiple alignments in *.stk* format can be pasted\r +* `/scripts` - contains developed scripts that perform results structurization\r +\r +#### The example GERONIMO directory structure:\r +\r +```shell\r +GERONIMO\r +├── database\r +│   ├── GCA_000091205.1_ASM9120v1_genomic\r +│   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   └── ...\r +├── env\r +├── models\r +├── model_to_build\r +├── results\r +│   ├── cmdBLAST\r +│   │   ├── MRP\r +│   │   │   ├── GCA_000091205.1_ASM9120v1_genomic\r +│   │   │   │   ├── extended\r +│   │   │   │   └── filtered\r +│   │   │   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   │   │   │   ├── extended\r +│   │   │   │   └── filtered\r +│   │   │   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   │   │   │   ├── extended\r +│   │   │   │   └── filtered\r +│   │   │   └── ...\r +│   │   ├── SRP\r +│   │   │   ├── 
GCA_000091205.1_ASM9120v1_genomic\r +│   │   │   │   ├── extended\r +│   │   │   │   └── filtered\r +│   │   │   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   │   │   │   ├── extended\r +│   │   │   │   └── filtered\r +│   │   │   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   │   │   │   ├── extended\r +│   │   │   │   └── filtered\r +│   │   │   └── ...\r +│   │   ├── ...\r +│   ├── infernal\r +│   │   ├── MRP\r +│   │   │   ├── GCA_000091205.1_ASM9120v1_genomic\r +│   │   │   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   │   │   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   │   │   ├── ...\r +│   │   ├── SRP\r +│   │   │   ├── GCA_000091205.1_ASM9120v1_genomic\r +│   │   │   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   │   │   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   │   │   ├── ...\r +│   ├── plots\r +│   ├── raw_infernal\r +│   │   ├── MRP\r +│   │   │   ├── GCA_000091205.1_ASM9120v1_genomic\r +│   │   │   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   │   │   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   │   │   ├── ...\r +│   │   ├── SRP\r +│   │   │   ├── GCA_000091205.1_ASM9120v1_genomic\r +│   │   │   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   │   │   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   │   │   ├── ...\r +│   └── summary\r +│   ├── GCA_000091205.1_ASM9120v1_genomic\r +│   ├── GCA_000341285.1_ASM34128v1_genomic\r +│   ├── GCA_000350225.2_ASM35022v2_genomic\r +│   ├── ...\r +├── scripts\r +├── taxonomy\r +└── temp\r +```\r +\r +## GERONIMO applicability\r +\r +### Expanding the evolutionary context\r +To add new genomes or database queries to an existing analysis, please follow the instructions:\r +1) Rename the `list_of_genomes.txt` file to `previous_list_of_genomes.txt` or any other preferred name.\r +2) Modify the `config.yaml` file by replacing the previous database query with the new one.\r +3) Delete:\r + - `summary_table.xlsx`, `part_summary_table.csv`, `summary_table_models.xlsx` files located in the `GERONIMO\\results` directory\r 
+ - `.create_genome_list.touch` file\r +5) Run GERONIMO to calculate new results using the command:\r + ```shell\r + snakemake -s GERONIMO.sm --cores --use-conda results/summary_table.xlsx\r + ```\r +7) Once the new results are generated, reviewing them before merging them with the original results is recommended.\r +8) Copy the contents of the `previous_list_of_genomes.txt` file and paste them into the current `list_of_genomes.txt`.\r +9) Delete:\r + - `summary_table.xlsx` located in the `GERONIMO\\results` directory\r + - `.create_genome_list.touch` file\r +10) Run GERONIMO to merge the results from both analyses using the command:\r + ```shell\r + snakemake -s GERONIMO.sm --cores 1 --use-conda results/summary_table.xlsx\r + ```\r +\r +### Incorporating new covariance models into existing analysis\r +1) Copy the new covariance model to `GERONIMO/models`\r +2) Modify the `config.yaml` file by adding the name of the new model to the line `models: [...]`\r +3) Run GERONIMO to see the updated analysis outcome\r +\r +### Building a new covariance model\r +With GERONIMO, building a new covariance model from multiple sequence alignment in the `.stk` format is possible. \r +\r +To do so, simply paste `.stk` file to `GERONIMO/models_to_build` and paste the name of the new covariance model to `config.yaml` file to the line `models: [""]`\r +\r +and run GERONIMO.\r +\r +\r +## Questions & Answers\r +\r +### How to specify the database query?\r +- Visit the [NCBI Assemblies] website. \r +- Follow the instruction on the graphic below:\r +\r +[NCBI Assemblies]: https://www.ncbi.nlm.nih.gov/assembly/?term=\r +\r +### WSL: problem with creating `snakemake_env`\r +In the case of an error similar to the one below:\r +> CondaError: Unable to create prefix directory '/mnt/c/Windows/system32/env_snakemake'.\r +> Check that you have sufficient permissions. 
\r + \r +You might try to delete the cache with: `rm -r ~/.cache/` and try again.\r +\r +### When `snakemake` does not seem to be installed properly\r +In the case of the following error:\r +> Command 'snakemake' not found ...\r +\r +Check whether the `env_snakemake` is activated.\r +> It should result in a change from (base) to (env_snakemake) before your login name in the command line window.\r +\r +If you still see `(base)` before your login name, please try to activate the environment with conda:\r +`conda activate env_snakemake`\r +\r +\r +Please note that you might need to specify the full path to the `env_snakemake`, like /home/your user name/env_snakemake\r +\r +### How to browse GERONIMO results obtained in WSL?\r +You can easily access the results obtained on WSL from your Windows environment by opening `File Explorer` and pasting the following line into the search bar: `\\\\wsl.localhost\\Ubuntu\\home\\`. This will reveal a folder with your username, as specified during the configuration of your Ubuntu system. To locate the GERONIMO results, simply navigate to the folder with your username and then to the `home` folder. (`\\\\wsl.localhost\\Ubuntu\\home\\\\home\\GERONIMO`)\r +\r +### GERONIMO occupies a lot of storage space\r +Through genome downloads, GERONIMO can potentially consume storage space, rapidly leading to a shortage. Currently, downloading genomes is an essential step for optimal GERONIMO performance.\r +\r +Regrettably, if the analysis is rerun without the `/database` folder, it will result in the need to redownload genomes, which is a highly time-consuming process.\r +\r +Nevertheless, if you do not intend to repeat the analysis and have no requirement for additional genomes or models, you are welcome to retain your results tables and plots while removing the remaining files.\r +\r +It is strongly advised against using local machines for extensive analyses. 
If you lack access to external storage space, it is recommended to divide the analysis into smaller segments, which can be later merged, as explained in the section titled `Expanding the evolutionary context`.\r +\r +Considering this limitation, I am currently working on implementing a solution that will help circumvent the need for redundant genome downloads without compromising GERONIMO performance in the future.\r +\r +You might consider deleting the `.snakemake` folder to free up storage space. However, please note that deleting this folder will require the reinstallation of GERONIMO dependencies when the analysis is rerun.\r +\r +## License\r +Copyright (c) 2023 Agata M. Kilar\r +\r +Permission is hereby granted, free of charge, to any person obtaining a copy\r +of this software and associated documentation files (the "Software"), to deal\r +in the Software without restriction, including without limitation the rights\r +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r +copies of the Software, and to permit persons to whom the Software is\r +furnished to do so, subject to the following conditions:\r +\r +The above copyright notice and this permission notice shall be included in all\r +copies or substantial portions of the Software.\r +\r +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r +SOFTWARE.\r +\r +## Contact\r +mgr inż. 
Agata Magdalena Kilar, PhD (agata.kilar@ceitec.muni.cz)\r +\r +""" ; + schema1:image ; + schema1:keywords "Bioinformatics, Snakemake, rna" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "GERONIMO" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/547?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-06-19T11:29:55.009783" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/bacterial_genome_annotation" ; + schema1:license "GPL-3.0-or-later" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "bacterial_genome_annotation/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis of ribosome profiling, or Ribo-seq (also named ribosome footprinting)" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1016?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/riboseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/riboseq" ; + schema1:sdDatePublished "2024-08-05 10:23:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1016/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12214 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:09Z" ; + schema1:dateModified "2024-06-11T12:55:09Z" ; + schema1:description "Analysis of ribosome profiling, or Ribo-seq (also named ribosome footprinting)" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1016?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/riboseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1016?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "16S rRNA amplicon sequencing analysis workflow using QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-08-05 10:22:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5201 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:38Z" ; + schema1:dateModified "2024-06-11T12:54:38Z" ; + schema1:description "16S rRNA amplicon sequencing analysis workflow using QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/978?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/demultiplex" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/demultiplex" ; + schema1:sdDatePublished "2024-08-05 10:24:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/978/ro_crate?version=8" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10420 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/978?version=7" ; + schema1:keywords "bases2fastq, bcl2fastq, demultiplexing, elementbiosciences, illumina" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/demultiplex" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/978?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "The workflow takes raw ONT reads and trimmed Illumina WGS paired reads collections, and the estimated genome size and Max depth (both calculated from WF1) to run Flye and subsequently polish the assembly with HyPo. It produces collapsed assemblies (unpolished and polished) and runs all the QC analyses (gfastats, BUSCO, and Merqury)." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/788?version=1" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/galaxy" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ERGA ONT+Illumina Assembly+QC Flye+HyPo v2403 (WF2)" ; + schema1:sdDatePublished "2024-08-05 10:25:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/788/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 283740 ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/2.Contigging/pics/Cont_ONTflye_2403.png" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 55231 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-11T12:41:58Z" ; + schema1:dateModified "2024-03-11T12:41:58Z" ; + schema1:description "The workflow takes raw ONT reads and trimmed Illumina WGS paired reads collections, and the estimated genome size and Max depth (both calculated from WF1) to run Flye and subsequently polish the assembly with HyPo. It produces collapsed assemblies (unpolished and polished) and runs all the QC analyses (gfastats, BUSCO, and Merqury)." ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "name:ERGA, name:ASSEMBLY+QC, name:ONT, name:ILLUMINA" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ERGA ONT+Illumina Assembly+QC Flye+HyPo v2403 (WF2)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/2.Contigging/Galaxy-Workflow-ERGA_ONT_Illumina_Assembly_QC_Flye_HyPo_v2403_(WF2).ga" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=14" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-08-05 10:22:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=14" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11981 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:02Z" ; + schema1:dateModified "2024-06-11T12:55:02Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=14" ; + schema1:version 14 . + + a schema1:Dataset ; + schema1:datePublished "2021-07-26T10:22:23.315184" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-variation-reporting" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:sdDatePublished "2021-10-27 15:45:07 +0100" ; + schema1:softwareVersion "v0.1.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.6" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, related to the Galaxy training tutorial "[Champs blocs](https://training.galaxyproject.org/training-material/topics/ecology/tutorials/champs-blocs/tutorial.html)" .\r +\r +This workflow allows to produce Visual Rollover Indicator and dissimilarity as diversity indices on boulder fields.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/661?version=1" ; + schema1:isBasedOn "https://ecology.usegalaxy.eu/u/ylebras/w/workflow-constructed-from-history-champs-bloc-1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Boulder fields indicators" ; + schema1:sdDatePublished "2024-08-05 10:27:16 +0100" ; + schema1:url "https://workflowhub.eu/workflows/661/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8647 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-10T08:55:14Z" ; + schema1:dateModified "2023-11-10T08:55:14Z" ; + schema1:description """Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, related to the Galaxy training tutorial "[Champs blocs](https://training.galaxyproject.org/training-material/topics/ecology/tutorials/champs-blocs/tutorial.html)" .\r +\r +This workflow allows to produce Visual Rollover Indicator and dissimilarity as diversity indices on boulder fields.\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Boulder fields indicators" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/661?version=1" ; + schema1:version 1 ; + ns1:input , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# ROIforMSI\r +Source codes for manuscript "Delineating Regions-of-interest for Mass Spectrometry Imaging by Multimodally Corroborated Spatial Segmentation"\r +\r +\r +"ExampleWorkflow.ipynb" is a methods document to demonstrate the workflow of our multimodal fusion-based spatial segmentation.\r +\r +\r +"Utilities.py" contains all the tools to implement our method.\r +\r +\r +"gui.py" and "registration_gui.py" are files to implement linear and nonlinear registration.\r +\r +(Licence: GPL-3)""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.437.1" ; + schema1:isBasedOn "https://github.com/guoang4github/ROIforMSI/" ; + schema1:license "AGPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Delineating Regions-of-interest for Mass Spectrometry Imaging by Multimodally Corroborated Spatial Segmentation" ; + schema1:sdDatePublished "2024-08-05 10:31:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/437/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2007984 ; + schema1:creator , + ; + schema1:dateCreated "2023-02-16T00:34:18Z" ; + schema1:dateModified "2023-03-08T23:57:45Z" ; + schema1:description """# ROIforMSI\r +Source codes for manuscript "Delineating Regions-of-interest for Mass Spectrometry Imaging by Multimodally Corroborated Spatial Segmentation"\r +\r +\r +"ExampleWorkflow.ipynb" is a methods document to demonstrate the workflow of our multimodal fusion-based spatial segmentation.\r +\r +\r +"Utilities.py" contains all the tools to implement our method.\r +\r +\r +"gui.py" and "registration_gui.py" are files to implement linear and nonlinear registration.\r +\r +(Licence: GPL-3)""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/AGPL-3.0" ; + schema1:name "Delineating Regions-of-interest for Mass Spectrometry Imaging by Multimodally Corroborated Spatial Segmentation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/437?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 277163 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "CAGE-seq pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/969?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/cageseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/cageseq" ; + schema1:sdDatePublished "2024-08-05 10:24:16 +0100" ; + schema1:url "https://workflowhub.eu/workflows/969/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5423 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:44Z" ; + schema1:dateModified "2024-06-11T12:54:44Z" ; + schema1:description "CAGE-seq pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/969?version=2" ; + schema1:keywords "cage, cage-seq, cageseq-data, gene-expression, rna" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/cageseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/969?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:description "Preprocessing of raw SARS-CoV-2 reads. More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/2?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genomics - Read pre-processing" ; + schema1:sdDatePublished "2024-08-05 10:33:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/2/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 7753 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 41191 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T10:20:13Z" ; + schema1:dateModified "2023-01-16T13:39:40Z" ; + schema1:description "Preprocessing of raw SARS-CoV-2 reads. 
More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Genomics - Read pre-processing" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/2?version=1" ; + schema1:version 1 ; + ns1:input , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 12854 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "This workflow is created as part of a tutorial listed on GTN. The workflow shows the steps in human copy number variance detection using the Contrl_FREEC tool. " ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.676.1" ; + schema1:isBasedOn "https://usegalaxy.eu/u/khaled_jumah/w/somatic-variant-discovery-from-wes-data-using-control-freec" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Somatic-Variant-Discovery-from-WES-Data-Using-Control-FREEC" ; + schema1:sdDatePublished "2024-08-05 10:27:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/676/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 70117 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2023-11-23T17:19:43Z" ; + schema1:dateModified "2024-04-03T15:20:03Z" ; + schema1:description "This workflow is created as part of a tutorial listed on GTN. The workflow shows the steps in human copy number variance detection using the Contrl_FREEC tool. 
" ; + schema1:keywords "hCNV, variant-analysis, MIRACUM" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Somatic-Variant-Discovery-from-WES-Data-Using-Control-FREEC" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/676?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + ; + ns1:output , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=11" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-08-05 10:22:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=11" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9874 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:02Z" ; + schema1:dateModified "2024-06-11T12:55:02Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=11" ; + schema1:version 11 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=20" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-08-05 10:23:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=20" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 18609 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:16Z" ; + schema1:dateModified "2024-06-11T12:55:16Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=20" ; + schema1:version 20 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-03T16:25:31.059727" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.17" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Calculating and visualizing marine biodiversity indicators" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/758?version=1" ; + schema1:license "CC-BY-SA-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Calculating and visualizing OBIS marine biodiversity indicators" ; + schema1:sdDatePublished "2024-08-05 10:25:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/758/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5916 ; + schema1:creator ; + schema1:dateCreated "2024-02-15T11:58:49Z" ; + schema1:dateModified "2024-02-15T11:58:49Z" ; + schema1:description "Calculating and visualizing marine biodiversity indicators" ; + schema1:isPartOf ; + schema1:keywords "Ecology" ; + schema1:license "https://spdx.org/licenses/CC-BY-SA-4.0" ; + schema1:name "Calculating and visualizing OBIS marine biodiversity indicators" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/758?version=1" ; + schema1:version 1 ; + ns1:input . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for processing of 10xGenomics single cell rnaseq data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1021?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/scrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/scrnaseq" ; + schema1:sdDatePublished "2024-08-05 10:23:13 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1021/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5942 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:17Z" ; + schema1:dateModified "2024-06-11T12:55:17Z" ; + schema1:description "Pipeline for processing of 10xGenomics single cell rnaseq data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:keywords "10x-genomics, 10xgenomics, alevin, bustools, Cellranger, kallisto, rna-seq, single-cell, star-solo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/scrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1021?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Genome-wide alternative splicing analysis v.2" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/482?version=5" ; + schema1:license "CC-BY-NC-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genome-wide alternative splicing analysis v.2" ; + schema1:sdDatePublished "2024-08-05 10:30:12 +0100" ; + schema1:url "https://workflowhub.eu/workflows/482/ro_crate?version=5" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 214419 . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 30115 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 119677 ; + schema1:creator ; + schema1:dateCreated "2023-06-11T12:01:08Z" ; + schema1:dateModified "2023-06-11T12:01:24Z" ; + schema1:description "Genome-wide alternative splicing analysis v.2" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/482?version=6" ; + schema1:keywords "Transcriptomics, Alternative splicing, isoform switching" ; + schema1:license "https://spdx.org/licenses/CC-BY-NC-4.0" ; + schema1:name "Genome-wide alternative splicing analysis v.2" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/482?version=5" ; + schema1:version 5 ; + ns1:input , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1025?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/taxprofiler" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/taxprofiler" ; + schema1:sdDatePublished "2024-08-05 10:23:10 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1025/ro_crate?version=10" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15939 ; + schema1:creator , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:20Z" ; + schema1:dateModified "2024-06-11T12:55:20Z" ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1025?version=10" ; + schema1:keywords "Classification, illumina, long-reads, Metagenomics, microbiome, nanopore, pathogen, Profiling, shotgun, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/taxprofiler" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1025?version=10" ; + schema1:version 10 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-09T09:28:52+00:00" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:mainEntity . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:name "main" ; + schema1:programmingLanguage . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for analysis of Molecular Pixelation assays" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1010?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/pixelator" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/pixelator" ; + schema1:sdDatePublished "2024-08-05 10:23:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1010/ro_crate?version=7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12801 ; + schema1:creator ; + schema1:dateCreated "2024-07-18T03:03:14Z" ; + schema1:dateModified "2024-07-18T03:03:14Z" ; + schema1:description "Pipeline for analysis of Molecular Pixelation assays" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1010?version=7" ; + schema1:keywords "molecular-pixelation, pixelator, pixelgen-technologies, proteins, single-cell, single-cell-omics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/pixelator" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1010?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=18" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-08-05 10:23:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=18" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12737 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:50Z" ; + schema1:dateModified "2024-06-11T12:54:50Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=18" ; + schema1:version 18 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """This is a Galaxy workflow for performing molecular dynamics simulations and analysis with flavivirus helicases bound to a ligand/drug molecule. \r +The associated input files can be found at:\r +https://zenodo.org/records/7493015\r +The associated output files can be found at:\r +https://zenodo.org/records/7850935""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.761.1" ; + schema1:isBasedOn "https://zenodo.org/records/7493015" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for flavivirushelicase_proteindrugcomplex" ; + schema1:sdDatePublished "2024-08-05 10:25:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/761/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 72937 ; + schema1:creator ; + schema1:dateCreated "2024-02-16T17:23:53Z" ; + schema1:dateModified "2024-03-02T16:44:04Z" ; + schema1:description """This is a Galaxy workflow for performing molecular dynamics simulations and analysis with flavivirus helicases bound to a ligand/drug molecule. \r +The associated input files can be found at:\r +https://zenodo.org/records/7493015\r +The associated output files can be found at:\r +https://zenodo.org/records/7850935""" ; + schema1:keywords "helicase, rna virus, zika, dengue, west nile, NS3, molecular dynamics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "flavivirushelicase_proteindrugcomplex" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/761?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2023-11-16T14:32:18.203562" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A Nanostring nCounter analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1003?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/nanostring" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nanostring" ; + schema1:sdDatePublished "2024-08-05 10:23:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1003/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10131 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:06Z" ; + schema1:dateModified "2024-06-11T12:55:06Z" ; + schema1:description "A Nanostring nCounter analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1003?version=4" ; + schema1:keywords "nanostring, nanostringnorm" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nanostring" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1003?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.129.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (Fpocket)" ; + schema1:sdDatePublished "2024-08-05 10:30:53 +0100" 
; + schema1:url "https://workflowhub.eu/workflows/129/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 42751 ; + schema1:creator , + , + ; + schema1:dateCreated "2021-06-29T09:24:27Z" ; + schema1:dateModified "2022-09-15T11:20:56Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in 
Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/129?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (Fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_virtual-screening/0ab8d1d3410c67db6a5a25d3dde6f3e0303af08f/biobb_wf_virtual-screening/notebooks/fpocket/wf_vs_fpocket.ipynb" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 
2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.131.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/131/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 77840 ; + schema1:creator , + ; + schema1:dateCreated "2022-09-15T12:31:30Z" ; + schema1:dateModified "2023-01-16T13:50:20Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r 
+[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/131?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_amber_md_setup/master/biobb_wf_amber_md_setup/notebooks/mdsetup_lig/biobb_amber_complex_setup_notebook.ipynb" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.129.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (Fpocket)" ; + schema1:sdDatePublished "2024-08-05 10:30:53 +0100" 
; + schema1:url "https://workflowhub.eu/workflows/129/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 43287 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-09-15T11:21:17Z" ; + schema1:dateModified "2023-01-16T13:50:19Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in 
Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/129?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (Fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_virtual-screening/master/biobb_wf_virtual-screening/notebooks/fpocket/wf_vs_fpocket.ipynb" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/303?version=2" ; + schema1:isBasedOn "https://github.com/DimitraPanou/scRNAseq-cwl.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for seurat scRNA-seq" ; + schema1:sdDatePublished "2024-08-05 10:32:12 +0100" ; + schema1:url "https://workflowhub.eu/workflows/303/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6555 ; + schema1:dateCreated "2022-04-14T13:28:34Z" ; + schema1:dateModified "2023-01-16T13:59:13Z" ; + schema1:description "" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/303?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "seurat scRNA-seq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/303?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 56489 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Galaxy-E (ecology.usegalaxy.eu) workflow to calculate species presence / absence, community metrics and compute generalized linear models to identify effects and significativity of these effects on biodiversity." ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/49?version=2" ; + schema1:isBasedOn "https://ecology.usegalaxy.eu/u/ylebras/w/workflow-constructed-from-history-population-and-community-metrics-calculation-from-biodiversity-data-1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Population and community metrics calculation from Biodiversity data" ; + schema1:sdDatePublished "2024-08-05 10:33:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/49/ro_crate?version=2" . 
+ + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 2434 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9837 ; + schema1:creator , + ; + schema1:dateCreated "2020-07-24T13:00:50Z" ; + schema1:dateModified "2023-01-16T13:44:17Z" ; + schema1:description "Galaxy-E (ecology.usegalaxy.eu) workflow to calculate species presence / absence, community metrics and compute generalized linear models to identify effects and significativity of these effects on biodiversity." ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/49?version=1" ; + schema1:keywords "Community_metrics, Presence_absence, GLM, Ecology, Biodiversity, Species abundance, Modeling, Statistics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Population and community metrics calculation from Biodiversity data" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/49?version=2" ; + schema1:version 2 ; + ns1:input , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 6739 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Coprolite Identification" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/974?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/coproid" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/coproid" ; + schema1:sdDatePublished "2024-08-05 10:24:10 +0100" ; + schema1:url "https://workflowhub.eu/workflows/974/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4839 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:45Z" ; + schema1:dateModified "2024-06-11T12:54:45Z" ; + schema1:description "Coprolite Identification" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/974?version=2" ; + schema1:keywords "adna, ancient-dna, coprolite, microbiome" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/coproid" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/974?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=12" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-08-05 10:22:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=12" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10195 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:02Z" ; + schema1:dateModified "2024-06-11T12:55:02Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=12" ; + schema1:version 12 . + + a schema1:Dataset ; + schema1:datePublished "2021-12-06T14:50:19.556018" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/gromacs-mmgbsa" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "gromacs-mmgbsa/main" ; + schema1:sdDatePublished "2021-12-07 03:00:58 +0000" ; + schema1:softwareVersion "v0.1.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Simple bacterial assembly and annotation" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/966?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/bacass" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bacass" ; + schema1:sdDatePublished "2024-08-05 10:24:17 +0100" ; + schema1:url "https://workflowhub.eu/workflows/966/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11118 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:43Z" ; + schema1:dateModified "2024-06-11T12:54:43Z" ; + schema1:description "Simple bacterial assembly and annotation" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/966?version=7" ; + schema1:keywords "Assembly, bacterial-genomes, denovo, denovo-assembly, genome-assembly, hybrid-assembly, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bacass" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/966?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + schema1:datePublished "2024-07-15T07:31:47.944539" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "cutandrun/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """We present an R script that describes the workflow for analysing honey bee (_Apis mellifera_) wing shape. It is based on a dataset of wing images and landmark coordinates available at Zenodo: https://doi.org/10.5281/zenodo.8128010. \r +The dataset can be used as a reference for the identification of local bees from southern Kazakhstan, which most probably belong to the subspecies _Apis mellifera pomonella_. It was compared with data from Nawrocka et al. (2018), available at Zenodo: https://doi.org/10.5281/zenodo.7567336. 
""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.559.1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Apis-mellifera-wings-KZ: A workflow for morphometric identification of honey bees from Kazakhstan" ; + schema1:sdDatePublished "2024-08-05 10:27:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/559/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 26894 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1083489 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2023-08-30T06:57:48Z" ; + schema1:dateModified "2023-08-30T07:06:42Z" ; + schema1:description """We present an R script that describes the workflow for analysing honey bee (_Apis mellifera_) wing shape. It is based on a dataset of wing images and landmark coordinates available at Zenodo: https://doi.org/10.5281/zenodo.8128010. \r +The dataset can be used as a reference for the identification of local bees from southern Kazakhstan, which most probably belong to the subspecies _Apis mellifera pomonella_. It was compared with data from Nawrocka et al. (2018), available at Zenodo: https://doi.org/10.5281/zenodo.7567336. """ ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Apis-mellifera-wings-KZ: A workflow for morphometric identification of honey bees from Kazakhstan" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/559?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "CAGE-seq pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/969?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/cageseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/cageseq" ; + schema1:sdDatePublished "2024-08-05 10:24:15 +0100" ; + schema1:url "https://workflowhub.eu/workflows/969/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5423 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:44Z" ; + schema1:dateModified "2024-06-11T12:54:44Z" ; + schema1:description "CAGE-seq pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/969?version=2" ; + schema1:keywords "cage, cage-seq, cageseq-data, gene-expression, rna" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/cageseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/969?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/520?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for SARS-CoV-2 PostProcessing" ; + schema1:sdDatePublished "2024-08-05 10:30:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/520/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11463 ; + schema1:dateCreated "2023-06-28T12:12:55Z" ; + schema1:dateModified "2023-06-28T12:12:55Z" ; + schema1:description "" ; + schema1:keywords "SARS-CoV-2, SANBI" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "SARS-CoV-2 PostProcessing" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/520?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=11" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=11" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8901 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:12Z" ; + schema1:dateModified "2024-06-11T12:55:12Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=11" ; + schema1:version 11 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "RNA-RNA interactome analysis using ChiRA tools suite. The aligner used is BWA-MEM." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/66?version=1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for RNA-RNA interactome analysis using BWA-MEM" ; + schema1:sdDatePublished "2024-08-05 10:33:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/66/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 29083 ; + schema1:creator ; + schema1:dateCreated "2020-11-03T19:46:07Z" ; + schema1:dateModified "2023-02-13T14:06:45Z" ; + schema1:description "RNA-RNA interactome analysis using ChiRA tools suite. The aligner used is BWA-MEM." ; + schema1:keywords "rna, Transcriptomics" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "RNA-RNA interactome analysis using BWA-MEM" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/66?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """SAMBA is a FAIR scalable workflow integrating, into a unique tool, state-of-the-art bioinformatics and statistical methods to conduct reproducible eDNA analyses using Nextflow. SAMBA starts processing by verifying integrity of raw reads and metadata. Then all bioinformatics processing is done using commonly used procedure (QIIME 2 and DADA2) but adds new steps relying on dbOTU3 and microDecon to build high quality ASV count tables. 
Extended statistical analyses are also performed. Finally, SAMBA produces a full dynamic HTML report including resources used, commands executed, intermediate results, statistical analyses and figures.\r +\r +The SAMBA pipeline can run tasks across multiple compute infrastructures in a very portable manner. It comes with singularity containers making installation trivial and results highly reproducible.""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.156.1" ; + schema1:isBasedOn "https://github.com/ifremer-bioinformatics/samba" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for SAMBA: Standardized and Automated MetaBarcoding Analyses workflow" ; + schema1:sdDatePublished "2024-08-05 10:33:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/156/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 82198 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2021-09-10T13:40:28Z" ; + schema1:dateModified "2023-01-16T13:52:16Z" ; + schema1:description """SAMBA is a FAIR scalable workflow integrating, into a unique tool, state-of-the-art bioinformatics and statistical methods to conduct reproducible eDNA analyses using Nextflow. SAMBA starts processing by verifying integrity of raw reads and metadata. Then all bioinformatics processing is done using commonly used procedure (QIIME 2 and DADA2) but adds new steps relying on dbOTU3 and microDecon to build high quality ASV count tables. Extended statistical analyses are also performed. Finally, SAMBA produces a full dynamic HTML report including resources used, commands executed, intermediate results, statistical analyses and figures.\r +\r +The SAMBA pipeline can run tasks across multiple compute infrastructures in a very portable manner. 
It comes with singularity containers making installation trivial and results highly reproducible.""" ; + schema1:image ; + schema1:keywords "Metabarcoding, Nextflow, 16S, 18S, eDNA" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "SAMBA: Standardized and Automated MetaBarcoding Analyses workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/156?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 398906 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Metagenomic dataset taxonomic classification using kraken2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/101?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for 1: Plant virus detection with kraken2 (PE)" ; + schema1:sdDatePublished "2024-08-05 10:33:12 +0100" ; + schema1:url "https://workflowhub.eu/workflows/101/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12810 ; + schema1:dateCreated "2021-02-04T09:07:38Z" ; + schema1:dateModified "2023-02-13T14:06:45Z" ; + schema1:description "Metagenomic dataset taxonomic classification using kraken2" ; + schema1:keywords "Virology, kraken" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "1: Plant virus detection with kraken2 (PE)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/101?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + , + , + . 
+ + a schema1:Dataset ; + schema1:datePublished "2021-12-20T17:04:47.620664" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/protein-ligand-complex-parameterization" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "protein-ligand-complex-parameterization/main" ; + schema1:sdDatePublished "2021-12-21 03:01:01 +0000" ; + schema1:softwareVersion "v0.1.2" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "An example workflow for the Specimen Data Refinery tool, allowing an individual tool to be used" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.374.1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for DLA-Collections-test" ; + schema1:sdDatePublished "2024-08-05 10:31:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/374/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13871 ; + schema1:creator , + ; + schema1:dateCreated "2022-07-08T13:04:19Z" ; + schema1:dateModified "2023-01-16T14:01:59Z" ; + schema1:description "An example workflow for the Specimen Data Refinery tool, allowing an individual tool to be used" ; + schema1:keywords "Default-SDR, multi-specimen-input, collections, validated-2022-06-29" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "DLA-Collections-test" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/374?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """This repository contains the workflow used to find and characterize the HI sources in the data cube of the SKA Data Challenge 2. It was developed to process a simulated [SKA data cube](https://sdc2.astronomers.skatelescope.org/sdc2-challenge/data) data cube, but can be adapted for clean HI data cubes from other radio observatories.\r +\r +The workflow is managed and executed using snakemake workflow management system. It uses [https://spectral-cube.readthedocs.io/en/latest/](http://) based on [https://dask.org/](http://) parallelization tool and [https://www.astropy.org/](http://) suite to divide the large cube in smaller pieces. On each of the subcubes, we execute [https://github.com/SoFiA-Admin/SoFiA-2](http://) for masking the subcubes, find sources and characterize their properties. Finally, the individual catalogs are cleaned, concatenated into a single catalog, and duplicates from the overlapping regions are eliminated. Some diagnostic plots are produced using Jupyter notebook.\r +\r +The documentation can be found in the [Documentation page](https://hi-friends-sdc2.readthedocs.io/en/latest/index.html). The workflow and the results can be cited in the [Zenodo record](https://doi.org/10.5281/zenodo.5167659).""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/141?version=1" ; + schema1:isBasedOn "https://github.com/HI-FRIENDS-SDC2/hi-friends" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for HI-FRIENDS HI data cube source finding and characterization" ; + schema1:sdDatePublished "2024-08-05 10:33:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/141/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 590 ; + schema1:dateCreated "2021-08-09T20:25:49Z" ; + schema1:dateModified "2023-01-16T13:51:30Z" ; + schema1:description """This repository contains the workflow used to find and characterize the HI sources in the data cube of the SKA Data Challenge 2. It was developed to process a simulated [SKA data cube](https://sdc2.astronomers.skatelescope.org/sdc2-challenge/data) data cube, but can be adapted for clean HI data cubes from other radio observatories.\r +\r +The workflow is managed and executed using snakemake workflow management system. It uses [https://spectral-cube.readthedocs.io/en/latest/](http://) based on [https://dask.org/](http://) parallelization tool and [https://www.astropy.org/](http://) suite to divide the large cube in smaller pieces. On each of the subcubes, we execute [https://github.com/SoFiA-Admin/SoFiA-2](http://) for masking the subcubes, find sources and characterize their properties. Finally, the individual catalogs are cleaned, concatenated into a single catalog, and duplicates from the overlapping regions are eliminated. Some diagnostic plots are produced using Jupyter notebook.\r +\r +The documentation can be found in the [Documentation page](https://hi-friends-sdc2.readthedocs.io/en/latest/index.html). The workflow and the results can be cited in the [Zenodo record](https://doi.org/10.5281/zenodo.5167659).""" ; + schema1:image ; + schema1:keywords "SKA, radio interferometry" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "HI-FRIENDS HI data cube source finding and characterization" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/141?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 8546 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Compare DNA/RNA/protein sequences on k-mer content" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/994?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/kmermaid" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/kmermaid" ; + schema1:sdDatePublished "2024-08-05 10:23:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/994/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6459 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:58Z" ; + schema1:dateModified "2024-06-11T12:54:58Z" ; + schema1:description "Compare DNA/RNA/protein sequences on k-mer content" ; + schema1:keywords "k-mer, kmer, kmer-counting, kmer-frequency-count" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/kmermaid" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/994?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 141286 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. 
Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. 
\r +- At the **biological level**, it is important to link observed **conformational ensembles**, to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDKe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.488.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_flexdyn/cwl" ; + schema1:license "other-open" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Protein conformational ensembles generation" ; + schema1:sdDatePublished "2024-08-05 10:30:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/488/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 30813 ; + schema1:creator , + ; + schema1:dateCreated "2023-05-31T13:51:17Z" ; + schema1:dateModified "2023-05-31T14:04:20Z" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. 
Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. 
\r +- At the **biological level**, it is important to link observed **conformational ensembles**, to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDKe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/488?version=1" ; + schema1:keywords "" ; + schema1:license "" ; + schema1:name "CWL Protein conformational ensembles generation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_flexdyn/cwl/workflow.cwl" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.132.4" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Amber Constant pH MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/132/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 54708 ; + schema1:creator , + ; + schema1:dateCreated "2023-07-26T09:38:15Z" ; + schema1:dateModified "2023-07-26T09:38:42Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/132?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Amber Constant pH MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://raw.githubusercontent.com/bioexcel/biobb_wf_amber_md_setup/master/biobb_wf_amber_md_setup/notebooks/mdsetup_ph/biobb_amber_CpHMD_notebook.ipynb" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + ; + schema1:description "This workflow generates binding scores that correlate well with binding affinities using an additional tool SuCOS Max, developed at Oxford University. More info can be found at https://covid19.galaxyproject.org/cheminformatics/" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/15?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Cheminformatics - SuCOS scoring" ; + schema1:sdDatePublished "2024-08-05 10:33:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/15/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 2468 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9469 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T14:50:31Z" ; + schema1:dateModified "2023-01-16T13:40:57Z" ; + schema1:description "This workflow generates binding scores that correlate well with binding affinities using an additional tool SuCOS Max, developed at Oxford University. More info can be found at https://covid19.galaxyproject.org/cheminformatics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Cheminformatics - SuCOS scoring" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/15?version=1" ; + schema1:version 1 ; + ns1:input , + . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 5544 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """

\r +

+VIRify

\r +

Sankey plot

\r +

VIRify is a recently developed pipeline for the detection, annotation, and taxonomic classification of viral contigs in metagenomic and metatranscriptomic assemblies. The pipeline is part of the repertoire of analysis services offered by MGnify. VIRify’s taxonomic classification relies on the detection of taxon-specific profile hidden Markov models (HMMs), built upon a set of 22,014 orthologous protein domains and referred to as ViPhOGs.

\r +

VIRify was implemented in CWL.

\r +

+What do I need?

\r +

The current implementation uses CWL version 1.2 dev+2. It was tested using Toil version 4.10 as the workflow engine and conda to manage the software dependencies.

\r +

+Docker - Singularity support

\r +

Soon…

\r +

+Setup environment

\r +
conda env create -f cwl/requirements/conda_env.yml\r
+conda activate viral_pipeline\r
+
\r +

+Basic execution

\r +
cd cwl/\r
+virify.sh -h\r
+
\r +

+A note about metatranscriptomes

\r +

Although VIRify has been benchmarked and validated with metagenomic data in mind, it is also possible to use this tool to detect RNA viruses in metatranscriptome assemblies (e.g. SARS-CoV-2). However, some additional considerations for this purpose are outlined below:
\r +1. Quality control: As for metagenomic data, a thorough quality control of the FASTQ sequence reads to remove low-quality bases, adapters and host contamination (if appropriate) is required prior to assembly. This is especially important for metatranscriptomes as small errors can further decrease the quality and contiguity of the assembly obtained. We have used TrimGalore for this purpose.

\r +

2. Assembly: There are many assemblers available that are appropriate for either metagenomic or single-species transcriptomic data. However, to our knowledge, there is no assembler currently available specifically for metatranscriptomic data. From our preliminary investigations, we have found that transcriptome-specific assemblers (e.g. rnaSPAdes) generate more contiguous and complete metatranscriptome assemblies compared to metagenomic alternatives (e.g. MEGAHIT and metaSPAdes).

\r +

3. Post-processing: Metatranscriptomes generate highly fragmented assemblies. Therefore, filtering contigs based on a set minimum length has a substantial impact in the number of contigs processed in VIRify. It has also been observed that the number of false-positive detections of VirFinder (one of the tools included in VIRify) is lower among larger contigs. The choice of a length threshold will depend on the complexity of the sample and the sequencing technology used, but in our experience any contigs <2 kb should be analysed with caution.

\r +

4. Classification: The classification module of VIRify depends on the presence of a minimum number and proportion of phylogenetically-informative genes within each contig in order to confidently assign a taxonomic lineage. Therefore, short contigs typically obtained from metatranscriptome assemblies remain generally unclassified. For targeted classification of RNA viruses (for instance, to search for Coronavirus-related sequences), alternative DNA- or protein-based classification methods can be used. Two of the possible options are: (i) using MashMap to screen the VIRify contigs against a database of RNA viruses (e.g. Coronaviridae) or (ii) using hmmsearch to screen the proteins obtained in the VIRify contigs against marker genes of the taxon of interest.

\r +

Contact us

\r +MGnify helpdesk""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/27?version=1" ; + schema1:isBasedOn "https://github.com/EBI-Metagenomics/emg-viral-pipeline/blob/master/virify.nf" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for VIRify" ; + schema1:sdDatePublished "2024-08-05 10:33:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/27/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 24762 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2020-06-08T10:29:47Z" ; + schema1:dateModified "2023-03-24T16:47:02Z" ; + schema1:description """

\r +

+VIRify

\r +

Sankey plot

\r +

VIRify is a recently developed pipeline for the detection, annotation, and taxonomic classification of viral contigs in metagenomic and metatranscriptomic assemblies. The pipeline is part of the repertoire of analysis services offered by MGnify. VIRify’s taxonomic classification relies on the detection of taxon-specific profile hidden Markov models (HMMs), built upon a set of 22,014 orthologous protein domains and referred to as ViPhOGs.

\r +

VIRify was implemented in CWL.

\r +

+What do I need?

\r +

The current implementation uses CWL version 1.2 dev+2. It was tested using Toil version 4.10 as the workflow engine and conda to manage the software dependencies.

\r +

+Docker - Singularity support

\r +

Soon…

\r +

+Setup environment

\r +
conda env create -f cwl/requirements/conda_env.yml\r
+conda activate viral_pipeline\r
+
\r +

+Basic execution

\r +
cd cwl/\r
+virify.sh -h\r
+
\r +

+A note about metatranscriptomes

\r +

Although VIRify has been benchmarked and validated with metagenomic data in mind, it is also possible to use this tool to detect RNA viruses in metatranscriptome assemblies (e.g. SARS-CoV-2). However, some additional considerations for this purpose are outlined below:
\r +1. Quality control: As for metagenomic data, a thorough quality control of the FASTQ sequence reads to remove low-quality bases, adapters and host contamination (if appropriate) is required prior to assembly. This is especially important for metatranscriptomes as small errors can further decrease the quality and contiguity of the assembly obtained. We have used TrimGalore for this purpose.

\r +

2. Assembly: There are many assemblers available that are appropriate for either metagenomic or single-species transcriptomic data. However, to our knowledge, there is no assembler currently available specifically for metatranscriptomic data. From our preliminary investigations, we have found that transcriptome-specific assemblers (e.g. rnaSPAdes) generate more contiguous and complete metatranscriptome assemblies compared to metagenomic alternatives (e.g. MEGAHIT and metaSPAdes).

\r +

3. Post-processing: Metatranscriptomes generate highly fragmented assemblies. Therefore, filtering contigs based on a set minimum length has a substantial impact in the number of contigs processed in VIRify. It has also been observed that the number of false-positive detections of VirFinder (one of the tools included in VIRify) is lower among larger contigs. The choice of a length threshold will depend on the complexity of the sample and the sequencing technology used, but in our experience any contigs <2 kb should be analysed with caution.

\r +

4. Classification: The classification module of VIRify depends on the presence of a minimum number and proportion of phylogenetically-informative genes within each contig in order to confidently assign a taxonomic lineage. Therefore, short contigs typically obtained from metatranscriptome assemblies remain generally unclassified. For targeted classification of RNA viruses (for instance, to search for Coronavirus-related sequences), alternative DNA- or protein-based classification methods can be used. Two of the possible options are: (i) using MashMap to screen the VIRify contigs against a database of RNA viruses (e.g. Coronaviridae) or (ii) using hmmsearch to screen the proteins obtained in the VIRify contigs against marker genes of the taxon of interest.

\r +

Contact us

\r +MGnify helpdesk""" ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "VIRify" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/27?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Workflow for gene set enrichment analsysis (GSEA) and co-expression analysis (WGCNA) on transcriptomics data to analyze pathways affected in Porto-Sinusoidal Vascular Disease." ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.1040.1" ; + schema1:isBasedOn "https://github.com/aish181095/PSVD-transcriptomics-workflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Porto-Sinusoidal Vascular Disease transcriptomics analysis workflow" ; + schema1:sdDatePublished "2024-08-05 10:22:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1040/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 14252 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4381 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T14:13:25Z" ; + schema1:dateModified "2024-06-28T11:33:39Z" ; + schema1:description "Workflow for gene set enrichment analsysis (GSEA) and co-expression analysis (WGCNA) on transcriptomics data to analyze pathways affected in Porto-Sinusoidal Vascular Disease." 
; + schema1:image ; + schema1:keywords "Bioinformatics, CWL, Transcriptomics, Workflows" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Porto-Sinusoidal Vascular Disease transcriptomics analysis workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1040?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """The tool provides a calculaiton of the power spectrum of Stochastic Gravitational Wave Backgorund (SGWB) from a first-order cosmological phase transition based on the parameterisations of Roper Pol et al. (2023). The power spectrum includes two components: from the sound waves excited by collisions of bubbles of the new phase and from the turbulence that is induced by these collisions.\r +\r +The cosmological epoch of the phase transition is described by the temperature, T_star and by the number(s) of relativistic degrees of freedom, g_star that should be specified as parameters.\r +\r +The phase transition itself is characterised by phenomenological parameters, alpha, beta_H and epsilon_turb, the latent heat, the ratio of the Hubble radius to the bubble size at percolation and the fraction of the energy otuput of the phase transition that goes into turbulence.\r +\r +The product Model spectrum outputs the power spectrum for fixed values of these parameters. 
The product Phase transition parameters reproduces the constraints on the phase transition parameters from the Pulsar Timing Array gravitational wave detectors, reported by Boyer & Neronov (2024), including the estimate of the cosmological magnetic field induced by turbulence.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.815.1" ; + schema1:isBasedOn "https://gitlab.renkulab.io/astronomy/mmoda/sgwb" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Stochastic Gravitational Wave Backgorund (SGWB) tool" ; + schema1:sdDatePublished "2024-08-05 10:24:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/815/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3955 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-04-18T12:44:15Z" ; + schema1:dateModified "2024-04-18T12:54:52Z" ; + schema1:description """The tool provides a calculaiton of the power spectrum of Stochastic Gravitational Wave Backgorund (SGWB) from a first-order cosmological phase transition based on the parameterisations of Roper Pol et al. (2023). 
The power spectrum includes two components: from the sound waves excited by collisions of bubbles of the new phase and from the turbulence that is induced by these collisions.\r +\r +The cosmological epoch of the phase transition is described by the temperature, T_star and by the number(s) of relativistic degrees of freedom, g_star that should be specified as parameters.\r +\r +The phase transition itself is characterised by phenomenological parameters, alpha, beta_H and epsilon_turb, the latent heat, the ratio of the Hubble radius to the bubble size at percolation and the fraction of the energy otuput of the phase transition that goes into turbulence.\r +\r +The product Model spectrum outputs the power spectrum for fixed values of these parameters. The product Phase transition parameters reproduces the constraints on the phase transition parameters from the Pulsar Timing Array gravitational wave detectors, reported by Boyer & Neronov (2024), including the estimate of the cosmological magnetic field induced by turbulence.\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/815?version=1" ; + schema1:keywords "astronomy" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Stochastic Gravitational Wave Backgorund (SGWB) tool" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/815?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.282.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_virtual_screening/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein-ligand Docking tutorial (Fpocket)" ; + 
schema1:sdDatePublished "2024-08-05 10:30:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/282/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2666 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T08:24:40Z" ; + schema1:dateModified "2023-04-14T08:26:20Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for 
Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/282?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein-ligand Docking tutorial (Fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_virtual_screening/python/workflow.py" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# Cancer Invasion Workflow\r +\r +## Table of Contents\r +\r +- [Cancer Invasion Workflow](#cancer-invasion-workflow)\r + - [Table of Contents](#table-of-contents)\r + - [Description](#description)\r + - [Contents](#contents)\r + - [Building Blocks](#building-blocks)\r + - [Workflows](#workflows)\r + - [Resources](#resources)\r + - [Tests](#tests)\r + - [Instructions](#instructions)\r + - [Local machine](#local-machine)\r + - [Requirements](#requirements)\r + - [Usage steps](#usage-steps)\r + - [MareNostrum 4](#marenostrum-4)\r + - [Requirements in MN4](#requirements-in-mn4)\r + - [Usage steps in MN4](#usage-steps-in-mn4)\r + - [Mahti or Puhti](#mahti-or-puhti)\r + - [Requirements](#requirements)\r + - [Steps](#steps)\r + - [License](#license)\r + - [Contact](#contact)\r +\r +## Description\r +\r +Uses multiscale simulations to describe cancer progression into invasion.\r +\r +The workflow uses the following building blocks, described in order of execution:\r +\r +1. 
PhysiBoSS-Invasion\r +\r +For details on individual workflow steps, see the user documentation for each building block.\r +\r +[`GitHub repository`]()\r +\r +\r +## Contents\r +\r +### Building Blocks\r +\r +The ``BuildingBlocks`` folder contains the script to install the\r +Building Blocks used in the Cancer Invasion Workflow.\r +\r +### Workflows\r +\r +The ``Workflow`` folder contains the workflows implementations.\r +\r +Currently contains the implementation using PyCOMPSs and Snakemake (in progress).\r +\r +### Resources\r +\r +The ``Resources`` folder contains dataset files.\r +\r +### Tests\r +\r +The ``Tests`` folder contains the scripts that run each Building Block\r +used in the workflow for the given small dataset.\r +They can be executed individually for testing purposes.\r +\r +## Instructions\r +\r +### Local machine\r +\r +This section explains the requirements and usage for the Cancer Invasion Workflow in a laptop or desktop computer.\r +\r +#### Requirements\r +\r +- [`permedcoe`](https://github.com/PerMedCoE/permedcoe) package\r +- [PyCOMPSs](https://pycompss.readthedocs.io/en/stable/Sections/00_Quickstart.html) / [Snakemake](https://snakemake.readthedocs.io/en/stable/)\r +- [Singularity](https://sylabs.io/guides/3.0/user-guide/installation.html)\r +\r +#### Usage steps\r +\r +1. Clone this repository:\r +\r + ```bash\r + git clone https://github.com/PerMedCoE/cancer-invasion-workflow\r + ```\r +\r +2. Install the Building Blocks required for the Cancer Invasion Workflow:\r +\r + ```bash\r + cancer-invasion-workflow/BuildingBlocks/./install_BBs.sh\r + ```\r +\r +3. 
Get the required Building Block images from the project [B2DROP](https://b2drop.bsc.es/index.php/f/444350):\r +\r + - Required images:\r + - PhysiCell-Invasion.singularity\r +\r + The path where these files are stored **MUST be exported in the `PERMEDCOE_IMAGES`** environment variable.\r +\r + > :warning: **TIP**: These containers can be built manually as follows (be patient since some of them may take some time):\r + 1. Clone the `BuildingBlocks` repository\r + ```bash\r + git clone https://github.com/PerMedCoE/BuildingBlocks.git\r + ```\r + 2. Build the required Building Block images\r + ```bash\r + cd BuildingBlocks/Resources/images\r + sudo singularity build PhysiCell-Invasion.sif PhysiCell-Invasion.singularity\r + cd ../../..\r + ```\r +\r +**If using PyCOMPSs in local PC** (make sure that PyCOMPSs in installed):\r +\r +4. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflows/PyCOMPSs\r + ```\r +\r +5. Execute `./run.sh`\r +\r +**If using Snakemake in local PC** (make sure that SnakeMake is installed):\r +\r +4. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflows/SnakeMake\r + ```\r +\r +5. Execute `./run.sh`\r + > **TIP**: If you want to run the workflow with a different dataset, please update the `run.sh` script setting the `dataset` variable to the new dataset folder and their file names.\r +\r +\r +### MareNostrum 4\r +\r +This section explains the requirements and usage for the Cancer Invasion Workflow in the MareNostrum 4 supercomputer.\r +\r +#### Requirements in MN4\r +\r +- Access to MN4\r +\r +All Building Blocks are already installed in MN4, and the Cancer Invasion Workflow available.\r +\r +#### Usage steps in MN4\r +\r +1. 
Load the `COMPSs`, `Singularity` and `permedcoe` modules\r +\r + ```bash\r + export COMPSS_PYTHON_VERSION=3\r + module load COMPSs/3.1\r + module load singularity/3.5.2\r + module use /apps/modules/modulefiles/tools/COMPSs/libraries\r + module load permedcoe\r + ```\r +\r + > **TIP**: Include the loading into your `${HOME}/.bashrc` file to load it automatically on the session start.\r +\r + This commands will load COMPSs and the permedcoe package which provides all necessary dependencies, as well as the path to the singularity container images (`PERMEDCOE_IMAGES` environment variable) and testing dataset (`CANCERINVASIONWORKFLOW_DATASET` environment variable).\r +\r +2. Get a copy of the pilot workflow into your desired folder\r +\r + ```bash\r + mkdir desired_folder\r + cd desired_folder\r + get_cancerinvasionworkflow\r + ```\r +\r +3. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflow/PyCOMPSs\r + ```\r +\r +4. Execute `./launch.sh`\r +\r + This command will launch a job into the job queuing system (SLURM) requesting 2 nodes (one node acting half master and half worker, and other full worker node) for 20 minutes, and is prepared to use the singularity images that are already deployed in MN4 (located into the `PERMEDCOE_IMAGES` environment variable). It uses the dataset located into `../../Resources/data` folder.\r +\r + > :warning: **TIP**: If you want to run the workflow with a different dataset, please edit the `launch.sh` script and define the appropriate dataset path.\r +\r + After the execution, a `results` folder will be available with with Cancer Invasion Workflow results.\r +\r +### Mahti or Puhti\r +\r +This section explains how to run the Cancer Invasion workflow on CSC supercomputers using SnakeMake.\r +\r +#### Requirements\r +\r +- Install snakemake (or check if there is a version installed using `module spider snakemake`)\r +- Install workflow, using the same steps as for the local machine. 
With the exception that containers have to be built elsewhere.\r +\r +#### Steps\r +\r +\r +1. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflow/SnakeMake\r + ```\r +\r +2. Edit `launch.sh` with the correct partition, account, and resource specifications. \r +\r +3. Execute `./launch.sh`\r +\r + > :warning: Snakemake provides a `--cluster` flag, but this functionality should be avoided as it's really not suited for HPC systems.\r +\r +## License\r +\r +[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\r +\r +## Contact\r +\r +\r +\r +This software has been developed for the [PerMedCoE project](https://permedcoe.eu/), funded by the European Commission (EU H2020 [951773](https://cordis.europa.eu/project/id/951773)).\r +\r +![](https://permedcoe.eu/wp-content/uploads/2020/11/logo_1.png "PerMedCoE")\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/477?version=1" ; + schema1:isBasedOn "https://github.com/PerMedCoE/cancer-invasion-workflow" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PerMedCoE Cancer Invasion" ; + schema1:sdDatePublished "2024-08-05 10:30:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/477/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 927 ; + schema1:dateCreated "2023-05-23T12:10:39Z" ; + schema1:dateModified "2023-05-23T12:34:23Z" ; + schema1:description """# Cancer Invasion Workflow\r +\r +## Table of Contents\r +\r +- [Cancer Invasion Workflow](#cancer-invasion-workflow)\r + - [Table of Contents](#table-of-contents)\r + - [Description](#description)\r + - [Contents](#contents)\r + - [Building Blocks](#building-blocks)\r + - [Workflows](#workflows)\r + - [Resources](#resources)\r + - [Tests](#tests)\r + - [Instructions](#instructions)\r + - [Local machine](#local-machine)\r + - [Requirements](#requirements)\r + - [Usage steps](#usage-steps)\r + - [MareNostrum 4](#marenostrum-4)\r + - [Requirements in MN4](#requirements-in-mn4)\r + - [Usage steps in MN4](#usage-steps-in-mn4)\r + - [Mahti or Puhti](#mahti-or-puhti)\r + - [Requirements](#requirements)\r + - [Steps](#steps)\r + - [License](#license)\r + - [Contact](#contact)\r +\r +## Description\r +\r +Uses multiscale simulations to describe cancer progression into invasion.\r +\r +The workflow uses the following building blocks, described in order of execution:\r +\r +1. 
PhysiBoSS-Invasion\r +\r +For details on individual workflow steps, see the user documentation for each building block.\r +\r +[`GitHub repository`]()\r +\r +\r +## Contents\r +\r +### Building Blocks\r +\r +The ``BuildingBlocks`` folder contains the script to install the\r +Building Blocks used in the Cancer Invasion Workflow.\r +\r +### Workflows\r +\r +The ``Workflow`` folder contains the workflows implementations.\r +\r +Currently contains the implementation using PyCOMPSs and Snakemake (in progress).\r +\r +### Resources\r +\r +The ``Resources`` folder contains dataset files.\r +\r +### Tests\r +\r +The ``Tests`` folder contains the scripts that run each Building Block\r +used in the workflow for the given small dataset.\r +They can be executed individually for testing purposes.\r +\r +## Instructions\r +\r +### Local machine\r +\r +This section explains the requirements and usage for the Cancer Invasion Workflow in a laptop or desktop computer.\r +\r +#### Requirements\r +\r +- [`permedcoe`](https://github.com/PerMedCoE/permedcoe) package\r +- [PyCOMPSs](https://pycompss.readthedocs.io/en/stable/Sections/00_Quickstart.html) / [Snakemake](https://snakemake.readthedocs.io/en/stable/)\r +- [Singularity](https://sylabs.io/guides/3.0/user-guide/installation.html)\r +\r +#### Usage steps\r +\r +1. Clone this repository:\r +\r + ```bash\r + git clone https://github.com/PerMedCoE/cancer-invasion-workflow\r + ```\r +\r +2. Install the Building Blocks required for the Cancer Invasion Workflow:\r +\r + ```bash\r + cancer-invasion-workflow/BuildingBlocks/./install_BBs.sh\r + ```\r +\r +3. 
Get the required Building Block images from the project [B2DROP](https://b2drop.bsc.es/index.php/f/444350):\r +\r + - Required images:\r + - PhysiCell-Invasion.singularity\r +\r + The path where these files are stored **MUST be exported in the `PERMEDCOE_IMAGES`** environment variable.\r +\r + > :warning: **TIP**: These containers can be built manually as follows (be patient since some of them may take some time):\r + 1. Clone the `BuildingBlocks` repository\r + ```bash\r + git clone https://github.com/PerMedCoE/BuildingBlocks.git\r + ```\r + 2. Build the required Building Block images\r + ```bash\r + cd BuildingBlocks/Resources/images\r + sudo singularity build PhysiCell-Invasion.sif PhysiCell-Invasion.singularity\r + cd ../../..\r + ```\r +\r +**If using PyCOMPSs in local PC** (make sure that PyCOMPSs in installed):\r +\r +4. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflows/PyCOMPSs\r + ```\r +\r +5. Execute `./run.sh`\r +\r +**If using Snakemake in local PC** (make sure that SnakeMake is installed):\r +\r +4. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflows/SnakeMake\r + ```\r +\r +5. Execute `./run.sh`\r + > **TIP**: If you want to run the workflow with a different dataset, please update the `run.sh` script setting the `dataset` variable to the new dataset folder and their file names.\r +\r +\r +### MareNostrum 4\r +\r +This section explains the requirements and usage for the Cancer Invasion Workflow in the MareNostrum 4 supercomputer.\r +\r +#### Requirements in MN4\r +\r +- Access to MN4\r +\r +All Building Blocks are already installed in MN4, and the Cancer Invasion Workflow available.\r +\r +#### Usage steps in MN4\r +\r +1. 
Load the `COMPSs`, `Singularity` and `permedcoe` modules\r +\r + ```bash\r + export COMPSS_PYTHON_VERSION=3\r + module load COMPSs/3.1\r + module load singularity/3.5.2\r + module use /apps/modules/modulefiles/tools/COMPSs/libraries\r + module load permedcoe\r + ```\r +\r + > **TIP**: Include the loading into your `${HOME}/.bashrc` file to load it automatically on the session start.\r +\r + This commands will load COMPSs and the permedcoe package which provides all necessary dependencies, as well as the path to the singularity container images (`PERMEDCOE_IMAGES` environment variable) and testing dataset (`CANCERINVASIONWORKFLOW_DATASET` environment variable).\r +\r +2. Get a copy of the pilot workflow into your desired folder\r +\r + ```bash\r + mkdir desired_folder\r + cd desired_folder\r + get_cancerinvasionworkflow\r + ```\r +\r +3. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflow/PyCOMPSs\r + ```\r +\r +4. Execute `./launch.sh`\r +\r + This command will launch a job into the job queuing system (SLURM) requesting 2 nodes (one node acting half master and half worker, and other full worker node) for 20 minutes, and is prepared to use the singularity images that are already deployed in MN4 (located into the `PERMEDCOE_IMAGES` environment variable). It uses the dataset located into `../../Resources/data` folder.\r +\r + > :warning: **TIP**: If you want to run the workflow with a different dataset, please edit the `launch.sh` script and define the appropriate dataset path.\r +\r + After the execution, a `results` folder will be available with with Cancer Invasion Workflow results.\r +\r +### Mahti or Puhti\r +\r +This section explains how to run the Cancer Invasion workflow on CSC supercomputers using SnakeMake.\r +\r +#### Requirements\r +\r +- Install snakemake (or check if there is a version installed using `module spider snakemake`)\r +- Install workflow, using the same steps as for the local machine. 
With the exception that containers have to be built elsewhere.\r +\r +#### Steps\r +\r +\r +1. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflow/SnakeMake\r + ```\r +\r +2. Edit `launch.sh` with the correct partition, account, and resource specifications. \r +\r +3. Execute `./launch.sh`\r +\r + > :warning: Snakemake provides a `--cluster` flag, but this functionality should be avoided as it's really not suited for HPC systems.\r +\r +## License\r +\r +[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\r +\r +## Contact\r +\r +\r +\r +This software has been developed for the [PerMedCoE project](https://permedcoe.eu/), funded by the European Commission (EU H2020 [951773](https://cordis.europa.eu/project/id/951773)).\r +\r +![](https://permedcoe.eu/wp-content/uploads/2020/11/logo_1.png "PerMedCoE")\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "PerMedCoE Cancer Invasion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/477?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """`atavide` is a complete workflow for metagenomics data analysis, including QC/QA, optional host removal, assembly and cross-assembly, and individual read based annotations. We have also built in some advanced analytics including tools to assign annotations from reads to contigs, and to generate metagenome-assembled genomes in several different ways, giving you the power to explore your data!\r +\r +`atavide` is 100% snakemake and conda, so you only need to install the snakemake workflow, and then everything else will be installed with conda.\r +\r +Steps:\r +1. QC/QA with [prinseq++](https://github.com/Adrian-Cantu/PRINSEQ-plus-plus)\r +2. 
optional host removal using bowtie2 and samtools, [as described previously](https://edwards.flinders.edu.au/command-line-deconseq/). To enable this, you need to provide a path to the host db and a host db.\r +\r +Metagenome assembly\r +1. pairwise assembly of each sample using [megahit](https://github.com/voutcn/megahit)\r +2. extraction of all reads that do not assemble using samtools flags\r +3. assembly of all unassembled reads using [megahit](https://github.com/voutcn/megahit)\r +4. compilation of _all_ contigs into a single unified set using [Flye](https://github.com/fenderglass/Flye)\r +5. comparison of reads -> contigs to generate coverage\r +\r +MAG creation\r +1. [metabat](https://bitbucket.org/berkeleylab/metabat/src/master/)\r +2. [concoct](https://github.com/BinPro/CONCOCT)\r +3. Pairwise comparisons using [turbocor](https://github.com/dcjones/turbocor) followed by clustering\r +\r +Read-based annotations\r +1. [Kraken2](https://ccb.jhu.edu/software/kraken2/)\r +2. [singlem](https://github.com/wwood/singlem)\r +3. [SUPER-focus](https://github.com/metageni/SUPER-FOCUS)\r +4. [FOCUS](https://github.com/metageni/FOCUS)\r +\r +Want something else added to the suite? File an issue on github and we'll add it ASAP!\r +\r +### Installation\r +\r +You will need to install\r +1. The NCBI taxonomy database somewhere\r +2. The superfocus databases somewhere, and set the SUPERFOCUS_DB environmental variable\r +\r +Everything else should install automatically.""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.241.1" ; + schema1:isBasedOn "https://github.com/linsalrob/atavide" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for atavide" ; + schema1:sdDatePublished "2024-08-05 10:32:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/241/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5948 ; + schema1:dateCreated "2021-11-21T05:26:02Z" ; + schema1:dateModified "2023-01-16T13:55:08Z" ; + schema1:description """`atavide` is a complete workflow for metagenomics data analysis, including QC/QA, optional host removal, assembly and cross-assembly, and individual read based annotations. We have also built in some advanced analytics including tools to assign annotations from reads to contigs, and to generate metagenome-assembled genomes in several different ways, giving you the power to explore your data!\r +\r +`atavide` is 100% snakemake and conda, so you only need to install the snakemake workflow, and then everything else will be installed with conda.\r +\r +Steps:\r +1. QC/QA with [prinseq++](https://github.com/Adrian-Cantu/PRINSEQ-plus-plus)\r +2. optional host removal using bowtie2 and samtools, [as described previously](https://edwards.flinders.edu.au/command-line-deconseq/). To enable this, you need to provide a path to the host db and a host db.\r +\r +Metagenome assembly\r +1. pairwise assembly of each sample using [megahit](https://github.com/voutcn/megahit)\r +2. extraction of all reads that do not assemble using samtools flags\r +3. assembly of all unassembled reads using [megahit](https://github.com/voutcn/megahit)\r +4. compilation of _all_ contigs into a single unified set using [Flye](https://github.com/fenderglass/Flye)\r +5. comparison of reads -> contigs to generate coverage\r +\r +MAG creation\r +1. [metabat](https://bitbucket.org/berkeleylab/metabat/src/master/)\r +2. [concoct](https://github.com/BinPro/CONCOCT)\r +3. Pairwise comparisons using [turbocor](https://github.com/dcjones/turbocor) followed by clustering\r +\r +Read-based annotations\r +1. [Kraken2](https://ccb.jhu.edu/software/kraken2/)\r +2. [singlem](https://github.com/wwood/singlem)\r +3. 
[SUPER-focus](https://github.com/metageni/SUPER-FOCUS)\r +4. [FOCUS](https://github.com/metageni/FOCUS)\r +\r +Want something else added to the suite? File an issue on github and we'll add it ASAP!\r +\r +### Installation\r +\r +You will need to install\r +1. The NCBI taxonomy database somewhere\r +2. The superfocus databases somewhere, and set the SUPERFOCUS_DB environmental variable\r +\r +Everything else should install automatically.""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "atavide" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/241?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=13" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-08-05 10:23:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=13" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11175 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:50Z" ; + schema1:dateModified "2024-06-11T12:54:50Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=13" ; + schema1:version 13 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """### Workflow for Metagenomics from bins to metabolic models (GEMs)\r +\r +**Summary**\r + - Prodigal gene prediction\r + - CarveMe genome scale metabolic model reconstruction\r + - MEMOTE for metabolic model testing\r + - SMETANA Species METabolic interaction ANAlysis\r +\r +Other UNLOCK workflows on WorkflowHub: https://workflowhub.eu/projects/16/workflows?view=default
\r +\r +**All tool CWL files and other workflows can be found here:**
\r +Tools: https://gitlab.com/m-unlock/cwl
\r +Workflows: https://gitlab.com/m-unlock/cwl/workflows\r +\r +**How to setup and use an UNLOCK workflow:**
\r +https://m-unlock.gitlab.io/docs/setup/setup.html
\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/372?version=1" ; + schema1:isBasedOn "https://gitlab.com/m-unlock/cwl/-/blob/master/cwl/workflows/workflow_metagenomics_GEM.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Metagenomic GEMs from Assembly" ; + schema1:sdDatePublished "2024-08-05 10:31:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/372/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 29095 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7276 ; + schema1:creator , + ; + schema1:dateCreated "2022-07-07T08:23:15Z" ; + schema1:dateModified "2023-01-16T14:01:55Z" ; + schema1:description """### Workflow for Metagenomics from bins to metabolic models (GEMs)\r +\r +**Summary**\r + - Prodigal gene prediction\r + - CarveMe genome scale metabolic model reconstruction\r + - MEMOTE for metabolic model testing\r + - SMETANA Species METabolic interaction ANAlysis\r +\r +Other UNLOCK workflows on WorkflowHub: https://workflowhub.eu/projects/16/workflows?view=default
\r +\r +**All tool CWL files and other workflows can be found here:**
\r +Tools: https://gitlab.com/m-unlock/cwl
\r +Workflows: https://gitlab.com/m-unlock/cwl/workflows\r +\r +**How to setup and use an UNLOCK workflow:**
\r +https://m-unlock.gitlab.io/docs/setup/setup.html
\r +""" ; + schema1:image ; + schema1:keywords "Metagenomics, Genomics, GEM, carveme, memote" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Metagenomic GEMs from Assembly" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/372?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + ; + ns1:output , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2024-07-10T16:00:36.610549" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/bacterial-genome-assembly" ; + schema1:license "GPL-3.0-or-later" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "bacterial-genome-assembly/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . + + a schema1:Dataset ; + schema1:datePublished "2022-10-14T16:18:18.328560" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/ChIPseq_PE" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "ChIPseq_PE/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.11" . + + a schema1:Dataset ; + schema1:datePublished "2024-03-06T13:54:12.142581" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-pe-illumina-artic-variant-calling/COVID-19-PE-ARTIC-ILLUMINA" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Loads a single cell counts matrix into an annData format - adding a column called sample with the sample name. 
(Input format - matrix.mtx, features.tsv and barcodes.tsv)" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/512?version=2" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq: Load counts matrix" ; + schema1:sdDatePublished "2024-08-05 10:24:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/512/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13569 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-05-30T05:44:52Z" ; + schema1:dateModified "2024-05-30T05:44:52Z" ; + schema1:description "Loads a single cell counts matrix into an annData format - adding a column called sample with the sample name. (Input format - matrix.mtx, features.tsv and barcodes.tsv)" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/512?version=1" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "scRNAseq: Load counts matrix" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/512?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + ; + ns1:output . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Quantitative Mass Spectrometry nf-core workflow" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1013?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/quantms" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/quantms" ; + schema1:sdDatePublished "2024-08-05 10:23:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1013/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14195 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:08Z" ; + schema1:dateModified "2024-06-11T12:55:08Z" ; + schema1:description "Quantitative Mass Spectrometry nf-core workflow" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1013?version=3" ; + schema1:keywords "dia, lfq, mass-spec, mass-spectrometry, openms, proteogenomics, Proteomics, tmt" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/quantms" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1013?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Mutations Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.290.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_md_setup_mutations/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein MD Setup tutorial with mutations" ; + schema1:sdDatePublished "2024-08-05 10:30:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/290/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7377 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-17T13:25:49Z" ; + schema1:dateModified "2022-04-11T09:29:46Z" ; + schema1:description """# Mutations Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/290?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein MD Setup tutorial with mutations" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_md_setup_mutations/python/workflow.py" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/278?version=5" ; + schema1:isBasedOn "https://gitlab.bsc.es/lrodrig1/structuralvariants_poc/-/tree/1.1.3/structuralvariants/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNV_pipeline" ; + schema1:sdDatePublished "2024-08-05 10:31:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/278/ro_crate?version=5" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9858 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9607 ; + schema1:creator , + ; + schema1:dateCreated "2022-05-10T09:04:10Z" ; + schema1:dateModified "2022-05-10T09:04:10Z" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/278?version=10" ; + schema1:keywords "cancer, CODEX2, ExomeDepth, manta, TransBioNet, variant calling, GRIDSS, structural variants" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CNV_pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/278?version=5" ; + schema1:version 5 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 74039 . + + a schema1:Dataset ; + schema1:datePublished "2024-05-02T14:42:36.593454" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-Trio-phasing-VGP5/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/972?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/circdna" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/circdna" ; + schema1:sdDatePublished "2024-08-05 10:24:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/972/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8577 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:45Z" ; + schema1:dateModified "2024-06-11T12:54:45Z" ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/972?version=7" ; + schema1:keywords "ampliconarchitect, ampliconsuite, circle-seq, circular, DNA, eccdna, ecdna, extrachromosomal-circular-dna, Genomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/circdna" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/972?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Molecular Structure Checking using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **checking** a **molecular structure** before using it as an input for a **Molecular Dynamics** simulation. The workflow uses the **BioExcel Building Blocks library (biobb)**. The particular structure used is the crystal structure of **human Adenylate Kinase 1A (AK1A)**, in complex with the **AP5A inhibitor** (PDB code [1Z83](https://www.rcsb.org/structure/1z83)). 
\r +\r +**Structure checking** is a key step before setting up a protein system for **simulations**. A number of **common issues** found in structures at **Protein Data Bank** may compromise the success of the **simulation**, or may suggest that longer **equilibration** procedures are necessary.\r +\r +The **workflow** shows how to:\r +\r +- Run **basic manipulations on structures** (selection of models, chains, alternative locations\r +- Detect and fix **amide assignments** and **wrong chiralities**\r +- Detect and fix **protein backbone** issues (missing fragments, and atoms, capping)\r +- Detect and fix **missing side-chain atoms**\r +- **Add hydrogen atoms** according to several criteria\r +- Detect and classify **atomic clashes**\r +- Detect possible **disulfide bonds (SS)**\r +\r +An implementation of this workflow in a **web-based Graphical User Interface (GUI)** can be found in the [https://mmb.irbbarcelona.org/biobb-wfs/](https://mmb.irbbarcelona.org/biobb-wfs/) server (see [https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check](https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check)).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier 
"https://doi.org/10.48546/workflowhub.workflow.775.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_structure_checking" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Molecular Structure Checking" ; + schema1:sdDatePublished "2024-08-05 10:25:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/775/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 47048 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-05T08:36:14Z" ; + schema1:dateModified "2024-03-05T08:38:46Z" ; + schema1:description """# Molecular Structure Checking using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **checking** a **molecular structure** before using it as an input for a **Molecular Dynamics** simulation. The workflow uses the **BioExcel Building Blocks library (biobb)**. The particular structure used is the crystal structure of **human Adenylate Kinase 1A (AK1A)**, in complex with the **AP5A inhibitor** (PDB code [1Z83](https://www.rcsb.org/structure/1z83)). \r +\r +**Structure checking** is a key step before setting up a protein system for **simulations**. 
A number of **common issues** found in structures at **Protein Data Bank** may compromise the success of the **simulation**, or may suggest that longer **equilibration** procedures are necessary.\r +\r +The **workflow** shows how to:\r +\r +- Run **basic manipulations on structures** (selection of models, chains, alternative locations\r +- Detect and fix **amide assignments** and **wrong chiralities**\r +- Detect and fix **protein backbone** issues (missing fragments, and atoms, capping)\r +- Detect and fix **missing side-chain atoms**\r +- **Add hydrogen atoms** according to several criteria\r +- Detect and classify **atomic clashes**\r +- Detect possible **disulfide bonds (SS)**\r +\r +An implementation of this workflow in a **web-based Graphical User Interface (GUI)** can be found in the [https://mmb.irbbarcelona.org/biobb-wfs/](https://mmb.irbbarcelona.org/biobb-wfs/) server (see [https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check](https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check)).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Molecular 
Structure Checking" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_structure_checking/blob/main/biobb_wf_structure_checking/notebooks/biobb_wf_structure_checking.ipynb" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1025?version=11" ; + schema1:isBasedOn "https://github.com/nf-core/taxprofiler" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/taxprofiler" ; + schema1:sdDatePublished "2024-08-05 10:23:10 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1025/ro_crate?version=11" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16232 ; + schema1:creator , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-21T03:02:43Z" ; + schema1:dateModified "2024-06-21T03:02:43Z" ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1025?version=10" ; + schema1:keywords "Classification, illumina, long-reads, Metagenomics, microbiome, nanopore, pathogen, Profiling, shotgun, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/taxprofiler" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1025?version=11" ; + schema1:version 11 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-08-05 10:23:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=23" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 21521 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:16Z" ; + schema1:dateModified "2024-06-11T12:55:16Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:version 23 . + + a schema1:Dataset ; + schema1:datePublished "2022-06-03T10:07:46.335781" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/generic-variant-calling-wgs-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "generic-variant-calling-wgs-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.10" . 
+ + a schema1:Dataset ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling" ; + schema1:mainEntity ; + schema1:name "COVID-19-PE-ARTIC-ILLUMINA (v0.4)" ; + schema1:sdDatePublished "2021-06-19 03:00:40 +0100" ; + schema1:softwareVersion "v0.4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 64620 ; + schema1:name "COVID-19-PE-ARTIC-ILLUMINA" ; + schema1:programmingLanguage . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.131.1" ; + schema1:isBasedOn 
"https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/131/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 76543 ; + schema1:creator , + ; + schema1:dateCreated "2021-06-30T12:18:15Z" ; + schema1:dateModified "2022-09-15T12:31:15Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/131?version=4" ; + schema1:isPartOf , + ; + 
schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_amber_md_setup/8bcda75405183a84476acd7ba733e4cb666ce397/biobb_wf_amber_md_setup/notebooks/mdsetup_lig/biobb_amber_complex_setup_notebook.ipynb" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + ; + schema1:description "This workflow generates binding scores that correlate well with binding affinities using an additional tool TransFS, developed at Oxford University. More info can be found at https://covid19.galaxyproject.org/cheminformatics/" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/16?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Cheminformatics - TransFS scoring" ; + schema1:sdDatePublished "2024-08-05 10:33:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/16/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1909 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7581 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T14:51:59Z" ; + schema1:dateModified "2023-01-16T13:41:04Z" ; + schema1:description "This workflow generates binding scores that correlate well with binding affinities using an additional tool TransFS, developed at Oxford University. 
More info can be found at https://covid19.galaxyproject.org/cheminformatics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Cheminformatics - TransFS scoring" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/16?version=1" ; + schema1:version 1 ; + ns1:input , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 4174 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 21106 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/987?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/funcscan" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/funcscan" ; + schema1:sdDatePublished "2024-08-05 10:23:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/987/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15247 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:53Z" ; + schema1:dateModified "2024-06-11T12:54:53Z" ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/987?version=8" ; + schema1:keywords "amp, AMR, antibiotic-resistance, antimicrobial-peptides, antimicrobial-resistance-genes, arg, Assembly, bgc, biosynthetic-gene-clusters, contigs, function, Metagenomics, natural-products, screening, secondary-metabolites" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/funcscan" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/987?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Protein 3D structure prediction pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1011?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/proteinfold" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/proteinfold" ; + schema1:sdDatePublished "2024-08-05 10:23:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1011/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14542 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-26T03:02:45Z" ; + schema1:dateModified "2024-06-26T03:02:45Z" ; + schema1:description "Protein 3D structure prediction pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1011?version=2" ; + schema1:keywords "alphafold2, protein-fold-prediction, protein-folding, protein-sequences, protein-structure" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/proteinfold" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1011?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 
2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.262.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_abc_md_setup/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL ABC MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:32:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/262/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 229479 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 36638 ; + schema1:creator , + ; + schema1:dateCreated "2022-01-11T08:14:55Z" ; + schema1:dateModified "2023-06-12T08:27:38Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 
2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/262?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL ABC MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/262?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# ![nf-core/ampliseq](docs/images/nf-core-ampliseq_logo.png) + +**16S rRNA amplicon sequencing analysis workflow using QIIME2**. 
+ +[![nf-core](https://img.shields.io/badge/nf--core-pipeline-brightgreen.svg)](https://nf-co.re/) +[![DOI](https://zenodo.org/badge/150448201.svg)](https://zenodo.org/badge/latestdoi/150448201) +[![Cite Preprint](https://img.shields.io/badge/Cite%20Us!-Cite%20Publication-important)](https://doi.org/10.3389/fmicb.2020.550420) + +[![GitHub Actions CI Status](https://github.com/nf-core/ampliseq/workflows/nf-core%20CI/badge.svg)](https://github.com/nf-core/ampliseq/actions) +[![GitHub Actions Linting Status](https://github.com/nf-core/ampliseq/workflows/nf-core%20linting/badge.svg)](https://github.com/nf-core/ampliseq/actions) +[![Nextflow](https://img.shields.io/badge/nextflow-%E2%89%A520.04.0-brightgreen.svg)](https://www.nextflow.io/) + +[![install with bioconda](https://img.shields.io/badge/install%20with-bioconda-brightgreen.svg)](https://bioconda.github.io/) +[![Docker](https://img.shields.io/docker/automated/nfcore/ampliseq.svg)](https://hub.docker.com/r/nfcore/ampliseq) +[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23ampliseq-4A154B?logo=slack)](https://nfcore.slack.com/channels/ampliseq) + +## Introduction + +**nfcore/ampliseq** is a bioinformatics analysis pipeline used for 16S rRNA or ITS amplicon sequencing data (currently supported is Illumina paired end or PacBio). + +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with docker containers making installation trivial and results highly reproducible. + +## Quick Start + +1. Install [`nextflow`](https://nf-co.re/usage/installation) + +2. 
Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) or [`Podman`](https://podman.io/) for full pipeline reproducibility _(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_ + +3. Download the pipeline and test it on a minimal dataset with a single command: + + ```bash + nextflow run nf-core/ampliseq -profile test, + ``` + + > Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment. + +4. Start running your own analysis! + + ```bash + nextflow run nf-core/ampliseq -profile --input "data" --FW_primer GTGYCAGCMGCCGCGGTAA --RV_primer GGACTACNVGGGTWTCTAAT --metadata "data/Metadata.tsv" + ``` + +See [usage docs](https://nf-co.re/ampliseq/usage) and [parameter docs](https://nf-co.re/ampliseq/parameters) for all of the available options when running the pipeline. 
+ +## Pipeline Summary + +By default, the pipeline currently performs the following: + +* Sequencing quality control ([FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)) +* Trimming of reads ([Cutadapt](https://journal.embnet.org/index.php/embnetjournal/article/view/200)) +* Illumina read processing with [QIIME2](https://www.nature.com/articles/s41587-019-0209-9) +* Infer Amplicon Sequence Variants (ASVs) ([DADA2](https://doi.org/10.1038/nmeth.3869)) +* Taxonomical classification based on [SILVA](https://www.arb-silva.de/) [v132](https://www.arb-silva.de/documentation/release-132/) or [UNITE](https://unite.ut.ee/) database +* excludes unwanted taxa, produces absolute and relative feature/taxa count tables and plots, plots alpha rarefaction curves, computes alpha and beta diversity indices and plots thereof ([QIIME2](https://www.nature.com/articles/s41587-019-0209-9)) +* Calls differentially abundant taxa ([ANCOM](https://www.ncbi.nlm.nih.gov/pubmed/26028277)) +* Overall pipeline run summaries ([MultiQC](https://multiqc.info/)) + +## Documentation + +The nf-core/ampliseq pipeline comes with documentation about the pipeline: [usage](https://nf-co.re/ampliseq/usage) and [output](https://nf-co.re/ampliseq/output). + +## Credits + +nf-core/ampliseq was originally written by Daniel Straub ([@d4straub](https://github.com/d4straub)) and Alexander Peltzer ([@apeltzer](https://github.com/apeltzer)) for use at the [Quantitative Biology Center (QBiC)](http://www.qbic.life) and [Microbial Ecology, Center for Applied Geosciences](http://www.uni-tuebingen.de/de/104325), part of Eberhard Karls Universität Tübingen (Germany). 
+ +We thank the following people for their extensive assistance in the development of this pipeline (in alphabetical order): + +* [Daniel Lundin](https://github.com/erikrikarddaniel) +* [Diego Brambilla](https://github.com/DiegoBrambilla) +* [Emelie Nilsson](https://github.com/emnilsson) +* [Jeanette Tångrot](https://github.com/jtangrot) +* [Sabrina Krakau](https://github.com/skrakau) + +## Contributions and Support + +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md). + +For further information or help, don't hesitate to get in touch on the [Slack `#ampliseq` channel](https://nfcore.slack.com/channels/ampliseq) (you can join with [this invite](https://nf-co.re/join/slack)). + +## Citations + +If you use `nf-core/ampliseq` for your analysis, please cite the `ampliseq` article as follows: +> Daniel Straub, Nia Blackwell, Adrian Langarica-Fuentes, Alexander Peltzer, Sven Nahnsen, Sara Kleindienst **Interpretations of Environmental Microbial Community Studies Are Biased by the Selected 16S rRNA (Gene) Amplicon Sequencing Pipeline** *Frontiers in Microbiology* 2020, 11:2652 [doi: 10.3389/fmicb.2020.550420](https://doi.org/10.3389/fmicb.2020.550420). + +You can cite the `nf-core/ampliseq` zenodo record for a specific version using the following [doi: 10.5281/zenodo.1493841](https://zenodo.org/badge/latestdoi/150448201) + +You can cite the `nf-core` publication as follows: + +> **The nf-core framework for community-curated bioinformatics pipelines.** +> +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen. +> +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x). 
+> ReadCube: [Full Access Link](https://rdcu.be/b1GjZ) + + + + +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-08-05 10:22:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6195 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:38Z" ; + schema1:dateModified "2024-06-11T12:54:39Z" ; + schema1:description """# ![nf-core/ampliseq](docs/images/nf-core-ampliseq_logo.png) + +**16S rRNA amplicon sequencing analysis workflow using QIIME2**. + +[![nf-core](https://img.shields.io/badge/nf--core-pipeline-brightgreen.svg)](https://nf-co.re/) +[![DOI](https://zenodo.org/badge/150448201.svg)](https://zenodo.org/badge/latestdoi/150448201) +[![Cite Preprint](https://img.shields.io/badge/Cite%20Us!-Cite%20Publication-important)](https://doi.org/10.3389/fmicb.2020.550420) + +[![GitHub Actions CI Status](https://github.com/nf-core/ampliseq/workflows/nf-core%20CI/badge.svg)](https://github.com/nf-core/ampliseq/actions) +[![GitHub Actions Linting Status](https://github.com/nf-core/ampliseq/workflows/nf-core%20linting/badge.svg)](https://github.com/nf-core/ampliseq/actions) +[![Nextflow](https://img.shields.io/badge/nextflow-%E2%89%A520.04.0-brightgreen.svg)](https://www.nextflow.io/) + +[![install with bioconda](https://img.shields.io/badge/install%20with-bioconda-brightgreen.svg)](https://bioconda.github.io/) +[![Docker](https://img.shields.io/docker/automated/nfcore/ampliseq.svg)](https://hub.docker.com/r/nfcore/ampliseq) +[![Get help on 
Slack](http://img.shields.io/badge/slack-nf--core%20%23ampliseq-4A154B?logo=slack)](https://nfcore.slack.com/channels/ampliseq) + +## Introduction + +**nfcore/ampliseq** is a bioinformatics analysis pipeline used for 16S rRNA or ITS amplicon sequencing data (currently supported is Illumina paired end or PacBio). + +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with docker containers making installation trivial and results highly reproducible. + +## Quick Start + +1. Install [`nextflow`](https://nf-co.re/usage/installation) + +2. Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) or [`Podman`](https://podman.io/) for full pipeline reproducibility _(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_ + +3. Download the pipeline and test it on a minimal dataset with a single command: + + ```bash + nextflow run nf-core/ampliseq -profile test, + ``` + + > Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment. + +4. Start running your own analysis! + + ```bash + nextflow run nf-core/ampliseq -profile --input "data" --FW_primer GTGYCAGCMGCCGCGGTAA --RV_primer GGACTACNVGGGTWTCTAAT --metadata "data/Metadata.tsv" + ``` + +See [usage docs](https://nf-co.re/ampliseq/usage) and [parameter docs](https://nf-co.re/ampliseq/parameters) for all of the available options when running the pipeline. 
+ +## Pipeline Summary + +By default, the pipeline currently performs the following: + +* Sequencing quality control ([FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)) +* Trimming of reads ([Cutadapt](https://journal.embnet.org/index.php/embnetjournal/article/view/200)) +* Illumina read processing with [QIIME2](https://www.nature.com/articles/s41587-019-0209-9) +* Infer Amplicon Sequence Variants (ASVs) ([DADA2](https://doi.org/10.1038/nmeth.3869)) +* Taxonomical classification based on [SILVA](https://www.arb-silva.de/) [v132](https://www.arb-silva.de/documentation/release-132/) or [UNITE](https://unite.ut.ee/) database +* excludes unwanted taxa, produces absolute and relative feature/taxa count tables and plots, plots alpha rarefaction curves, computes alpha and beta diversity indices and plots thereof ([QIIME2](https://www.nature.com/articles/s41587-019-0209-9)) +* Calls differentially abundant taxa ([ANCOM](https://www.ncbi.nlm.nih.gov/pubmed/26028277)) +* Overall pipeline run summaries ([MultiQC](https://multiqc.info/)) + +## Documentation + +The nf-core/ampliseq pipeline comes with documentation about the pipeline: [usage](https://nf-co.re/ampliseq/usage) and [output](https://nf-co.re/ampliseq/output). + +## Credits + +nf-core/ampliseq was originally written by Daniel Straub ([@d4straub](https://github.com/d4straub)) and Alexander Peltzer ([@apeltzer](https://github.com/apeltzer)) for use at the [Quantitative Biology Center (QBiC)](http://www.qbic.life) and [Microbial Ecology, Center for Applied Geosciences](http://www.uni-tuebingen.de/de/104325), part of Eberhard Karls Universität Tübingen (Germany). 
+ +We thank the following people for their extensive assistance in the development of this pipeline (in alphabetical order): + +* [Daniel Lundin](https://github.com/erikrikarddaniel) +* [Diego Brambilla](https://github.com/DiegoBrambilla) +* [Emelie Nilsson](https://github.com/emnilsson) +* [Jeanette Tångrot](https://github.com/jtangrot) +* [Sabrina Krakau](https://github.com/skrakau) + +## Contributions and Support + +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md). + +For further information or help, don't hesitate to get in touch on the [Slack `#ampliseq` channel](https://nfcore.slack.com/channels/ampliseq) (you can join with [this invite](https://nf-co.re/join/slack)). + +## Citations + +If you use `nf-core/ampliseq` for your analysis, please cite the `ampliseq` article as follows: +> Daniel Straub, Nia Blackwell, Adrian Langarica-Fuentes, Alexander Peltzer, Sven Nahnsen, Sara Kleindienst **Interpretations of Environmental Microbial Community Studies Are Biased by the Selected 16S rRNA (Gene) Amplicon Sequencing Pipeline** *Frontiers in Microbiology* 2020, 11:2652 [doi: 10.3389/fmicb.2020.550420](https://doi.org/10.3389/fmicb.2020.550420). + +You can cite the `nf-core/ampliseq` zenodo record for a specific version using the following [doi: 10.5281/zenodo.1493841](https://zenodo.org/badge/latestdoi/150448201) + +You can cite the `nf-core` publication as follows: + +> **The nf-core framework for community-curated bioinformatics pipelines.** +> +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen. +> +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x). 
+> ReadCube: [Full Access Link](https://rdcu.be/b1GjZ) + + + + +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Genome-wide alternative splicing analysis v.2" ; + schema1:hasPart , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.482.3" ; + schema1:license "CC-BY-NC-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genome-wide alternative splicing analysis v.2" ; + schema1:sdDatePublished "2024-08-05 10:30:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/482/ro_crate?version=3" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 214419 . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 30115 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 119329 ; + schema1:creator ; + schema1:dateCreated "2023-06-11T11:53:05Z" ; + schema1:dateModified "2023-06-11T11:53:41Z" ; + schema1:description "Genome-wide alternative splicing analysis v.2" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/482?version=6" ; + schema1:keywords "Transcriptomics, Alternative splicing, isoform switching" ; + schema1:license "https://spdx.org/licenses/CC-BY-NC-4.0" ; + schema1:name "Genome-wide alternative splicing analysis v.2" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/482?version=3" ; + schema1:version 3 ; + ns1:input , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). 
\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.56.4" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_protein-complex_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:31:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/56/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 156554 ; + schema1:creator , + ; + schema1:dateCreated "2022-09-15T11:04:55Z" ; + schema1:dateModified "2023-01-16T13:44:51Z" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/56?version=6" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + 
schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_protein-complex_md_setup/master/biobb_wf_protein-complex_md_setup/notebooks/biobb_Protein-Complex_MDsetup_tutorial.ipynb" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + ; + schema1:description "This workflow is used form the preparation of protein and ligands for docking. More info can be found at https://covid19.galaxyproject.org/cheminformatics/" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/12?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Cheminformatics - Enumerate ligands for docking" ; + schema1:sdDatePublished "2024-08-05 10:33:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/12/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1801 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7160 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T14:17:44Z" ; + schema1:dateModified "2023-01-16T13:40:34Z" ; + schema1:description "This workflow is used form the preparation of protein and ligands for docking. More info can be found at https://covid19.galaxyproject.org/cheminformatics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Cheminformatics - Enumerate ligands for docking" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/12?version=1" ; + schema1:version 1 ; + ns1:input . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 3960 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +****\r +\r +About this workflow:\r +\r +* Repeat this workflow separately for datasets from different tissues. \r +* Inputs = collections of R1 files, and R2 files (all from a single tissue type). \r +* Runs FastQC with default settings, separately for raw reads R1 and R2 collections; all output to MultiQC. \r +* Runs Trimmomatic with initial ILLUMINACLIP step (using standard adapter sequence for TruSeq3 paired-ended), uses settings SLIDINGWINDOW:4:5 LEADING:5 TRAILING:5 MINLEN:25, retain paired (not unpaired) outputs. User can modify at runtime. \r +* Runs FastQC with default settings, separately for trimmed R1 and R2 collections; all output to MultiQC. \r +* From Trimmomatic output: concatenate all R1 reads; concatenate all R2 reads. \r +* Outputs = trimmed merged R1 file, trimmed merged R2 file. \r +* Log files from Trimmomatic to MultiQC, to summarise trimming results. \r +* Note: a known bug with MultiQC html output is that plot is labelled as "R1" reads, when it actually contains information from both R1 and R2 read sets - this is under investigation (and is due to a Trimmomatic output file labelling issue). \r +* MultiQC results table formatted to show % of reads retained after trimming, table included in workflow report. \r +* Note: a known bug is that sometimes the workflow report text resets to default text. 
To restore, look for an earlier workflow version with correct workflow report text, and copy and paste report text into current version. """ ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.876.1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for QC and trimming of RNAseq reads - TSI" ; + schema1:sdDatePublished "2024-08-05 10:24:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/876/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 40452 ; + schema1:creator , + ; + schema1:dateCreated "2024-05-08T06:39:28Z" ; + schema1:dateModified "2024-05-09T04:03:15Z" ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +****\r +\r +About this workflow:\r +\r +* Repeat this workflow separately for datasets from different tissues. \r +* Inputs = collections of R1 files, and R2 files (all from a single tissue type). \r +* Runs FastQC with default settings, separately for raw reads R1 and R2 collections; all output to MultiQC. \r +* Runs Trimmomatic with initial ILLUMINACLIP step (using standard adapter sequence for TruSeq3 paired-ended), uses settings SLIDINGWINDOW:4:5 LEADING:5 TRAILING:5 MINLEN:25, retain paired (not unpaired) outputs. User can modify at runtime. \r +* Runs FastQC with default settings, separately for trimmed R1 and R2 collections; all output to MultiQC. 
\r +* From Trimmomatic output: concatenate all R1 reads; concatenate all R2 reads. \r +* Outputs = trimmed merged R1 file, trimmed merged R2 file. \r +* Log files from Trimmomatic to MultiQC, to summarise trimming results. \r +* Note: a known bug with MultiQC html output is that plot is labelled as "R1" reads, when it actually contains information from both R1 and R2 read sets - this is under investigation (and is due to a Trimmomatic output file labelling issue). \r +* MultiQC results table formatted to show % of reads retained after trimming, table included in workflow report. \r +* Note: a known bug is that sometimes the workflow report text resets to default text. To restore, look for an earlier workflow version with correct workflow report text, and copy and paste report text into current version. """ ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "TSI-annotation" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "QC and trimming of RNAseq reads - TSI" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/876?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 2139822 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-03T15:31:41.978689" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-Trio-phasing-VGP5/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.17" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-08-05 10:22:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=16" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11863 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:11Z" ; + schema1:dateModified "2024-06-11T12:55:11Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:version 16 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# CWL-assembly\r +[![Codacy Badge](https://api.codacy.com/project/badge/Grade/684724bbc0134960ab41748f4a4b732f)](https://www.codacy.com/app/mb1069/CWL-assembly?utm_source=github.com&utm_medium=referral&utm_content=EBI-Metagenomics/CWL-assembly&utm_campaign=Badge_Grade)\r +[![Build Status](https://travis-ci.org/EBI-Metagenomics/CWL-assembly.svg?branch=develop)](https://travis-ci.org/EBI-Metagenomics/CWL-assembly)\r +\r +## Description\r +\r +This repository contains two workflows for metagenome and metatranscriptome assembly of short read data. MetaSPAdes is used as default for paired end data, and MEGAHIT for single end data. 
MEGAHIT can be specified as the default assembler in the yaml file if preferred. Steps include:\r +\r +QC - removal of short reads, low quality regions, adapters and host decontamination\r +\r +Assembly - with metaSPADES or MEGAHIT\r +\r +Post-assembly - Host and PhiX decontamination, contig length filter (500bp), stats generation.\r +\r +Multiple input read files can also be specified for co-assembly.\r +\r +## Requirements\r +\r +This pipeline requires and environment with cwltool, blastn, metaspades and megahit.\r +\r +## Databases\r +\r +Predownload fasta files for host decontamination and generate:\r + - bwa index folder\r + - blast index folder\r + \r +Specify the locations in the yaml file when running the pipeline.\r +\r +\r +## Main pipeline executables\r +\r +src/workflows/metagenome_pipeline.cwl\r +src/workflows/metatranscriptome_pipeline.cwl\r +\r +# Example output directory structure\r +```\r +SRP0741\r + └── SRP074153 Project directory containing all assemblies under that project\r + ├── downloads.yml Raw data download caching logfile, to avoid duplicate downloads of raw data\r + ├── SRR6257\r + │   └── SRR6257420 Run directory\r + │   └── megahit\r + │   ├── 001 Assembly directory\r + │   │ ├── SRR6257420.fasta Trimmed assembly\r + │   │ ├── SRR6257420.fasta.gz Archive trimmed assembly\r + │   │ ├── SRR6257420.fasta.gz.md5 MD5 hash of above archive\r + │   │ ├── coverage.tab Coverage file\r + │   │ ├── final.contigs.fa Raw assembly\r + │   │ ├── job_config.yml CWL job configuration\r + │   │ ├── megahit.log Assembler output log\r + │   │ ├── output.json Human-readable Assembly stats file\r + │   │ ├── sorted.bam BAM file of assembly\r + │   │ ├── sorted.bam.bai Secondary BAM file\r + │   │ └── toil.log cwlToil output log\r + │  └── metaspades Assembly of equivalent data using another assembler (eg metaspades, spades...)\r + │  └── ... 
\r + │ \r + ├── raw Raw data directory\r + │   └── SRR6257420.fastq.gz Raw data files\r + │\r + └── tmp Temporary directory for assemblies\r + └── SRR6257\r + └── SRR6257420\r + └── megahit\r + └── 001\r +```\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/474?version=1" ; + schema1:isBasedOn "https://github.com/EBI-Metagenomics/CWL-assembly.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Metagenome and metatranscriptome assembly in CWL" ; + schema1:sdDatePublished "2024-08-05 10:30:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/474/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6441 ; + schema1:dateCreated "2023-05-19T13:59:30Z" ; + schema1:dateModified "2023-05-19T13:59:30Z" ; + schema1:description """# CWL-assembly\r +[![Codacy Badge](https://api.codacy.com/project/badge/Grade/684724bbc0134960ab41748f4a4b732f)](https://www.codacy.com/app/mb1069/CWL-assembly?utm_source=github.com&utm_medium=referral&utm_content=EBI-Metagenomics/CWL-assembly&utm_campaign=Badge_Grade)\r +[![Build Status](https://travis-ci.org/EBI-Metagenomics/CWL-assembly.svg?branch=develop)](https://travis-ci.org/EBI-Metagenomics/CWL-assembly)\r +\r +## Description\r +\r +This repository contains two workflows for metagenome and metatranscriptome assembly of short read data. MetaSPAdes is used as default for paired end data, and MEGAHIT for single end data. MEGAHIT can be specified as the default assembler in the yaml file if preferred. 
Steps include:\r +\r +QC - removal of short reads, low quality regions, adapters and host decontamination\r +\r +Assembly - with metaSPADES or MEGAHIT\r +\r +Post-assembly - Host and PhiX decontamination, contig length filter (500bp), stats generation.\r +\r +Multiple input read files can also be specified for co-assembly.\r +\r +## Requirements\r +\r +This pipeline requires and environment with cwltool, blastn, metaspades and megahit.\r +\r +## Databases\r +\r +Predownload fasta files for host decontamination and generate:\r + - bwa index folder\r + - blast index folder\r + \r +Specify the locations in the yaml file when running the pipeline.\r +\r +\r +## Main pipeline executables\r +\r +src/workflows/metagenome_pipeline.cwl\r +src/workflows/metatranscriptome_pipeline.cwl\r +\r +# Example output directory structure\r +```\r +SRP0741\r + └── SRP074153 Project directory containing all assemblies under that project\r + ├── downloads.yml Raw data download caching logfile, to avoid duplicate downloads of raw data\r + ├── SRR6257\r + │   └── SRR6257420 Run directory\r + │   └── megahit\r + │   ├── 001 Assembly directory\r + │   │ ├── SRR6257420.fasta Trimmed assembly\r + │   │ ├── SRR6257420.fasta.gz Archive trimmed assembly\r + │   │ ├── SRR6257420.fasta.gz.md5 MD5 hash of above archive\r + │   │ ├── coverage.tab Coverage file\r + │   │ ├── final.contigs.fa Raw assembly\r + │   │ ├── job_config.yml CWL job configuration\r + │   │ ├── megahit.log Assembler output log\r + │   │ ├── output.json Human-readable Assembly stats file\r + │   │ ├── sorted.bam BAM file of assembly\r + │   │ ├── sorted.bam.bai Secondary BAM file\r + │   │ └── toil.log cwlToil output log\r + │  └── metaspades Assembly of equivalent data using another assembler (eg metaspades, spades...)\r + │  └── ... 
\r + │ \r + ├── raw Raw data directory\r + │   └── SRR6257420.fastq.gz Raw data files\r + │\r + └── tmp Temporary directory for assemblies\r + └── SRR6257\r + └── SRR6257420\r + └── megahit\r + └── 001\r +```\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/474?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Metagenome and metatranscriptome assembly in CWL" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/474?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1023?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/smrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/smrnaseq" ; + schema1:sdDatePublished "2024-08-05 10:23:12 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1023/ro_crate?version=7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11092 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:18Z" ; + schema1:dateModified "2024-06-11T12:55:18Z" ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1023?version=10" ; + schema1:keywords "small-rna, smrna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/smrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1023?version=7" ; + schema1:version 7 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# GSC (Genotype Sparse Compression)\r +Genotype Sparse Compression (GSC) is an advanced tool for lossless compression of VCF files, designed to efficiently store and manage VCF files in a compressed format. It accepts VCF/BCF files as input and utilizes advanced compression techniques to significantly reduce storage requirements while ensuring fast query capabilities. In our study, we successfully compressed the VCF files from the 1000 Genomes Project (1000Gpip3), consisting of 2504 samples and 80 million variants, from an uncompressed VCF file of 803.70GB to approximately 1GB.\r +\r +## Requirements \r +### GSC requires:\r +\r +- **Compiler Compatibility**: GSC requires a modern C++14-ready compiler, such as:\r + - g++ version 10.1.0 or higher\r +\r +- **Build System**: Make build system is necessary for compiling GSC.\r +\r +- **Operating System**: GSC supports 64-bit operating systems, including:\r + - Linux (Ubuntu 18.04)\r + \r +## Installation\r +To download, build and install GSC use the following commands.\r +```bash\r +git clone https://github.com/luo-xiaolong/GSC.git\r +cd GSC\r +make\r +```\r +To clean the GSC build use:\r +```bash\r +make clean\r +```\r +## Usage\r +```bash\r +Usage: gsc [option] [arguments] \r +Available options: \r + compress - compress VCF/BCF file\r + decompress - query and decompress to VCF/BCF file\r +```\r +- Compress the input VCF/BCF file\r +```bash\r +Usage of gsc compress:\r +\r + gsc compress [options] [--in [in_file]] [--out [out_file]]\r +\r +Where:\r +\r + [options] Optional flags and parameters for compression.\r + -i, --in [in_file] Specify the input file (default: VCF or VCF.GZ). If omitted, input is taken from standard input (stdin).\r + -o, --out [out_file] Specify the output file. 
If omitted, output is sent to standard output (stdout).\r +\r +Options:\r +\r + -M, --mode_lossly Choose lossy compression mode (lossless by default).\r + -b, --bcf Input is a BCF file (default: VCF or VCF.GZ).\r + -p, --ploidy [X] Set ploidy of samples in input VCF to [X] (default: 2).\r + -t, --threads [X] Set number of threads to [X] (default: 1).\r + -d, --depth [X] Set maximum replication depth to [X] (default: 100, 0 means no matches).\r + -m, --merge [X] Specify files to merge, separated by commas (e.g., -m chr1.vcf,chr2.vcf), or '@' followed by a file containing a list of VCF files (e.g., -m @file_with_IDs.txt). By default, all VCF files are compressed.\r +```\r +- Decompress / Query\r +```bash\r +Usage of gsc decompress and query:\r +\r + gsc decompress [options] --in [in_file] --out [out_file]\r +\r +Where:\r + [options] Optional flags and parameters for compression.\r + -i, --in [in_file] Specify the input file . If omitted, input is taken from standard input (stdin).\r + -o, --out [out_file] Specify the output file (default: VCF). If omitted, output is sent to standard output (stdout).\r +\r +Options:\r +\r + General Options:\r +\r + -M, --mode_lossly Choose lossy compression mode (default: lossless).\r + -b, --bcf Output a BCF file (default: VCF).\r +\r + Filter options (applicable in lossy compression mode only): \r +\r + -r, --range [X] Specify range in format [start],[end] (e.g., -r 4999756,4999852).\r + -s, --samples [X] Samples separated by comms (e.g., -s HG03861,NA18639) OR '@' sign followed by the name of a file with sample name(s) separated by whitespaces (for exaple: -s @file_with_IDs.txt). By default all samples/individuals are decompressed. 
\r + --header-only Output only the header of the VCF/BCF.\r + --no-header Output without the VCF/BCF header (only genotypes).\r + -G, --no-genotype Don't output sample genotypes (only #CHROM, POS, ID, REF, ALT, QUAL, FILTER, and INFO columns).\r + -C, --out-ac-an Write AC/AN to the INFO field.\r + -S, --split Split output into multiple files (one per chromosome).\r + -I, [ID=^] Include only sites with specified ID (e.g., -I "ID=rs6040355").\r + --minAC [X] Include only sites with AC <= X.\r + --maxAC [X] Include only sites with AC >= X.\r + --minAF [X] Include only sites with AF >= X (X: 0 to 1).\r + --maxAF [X] Include only sites with AF <= X (X: 0 to 1).\r + --min-qual [X] Include only sites with QUAL >= X.\r + --max-qual [X] Include only sites with QUAL <= X.\r +```\r +## Example\r +There is an example VCF/VCF.gz/BCF file, `toy.vcf`/`toy.vcf.gz`/`toy.bcf`, in the toy folder, which can be used to test GSC\r +### compress\r +\r +#### lossless compression:\r +The input file format is VCF. You can compress a VCF file in lossless mode using one of the following methods:\r +1. **Explicit input and output file parameters**:\r + \r + Use the `--in` option to specify the input VCF file and the `--out` option for the output compressed file.\r + ```bash\r + ./gsc compress --in toy/toy.vcf --out toy/toy_lossless.gsc\r + ```\r +2. **Input file parameter and output redirection**:\r + \r + Use the `--out` option for the output compressed file and redirect the input VCF file into the command.\r + ```bash\r + ./gsc compress --out toy/toy_lossless.gsc < toy/toy.vcf\r + ```\r +3. **Output file redirection and input file parameter**:\r + \r + Specify the input VCF file with the `--in` option and redirect the output to create the compressed file.\r + ```bash\r + ./gsc compress --in toy/toy.vcf > toy/toy_lossless.gsc\r + ```\r +4. **Input and output redirection**:\r + \r + Use shell redirection for both input and output. 
This method does not use the `--in` and `--out` options.\r + ```bash\r + ./gsc compress < toy/toy.vcf > toy/toy_lossless.gsc\r + ```\r +This will create a file:\r +* `toy_lossless.gsc` - The compressed archive of the entire VCF file.\r +\r +#### lossy compression:\r +\r +The input file format is VCF. The commands are similar to those used for lossless compression, with the addition of the `-M` parameter to enable lossy compression.\r +\r + For example, to compress a VCF file in lossy mode:\r +\r + ```bash\r + ./gsc compress -M --in toy/toy.vcf --out toy/toy_lossy.gsc\r + ```\r + Or using redirection:\r + ```bash\r + ./gsc compress -M --out toy/toy_lossy.gsc < toy/toy.vcf\r + ``` \r + This will create a file:\r + * `toy_lossy.gsc` - The compressed archive of the entire VCF file is implemented with lossy compression. It only retains the 'GT' subfield within the INFO and FORMAT fields, and excludes all other subfields..\r + \r +### Decompress (The commands are similar to those used for compression)\r +lossless decompression:\r +\r +To decompress the compressed toy_lossless.gsc into a VCF file named toy_lossless.vcf:\r +```bash\r +./gsc decompress --in toy/toy_lossless.gsc --out toy/toy_lossless.vcf\r +```\r +lossy decompression:\r +\r +To decompress the compressed toy_lossy.gsc into a VCF file named toy_lossy.vcf:\r +```bash\r +./gsc decompress -M --in toy/toy_lossy.gsc --out toy/toy_lossy.vcf\r +```\r +## Dockerfile\r +Dockerfile can be used to build a Docker image with all necessary dependencies and GSC compressor. The image is based on Ubuntu 18.04. To build a Docker image and run a Docker container, you need Docker Desktop (https://www.docker.com). 
Example commands (run it within a directory with Dockerfile):\r +```bash\r +docker build -t gsc_project .\r +docker run -it gsc_project\r +```\r +## Citations\r +- **bio.tools ID**: `gsc_genotype_sparse_compression`\r +- **Research Resource Identifier (RRID)**: `SCR_025071`\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.887.1" ; + schema1:isBasedOn "https://github.com/luo-xiaolong/GSC.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for GSC (Genotype Sparse Compression)" ; + schema1:sdDatePublished "2024-08-05 10:24:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/887/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 838 ; + schema1:creator ; + schema1:dateCreated "2024-05-18T13:18:00Z" ; + schema1:dateModified "2024-05-18T13:18:31Z" ; + schema1:description """# GSC (Genotype Sparse Compression)\r +Genotype Sparse Compression (GSC) is an advanced tool for lossless compression of VCF files, designed to efficiently store and manage VCF files in a compressed format. It accepts VCF/BCF files as input and utilizes advanced compression techniques to significantly reduce storage requirements while ensuring fast query capabilities. 
In our study, we successfully compressed the VCF files from the 1000 Genomes Project (1000Gpip3), consisting of 2504 samples and 80 million variants, from an uncompressed VCF file of 803.70GB to approximately 1GB.\r +\r +## Requirements \r +### GSC requires:\r +\r +- **Compiler Compatibility**: GSC requires a modern C++14-ready compiler, such as:\r + - g++ version 10.1.0 or higher\r +\r +- **Build System**: Make build system is necessary for compiling GSC.\r +\r +- **Operating System**: GSC supports 64-bit operating systems, including:\r + - Linux (Ubuntu 18.04)\r + \r +## Installation\r +To download, build and install GSC use the following commands.\r +```bash\r +git clone https://github.com/luo-xiaolong/GSC.git\r +cd GSC\r +make\r +```\r +To clean the GSC build use:\r +```bash\r +make clean\r +```\r +## Usage\r +```bash\r +Usage: gsc [option] [arguments] \r +Available options: \r + compress - compress VCF/BCF file\r + decompress - query and decompress to VCF/BCF file\r +```\r +- Compress the input VCF/BCF file\r +```bash\r +Usage of gsc compress:\r +\r + gsc compress [options] [--in [in_file]] [--out [out_file]]\r +\r +Where:\r +\r + [options] Optional flags and parameters for compression.\r + -i, --in [in_file] Specify the input file (default: VCF or VCF.GZ). If omitted, input is taken from standard input (stdin).\r + -o, --out [out_file] Specify the output file. 
If omitted, output is sent to standard output (stdout).\r +\r +Options:\r +\r + -M, --mode_lossly Choose lossy compression mode (lossless by default).\r + -b, --bcf Input is a BCF file (default: VCF or VCF.GZ).\r + -p, --ploidy [X] Set ploidy of samples in input VCF to [X] (default: 2).\r + -t, --threads [X] Set number of threads to [X] (default: 1).\r + -d, --depth [X] Set maximum replication depth to [X] (default: 100, 0 means no matches).\r + -m, --merge [X] Specify files to merge, separated by commas (e.g., -m chr1.vcf,chr2.vcf), or '@' followed by a file containing a list of VCF files (e.g., -m @file_with_IDs.txt). By default, all VCF files are compressed.\r +```\r +- Decompress / Query\r +```bash\r +Usage of gsc decompress and query:\r +\r + gsc decompress [options] --in [in_file] --out [out_file]\r +\r +Where:\r + [options] Optional flags and parameters for compression.\r + -i, --in [in_file] Specify the input file . If omitted, input is taken from standard input (stdin).\r + -o, --out [out_file] Specify the output file (default: VCF). If omitted, output is sent to standard output (stdout).\r +\r +Options:\r +\r + General Options:\r +\r + -M, --mode_lossly Choose lossy compression mode (default: lossless).\r + -b, --bcf Output a BCF file (default: VCF).\r +\r + Filter options (applicable in lossy compression mode only): \r +\r + -r, --range [X] Specify range in format [start],[end] (e.g., -r 4999756,4999852).\r + -s, --samples [X] Samples separated by comms (e.g., -s HG03861,NA18639) OR '@' sign followed by the name of a file with sample name(s) separated by whitespaces (for exaple: -s @file_with_IDs.txt). By default all samples/individuals are decompressed. 
\r + --header-only Output only the header of the VCF/BCF.\r + --no-header Output without the VCF/BCF header (only genotypes).\r + -G, --no-genotype Don't output sample genotypes (only #CHROM, POS, ID, REF, ALT, QUAL, FILTER, and INFO columns).\r + -C, --out-ac-an Write AC/AN to the INFO field.\r + -S, --split Split output into multiple files (one per chromosome).\r + -I, [ID=^] Include only sites with specified ID (e.g., -I "ID=rs6040355").\r + --minAC [X] Include only sites with AC <= X.\r + --maxAC [X] Include only sites with AC >= X.\r + --minAF [X] Include only sites with AF >= X (X: 0 to 1).\r + --maxAF [X] Include only sites with AF <= X (X: 0 to 1).\r + --min-qual [X] Include only sites with QUAL >= X.\r + --max-qual [X] Include only sites with QUAL <= X.\r +```\r +## Example\r +There is an example VCF/VCF.gz/BCF file, `toy.vcf`/`toy.vcf.gz`/`toy.bcf`, in the toy folder, which can be used to test GSC\r +### compress\r +\r +#### lossless compression:\r +The input file format is VCF. You can compress a VCF file in lossless mode using one of the following methods:\r +1. **Explicit input and output file parameters**:\r + \r + Use the `--in` option to specify the input VCF file and the `--out` option for the output compressed file.\r + ```bash\r + ./gsc compress --in toy/toy.vcf --out toy/toy_lossless.gsc\r + ```\r +2. **Input file parameter and output redirection**:\r + \r + Use the `--out` option for the output compressed file and redirect the input VCF file into the command.\r + ```bash\r + ./gsc compress --out toy/toy_lossless.gsc < toy/toy.vcf\r + ```\r +3. **Output file redirection and input file parameter**:\r + \r + Specify the input VCF file with the `--in` option and redirect the output to create the compressed file.\r + ```bash\r + ./gsc compress --in toy/toy.vcf > toy/toy_lossless.gsc\r + ```\r +4. **Input and output redirection**:\r + \r + Use shell redirection for both input and output. 
This method does not use the `--in` and `--out` options.\r + ```bash\r + ./gsc compress < toy/toy.vcf > toy/toy_lossless.gsc\r + ```\r +This will create a file:\r +* `toy_lossless.gsc` - The compressed archive of the entire VCF file.\r +\r +#### lossy compression:\r +\r +The input file format is VCF. The commands are similar to those used for lossless compression, with the addition of the `-M` parameter to enable lossy compression.\r +\r + For example, to compress a VCF file in lossy mode:\r +\r + ```bash\r + ./gsc compress -M --in toy/toy.vcf --out toy/toy_lossy.gsc\r + ```\r + Or using redirection:\r + ```bash\r + ./gsc compress -M --out toy/toy_lossy.gsc < toy/toy.vcf\r + ``` \r + This will create a file:\r + * `toy_lossy.gsc` - The compressed archive of the entire VCF file is implemented with lossy compression. It only retains the 'GT' subfield within the INFO and FORMAT fields, and excludes all other subfields.\r + \r +### Decompress (The commands are similar to those used for compression)\r +lossless decompression:\r +\r +To decompress the compressed toy_lossless.gsc into a VCF file named toy_lossless.vcf:\r +```bash\r +./gsc decompress --in toy/toy_lossless.gsc --out toy/toy_lossless.vcf\r +```\r +lossy decompression:\r +\r +To decompress the compressed toy_lossy.gsc into a VCF file named toy_lossy.vcf:\r +```bash\r +./gsc decompress -M --in toy/toy_lossy.gsc --out toy/toy_lossy.vcf\r +```\r +## Dockerfile\r +Dockerfile can be used to build a Docker image with all necessary dependencies and GSC compressor. The image is based on Ubuntu 18.04. To build a Docker image and run a Docker container, you need Docker Desktop (https://www.docker.com). 
Example commands (run it within a directory with Dockerfile):\r +```bash\r +docker build -t gsc_project .\r +docker run -it gsc_project\r +```\r +## Citations\r +- **bio.tools ID**: `gsc_genotype_sparse_compression`\r +- **Research Resource Identifier (RRID)**: `SCR_025071`\r +""" ; + schema1:keywords "Bioinformatics, Genomics, C++" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "GSC (Genotype Sparse Compression)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/887?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:description "Analysis of variation within individual COVID-19 samples using Illumina Paired End data. More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/7?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genomics - PE Variation" ; + schema1:sdDatePublished "2024-08-05 10:33:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/7/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 7268 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 37131 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T12:52:22Z" ; + schema1:dateModified "2023-05-30T12:07:57Z" ; + schema1:description "Analysis of variation within individual COVID-19 samples using Illumina Paired End data. 
More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Genomics - PE Variation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/7?version=1" ; + schema1:version 1 ; + ns1:input , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 13428 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.54.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_ligand_parameterization" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter GMX Notebook Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-08-05 10:25:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/54/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14629 ; + schema1:creator , + , + ; + schema1:dateCreated "2021-06-29T07:40:34Z" ; + schema1:dateModified "2022-09-15T10:58:18Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/54?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter GMX Notebook Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_ligand_parameterization/28ef9a099ddff3096ed47477ff72b4b08f8eb355/biobb_wf_ligand_parameterization/notebooks/biobb_ligand_parameterization_tutorial.ipynb" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# GermlineShortV_biovalidation\r +\r + - [Description](#description)\r + - [Diagram](#diagram)\r + - [User guide](#user-guide)\r + - [Quick start guide](#quick-start-guide)\r + - [Benchmarking](#benchmarking)\r + - [Workflow summaries](#workflow-summaries)\r + - [Metadata](#metadata)\r + - [Component tools](#component-tools)\r + - [Required (minimum)\r + inputs/parameters](#required-minimum-inputsparameters) \r + [Preparing your own input files](#preparing-input-files)\r + - [Additional notes](#additional-notes)\r + - [Understanding your outputs](#understanding-your-outputs) \r + - [Performance metrics explained](#performance-metrics-explained) \r + - [Help/FAQ/Troubleshooting](#helpfaqtroubleshooting)\r + - [Acknowledgements/citations/credits](#acknowledgementscitationscredits)\r +\r +## Description \r +Population-scale WGS cohorts are essential resources for genetic analyses including heritable diseases, evolutionary genomics, conservation biology, and population genomics. Processing raw reads into analysis-ready variants remains challenging. Various mapping and variant calling pipelines have been made publicly available in recent decades. Designing a mapping and variant calling pipeline to meet your needs is dependent on the compute infrastructure you’re working on, the types of variants you’re primarily interested in, and the sequencing technology you use to generate raw sequencing data. Keep in mind that the tools you use to build your pipeline can affect variant calling accuracy. Further, optimisation and customisation of these tools’ commands can also affect their performance. 
Best-practice recommendations for variant calling pipelines vary dramatically between species and research questions, depending on the availability of genomic resources for the population of interest, genome structure, and clinical relevance of the resulting variant dataset. It is important to not only design a robust variant calling pipeline but also fine-tune it to achieve optimal performance for your dataset and research question. \r +\r +There are various measurements that you can apply to evaluate the biological accuracy of your germline variant calling pipeline. Currently, no best practice methods for interrogating joint-called variant sets exist in the literature. A number of publicly available, human ‘gold standard’ truth datasets including Platinum Genomes and Genome in a Bottle (GIAB) are useful for benchmarking across high confidence regions of the genome and evaluating the recall and precision of the pipeline. We recommend individuals working with human datasets benchmark their germline variant calling pipelines using one of these datasets. Unfortunately, these resources are not typically available for non-human organisms. \r +\r +Here, we present protocols for benchmarking and validating germline short variant (SNVs and indels) datasets using a combination of methods that can capture the quality of your variant sets for human, non-human model, and non-model organisms. The process you can apply will depend on the organism you’re working with and the genomic resources available to that organism. \r +\r +## Diagram \r +\r +

\r + \r +

\r +\r +## User guide \r +### Quick start guide \r +\r +These bash scripts were written for the University of Sydney’s high performance computer, Artemis. They can be run on the command line or submitted as PBS jobs. These scripts assume your input is a gzipped multi-sample (cohort) VCF file. Before running, edit the PBS project directive and define the variables at the top of the script. All software used in this protocol is installed on Artemis- to use alternate versions or run on a different compute infrastructure, edit the modules according to your needs. \r +\r +#### Human datasets \r +For human datasets, we recommend you benchmark your germline variant calling pipeline using a gold standard dataset such as Platinum Genomes. Raw sequence data in FASTQ format for these datasets can be downloaded along with their high confidence variant calls and regions from public repositories. See [Preparing input files]() for more information on how to download and prepare these files. \r +\r +##### 1. Collect vcf summary metrics \r +Edit the PBS -P directive and variables for your dataset in `vcfstat.sh`. Then run script with: \r +\r +```\r +qsub vcfstat.sh (or bash vcfstat.sh)\r +```\r +This will produce summary and quality metrics reports and plots for your cohort. It will also produce summary and detail files for known variant representation. BCFtools stats plots will be housed in a directory labelled `${cohort}_vcfplots`. \r +\r +##### 2. Biological benchmarking using a truth set \r +\r +Edit the PBS -P directive and variables for your files. Then run script with: \r +\r +```\r +qsub run_happy.sh\r +```\r +This script will subset your multi-sample VCF into individual samples, prepare them for hap.py, and output a number of files including summary metrics (including recall, precision and F1-score) and ROC count files that can be used to produce ROC curves, separately for SNVs and indels. 
See the [hap.py user guide](https://github.com/Illumina/hap.py/blob/master/doc/happy.md) for more information on how to interpret hap.py output. ROC curves of Hap.py runs can be plotted using the script [rocplot.Rscript](https://github.com/Illumina/hap.py/blob/master/src/R/rocplot.Rscript). \r +\r +#### Non-human model organism datasets\r +\r +##### 1. Collect vcf summary metrics \r +Edit the PBS -P directive and variables for your dataset in `vcfstat.sh`. We recommend you use the set of known variants used for base quality score recalibration to validate population level variants. If you used trio data, unhash the Mendelian error command within the script. Then run script with: \r +\r +```\r +qsub vcfstat.sh (or bash vcfstat.sh)\r +```\r +This will produce summary and quality metrics reports and plots for your cohort. It will also produce summary and detail files for known variant representation. BCFtools stats plots will be housed in a directory labelled `${cohort}_vcfplots`. \r +#### Non-model organism datasets \r +\r +##### 1. Collect vcf summary metrics \r +\r +Edit the PBS -P directive and variables for your dataset in `vcfstat_nonmodel.sh`. Then run script with: \r +\r +```\r +qsub vcfstat_nonmodel.sh (or bash vcfstat_nonmodel.sh)\r +```\r +\r +This will produce summary and quality metrics reports and plots for your cohort. It will also produce summary and detail files for known variant representation. BCFtools stats plots will be housed in a directory labelled `${cohort}_vcfplots`. \r +\r +## Benchmarking \r +Coming soon! 
\r +\r +## Workflow summaries \r +### Metadata \r +|metadata field | workflow_name / workflow_version |\r +|-------------------|:---------------------------------:|\r +|Version | 1.0 |\r +|Maturity | stable |\r +|Creators | Georgie Samaha, Tracy Chew, Cali Willet |\r +|Source | NA |\r +|License | NA |\r +|Workflow manager | NA |\r +|Container | None |\r +|Install method | Manual |\r +|GitHub | NA |\r +|bio.tools | NA |\r +|BioContainers | NA | \r +|bioconda | NA |\r +\r +### Component tools \r +\r +bcftools/1.14 \r +htslib/1.14 \r +python/3.8.2 \r +R/4.1.1 \r +hap.py/0.3.14 \r +\r +### Required (minimum) inputs/parameters \r +\r +- Multi-sample or single sample VCF file (VCF.gz format)\r +- List of sample IDs that match the VCF (.txt format)\r +- Known variant dataset (VCF format. Human and non-human model organisms only)\r +- Pedigree file (format: mother,father,offspring. Trios or Platinum Genomes only)\r +- Truth set variant calls (VCF.gz format. Human, Platinum Genomes only)\r +- High confidence call regions (BED format. Human, Platinum Genomes only)\r +\r +### Preparing input files \r +\r +#### Gold standard variant truth sets \r +\r +The benchmarking protocol for human datasets assumes you have performed mapping and germline variant calling on a gold standard truth set. These datasets contain millions of variants that have been confirmed using orthologous technologies [Eberle et al. 2017](https://doi.org/10.1101/gr.210500.116). \r +\r +We recommend you use the Platinum Genomes dataset for benchmarking germline variant calling pipelines that include joint genotyping of multiple samples. Six members, comprising two trios, of the Platinum Genomes dataset can be downloaded from the Illumina BaseSpace Sequence Hub, the ENA, or dbGaP. 
The Platinum Genomes dataset contains multiple files including the following files you will need for running `run_happy.sh`: \r +- Paired-end FASTQ files for each sample\r +- High-confidence germline variant VCF files for each sample\r +- High-confidence genomic regions (BED format)\r +\r +Currently, these files are available for Hg19 (GRCh37) and Hg38 (GRCh38) . Links to raw data are [here](https://github.com/Illumina/PlatinumGenomes). BaseSpace offers a command line tool for downloading files, see [here](https://developer.basespace.illumina.com/docs/content/documentation/cli/cli-examples) for instructions. \r +\r +#### Providing your own ‘truth set’ \r +*A word of caution*- testing the performance of your pipeline using a truth set is only intended to estimate the overall quality of your pipeline and detect any potential sources of error in your method. It is not intended to test the truthfulness of your variant set. See [here](https://gatk.broadinstitute.org/hc/en-us/articles/360035531572-Evaluating-the-quality-of-a-germline-short-variant-callset) for further discussion of the assumptions we make about truth sets. Most non-human organisms do not have access to gold standard truth set resources like the Platinum Genomes dataset. However there are a few alternative options you could try: \r + - Genotyping arrays: if you have genotyping data for the same samples you tested your germline variant calling pipeline with, you can reformat these to VCF using a tool like [PLINK’s recode](https://www.cog-genomics.org/plink/1.9/data#recode) and use it as a truth set. \r + - Known variant datasets: if your organism of interest has a set of known population-level variants you can use these as a truth-set. Just remember that these variants might not always be validated (i.e. dbSNP). \r +\r +Using this method you will need to also provide your own high-confidence regions file in BED format. 
The location and size of these regions will depend on your dataset, organism, reference assembly and sequencing method. Typically these regions would exclude centromeres, telomeres and repetitive parts of the genome that are likely to complicate variant calling. \r +\r +\r +## Additional notes \r +\r +Test data for Hap.py can be found [here](https://github.com/Illumina/hap.py/blob/master/doc/microbench.md) \r +\r +Instructions on how to install Hap.py can be found [here](https://github.com/Illumina/hap.py#installation) \r +\r +This warning may be thrown by Hap.py and can be ignored: `WARNING No reference file found at default locations. You can set the environment variable 'HGREF' or 'HG19' to point to a suitable Fasta file.` \r +\r +\r +### Understanding your outputs \r +The following files will be produced and stored in your designated working directory. They will all be labelled with your specified cohort name. \r +\r +#### Variant based metrics \r +Produced by BCFtools stats command. Output file:\r +- ${cohort}.bcftools.metrics \r +- ${cohort}_bcftools.metrics_vcfstatplots (directory and files) \r +\r +#### Sample based metrics \r +Produced by BCFtools smplstats and mendelian commands. Output files:\r +- ${cohort}.smplstats\r +- ${cohort}.smplstats.pdf\r +- ${cohort}.Mendelianerr\r +\r +#### Known variant concordance \r +Produced by GATK CollectVariantCallingMetrics command. Output files:\r +- ${cohort}.known.variant_calling_summary_metrics\r +- ${cohort}.known.variant_calling_detail_metrics\r +\r +#### Biological validation using a truth set \r +Produced by Hap.py. 
Output files:\r +- ${sample}.happy.metrics.json.gz\r +- ${sample}.happy.roc.all.csv.gz\r +- ${sample}.happy.roc.Locations.INDEL.csv.gz\r +- ${sample}.happy.roc.Locations.INDEL.PASS.csv.gz\r +- ${sample}.happy.roc.Locations.SNP.csv.gz\r +- ${sample}.happy.roc.Locations.SNP.PASS.csv.gz\r +- ${sample}.happy.roc.tsv\r +- ${sample}.happy.runinfo.json\r +- ${sample}.happy.summary.csv\r +\r +### Performance metrics explained \r +\r +|Metric |Expected/ideal value |Tool |Relevance |\r +|--------------------------------------|----------------------------------------------------|---------------|---------------------------------------------------------------------------------------------------------------|\r +|Number of SNVs and indels (per sample)|Human WGS: ~4.4M, Human WES: ~41k, Species dependent|bcftools stats |Population, sequencing approach, and genomic region dependent. Alone, this metric cannot indicate data quality.|\r +|Indel length distribution |Indel length range is 1-10,000bp. |bcftools stats |Increased length is conflated with reduced mapping quality. Distribution is dataset dependent. Recommend filtering for high quality.|\r +|Depth of coverage |Depends on the sequencing coverage of samples. |bcftools stats |Dramatic deviation from expected distribution can indicate artifactual bias. |\r +|Substitution type counts |See TiTv ratio. |bcftools stats |Twice as many possible transversions as transitions. See [here](https://dx.doi.org/10.1093%2Fbioinformatics%2Fbtu668) |\r +|TiTv ratio (genome wide) |For mammals: WGS: 2.0-2.1, WES: 3.0-3.3 |bcftools stats |Dramatic deviation from expected ratio can indicate artifactual bias. Typically elevated in coding regions where transversions are more likely to occur. |\r +|Base quality distribution |Dataset dependent. |bcftools stats |This will reflect the quality based filtering you performed. 
Dramatic deviation from expected ratio can indicate artifactual bias.|\r +|Indel ratio |Common: ~1.0, Rare: 0.2-0.5 |GATK CollectVariantCallingMetrics|This should be evaluated after custom filtering variants for your needs. Dramatic deviation from expected ratio can indicate artifactual bias.|\r +|Het/hom(non-ref) |~2.0 assuming Hardy-Weinberg equilibrium. |GATK CollectVariantCallingMetrics|Ancestry dependent, can vary dramatically. See [Wang et al. 2015](https://dx.doi.org/10.1093%2Fbioinformatics%2Fbtu668)|\r +|Mendelian error |0 |BCFtools +mendelian|Mendelian inheritance errors are likely erroneous genotype calls. See [Pilipenko et al. 2014](https://dx.doi.org/10.1186%2F1753-6561-8-S1-S21)|\r +|True positives |Dataset dependent. |Hap.py |Number of query variants that are present in the truth set. |\r +|False negatives |Dataset dependent. |Hap.py |Number of variants in truth set, not present in query VCF. |\r +|False positives |Dataset dependent. |Hap.py |Number of variants in query VCF, not present in truth set. |\r +|Recall |1 |Hap.py |Absence of false negatives. See [Krusche et al. 2019](https://doi.org/10.1038/s41587-019-0054-x) |\r +|Precision |1 |Hap.py |Absence of false positives. See [Krusche et al. 2019](https://doi.org/10.1038/s41587-019-0054-x) |\r +|F1-score |1 |Hap.py |Harmonic mean of recall and precision. See [Krusche et al. 2019](https://doi.org/10.1038/s41587-019-0054-x) |\r +|Genotype errors (FP.GT) |Dataset dependent. |Hap.py |Number of query variants with incorrect genotype |\r +\r +### Resources and references \r +\r +Eberle, M. A., Fritzilas, E., Krusche, P., Källberg, M., Moore, B. L., Bekritsky, M. A., Iqbal, Z., Chuang, H. Y., Humphray, S. J., Halpern, A. L., Kruglyak, S., Margulies, E. H., McVean, G., & Bentley, D. R. (2017). A reference data set of 5.4 million phased human variants validated by genetic inheritance from sequencing a three-generation 17-member pedigree. Genome research, 27(1), 157–164. 
https://doi.org/10.1101/gr.210500.116 \r +\r +Koboldt, D.C. Best practices for variant calling in clinical sequencing. Genome Med 12, 91 (2020). https://doi.org/10.1186/s13073-020-00791-w \r +\r +Krusche, P., Trigg, L., Boutros, P.C. et al. Best practices for benchmarking germline small-variant calls in human genomes. Nat Biotechnol 37, 555–560 (2019). https://doi.org/10.1038/s41587-019-0054-x \r +\r +Marshall, C.R., Chowdhury, S., Taft, R.J. et al. Best practices for the analytical validation of clinical whole-genome sequencing intended for the diagnosis of germline disease. npj Genom. Med. 5, 47 (2020). https://doi.org/10.1038/s41525-020-00154-9 \r +\r +Pilipenko, V.V., He, H., Kurowski, B.G. et al. Using Mendelian inheritance errors as quality control criteria in whole genome sequencing data set. BMC Proc 8, S21 (2014). https://doi.org/10.1186/1753-6561-8-S1-S21 \r +\r +Wang, J., Raskin, J., Samuels, D., Shyr, Y., Guo, Y., Genome measures used for quality control are dependent on gene function and ancestry, Bioinformatics 31, 318–323 (2015) https://doi.org/10.1093/bioinformatics/btu668 \r +\r +\r +## Help/FAQ/Troubleshooting\r +\r +If Hap.py throws an error, search the [issues at Hap.py GitHub repository](https://github.com/Illumina/hap.py/issues) and attempt to resolve it before submitting an issue here. \r +\r +## Acknowledgements/citations/credits \r +\r +### Authors \r +- Georgie Samaha (Sydney Informatics Hub, University of Sydney) \r +- Tracy Chew (Sydney Informatics Hub, University of Sydney) \r +- Cali Willet (Sydney Informatics Hub, University of Sydney) \r +- Nandan Deshpande (Sydney Informatics Hub, University of Sydney)\r +\r +Acknowledgements (and co-authorship, where appropriate) are an important way for us to demonstrate the value we bring to your research. Your research outcomes are vital for ongoing funding of the Sydney Informatics Hub and national compute facilities. 
We suggest including the following acknowledgement in any publications that follow from this work: \r +\r +The authors acknowledge the technical assistance provided by the Sydney Informatics Hub, a Core Research Facility of the University of Sydney and the Australian BioCommons which is enabled by NCRIS via Bioplatforms Australia. \r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.339.1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/GermlineShortV_biovalidation.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for GermlineShortV_biovalidation" ; + schema1:sdDatePublished "2024-08-05 10:32:10 +0100" ; + schema1:url "https://workflowhub.eu/workflows/339/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 61599 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 19380 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2022-05-05T05:02:08Z" ; + schema1:dateModified "2023-01-16T13:59:52Z" ; + schema1:description """# GermlineShortV_biovalidation\r +\r + - [Description](#description)\r + - [Diagram](#diagram)\r + - [User guide](#user-guide)\r + - [Quick start guide](#quick-start-guide)\r + - [Benchmarking](#benchmarking)\r + - [Workflow summaries](#workflow-summaries)\r + - [Metadata](#metadata)\r + - [Component tools](#component-tools)\r + - [Required (minimum)\r + inputs/parameters](#required-minimum-inputsparameters) \r + [Preparing your own input files](#preparing-input-files)\r + - [Additional notes](#additional-notes)\r + - [Understanding your outputs](#understanding-your-outputs) \r + - [Performance metrics explained](#performance-metrics-explained) \r + - [Help/FAQ/Troubleshooting](#helpfaqtroubleshooting)\r + - 
[Acknowledgements/citations/credits](#acknowledgementscitationscredits)\r +\r +## Description \r +Population-scale WGS cohorts are essential resources for genetic analyses including heritable diseases, evolutionary genomics, conservation biology, and population genomics. Processing raw reads into analysis-ready variants remains challenging. Various mapping and variant calling pipelines have been made publicly available in recent decades. Designing a mapping and variant calling pipeline to meet your needs is dependent on the compute infrastructure you’re working on, the types of variants you’re primarily interested in, and the sequencing technology you use to generate raw sequencing data. Keep in mind that the tools you use to build your pipeline can affect variant calling accuracy. Further, optimisation and customisation of these tools’ commands can also affect their performance. Best-practice recommendations for variant calling pipelines vary dramatically between species and research questions, depending on the availability of genomic resources for the population of interest, genome structure, and clinical relevance of the resulting variant dataset. It is important to not only design a robust variant calling pipeline but also fine-tune it to achieve optimal performance for your dataset and research question. \r +\r +There are various measurements that you can apply to evaluate the biological accuracy of your germline variant calling pipeline. Currently, no best practice methods for interrogating joint-called variant sets exist in the literature. A number of publicly available, human ‘gold standard’ truth datasets including Platinum Genomes and Genome in a Bottle (GIAB) are useful for benchmarking across high confidence regions of the genome and evaluating the recall and precision of the pipeline. We recommend individuals working with human datasets benchmark their germline variant calling pipelines using one of these datasets. 
Unfortunately, these resources are not typically available for non-human organisms. \r +\r +Here, we present protocols for benchmarking and validating germline short variant (SNVs and indels) datasets using a combination of methods that can capture the quality of your variant sets for human, non-human model, and non-model organisms. The process you can apply will depend on the organism you’re working with and the genomic resources available to that organism. \r +\r +## Diagram \r +\r +

\r + \r +

\r +\r +## User guide \r +### Quick start guide \r +\r +These bash scripts were written for the University of Sydney’s high performance computer, Artemis. They can be run on the command line or submitted as PBS jobs. These scripts assume your input is a gzipped multi-sample (cohort) VCF file. Before running, edit the PBS project directive and define the variables at the top of the script. All software used in this protocol is installed on Artemis- to use alternate versions or run on a different compute infrastructure, edit the modules according to your needs. \r +\r +#### Human datasets \r +For human datasets, we recommend you benchmark your germline variant calling pipeline using a gold standard dataset such as Platinum Genomes. Raw sequence data in FASTQ format for these datasets can be downloaded along with their high confidence variant calls and regions from public repositories. See [Preparing input files]() for more information on how to download and prepare these files. \r +\r +##### 1. Collect vcf summary metrics \r +Edit the PBS -P directive and variables for your dataset in `vcfstat.sh`. Then run script with: \r +\r +```\r +qsub vcfstat.sh (or bash vcfstat.sh)\r +```\r +This will produce summary and quality metrics reports and plots for your cohort. It will also produce summary and detail files for known variant representation. BCFtools stats plots will be housed in a directory labelled `${cohort}_vcfplots`. \r +\r +##### 2. Biological benchmarking using a truth set \r +\r +Edit the PBS -P directive and variables for your files. Then run script with: \r +\r +```\r +qsub run_happy.sh\r +```\r +This script will subset your multi-sample VCF into individual samples, prepare them for hap.py, and output a number of files including summary metrics (including recall, precision and F1-score) and ROC count files that can be used to produce ROC curves, separately for SNVs and indels. 
See the [hap.py user guide](https://github.com/Illumina/hap.py/blob/master/doc/happy.md) for more information on how to interpret hap.py output. ROC curves of Hap.py runs can be plotted using the script [rocplot.Rscript](https://github.com/Illumina/hap.py/blob/master/src/R/rocplot.Rscript). \r +\r +#### Non-human model organism datasets\r +\r +##### 1. Collect vcf summary metrics \r +Edit the PBS -P directive and variables for your dataset in `vcfstat.sh`. We recommend you use the set of known variants used for base quality score recalibration to validate population level variants. If you used trio data, unhash the Mendelian error command within the script. Then run script with: \r +\r +```\r +qsub vcfstat.sh (or bash vcfstat.sh)\r +```\r +This will produce summary and quality metrics reports and plots for your cohort. It will also produce summary and detail files for known variant representation. BCFtools stats plots will be housed in a directory labelled `${cohort}_vcfplots`. \r +#### Non-model organism datasets \r +\r +##### 1. Collect vcf summary metrics \r +\r +Edit the PBS -P directive and variables for your dataset in `vcfstat_nonmodel.sh`. Then run script with: \r +\r +```\r +qsub vcfstat_nonmodel.sh (or bash vcfstat_nonmodel.sh)\r +```\r +\r +This will produce summary and quality metrics reports and plots for your cohort. It will also produce summary and detail files for known variant representation. BCFtools stats plots will be housed in a directory labelled `${cohort}_vcfplots`. \r +\r +## Benchmarking \r +Coming soon! 
\r +\r +## Workflow summaries \r +### Metadata \r +|metadata field | workflow_name / workflow_version |\r +|-------------------|:---------------------------------:|\r +|Version | 1.0 |\r +|Maturity | stable |\r +|Creators | Georgie Samaha, Tracy Chew, Cali Willet |\r +|Source | NA |\r +|License | NA |\r +|Workflow manager | NA |\r +|Container | None |\r +|Install method | Manual |\r +|GitHub | NA |\r +|bio.tools | NA |\r +|BioContainers | NA | \r +|bioconda | NA |\r +\r +### Component tools \r +\r +bcftools/1.14 \r +htslib/1.14 \r +python/3.8.2 \r +R/4.1.1 \r +hap.py/0.3.14 \r +\r +### Required (minimum) inputs/parameters \r +\r +- Multi-sample or single sample VCF file (VCF.gz format)\r +- List of sample IDs that match the VCF (.txt format)\r +- Known variant dataset (VCF format. Human and non-human model organisms only)\r +- Pedigree file (format: mother,father,offspring. Trios or Platinum Genomes only)\r +- Truth set variant calls (VCF.gz format. Human, Platinum Genomes only)\r +- High confidence call regions (BED format. Human, Platinum Genomes only)\r +\r +### Preparing input files \r +\r +#### Gold standard variant truth sets \r +\r +The benchmarking protocol for human datasets assumes you have performed mapping and germline variant calling on a gold standard truth set. These datasets contain millions of variants that have been confirmed using orthologous technologies [Eberle et al. 2017](https://doi.org/10.1101/gr.210500.116). \r +\r +We recommend you use the Platinum Genomes dataset for benchmarking germline variant calling pipelines that include joint genotyping of multiple samples. Six members, comprising two trios, of the Platinum Genomes dataset can be downloaded from the Illumina BaseSpace Sequence Hub, the ENA, or dbGaP. 
The Platinum Genomes dataset contains multiple files including the following files you will need for running `run_happy.sh`: \r +- Paired-end FASTQ files for each sample\r +- High-confidence germline variant VCF files for each sample\r +- High-confidence genomic regions (BED format)\r +\r +Currently, these files are available for Hg19 (GRCh37) and Hg38 (GRCh38). Links to raw data are [here](https://github.com/Illumina/PlatinumGenomes). BaseSpace offers a command line tool for downloading files, see [here](https://developer.basespace.illumina.com/docs/content/documentation/cli/cli-examples) for instructions. \r +\r +#### Providing your own ‘truth set’ \r +*A word of caution*- testing the performance of your pipeline using a truth set is only intended to estimate the overall quality of your pipeline and detect any potential sources of error in your method. It is not intended to test the truthfulness of your variant set. See [here](https://gatk.broadinstitute.org/hc/en-us/articles/360035531572-Evaluating-the-quality-of-a-germline-short-variant-callset) for further discussion of the assumptions we make about truth sets. Most non-human organisms do not have access to gold standard truth set resources like the Platinum Genomes dataset. However, there are a few alternative options you could try: \r + - Genotyping arrays: if you have genotyping data for the same samples you tested your germline variant calling pipeline with, you can reformat these to VCF using a tool like [PLINK’s recode](https://www.cog-genomics.org/plink/1.9/data#recode) and use it as a truth set. \r + - Known variant datasets: if your organism of interest has a set of known population-level variants you can use these as a truth-set. Just remember that these variants might not always be validated (i.e. dbSNP). \r +\r +Using this method you will need to also provide your own high-confidence regions file in BED format. 
The location and size of these regions will depend on your dataset, organism, reference assembly and sequencing method. Typically these regions would exclude centromeres, telomeres and repetitive parts of the genome that are likely to complicate variant calling. \r +\r +\r +## Additional notes \r +\r +Test data for Hap.py can be found [here](https://github.com/Illumina/hap.py/blob/master/doc/microbench.md) \r +\r +Instructions on how to install Hap.py can be found [here](https://github.com/Illumina/hap.py#installation) \r +\r +This warning may be thrown by Hap.py and can be ignored: `WARNING No reference file found at default locations. You can set the environment variable 'HGREF' or 'HG19' to point to a suitable Fasta file.` \r +\r +\r +### Understanding your outputs \r +The following files will be produced and stored in your designated working directory. They will all be labelled with your specified cohort name. \r +\r +#### Variant based metrics \r +Produced by BCFtools stats command. Output file:\r +- ${cohort}.bcftools.metrics \r +- ${cohort}_bcftools.metrics_vcfstatplots (directory and files) \r +\r +#### Sample based metrics \r +Produced by BCFtools smplstats and mendelian commands. Output files:\r +- ${cohort}.smplstats\r +- ${cohort}.smplstats.pdf\r +- ${cohort}.Mendelianerr\r +\r +#### Known variant concordance \r +Produced by GATK CollectVariantCallingMetrics command. Output files:\r +- ${cohort}.known.variant_calling_summary_metrics\r +- ${cohort}.known.variant_calling_detail_metrics\r +\r +#### Biological validation using a truth set \r +Produced by Hap.py. 
Output files:\r +- ${sample}.happy.metrics.json.gz\r +- ${sample}.happy.roc.all.csv.gz\r +- ${sample}.happy.roc.Locations.INDEL.csv.gz\r +- ${sample}.happy.roc.Locations.INDEL.PASS.csv.gz\r +- ${sample}.happy.roc.Locations.SNP.csv.gz\r +- ${sample}.happy.roc.Locations.SNP.PASS.csv.gz\r +- ${sample}.happy.roc.tsv\r +- ${sample}.happy.runinfo.json\r +- ${sample}.happy.summary.csv\r +\r +### Performance metrics explained \r +\r +|Metric |Expected/ideal value |Tool |Relevance |\r +|--------------------------------------|----------------------------------------------------|---------------|---------------------------------------------------------------------------------------------------------------|\r +|Number of SNVs and indels (per sample)|Human WGS: ~4.4M, Human WES: ~41k, Species dependent|bcftools stats |Population, sequencing approach, and genomic region dependent. Alone, this metric cannot indicate data quality.|\r +|Indel length distribution |Indel length range is 1-10,000bp. |bcftools stats |Increased length is conflated with reduced mapping quality. Distribution is dataset dependent. Recommend filtering for high quality.|\r +|Depth of coverage |Depends on the sequencing coverage of samples. |bcftools stats |Dramatic deviation from expected distribution can indicate artifactual bias. |\r +|Substitution type counts |See TiTv ratio. |bcftools stats |Twice as many possible transversions as transitions. See [here](https://dx.doi.org/10.1093%2Fbioinformatics%2Fbtu668) |\r +|TiTv ratio (genome wide) |For mammals: WGS: 2.0-2.1, WES: 3.0-3.3 |bcftools stats |Dramatic deviation from expected ratio can indicate artifactual bias. Typically elevated in coding regions where transversions are more likely to occur. |\r +|Base quality distribution |Dataset dependent. |bcftools stats |This will reflect the quality based filtering you performed. 
Dramatic deviation from expected ratio can indicate artifactual bias.|\r +|Indel ratio |Common: ~1.0, Rare: 0.2-0.5 |GATK CollectVariantCallingMetrics|This should be evaluated after custom filtering variants for your needs. Dramatic deviation from expected ratio can indicate artifactual bias.|\r +|Het/hom(non-ref) |~2.0 assuming Hardy-Weinberg equilibrium. |GATK CollectVariantCallingMetrics|Ancestry dependent, can vary dramatically. See [Wang et al. 2015](https://dx.doi.org/10.1093%2Fbioinformatics%2Fbtu668)|\r +|Mendelian error |0 |BCFtools +mendelian|Mendelian inheritance errors are likely erroneous genotype calls. See [Pilipenko et al. 2014](https://dx.doi.org/10.1186%2F1753-6561-8-S1-S21)|\r +|True positives |Dataset dependent. |Hap.py |Number of query variants that are present in the truth set. |\r +|False negatives |Dataset dependent. |Hap.py |Number of variants in truth set, not present in query VCF. |\r +|False positives |Dataset dependent. |Hap.py |Number of variants in query VCF, not present in truth set. |\r +|Recall |1 |Hap.py |Absence of false negatives. See [Krusche et al. 2019](https://doi.org/10.1038/s41587-019-0054-x) |\r +|Precision |1 |Hap.py |Absence of false positives. See [Krusche et al. 2019](https://doi.org/10.1038/s41587-019-0054-x) |\r +|F1-score |1 |Hap.py |Harmonic mean of recall and precision. See [Krusche et al. 2019](https://doi.org/10.1038/s41587-019-0054-x) |\r +|Genotype errors (FP.GT) |Dataset dependent. |Hap.py |Number of query variants with incorrect genotype |\r +\r +### Resources and references \r +\r +Eberle, M. A., Fritzilas, E., Krusche, P., Källberg, M., Moore, B. L., Bekritsky, M. A., Iqbal, Z., Chuang, H. Y., Humphray, S. J., Halpern, A. L., Kruglyak, S., Margulies, E. H., McVean, G., & Bentley, D. R. (2017). A reference data set of 5.4 million phased human variants validated by genetic inheritance from sequencing a three-generation 17-member pedigree. Genome research, 27(1), 157–164. 
https://doi.org/10.1101/gr.210500.116 \r +\r +Koboldt, D.C. Best practices for variant calling in clinical sequencing. Genome Med 12, 91 (2020). https://doi.org/10.1186/s13073-020-00791-w \r +\r +Krusche, P., Trigg, L., Boutros, P.C. et al. Best practices for benchmarking germline small-variant calls in human genomes. Nat Biotechnol 37, 555–560 (2019). https://doi.org/10.1038/s41587-019-0054-x \r +\r +Marshall, C.R., Chowdhury, S., Taft, R.J. et al. Best practices for the analytical validation of clinical whole-genome sequencing intended for the diagnosis of germline disease. npj Genom. Med. 5, 47 (2020). https://doi.org/10.1038/s41525-020-00154-9 \r +\r +Pilipenko, V.V., He, H., Kurowski, B.G. et al. Using Mendelian inheritance errors as quality control criteria in whole genome sequencing data set. BMC Proc 8, S21 (2014). https://doi.org/10.1186/1753-6561-8-S1-S21 \r +\r +Wang, J., Raskin, J., Samuels, D., Shyr, Y., Guo, Y., Genome measures used for quality control are dependent on gene function and ancestry, Bioinformatics 31, 318–323 (2015) https://doi.org/10.1093/bioinformatics/btu668 \r +\r +\r +## Help/FAQ/Troubleshooting\r +\r +If Hap.py throws an error, search the [issues at Hap.py GitHub repository](https://github.com/Illumina/hap.py/issues) and attempt to resolve it before submitting an issue here. \r +\r +## Acknowledgements/citations/credits \r +\r +### Authors \r +- Georgie Samaha (Sydney Informatics Hub, University of Sydney) \r +- Tracy Chew (Sydney Informatics Hub, University of Sydney) \r +- Cali Willet (Sydney Informatics Hub, University of Sydney) \r +- Nandan Deshpande (Sydney Informatics Hub, University of Sydney)\r +\r +Acknowledgements (and co-authorship, where appropriate) are an important way for us to demonstrate the value we bring to your research. Your research outcomes are vital for ongoing funding of the Sydney Informatics Hub and national compute facilities. 
We suggest including the following acknowledgement in any publications that follow from this work: \r +\r +The authors acknowledge the technical assistance provided by the Sydney Informatics Hub, a Core Research Facility of the University of Sydney and the Australian BioCommons which is enabled by NCRIS via Bioplatforms Australia. \r +""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "GermlineShortV_biovalidation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/339?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=13" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-08-05 10:22:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=13" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10551 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:10Z" ; + schema1:dateModified "2024-06-11T12:55:10Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=13" ; + schema1:version 13 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "SARS-CoV-2 variant prediction using Read It And Keep, fastp, bbmap and iVar" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/519?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for SARS-CoV-2 Illumina Amplicon pipeline - SANBI - v1.2" ; + schema1:sdDatePublished "2024-08-05 10:30:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/519/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 55051 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-28T11:42:46Z" ; + schema1:dateModified "2023-06-30T05:14:33Z" ; + schema1:description "SARS-CoV-2 variant prediction using Read It And Keep, fastp, bbmap and iVar" ; + schema1:keywords "covid-19, ARTIC, SARS-CoV-2, SANBI" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "SARS-CoV-2 Illumina Amplicon pipeline - SANBI - v1.2" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/519?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=13" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-08-05 10:23:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=13" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5840 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:04Z" ; + schema1:dateModified "2024-06-11T12:55:04Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=13" ; + schema1:version 13 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """This workflow represents the Default ML Pipeline for AutoML feature from MLme. Machine Learning Made Easy (MLme) is a novel tool that simplifies machine learning (ML) for researchers. By integrating four essential functionalities, namely data exploration, AutoML, CustomML, and visualization, MLme fulfills the diverse requirements of researchers while eliminating the need for extensive coding efforts. MLme serves as a valuable resource that empowers researchers of all technical levels to leverage ML for insightful data analysis and enhance research outcomes. 
By simplifying and automating various stages of the ML workflow, it enables researchers to allocate more time to their core research tasks, thereby enhancing efficiency and productivity.\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.571.1" ; + schema1:license "CC0-1.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for MLme: Machine Learning Made Easy" ; + schema1:sdDatePublished "2024-08-05 10:27:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/571/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 208026 ; + schema1:creator ; + schema1:dateCreated "2023-09-15T14:36:32Z" ; + schema1:dateModified "2023-09-15T15:36:33Z" ; + schema1:description """This workflow represents the Default ML Pipeline for AutoML feature from MLme. Machine Learning Made Easy (MLme) is a novel tool that simplifies machine learning (ML) for researchers. By integrating four essential functionalities, namely data exploration, AutoML, CustomML, and visualization, MLme fulfills the diverse requirements of researchers while eliminating the need for extensive coding efforts. MLme serves as a valuable resource that empowers researchers of all technical levels to leverage ML for insightful data analysis and enhance research outcomes. By simplifying and automating various stages of the ML workflow, it enables researchers to allocate more time to their core research tasks, thereby enhancing efficiency and productivity.\r +\r +""" ; + schema1:keywords "Bioinformatics, Machine Learning, automated workflows, GUI" ; + schema1:license "https://spdx.org/licenses/CC0-1.0" ; + schema1:name "MLme: Machine Learning Made Easy" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/571?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A pipeline to demultiplex, QC and map Nanopore data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1002?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/nanoseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nanoseq" ; + schema1:sdDatePublished "2024-08-05 10:23:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1002/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8657 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:05Z" ; + schema1:dateModified "2024-06-11T12:55:05Z" ; + schema1:description "A pipeline to demultiplex, QC and map Nanopore data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1002?version=5" ; + schema1:keywords "Alignment, demultiplexing, nanopore, QC" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nanoseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1002?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Rare disease researchers workflow is that they submit their raw data (fastq), run the mapping and variant calling RD-Connect pipeline and obtain unannotated gvcf files to further submit to the RD-Connect GPAP or analyse on their own.\r +\r +This demonstrator focuses on the variant calling pipeline. 
The raw genomic data is processed using the RD-Connect pipeline ([Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/27604516)) running on the standards (GA4GH) compliant, interoperable container orchestration platform.\r +\r +This demonstrator will be aligned with the current implementation study on [Development of Architecture for Software Containers at ELIXIR and its use by EXCELERATE use-case communities](docs/Appendix%201%20-%20Project%20Plan%202018-biocontainers%2020171117.pdf) \r +\r +For this implementation, different steps are required:\r +\r +1. Adapt the pipeline to CWL and dockerise elements \r +2. Align with IS efforts on software containers to package the different components (Nextflow) \r +3. Submit trio of Illumina NA12878 Platinum Genome or Exome to the GA4GH platform cloud (by Aspera or ftp server)\r +4. Run the RD-Connect pipeline on the container platform\r +5. Return corresponding gvcf files\r +6. OPTIONAL: annotate and update to RD-Connect playground instance\r +\r +N.B: The demonstrator might have some manual steps, which will not be in production. \r +\r +## RD-Connect pipeline\r +\r +Detailed information about the RD-Connect pipeline can be found in [Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/?term=27604516)\r +\r +![alt text](https://raw.githubusercontent.com/inab/Wetlab2Variations/eosc-life/docs/RD-Connect_pipeline.jpg)\r +\r +## The applications\r +\r +**1\\. Name of the application: Adaptor removal**\r +Function: remove sequencing adaptors \r +Container (readiness status, location, version): [cutadapt (v.1.18)](https://hub.docker.com/r/cnag/cutadapt) \r +Required resources in cores and RAM: current container size 169MB \r +Input data (amount, format, directory..): raw fastq \r +Output data: paired fastq without adaptors \r +\r +**2\\. Name of the application: Mapping and bam sorting** \r +Function: align data to reference genome \r +Container : [bwa-mem (v.0.7.17)](https://hub.docker.com/r/cnag/bwa) / [Sambamba (v. 
0.6.8 )](https://hub.docker.com/r/cnag/sambamba)(or samtools) \r +Resources :current container size 111MB / 32MB \r +Input data: paired fastq without adaptors \r +Output data: sorted bam \r +\r +**3\\. Name of the application: MarkDuplicates** \r +Function: Mark (and remove) duplicates \r +Container: [Picard (v.2.18.25)](https://hub.docker.com/r/cnag/picard)\r +Resources: current container size 261MB \r +Input data:sorted bam \r +Output data: Sorted bam with marked (or removed) duplicates \r +\r +**4\\. Name of the application: Base quality recalibration (BQSR)** \r +Function: Base quality recalibration \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data: Sorted bam with marked (or removed) duplicates \r +Output data: Sorted bam with marked duplicates & base quality recalculated \r +\r +**5\\. Name of the application: Variant calling** \r +Function: variant calling \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data:Sorted bam with marked duplicates & base quality recalculated \r +Output data: unannotated gvcf per sample \r +\r +**6\\. (OPTIONAL)Name of the application: Quality of the fastq** \r +Function: report on the sequencing quality \r +Container: [fastqc 0.11.8](https://hub.docker.com/r/cnag/fastqc)\r +Resources: current container size 173MB \r +Input data: raw fastq \r +Output data: QC report \r +\r +## Licensing\r +\r +GATK declares that archived packages are made available for free to academic researchers under a limited license for non-commercial use. If you need to use one of these packages for commercial use. 
https://software.broadinstitute.org/gatk/download/archive """ ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.107.1" ; + schema1:isBasedOn "https://github.com/inab/Wetlab2Variations/blob/eosc-life/cwl-workflows/workflows/workflow.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for VariantCaller_GATK3.6" ; + schema1:sdDatePublished "2024-08-05 10:33:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/107/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 30352 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7205 ; + schema1:creator , + ; + schema1:dateCreated "2021-02-18T15:01:31Z" ; + schema1:dateModified "2023-04-21T12:35:06Z" ; + schema1:description """Rare disease researchers workflow is that they submit their raw data (fastq), run the mapping and variant calling RD-Connect pipeline and obtain unannotated gvcf files to further submit to the RD-Connect GPAP or analyse on their own.\r +\r +This demonstrator focuses on the variant calling pipeline. The raw genomic data is processed using the RD-Connect pipeline ([Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/27604516)) running on the standards (GA4GH) compliant, interoperable container orchestration platform.\r +\r +This demonstrator will be aligned with the current implementation study on [Development of Architecture for Software Containers at ELIXIR and its use by EXCELERATE use-case communities](docs/Appendix%201%20-%20Project%20Plan%202018-biocontainers%2020171117.pdf) \r +\r +For this implementation, different steps are required:\r +\r +1. Adapt the pipeline to CWL and dockerise elements \r +2. Align with IS efforts on software containers to package the different components (Nextflow) \r +3. 
Submit trio of Illumina NA12878 Platinum Genome or Exome to the GA4GH platform cloud (by Aspera or ftp server)\r +4. Run the RD-Connect pipeline on the container platform\r +5. Return corresponding gvcf files\r +6. OPTIONAL: annotate and update to RD-Connect playground instance\r +\r +N.B: The demonstrator might have some manual steps, which will not be in production. \r +\r +## RD-Connect pipeline\r +\r +Detailed information about the RD-Connect pipeline can be found in [Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/?term=27604516)\r +\r +![alt text](https://raw.githubusercontent.com/inab/Wetlab2Variations/eosc-life/docs/RD-Connect_pipeline.jpg)\r +\r +## The applications\r +\r +**1\\. Name of the application: Adaptor removal**\r +Function: remove sequencing adaptors \r +Container (readiness status, location, version): [cutadapt (v.1.18)](https://hub.docker.com/r/cnag/cutadapt) \r +Required resources in cores and RAM: current container size 169MB \r +Input data (amount, format, directory..): raw fastq \r +Output data: paired fastq without adaptors \r +\r +**2\\. Name of the application: Mapping and bam sorting** \r +Function: align data to reference genome \r +Container : [bwa-mem (v.0.7.17)](https://hub.docker.com/r/cnag/bwa) / [Sambamba (v. 0.6.8 )](https://hub.docker.com/r/cnag/sambamba)(or samtools) \r +Resources :current container size 111MB / 32MB \r +Input data: paired fastq without adaptors \r +Output data: sorted bam \r +\r +**3\\. Name of the application: MarkDuplicates** \r +Function: Mark (and remove) duplicates \r +Container: [Picard (v.2.18.25)](https://hub.docker.com/r/cnag/picard)\r +Resources: current container size 261MB \r +Input data:sorted bam \r +Output data: Sorted bam with marked (or removed) duplicates \r +\r +**4\\. 
Name of the application: Base quality recalibration (BQSR)** \r +Function: Base quality recalibration \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data: Sorted bam with marked (or removed) duplicates \r +Output data: Sorted bam with marked duplicates & base quality recalculated \r +\r +**5\\. Name of the application: Variant calling** \r +Function: variant calling \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data:Sorted bam with marked duplicates & base quality recalculated \r +Output data: unannotated gvcf per sample \r +\r +**6\\. (OPTIONAL)Name of the application: Quality of the fastq** \r +Function: report on the sequencing quality \r +Container: [fastqc 0.11.8](https://hub.docker.com/r/cnag/fastqc)\r +Resources: current container size 173MB \r +Input data: raw fastq \r +Output data: QC report \r +\r +## Licensing\r +\r +GATK declares that archived packages are made available for free to academic researchers under a limited license for non-commercial use. If you need to use one of these packages for commercial use. https://software.broadinstitute.org/gatk/download/archive """ ; + schema1:image ; + schema1:keywords "CWL, variant_calling" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "VariantCaller_GATK3.6" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/107?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + ; + ns1:output , + . + + a schema1:Dataset ; + schema1:datePublished "2023-01-13T10:20:05.455448" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-sr/main" . 
+ + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.3" . + + a schema1:Dataset ; + schema1:datePublished "2024-06-19T07:13:32.852335" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/quality-and-contamination-control" ; + schema1:license "GPL-3.0-or-later" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "quality-and-contamination-control/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + schema1:datePublished "2023-10-19T15:11:18.072068" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "atacseq/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.14" . + + a schema1:Dataset ; + schema1:datePublished "2021-10-14T14:17:22.150269" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-ivar-analysis" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:sdDatePublished "2021-10-27 15:45:06 +0100" ; + schema1:softwareVersion "v0.2" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.8" . + + a schema1:Dataset ; + schema1:datePublished "2021-09-23T09:39:44.360747" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-ont-artic-variant-calling" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:sdDatePublished "2021-10-27 15:45:07 +0100" ; + schema1:softwareVersion "v0.3" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.7" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "From Copernicus Sentinel 5P data to panoply visualization of volcanic activity impact to atmosphere" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/756?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Sentinel 5P volcanic data visualization" ; + schema1:sdDatePublished "2024-08-05 10:25:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/756/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2951 ; + schema1:creator ; + schema1:dateCreated "2024-02-15T11:46:22Z" ; + schema1:dateModified "2024-02-15T11:47:12Z" ; + schema1:description "From Copernicus Sentinel 5P data to panoply visualization of volcanic activity impact to atmosphere" ; + schema1:isPartOf ; + schema1:keywords "Climate" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Sentinel 5P volcanic data visualization" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/756?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-08-05 10:22:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=8" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10513 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:10Z" ; + schema1:dateModified "2024-06-11T12:55:10Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + schema1:datePublished "2024-02-14T16:45:00.609259" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/assembly-with-flye" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "assembly-with-flye/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:Dataset ; + schema1:datePublished "2021-12-06T14:19:09.194259" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/gromacs-dctmd" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "gromacs-dctmd/main" ; + schema1:sdDatePublished "2021-12-07 03:00:58 +0000" ; + schema1:softwareVersion "v0.1.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Variant Interpretation Pipeline (VIP) that annotates, filters and reports prioritized causal variants in humans, see https://github.com/molgenis/vip for more information." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/125?version=1" ; + schema1:isBasedOn "https://github.com/molgenis/vip" ; + schema1:license "LGPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for MOLGENIS/VIP: Variant Interpretation Pipeline" ; + schema1:sdDatePublished "2024-08-05 10:22:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/125/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15164 ; + schema1:dateCreated "2021-06-21T08:33:47Z" ; + schema1:dateModified "2024-06-12T09:57:49Z" ; + schema1:description "Variant Interpretation Pipeline (VIP) that annotates, filters and reports prioritized causal variants in humans, see https://github.com/molgenis/vip for more information." ; + schema1:keywords "Annotation, Report, VCF, Classification, SV, Pipeline, Bioinformatics, Genomics, Workflows, Java, SNPs, variation, Nextflow" ; + schema1:license "https://spdx.org/licenses/LGPL-3.0" ; + schema1:name "MOLGENIS/VIP: Variant Interpretation Pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/125?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# The Polygenic Score Catalog Calculator (`pgsc_calc`)\r +\r +[![Documentation Status](https://readthedocs.org/projects/pgsc-calc/badge/?version=latest)](https://pgsc-calc.readthedocs.io/en/latest/?badge=latest)\r +[![pgscatalog/pgsc_calc CI](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml/badge.svg)](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.5970794.svg)](https://doi.org/10.5281/zenodo.5970794)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-≥22.10.0-23aa62.svg?labelColor=000000)](https://www.nextflow.io/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +\r +## Introduction\r +\r +`pgsc_calc` is a bioinformatics best-practice analysis pipeline for calculating\r +polygenic [risk] scores on samples with imputed genotypes using existing scoring\r +files from the [Polygenic Score (PGS) Catalog](https://www.pgscatalog.org/)\r +and/or user-defined PGS/PRS.\r +\r +## Pipeline summary\r +\r +

\r + \r +

\r +\r +The workflow performs the following steps:\r +\r +* Downloading scoring files using the PGS Catalog API in a specified genome build (GRCh37 and GRCh38).\r +* Reading custom scoring files (and performing a liftover if genotyping data is in a different build).\r +* Automatically combines and creates scoring files for efficient parallel computation of multiple PGS\r + - Matching variants in the scoring files against variants in the target dataset (in plink bfile/pfile or VCF format)\r +* Calculates PGS for all samples (linear sum of weights and dosages)\r +* Creates a summary report to visualize score distributions and pipeline metadata (variant matching QC)\r +\r +And optionally:\r +\r +- Genetic Ancestry: calculate similarity of target samples to populations in a\r + reference dataset ([1000 Genomes (1000G)](http://www.nature.com/nature/journal/v526/n7571/full/nature15393.html)), using principal components analysis (PCA)\r +- PGS Normalization: Using reference population data and/or PCA projections to report\r + individual-level PGS predictions (e.g. percentiles, z-scores) that account for genetic ancestry\r +\r +See documentation for a list of planned [features under development](https://pgsc-calc.readthedocs.io/en/latest/index.html#Features-under-development).\r +\r +## Quick start\r +\r +1. Install\r +[`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation)\r +(`>=22.10.0`)\r +\r +2. Install [`Docker`](https://docs.docker.com/engine/installation/) or\r +[`Singularity (v3.8.3 minimum)`](https://www.sylabs.io/guides/3.0/user-guide/)\r +(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort)\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile test,\r + ```\r +\r +4. 
Start running your own analysis!\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile --input samplesheet.csv --pgs_id PGS001229\r + ```\r +\r +See [getting\r +started](https://pgsc-calc.readthedocs.io/en/latest/getting-started.html) for more\r +details.\r +\r +## Documentation\r +\r +[Full documentation is available on Read the Docs](https://pgsc-calc.readthedocs.io/)\r +\r +## Credits\r +\r +pgscatalog/pgsc_calc is developed as part of the PGS Catalog project, a\r +collaboration between the University of Cambridge’s Department of Public Health\r +and Primary Care (Michael Inouye, Samuel Lambert) and the European\r +Bioinformatics Institute (Helen Parkinson, Laura Harris).\r +\r +The pipeline seeks to provide a standardized workflow for PGS calculation and\r +ancestry inference implemented in nextflow derived from an existing set of\r +tools/scripts developed by Inouye lab (Rodrigo Canovas, Scott Ritchie, Jingqin\r +Wu) and PGS Catalog teams (Samuel Lambert, Laurent Gil).\r +\r +The adaptation of the codebase, nextflow implementation, and PGS Catalog features\r +are written by Benjamin Wingfield, Samuel Lambert, Laurent Gil with additional input\r +from Aoife McMahon (EBI). Development of new features, testing, and code review\r +is ongoing including Inouye lab members (Rodrigo Canovas, Scott Ritchie) and others. A\r +manuscript describing the tool is *in preparation*. In the meantime if you use the\r +tool we ask you to cite the repo and the paper describing the PGS Catalog\r +resource:\r +\r +- >PGS Catalog Calculator _(in preparation)_. PGS Catalog\r + Team. [https://github.com/PGScatalog/pgsc_calc](https://github.com/PGScatalog/pgsc_calc)\r +- >Lambert _et al._ (2021) The Polygenic Score Catalog as an open database for\r +reproducibility and systematic evaluation. Nature Genetics. 
53:420–425\r +doi:[10.1038/s41588-021-00783-5](https://doi.org/10.1038/s41588-021-00783-5).\r +\r +This pipeline is distributed under an [Apache License](LICENSE) and uses code and \r +infrastructure developed and maintained by the [nf-core](https://nf-co.re) community \r +(Ewels *et al. Nature Biotech* (2020) doi:[10.1038/s41587-020-0439-x](https://doi.org/10.1038/s41587-020-0439-x)), \r +reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +Additional references of open-source tools and data used in this pipeline are described in\r +[`CITATIONS.md`](CITATIONS.md).\r +\r +This work has received funding from EMBL-EBI core funds, the Baker Institute,\r +the University of Cambridge, Health Data Research UK (HDRUK), and the European\r +Union’s Horizon 2020 research and innovation programme under grant agreement No\r +101016775 INTERVENE.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/556?version=4" ; + schema1:isBasedOn "https://github.com/PGScatalog/pgsc_calc" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for The Polygenic Score Catalog Calculator" ; + schema1:sdDatePublished "2024-08-05 10:28:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/556/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1673 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-08-11T14:30:22Z" ; + schema1:dateModified "2023-08-11T14:30:22Z" ; + schema1:description """# The Polygenic Score Catalog Calculator (`pgsc_calc`)\r +\r +[![Documentation Status](https://readthedocs.org/projects/pgsc-calc/badge/?version=latest)](https://pgsc-calc.readthedocs.io/en/latest/?badge=latest)\r +[![pgscatalog/pgsc_calc CI](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml/badge.svg)](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.5970794.svg)](https://doi.org/10.5281/zenodo.5970794)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-≥22.10.0-23aa62.svg?labelColor=000000)](https://www.nextflow.io/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +\r +## Introduction\r +\r +`pgsc_calc` is a bioinformatics best-practice analysis pipeline for calculating\r +polygenic [risk] scores on samples with imputed genotypes using existing scoring\r +files from the [Polygenic Score (PGS) Catalog](https://www.pgscatalog.org/)\r +and/or user-defined PGS/PRS.\r +\r +## Pipeline summary\r +\r +

\r + \r +

\r +\r +The workflow performs the following steps:\r +\r +* Downloading scoring files using the PGS Catalog API in a specified genome build (GRCh37 and GRCh38).\r +* Reading custom scoring files (and performing a liftover if genotyping data is in a different build).\r +* Automatically combines and creates scoring files for efficient parallel computation of multiple PGS\r + - Matching variants in the scoring files against variants in the target dataset (in plink bfile/pfile or VCF format)\r +* Calculates PGS for all samples (linear sum of weights and dosages)\r +* Creates a summary report to visualize score distributions and pipeline metadata (variant matching QC)\r +\r +And optionally:\r +\r +- Genetic Ancestry: calculate similarity of target samples to populations in a\r + reference dataset ([1000 Genomes (1000G)](http://www.nature.com/nature/journal/v526/n7571/full/nature15393.html)), using principal components analysis (PCA)\r +- PGS Normalization: Using reference population data and/or PCA projections to report\r + individual-level PGS predictions (e.g. percentiles, z-scores) that account for genetic ancestry\r +\r +See documentation for a list of planned [features under development](https://pgsc-calc.readthedocs.io/en/latest/index.html#Features-under-development).\r +\r +## Quick start\r +\r +1. Install\r +[`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation)\r +(`>=22.10.0`)\r +\r +2. Install [`Docker`](https://docs.docker.com/engine/installation/) or\r +[`Singularity (v3.8.3 minimum)`](https://www.sylabs.io/guides/3.0/user-guide/)\r +(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort)\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile test,\r + ```\r +\r +4. 
Start running your own analysis!\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile --input samplesheet.csv --pgs_id PGS001229\r + ```\r +\r +See [getting\r +started](https://pgsc-calc.readthedocs.io/en/latest/getting-started.html) for more\r +details.\r +\r +## Documentation\r +\r +[Full documentation is available on Read the Docs](https://pgsc-calc.readthedocs.io/)\r +\r +## Credits\r +\r +pgscatalog/pgsc_calc is developed as part of the PGS Catalog project, a\r +collaboration between the University of Cambridge’s Department of Public Health\r +and Primary Care (Michael Inouye, Samuel Lambert) and the European\r +Bioinformatics Institute (Helen Parkinson, Laura Harris).\r +\r +The pipeline seeks to provide a standardized workflow for PGS calculation and\r +ancestry inference implemented in nextflow derived from an existing set of\r +tools/scripts developed by Inouye lab (Rodrigo Canovas, Scott Ritchie, Jingqin\r +Wu) and PGS Catalog teams (Samuel Lambert, Laurent Gil).\r +\r +The adaptation of the codebase, nextflow implementation, and PGS Catalog features\r +are written by Benjamin Wingfield, Samuel Lambert, Laurent Gil with additional input\r +from Aoife McMahon (EBI). Development of new features, testing, and code review\r +is ongoing including Inouye lab members (Rodrigo Canovas, Scott Ritchie) and others. A\r +manuscript describing the tool is *in preparation*. In the meantime if you use the\r +tool we ask you to cite the repo and the paper describing the PGS Catalog\r +resource:\r +\r +- >PGS Catalog Calculator _(in preparation)_. PGS Catalog\r + Team. [https://github.com/PGScatalog/pgsc_calc](https://github.com/PGScatalog/pgsc_calc)\r +- >Lambert _et al._ (2021) The Polygenic Score Catalog as an open database for\r +reproducibility and systematic evaluation. Nature Genetics. 
53:420–425\r +doi:[10.1038/s41588-021-00783-5](https://doi.org/10.1038/s41588-021-00783-5).\r +\r +This pipeline is distributed under an [Apache License](LICENSE) and uses code and \r +infrastructure developed and maintained by the [nf-core](https://nf-co.re) community \r +(Ewels *et al. Nature Biotech* (2020) doi:[10.1038/s41587-020-0439-x](https://doi.org/10.1038/s41587-020-0439-x)), \r +reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +Additional references of open-source tools and data used in this pipeline are described in\r +[`CITATIONS.md`](CITATIONS.md).\r +\r +This work has received funding from EMBL-EBI core funds, the Baker Institute,\r +the University of Cambridge, Health Data Research UK (HDRUK), and the European\r +Union’s Horizon 2020 research and innovation programme under grant agreement No\r +101016775 INTERVENE.\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/556?version=3" ; + schema1:keywords "Nextflow, Workflows, polygenic risk score, polygenic score, prediction, GWAS, genomic ancestry" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "The Polygenic Score Catalog Calculator" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/556?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# skim2mt\r +\r +**skim2mt** is a snakemake pipeline for the batch assembly, annotation, and phylogenetic analysis of mitochondrial genomes from low coverage genome skims. The pipeline was designed to work with sequence data from museum collections. 
However, it should also work with genome skims from recently collected samples.\r +\r +## Contents\r + - [Setup](#setup)\r + - [Example data](#example-data)\r + - [Input](#input)\r + - [Output](#output)\r + - [Filtering contaminants](#filtering-contaminants)\r + - [Assembly and annotation only](#assembly-and-annotation-only)\r + - [Running your own data](#running-your-own-data)\r + - [Getting help](#getting-help)\r + - [Citations](#citations)\r +\r +## Setup\r +\r +The pipeline is written in Snakemake and uses conda and singularity to install the necessary tools.\r +\r +It is *strongly recommended* to install conda using Mambaforge. See details here https://snakemake.readthedocs.io/en/stable/getting_started/installation.html\r +\r +Once conda is installed, you can pull the github repo and set up the base conda environment.\r +\r +```\r +# get github repo\r +git clone https://github.com/o-william-white/skim2mt\r +\r +# change dir\r +cd skim2mt\r +\r +# setup conda env\r +conda env create -n snakemake -f workflow/envs/conda_env.yaml\r +```\r +\r +
\r +\r +
\r +\r +## Example data\r +\r +Before you run your own data, it is recommended to run the example datasets provided . This will confirm there are no user-specific issues with the setup and it also installs all the dependencies. The example data includes simulated mitochondrial data from 25 different butterfly species. \r +\r +To run the example data, use the code below. **Note that you need to change the user email to your own address**. The email is required by the Bio Entrez package to fetch reference sequences. The first time you run the pipeline, it will take some time to install each of the conda environments, so it is a good time to take a tea break :).\r +```\r +conda activate snakemake\r +\r +snakemake \\\r + --cores 4 \\\r + --use-conda \\\r + --use-singularity \\ \r + --config user_email=user@example_email.com\r +```\r +\r +
\r +\r +
\r +\r +## Input\r +\r +Snakemake requires a `config.yaml` and `samples.csv` to define input parameters and sequence data for each sample. \r +\r +For the example data provided, the config file is located here `config/config.yaml` and it looks like this:\r +```\r +# path to sample sheet csv with columns for ID,forward,reverse,taxid,seed,gene\r +samples: config/samples.csv\r +\r +# user email\r +user_email: user@example_email.com\r +\r +# getorganelle reference (go_fetch, custom)\r +go_reference: go_fetch\r +\r +# forward adapter\r +forward_adapter: AGATCGGAAGAGCACACGTCTGAACTCCAGTCA\r +\r +# reverse adapter\r +reverse_adapter: AGATCGGAAGAGCGTCGTGTAGGGAAAGAGTGT\r +\r +# fastp deduplication (True/False)\r +fastp_dedup: True\r +\r +# mitos refseq database (refseq39, refseq63f, refseq63m, refseq63o, refseq89f, refseq89m, refseq89o)\r +mitos_refseq: refseq39\r +\r +# mito code (2 = Vertebrate, 4 = Mold, 5 = Invertebrate, 9 = Echinoderm, 13 = Ascidian, 14 = Alternative flatworm)\r +mitos_code: 5\r +\r +# alignment trimming method to use (gblocks or clipkit)\r +alignment_trim: gblocks\r +\r +# alignment missing data threshold for alignment (0.0 - 1.0)\r +missing_threshold: 0.5\r +\r +# name of outgroup sample (optional)\r +# use "NA" if there is no obvious outgroup\r +# if more than one outgroup use a comma separated list i.e. 
"sampleA,sampleB"\r +outgroup: Eurema_blanda\r +\r +# plot dimensions (cm)\r +plot_height: 20\r +plot_width: 20\r +```\r +\r +The example samples.csv file is located here `config/samples.csv` and it looks like this (note that the seed and gene columns are only required if the custom getorganelle database option is specified in the config file):\r +\r +\r + ID | forward | reverse | taxid | seed | gene \r +----|---------|---------|-------|------|------\r +Adelpha_iphiclus | .test/reads/Adelpha_iphiclus_1.fq.gz | .test/reads/Adelpha_iphiclus_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Anartia_jatrophae_saturata | .test/reads/Anartia_jatrophae_saturata_1.fq.gz | .test/reads/Anartia_jatrophae_saturata_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Araschnia_levana | .test/reads/Araschnia_levana_1.fq.gz | .test/reads/Araschnia_levana_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Auzakia_danava | .test/reads/Auzakia_danava_1.fq.gz | .test/reads/Auzakia_danava_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Baeotus_beotus | .test/reads/Baeotus_beotus_1.fq.gz | .test/reads/Baeotus_beotus_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Catacroptera_cloanthe | .test/reads/Catacroptera_cloanthe_1.fq.gz | .test/reads/Catacroptera_cloanthe_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Chalinga_pratti | .test/reads/Chalinga_pratti_1.fq.gz | .test/reads/Chalinga_pratti_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Diaethria_gabaza_eupepla | .test/reads/Diaethria_gabaza_eupepla_1.fq.gz | .test/reads/Diaethria_gabaza_eupepla_2.fq.gz | 127268 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Doleschallia_melana | .test/reads/Doleschallia_melana_1.fq.gz | .test/reads/Doleschallia_melana_2.fq.gz | 
40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Eurema_blanda | .test/reads/Eurema_blanda_1.fq.gz | .test/reads/Eurema_blanda_2.fq.gz | 42450 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Hypolimnas_usambara | .test/reads/Hypolimnas_usambara_1.fq.gz | .test/reads/Hypolimnas_usambara_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Junonia_villida | .test/reads/Junonia_villida_1.fq.gz | .test/reads/Junonia_villida_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Kallima_paralekta | .test/reads/Kallima_paralekta_1.fq.gz | .test/reads/Kallima_paralekta_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Kallimoides_rumia | .test/reads/Kallimoides_rumia_1.fq.gz | .test/reads/Kallimoides_rumia_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Litinga_cottini | .test/reads/Litinga_cottini_1.fq.gz | .test/reads/Litinga_cottini_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Mallika_jacksoni | .test/reads/Mallika_jacksoni_1.fq.gz | .test/reads/Mallika_jacksoni_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Moduza_procris | .test/reads/Moduza_procris_1.fq.gz | .test/reads/Moduza_procris_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Parasarpa_zayla | .test/reads/Parasarpa_zayla_1.fq.gz | .test/reads/Parasarpa_zayla_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Phaedyma_columella | .test/reads/Phaedyma_columella_1.fq.gz | .test/reads/Phaedyma_columella_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Precis_pelarga | .test/reads/Precis_pelarga_1.fq.gz | .test/reads/Precis_pelarga_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Protogoniomorpha_temora | 
.test/reads/Protogoniomorpha_temora_1.fq.gz | .test/reads/Protogoniomorpha_temora_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Salamis_cacta | .test/reads/Salamis_cacta_1.fq.gz | .test/reads/Salamis_cacta_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Smyrna_blomfildia | .test/reads/Smyrna_blomfildia_1.fq.gz | .test/reads/Smyrna_blomfildia_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Tacola_larymna | .test/reads/Tacola_larymna_1.fq.gz | .test/reads/Tacola_larymna_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Yoma_algina | .test/reads/Yoma_algina_1.fq.gz | .test/reads/Yoma_algina_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +\r +\r +
\r +\r +
\r +\r +## Output\r +\r +All output files are saved to the `results` directory. Below is a table summarising all of the output files generated by the pipeline.\r +\r +| Directory | Description |\r +|-----------------------|---------------------------|\r +| fastqc_raw | Fastqc reports for raw input reads |\r +| fastp | Fastp reports from quality control of raw reads |\r +| fastqc_qc | Fastqc reports for quality controlled reads |\r +| go_fetch | Optional output containing reference databases used by GetOrganelle |\r +| getorganelle | GetOrganelle output with a directory for each sample |\r +| assembled_sequence | Assembled sequences selected from GetOrganelle output and renamed |\r +| seqkit | Seqkit summary of each assembly |\r +| blastn | Blastn output of each assembly |\r +| minimap | Mapping output of quality filtered reads against each assembly |\r +| blobtools | Blobtools assembly summary collating blastn and mapping output |\r +| assess_assembly | Plots of annotations, mean depth, GC content and proportion mismatches |\r +| annotations | Annotation outputs of mitos |\r +| summary | Summary per sample (seqkit stats), contig (GC content, length, coverage, taxonomy and annotations) and annotated gene counts |\r +| annotated_genes | Unaligned fasta files of annotated genes identified across all samples |\r +| mafft | Mafft aligned fasta files of annotated genes identified across all samples |\r +| mafft_filtered | Mafft aligned fasta files after the removal of sequences based on a missing data threshold |\r +| alignment_trim | Ambiguous parts of alignment removed using either gblocks or clipkit |\r +| iqtree | Iqtree phylogenetic analysis of annotated genes |\r +| plot_tree | Plots of phylogenetic trees |\r +
\r +\r +
\r +\r +## Filtering contaminants\r +\r +If you are working with museum collections, it is possible that you may assemble and annotate sequences from contaminant/non-target species. *Contaminant sequences can be identified based on the blast search output or unusual placement in the phylogenetic trees* (see blobtools and plot_tree outputs). \r +\r +A supplementary python script `format_alignments.py `is provided to remove putative contaminants from alignments, and format the alignments for downstream phylogenetic analysis.\r +\r +For example, let's say we wanted to remove all sequences from the sample "Kallima_paralekta" and atp6 gene sequences, you could run the script as shown below. The script works by identifying and removing sequences that have names with `Kallima_paralekta` or `atp6` in the sequence names. The filtered alignments are written to a new output directory `filter_alignments_output`.\r +\r +```\r +python workflow/scripts/format_alignments.py \\\r + --input results/mafft_filtered/ \\\r + --cont Kallima_paralekta atp6 \\\r + --output filter_alignments_output\r +```\r +\r +*Note that the output fasta files have been reformatted so each alignment file is named after the gene and each sequence is named after the sample.* This is useful if you would like to run our related pipeline **gene2phylo** for further phylogenetic analyses.\r +\r +
\r +\r +
\r +\r +## Assembly and annotation only\r +\r +If you are only interested in the assembly of mitochondrial sequences and annotation of genes without the phylogenetic analysis, you can stop the pipeline from running the gene alignment and phylogenetic analyses using the `--omit-from` parameter.\r +```\r +snakemake \\\r + --cores 4 \\\r + --use-conda \\\r + --use-singularity \\\r + --config user_email=user@example_email.com \\\r + --omit-from mafft\r +```\r +\r +
\r +\r +
\r +\r +## Running your own data\r +\r +The first thing you need to do is generate your own config.yaml and samples.csv files, using the files provided as a template.\r +\r +GetOrganelle requires reference data in the format of seed and gene reference fasta files. By default the pipeline uses a basic python script called go_fetch.py https://github.com/o-william-white/go_fetch to download and format reference data formatted for GetOrganelle. \r +\r +go_fetch.py works by searching NCBI based on the NCBI taxonomy specified by the taxid column in the samples.csv file. Note that the seed and gene columns in the samples.csv file are only required if you want to provide your own custom GetOrganelle seed and gene reference databases. \r +\r +You can use the default reference data for GetOrganelle, but I would recommend using custom reference databases where possible. See here for details of how to set up your own databases https://github.com/Kinggerm/GetOrganelle/wiki/FAQ#how-to-assemble-a-target-organelle-genome-using-my-own-reference\r +\r +## Getting help\r +\r +If you have any questions, please do get in touch in the issues or by email o.william.white@gmail.com\r +\r +
\r +\r +
\r +\r +## Citations\r +\r +If you use the pipeline, please cite our bioarxiv preprint: https://doi.org/10.1101/2023.08.11.552985\r +\r +Since the pipeline is a wrapper for several other bioinformatic tools we also ask that you cite the tools used by the pipeline:\r + - Fastqc https://github.com/s-andrews/FastQC\r + - Fastp https://doi.org/10.1093/bioinformatics/bty560\r + - GetOrganelle https://doi.org/10.1186/s13059-020-02154-5\r + - Blastn https://doi.org/10.1186/1471-2105-10-421\r + - Minimap2 https://doi.org/10.1093/bioinformatics/bty191\r + - Blobtools https://doi.org/10.12688/f1000research.12232.1\r + - Seqkit https://doi.org/10.1371/journal.pone.0163962\r + - MITOS2 https://doi.org/10.1016/j.ympev.2012.08.023\r + - Gblocks (default) https://doi.org/10.1093/oxfordjournals.molbev.a026334\r + - Clipkit (optional) https://doi.org/10.1371/journal.pbio.3001007\r + - Mafft https://doi.org/10.1093/molbev/mst010\r + - Iqtree https://doi.org/10.1093/molbev/msu300\r + - ete3 https://doi.org/10.1093/molbev/msw046\r + - ggtree https://doi.org/10.1111/2041-210X.12628\r +\r +
\r +\r +
\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/791?version=1" ; + schema1:isBasedOn "https://github.com/o-william-white/skim2mt.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for skim2mt" ; + schema1:sdDatePublished "2024-08-05 10:25:12 +0100" ; + schema1:url "https://workflowhub.eu/workflows/791/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2844 ; + schema1:dateCreated "2024-03-12T15:03:20Z" ; + schema1:dateModified "2024-03-12T15:03:20Z" ; + schema1:description """# skim2mt\r +\r +**skim2mt** is a snakemake pipeline for the batch assembly, annotation, and phylogenetic analysis of mitochondrial genomes from low coverage genome skims. The pipeline was designed to work with sequence data from museum collections. However, it should also work with genome skims from recently collected samples.\r +\r +## Contents\r + - [Setup](#setup)\r + - [Example data](#example-data)\r + - [Input](#input)\r + - [Output](#output)\r + - [Filtering contaminants](#filtering-contaminants)\r + - [Assembly and annotation only](#assembly-and-annotation-only)\r + - [Running your own data](#running-your-own-data)\r + - [Getting help](#getting-help)\r + - [Citations](#citations)\r +\r +## Setup\r +\r +The pipeline is written in Snakemake and uses conda and singularity to install the necessary tools.\r +\r +It is *strongly recommended* to install conda using Mambaforge. 
See details here https://snakemake.readthedocs.io/en/stable/getting_started/installation.html\r +\r +Once conda is installed, you can pull the github repo and set up the base conda environment.\r +\r +```\r +# get github repo\r +git clone https://github.com/o-william-white/skim2mt\r +\r +# change dir\r +cd skim2mt\r +\r +# setup conda env\r +conda env create -n snakemake -f workflow/envs/conda_env.yaml\r +```\r +\r +
\r +\r +
\r +\r +## Example data\r +\r +Before you run your own data, it is recommended to run the example datasets provided . This will confirm there are no user-specific issues with the setup and it also installs all the dependencies. The example data includes simulated mitochondrial data from 25 different butterfly species. \r +\r +To run the example data, use the code below. **Note that you need to change the user email to your own address**. The email is required by the Bio Entrez package to fetch reference sequences. The first time you run the pipeline, it will take some time to install each of the conda environments, so it is a good time to take a tea break :).\r +```\r +conda activate snakemake\r +\r +snakemake \\\r + --cores 4 \\\r + --use-conda \\\r + --use-singularity \\ \r + --config user_email=user@example_email.com\r +```\r +\r +
\r +\r +
\r +\r +## Input\r +\r +Snakemake requires a `config.yaml` and `samples.csv` to define input parameters and sequence data for each sample. \r +\r +For the example data provided, the config file is located here `config/config.yaml` and it looks like this:\r +```\r +# path to sample sheet csv with columns for ID,forward,reverse,taxid,seed,gene\r +samples: config/samples.csv\r +\r +# user email\r +user_email: user@example_email.com\r +\r +# getorganelle reference (go_fetch, custom)\r +go_reference: go_fetch\r +\r +# forward adapter\r +forward_adapter: AGATCGGAAGAGCACACGTCTGAACTCCAGTCA\r +\r +# reverse adapter\r +reverse_adapter: AGATCGGAAGAGCGTCGTGTAGGGAAAGAGTGT\r +\r +# fastp deduplication (True/False)\r +fastp_dedup: True\r +\r +# mitos refseq database (refseq39, refseq63f, refseq63m, refseq63o, refseq89f, refseq89m, refseq89o)\r +mitos_refseq: refseq39\r +\r +# mito code (2 = Vertebrate, 4 = Mold, 5 = Invertebrate, 9 = Echinoderm, 13 = Ascidian, 14 = Alternative flatworm)\r +mitos_code: 5\r +\r +# alignment trimming method to use (gblocks or clipkit)\r +alignment_trim: gblocks\r +\r +# alignment missing data threshold for alignment (0.0 - 1.0)\r +missing_threshold: 0.5\r +\r +# name of outgroup sample (optional)\r +# use "NA" if there is no obvious outgroup\r +# if more than one outgroup use a comma separated list i.e. 
"sampleA,sampleB"\r +outgroup: Eurema_blanda\r +\r +# plot dimensions (cm)\r +plot_height: 20\r +plot_width: 20\r +```\r +\r +The example samples.csv file is located here `config/samples.csv` and it looks like this (note that the seed and gene columns are only required if the custom getorganelle database option is specified in the config file):\r +\r +\r + ID | forward | reverse | taxid | seed | gene \r +----|---------|---------|-------|------|------\r +Adelpha_iphiclus | .test/reads/Adelpha_iphiclus_1.fq.gz | .test/reads/Adelpha_iphiclus_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Anartia_jatrophae_saturata | .test/reads/Anartia_jatrophae_saturata_1.fq.gz | .test/reads/Anartia_jatrophae_saturata_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Araschnia_levana | .test/reads/Araschnia_levana_1.fq.gz | .test/reads/Araschnia_levana_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Auzakia_danava | .test/reads/Auzakia_danava_1.fq.gz | .test/reads/Auzakia_danava_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Baeotus_beotus | .test/reads/Baeotus_beotus_1.fq.gz | .test/reads/Baeotus_beotus_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Catacroptera_cloanthe | .test/reads/Catacroptera_cloanthe_1.fq.gz | .test/reads/Catacroptera_cloanthe_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Chalinga_pratti | .test/reads/Chalinga_pratti_1.fq.gz | .test/reads/Chalinga_pratti_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Diaethria_gabaza_eupepla | .test/reads/Diaethria_gabaza_eupepla_1.fq.gz | .test/reads/Diaethria_gabaza_eupepla_2.fq.gz | 127268 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Doleschallia_melana | .test/reads/Doleschallia_melana_1.fq.gz | .test/reads/Doleschallia_melana_2.fq.gz | 
40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Eurema_blanda | .test/reads/Eurema_blanda_1.fq.gz | .test/reads/Eurema_blanda_2.fq.gz | 42450 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Hypolimnas_usambara | .test/reads/Hypolimnas_usambara_1.fq.gz | .test/reads/Hypolimnas_usambara_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Junonia_villida | .test/reads/Junonia_villida_1.fq.gz | .test/reads/Junonia_villida_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Kallima_paralekta | .test/reads/Kallima_paralekta_1.fq.gz | .test/reads/Kallima_paralekta_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Kallimoides_rumia | .test/reads/Kallimoides_rumia_1.fq.gz | .test/reads/Kallimoides_rumia_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Litinga_cottini | .test/reads/Litinga_cottini_1.fq.gz | .test/reads/Litinga_cottini_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Mallika_jacksoni | .test/reads/Mallika_jacksoni_1.fq.gz | .test/reads/Mallika_jacksoni_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Moduza_procris | .test/reads/Moduza_procris_1.fq.gz | .test/reads/Moduza_procris_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Parasarpa_zayla | .test/reads/Parasarpa_zayla_1.fq.gz | .test/reads/Parasarpa_zayla_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Phaedyma_columella | .test/reads/Phaedyma_columella_1.fq.gz | .test/reads/Phaedyma_columella_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Precis_pelarga | .test/reads/Precis_pelarga_1.fq.gz | .test/reads/Precis_pelarga_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Protogoniomorpha_temora | 
.test/reads/Protogoniomorpha_temora_1.fq.gz | .test/reads/Protogoniomorpha_temora_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Salamis_cacta | .test/reads/Salamis_cacta_1.fq.gz | .test/reads/Salamis_cacta_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Smyrna_blomfildia | .test/reads/Smyrna_blomfildia_1.fq.gz | .test/reads/Smyrna_blomfildia_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Tacola_larymna | .test/reads/Tacola_larymna_1.fq.gz | .test/reads/Tacola_larymna_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Yoma_algina | .test/reads/Yoma_algina_1.fq.gz | .test/reads/Yoma_algina_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +\r +\r +
\r +\r +
\r +\r +## Output\r +\r +All output files are saved to the `results` directory. Below is a table summarising all of the output files generated by the pipeline.\r +\r +| Directory | Description |\r +|-----------------------|---------------------------|\r +| fastqc_raw | Fastqc reports for raw input reads |\r +| fastp | Fastp reports from quality control of raw reads |\r +| fastqc_qc | Fastqc reports for quality controlled reads |\r +| go_fetch | Optional output containing reference databases used by GetOrganelle |\r +| getorganelle | GetOrganelle output with a directory for each sample |\r +| assembled_sequence | Assembled sequences selected from GetOrganelle output and renamed |\r +| seqkit | Seqkit summary of each assembly |\r +| blastn | Blastn output of each assembly |\r +| minimap | Mapping output of quality filtered reads against each assembly |\r +| blobtools | Blobtools assembly summary collating blastn and mapping output |\r +| assess_assembly | Plots of annotations, mean depth, GC content and proportion mismatches |\r +| annotations | Annotation outputs of mitos |\r +| summary | Summary per sample (seqkit stats), contig (GC content, length, coverage, taxonomy and annotations) and annotated gene counts |\r +| annotated_genes | Unaligned fasta files of annotated genes identified across all samples |\r +| mafft | Mafft aligned fasta files of annotated genes identified across all samples |\r +| mafft_filtered | Mafft aligned fasta files after the removal of sequences based on a missing data threshold |\r +| alignment_trim | Ambiguous parts of alignment removed using either gblocks or clipkit |\r +| iqtree | Iqtree phylogenetic analysis of annotated genes |\r +| plot_tree | Plots of phylogenetic trees |\r +\r +
\r +\r +
\r +\r +## Filtering contaminants\r +\r +If you are working with museum collections, it is possible that you may assemble and annotate sequences from contaminant/non-target species. *Contaminant sequences can be identified based on the blast search output or unusual placement in the phylogenetic trees* (see blobtools and plot_tree outputs). \r +\r +A supplementary python script `format_alignments.py `is provided to remove putative contaminants from alignments, and format the alignments for downstream phylogenetic analysis.\r +\r +For example, let's say we wanted to remove all sequences from the sample "Kallima_paralekta" and atp6 gene sequences, you could run the script as shown below. The script works by identifying and removing sequences that have names with `Kallima_paralekta` or `atp6` in the sequence names. The filtered alignments are written to a new output directory `filter_alignments_output`.\r +\r +```\r +python workflow/scripts/format_alignments.py \\\r + --input results/mafft_filtered/ \\\r + --cont Kallima_paralekta atp6 \\\r + --output filter_alignments_output\r +```\r +\r +*Note that the output fasta files have been reformatted so each alignment file is named after the gene and each sequence is named after the sample.* This is useful if you would like to run our related pipeline **gene2phylo** for further phylogenetic analyses.\r +\r +
\r +\r +
\r +\r +## Assembly and annotation only\r +\r +If you are only interested in the assembly of mitochondrial sequences and annotation of genes without the phylogenetic analysis, you can stop the pipeline from running the gene alignment and phylogenetic analyses using the `--omit-from` parameter.\r +```\r +snakemake \\\r + --cores 4 \\\r + --use-conda \\\r + --use-singularity \\\r + --config user_email=user@example_email.com \\\r + --omit-from mafft\r +```\r +\r +
\r +\r +
\r +\r +## Running your own data\r +\r +The first thing you need to do is generate your own config.yaml and samples.csv files, using the files provided as a template.\r +\r +GetOrganelle requires reference data in the format of seed and gene reference fasta files. By default the pipeline uses a basic python script called go_fetch.py https://github.com/o-william-white/go_fetch to download and format reference data formatted for GetOrganelle. \r +\r +go_fetch.py works by searching NCBI based on the NCBI taxonomy specified by the taxid column in the samples.csv file. Note that the seed and gene columns in the samples.csv file are only required if you want to provide your own custom GetOrganelle seed and gene reference databases. \r +\r +You can use the default reference data for GetOrganelle, but I would recommend using custom reference databases where possible. See here for details of how to set up your own databases https://github.com/Kinggerm/GetOrganelle/wiki/FAQ#how-to-assemble-a-target-organelle-genome-using-my-own-reference\r +\r +## Getting help\r +\r +If you have any questions, please do get in touch in the issues or by email o.william.white@gmail.com\r +\r +
\r +\r +
\r +\r +## Citations\r +\r +If you use the pipeline, please cite our bioRxiv preprint: https://doi.org/10.1101/2023.08.11.552985\r +\r +Since the pipeline is a wrapper for several other bioinformatic tools we also ask that you cite the tools used by the pipeline:\r + - Fastqc https://github.com/s-andrews/FastQC\r + - Fastp https://doi.org/10.1093/bioinformatics/bty560\r + - GetOrganelle https://doi.org/10.1186/s13059-020-02154-5\r + - Blastn https://doi.org/10.1186/1471-2105-10-421\r + - Minimap2 https://doi.org/10.1093/bioinformatics/bty191\r + - Blobtools https://doi.org/10.12688/f1000research.12232.1\r + - Seqkit https://doi.org/10.1371/journal.pone.0163962\r + - MITOS2 https://doi.org/10.1016/j.ympev.2012.08.023\r + - Gblocks (default) https://doi.org/10.1093/oxfordjournals.molbev.a026334\r + - Clipkit (optional) https://doi.org/10.1371/journal.pbio.3001007\r + - Mafft https://doi.org/10.1093/molbev/mst010\r + - Iqtree https://doi.org/10.1093/molbev/msu300\r + - ete3 https://doi.org/10.1093/molbev/msw046\r + - ggtree https://doi.org/10.1111/2041-210X.12628\r +\r +
\r +\r +
\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "skim2mt" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/791?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """IDR is based on OMERO and thus all what we show in this notebook can be easily adjusted for use against another OMERO server, e.g. your institutional OMERO server instance.\r +\r +The main objective of this notebook is to demonstrate how public resources such as the IDR can be used to train your neural network or validate software tools.\r +\r +The authors of the PLOS Biology paper, "Nessys: A new set of tools for the automated detection of nuclei within intact tissues and dense 3D cultures" published in August 2019: https://doi.org/10.1371/journal.pbio.3000388, considered several image segmenation packages, but they did not use the approach described in this notebook.\r +\r +We will analyse the data using Cellpose and compare the output with the original segmentation produced by the authors. StarDist was not considered by the authors. Our workflow shows how public repository can be accessed and data inside it used to validate software tools or new algorithms.\r +\r +We will use an image (id=6001247) referenced in the paper. The image can be viewed online in the Image Data Resource (IDR).\r +\r +We will use a predefined model from Cellpose as a starting point. 
Steps to access data from IDR could be re-used if you wish to create a new model (outside the scope of this notebook).\r +\r +## Launch\r +This notebook uses the [environment_cellpose.yml](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/environment_cellpose.yml) file.\r +\r +See [Setup](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/setup.md).""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.489.1" ; + schema1:isBasedOn "https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/Cellpose.ipynb" ; + schema1:license "BSD-2-Clause" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Validate a tool against IDR data: Load Image with labels from IDR, re-analyze using Cellpose" ; + schema1:sdDatePublished "2024-08-05 10:30:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/489/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 525382 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-01T09:47:01Z" ; + schema1:dateModified "2023-06-01T10:57:27Z" ; + schema1:description """IDR is based on OMERO and thus all what we show in this notebook can be easily adjusted for use against another OMERO server, e.g. 
your institutional OMERO server instance.\r +\r +The main objective of this notebook is to demonstrate how public resources such as the IDR can be used to train your neural network or validate software tools.\r +\r +The authors of the PLOS Biology paper, "Nessys: A new set of tools for the automated detection of nuclei within intact tissues and dense 3D cultures" published in August 2019: https://doi.org/10.1371/journal.pbio.3000388, considered several image segmenation packages, but they did not use the approach described in this notebook.\r +\r +We will analyse the data using Cellpose and compare the output with the original segmentation produced by the authors. StarDist was not considered by the authors. Our workflow shows how public repository can be accessed and data inside it used to validate software tools or new algorithms.\r +\r +We will use an image (id=6001247) referenced in the paper. The image can be viewed online in the Image Data Resource (IDR).\r +\r +We will use a predefined model from Cellpose as a starting point. Steps to access data from IDR could be re-used if you wish to create a new model (outside the scope of this notebook).\r +\r +## Launch\r +This notebook uses the [environment_cellpose.yml](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/environment_cellpose.yml) file.\r +\r +See [Setup](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/setup.md).""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "imaging, Machine Learning, Python" ; + schema1:license "https://spdx.org/licenses/BSD-2-Clause" ; + schema1:name "Validate a tool against IDR data: Load Image with labels from IDR, re-analyze using Cellpose" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/Cellpose.ipynb" ; + schema1:version 1 . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 513401 ; + schema1:url "https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/includes/CellposeIDR.png" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.280.3" ; + schema1:isBasedOn 
"https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_ligand_parameterization/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python GMX Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-08-05 10:31:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/280/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1728 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-22T09:52:07Z" ; + schema1:dateModified "2023-01-16T13:58:31Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/280?version=3" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python GMX Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_ligand_parameterization/python/workflow.py" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# MoP2- DSL2 version of Master of Pores\r +[![Docker Build Status](https://img.shields.io/docker/automated/biocorecrg/nanopore.svg)](https://cloud.docker.com/u/biocorecrg/repository/docker/biocorecrg/nanopore/builds)\r +[![mop2-CI](https://github.com/biocorecrg/MoP2/actions/workflows/build.yml/badge.svg)](https://github.com/biocorecrg/MoP2/actions/workflows/build.yml)\r +[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)\r +[![Nextflow version](https://img.shields.io/badge/Nextflow-21.04.1-brightgreen)](https://www.nextflow.io/)\r +[![Nextflow DSL2](https://img.shields.io/badge/Nextflow-DSL2-brightgreen)](https://www.nextflow.io/)\r +[![Singularity version](https://img.shields.io/badge/Singularity-v3.2.1-green.svg)](https://www.sylabs.io/)\r +[![Docker version](https://img.shields.io/badge/Docker-v20.10.8-blue)](https://www.docker.com/)\r +\r +
\r +\r +![MOP2](https://github.com/biocorecrg/MoP2/blob/main/img/master_red.jpg?raw=true)\r +\r +\r +Inspired by Metallica's [Master Of Puppets](https://www.youtube.com/watch?v=S7blkui3nQc)\r +\r +## Install\r +Please install nextflow and singularity or docker before.\r +\r +Then download the repo:\r +\r +```\r +git clone --depth 1 --recurse-submodules git@github.com:biocorecrg/MOP2.git\r +```\r +\r +You can use INSTALL.sh to download the version 3.4.5 of guppy or you can replace it with the version you prefer. Please consider that the support of VBZ compression of fast5 started with version 3.4.X. \r +\r +```\r +cd MoP2; sh INSTALL.sh\r +```\r +\r +## Testing\r +You can replace ```-with-singularity``` with ```-with-docker``` if you want to use the docker engine.\r +\r +```\r +cd mop_preprocess\r +nextflow run mop_preprocess.nf -with-singularity -bg -profile local > log\r +\r +```\r +\r +## Reference\r +If you use this tool, please cite our papers:\r +\r +["Nanopore Direct RNA Sequencing Data Processing and Analysis Using MasterOfPores"\r +Cozzuto L, Delgado-Tejedor A, Hermoso Pulido T, Novoa EM, Ponomarenko J. *N. Methods Mol Biol. 2023*;2624:185-205. doi: 10.1007/978-1-0716-2962-8_13.](https://link.springer.com/protocol/10.1007/978-1-0716-2962-8_13)\r +\r +["MasterOfPores: A Workflow for the Analysis of Oxford Nanopore Direct RNA Sequencing Datasets"\r +Luca Cozzuto, Huanle Liu, Leszek P. Pryszcz, Toni Hermoso Pulido, Anna Delgado-Tejedor, Julia Ponomarenko, Eva Maria Novoa.\r +*Front. 
Genet., 17 March 2020.* https://doi.org/10.3389/fgene.2020.00211](https://www.frontiersin.org/articles/10.3389/fgene.2020.00211/full)\r +\r +\r +## Documentation\r +The documentation is available at [https://biocorecrg.github.io/MOP2/docs/](https://biocorecrg.github.io/MOP2/docs/about.html)\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/438?version=1" ; + schema1:isBasedOn "https://github.com/biocorecrg/MOP2.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Master of Pores 2" ; + schema1:sdDatePublished "2024-08-05 10:31:17 +0100" ; + schema1:url "https://workflowhub.eu/workflows/438/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 24119 ; + schema1:dateCreated "2023-02-20T15:48:34Z" ; + schema1:dateModified "2023-02-20T15:48:34Z" ; + schema1:description """# MoP2- DSL2 version of Master of Pores\r +[![Docker Build Status](https://img.shields.io/docker/automated/biocorecrg/nanopore.svg)](https://cloud.docker.com/u/biocorecrg/repository/docker/biocorecrg/nanopore/builds)\r +[![mop2-CI](https://github.com/biocorecrg/MoP2/actions/workflows/build.yml/badge.svg)](https://github.com/biocorecrg/MoP2/actions/workflows/build.yml)\r +[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)\r +[![Nextflow version](https://img.shields.io/badge/Nextflow-21.04.1-brightgreen)](https://www.nextflow.io/)\r +[![Nextflow DSL2](https://img.shields.io/badge/Nextflow-DSL2-brightgreen)](https://www.nextflow.io/)\r +[![Singularity version](https://img.shields.io/badge/Singularity-v3.2.1-green.svg)](https://www.sylabs.io/)\r +[![Docker version](https://img.shields.io/badge/Docker-v20.10.8-blue)](https://www.docker.com/)\r +\r +
\r +\r +![MOP2](https://github.com/biocorecrg/MoP2/blob/main/img/master_red.jpg?raw=true)\r +\r +\r +Inspired by Metallica's [Master Of Puppets](https://www.youtube.com/watch?v=S7blkui3nQc)\r +\r +## Install\r +Please install nextflow and singularity or docker before.\r +\r +Then download the repo:\r +\r +```\r +git clone --depth 1 --recurse-submodules git@github.com:biocorecrg/MOP2.git\r +```\r +\r +You can use INSTALL.sh to download the version 3.4.5 of guppy or you can replace it with the version you prefer. Please consider that the support of VBZ compression of fast5 started with version 3.4.X. \r +\r +```\r +cd MoP2; sh INSTALL.sh\r +```\r +\r +## Testing\r +You can replace ```-with-singularity``` with ```-with-docker``` if you want to use the docker engine.\r +\r +```\r +cd mop_preprocess\r +nextflow run mop_preprocess.nf -with-singularity -bg -profile local > log\r +\r +```\r +\r +## Reference\r +If you use this tool, please cite our papers:\r +\r +["Nanopore Direct RNA Sequencing Data Processing and Analysis Using MasterOfPores"\r +Cozzuto L, Delgado-Tejedor A, Hermoso Pulido T, Novoa EM, Ponomarenko J. *N. Methods Mol Biol. 2023*;2624:185-205. doi: 10.1007/978-1-0716-2962-8_13.](https://link.springer.com/protocol/10.1007/978-1-0716-2962-8_13)\r +\r +["MasterOfPores: A Workflow for the Analysis of Oxford Nanopore Direct RNA Sequencing Datasets"\r +Luca Cozzuto, Huanle Liu, Leszek P. Pryszcz, Toni Hermoso Pulido, Anna Delgado-Tejedor, Julia Ponomarenko, Eva Maria Novoa.\r +*Front. 
Genet., 17 March 2020.* https://doi.org/10.3389/fgene.2020.00211](https://www.frontiersin.org/articles/10.3389/fgene.2020.00211/full)\r +\r +\r +## Documentation\r +The documentation is available at [https://biocorecrg.github.io/MOP2/docs/](https://biocorecrg.github.io/MOP2/docs/about.html)\r +""" ; + schema1:keywords "nanopore, ONT, dRNAseq, Transcriptomics, metatranscriptomics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Master of Pores 2" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/438?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=16" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-08-05 10:23:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=16" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17391 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:59Z" ; + schema1:dateModified "2024-06-11T12:54:59Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=16" ; + schema1:version 16 . 
+ + a schema1:Dataset ; + schema1:datePublished "2022-11-29T12:22:16.778376" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "cutandrun/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.2" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# Inclusion Body Myositis Active Subnetwork Identification Workflow\r +\r +Workflow for Creating a large disease network from various datasets and databases for IBM, and applying the active subnetwork identification method MOGAMUN.\r +\r +## Links\r +[WorkflowHub](https://workflowhub.eu/workflows/681)\r +\r +[Docker Image](https://hub.docker.com/R/jdwijnbergen/multi-omics_asi)\r +\r +[Docker Image build requirements](https://doi.org/10.5281/zenodo.10210364)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/681?version=2" ; + schema1:isBasedOn "https://github.com/jdwijnbergen/IBM_ASI_workflow.git" ; + schema1:license "notspecified" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Inclusion Body Myositis Active Subnetwork Identification Workflow" ; + schema1:sdDatePublished "2024-08-05 10:25:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/681/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 27177 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6953 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-27T15:50:23Z" ; + schema1:dateModified "2023-11-27T15:50:23Z" ; + schema1:description """# Inclusion Body Myositis Active Subnetwork Identification Workflow\r +\r +Workflow for Creating a large disease network from various datasets and databases for IBM, and applying the active subnetwork identification method MOGAMUN.\r +\r +## Links\r +[WorkflowHub](https://workflowhub.eu/workflows/681)\r +\r +[Docker Image](https://hub.docker.com/R/jdwijnbergen/multi-omics_asi)\r +\r +[Docker Image build requirements](https://doi.org/10.5281/zenodo.10210364)\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/681?version=6" ; + schema1:keywords "Bioinformatics, CWL, Genomics, Transcriptomics, Protein-Protein Interaction" ; + schema1:license "https://choosealicense.com/no-permission/" ; + schema1:name "Inclusion Body Myositis Active Subnetwork Identification Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/681?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "The workflow takes a trimmed Illumina paired-end reads collection, runs Meryl to create a K-mer database, Genomescope2 to estimate genome properties and Smudgeplot to estimate ploidy. The main results are K-mer ddatabase and genome profiling plots, tables, and values useful for downstream analysis. Default K-mer length and ploidy for Genomescope are 21 and 2, respectively. 
" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/698?version=1" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/galaxy" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ERGA Profiling Illumina v2311 (WF1)" ; + schema1:sdDatePublished "2024-08-05 10:26:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/698/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 39713 ; + schema1:creator , + ; + schema1:dateCreated "2024-01-08T15:55:18Z" ; + schema1:dateModified "2024-01-08T15:57:54Z" ; + schema1:description "The workflow takes a trimmed Illumina paired-end reads collection, runs Meryl to create a K-mer database, Genomescope2 to estimate genome properties and Smudgeplot to estimate ploidy. The main results are K-mer ddatabase and genome profiling plots, tables, and values useful for downstream analysis. Default K-mer length and ploidy for Genomescope are 21 and 2, respectively. " ; + schema1:image ; + schema1:isPartOf , + ; + schema1:keywords "name:PROFILING, ERGA, illumina" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ERGA Profiling Illumina v2311 (WF1)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/1.Genome_Profiling/Galaxy-Workflow-ERGA_Profiling_Illumina_v2311_(WF1).ga" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 118483 ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/1.Genome_Profiling/pics/Prof_illu_2311.png" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-08-05 10:23:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4227 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:03Z" ; + schema1:dateModified "2024-06-11T12:55:03Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=6" ; + schema1:version 6 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.260.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_md_setup/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Amber Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:32:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/260/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 146391 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 30384 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-08T07:26:33Z" ; + schema1:dateModified "2023-06-08T07:32:27Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/260?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Amber Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_md_setup/cwl/workflow.cwl" ; + schema1:version 2 ; + 
ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Taxonomic classification and profiling of shotgun metagenomic data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1025?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/taxprofiler" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/taxprofiler" ; + schema1:sdDatePublished "2024-08-05 10:23:08 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1025/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12635 ; + schema1:creator , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:19Z" ; + schema1:dateModified "2024-06-11T12:55:19Z" ; + schema1:description "Taxonomic classification and profiling of shotgun metagenomic data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1025?version=10" ; + schema1:keywords "Classification, illumina, long-reads, Metagenomics, microbiome, nanopore, pathogen, Profiling, shotgun, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/taxprofiler" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1025?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.817.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_amber_abc_md_setup/docker" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Docker ABC MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:24:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/817/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 773 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-22T08:35:16Z" ; + schema1:dateModified "2024-05-22T13:49:45Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Docker ABC MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_amber_abc_md_setup/docker/Dockerfile" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/278?version=3" ; + schema1:isBasedOn "https://gitlab.bsc.es/lrodrig1/structuralvariants_poc/-/tree/1.1.3/structuralvariants/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNV_pipeline" ; + schema1:sdDatePublished "2024-08-05 10:31:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/278/ro_crate?version=3" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 8992 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8794 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-23T14:19:55Z" ; + schema1:dateModified "2022-04-11T09:29:33Z" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/278?version=10" ; + schema1:keywords "cancer, CODEX2, ExomeDepth, manta, TransBioNet, variant calling, GRIDSS, structural variants" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CNV_pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/278?version=3" ; + schema1:version 3 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 65152 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Mutations Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.289.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_md_setup_mutations/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Protein MD Setup tutorial with mutations" ; + schema1:sdDatePublished "2024-08-05 10:32:29 +0100" ; + 
schema1:url "https://workflowhub.eu/workflows/289/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 82519 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 22497 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-07T12:22:06Z" ; + schema1:dateModified "2023-06-07T12:30:54Z" ; + schema1:description """# Mutations Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/289?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Protein MD Setup tutorial with mutations" ; + schema1:producer ; 
+ schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_md_setup_mutations/cwl/workflow.cwl" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=12" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-08-05 10:23:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=12" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14588 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:59Z" ; + schema1:dateModified "2024-06-11T12:54:59Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=12" ; + schema1:version 12 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/976?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/cutandrun" ; + schema1:sdDatePublished "2024-08-05 10:24:09 +0100" ; + schema1:url "https://workflowhub.eu/workflows/976/ro_crate?version=8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15374 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:47Z" ; + schema1:dateModified "2024-06-11T12:54:47Z" ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/976?version=7" ; + schema1:keywords "cutandrun, cutandrun-seq, cutandtag, cutandtag-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/cutandrun" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/976?version=8" ; + schema1:version 8 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Purge-duplicates-from-hifiasm-assembly\r +\r +## General recommendations for using `Purge-duplicates-from-hifiasm-assembly`\r +\r +Please see the [`Genome assembly with hifiasm on Galaxy Australia`](https://australianbiocommons.github.io/how-to-guides/genome_assembly/hifi_assembly) guide.\r +\r +## Acknowledgements\r +\r +The workflow & the [doc_guidelines template used](https://github.com/AustralianBioCommons/doc_guidelines) are \r +supported by the Australian BioCommons via Bioplatforms Australia funding, the Australian Research Data Commons \r +(https://doi.org/10.47486/PL105) and the Queensland Government RICF programme. Bioplatforms Australia and the \r +Australian Research Data Commons are enabled by the National Collaborative Research Infrastructure Strategy (NCRIS).\r +\r +\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.237.2" ; + schema1:isBasedOn "https://github.com/AustralianBioCommons/Purge-duplicates-from-hifiasm-assembly" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Purge duplicates from hifiasm assembly v1.0" ; + schema1:sdDatePublished "2024-08-05 10:31:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/237/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 23112 ; + schema1:creator , + ; + schema1:dateCreated "2022-10-17T02:53:20Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """# Purge-duplicates-from-hifiasm-assembly\r +\r +## General recommendations for using `Purge-duplicates-from-hifiasm-assembly`\r +\r +Please see the [`Genome assembly with hifiasm on Galaxy Australia`](https://australianbiocommons.github.io/how-to-guides/genome_assembly/hifi_assembly) guide.\r +\r +## Acknowledgements\r +\r +The workflow & the [doc_guidelines template used](https://github.com/AustralianBioCommons/doc_guidelines) are \r +supported by the Australian BioCommons via Bioplatforms Australia funding, the Australian Research Data Commons \r +(https://doi.org/10.47486/PL105) and the Queensland Government RICF programme. Bioplatforms Australia and the \r +Australian Research Data Commons are enabled by the National Collaborative Research Infrastructure Strategy (NCRIS).\r +\r +\r +\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/237?version=1" ; + schema1:isPartOf , + ; + schema1:keywords "Assembly, purge_dups, HiFi" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Purge duplicates from hifiasm assembly v1.0" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/237?version=2" ; + schema1:version 2 ; + ns1:input , + ; + ns1:output , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-08-05 10:22:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5320 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:01Z" ; + schema1:dateModified "2024-06-11T12:55:01Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + schema1:datePublished "2021-12-20T17:04:47.636637" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-wgs-variant-calling" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-pe-illumina-wgs-variant-calling/COVID-19-PE-WGS-ILLUMINA" ; + schema1:sdDatePublished "2021-12-21 03:01:00 +0000" ; + schema1:softwareVersion "v0.2.2" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Genes and transcripts annotation with Isoseq using uLTRA and TAMA" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/993?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/isoseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/isoseq" ; + schema1:sdDatePublished "2024-08-05 10:23:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/993/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8438 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:58Z" ; + schema1:dateModified "2024-06-11T12:54:58Z" ; + schema1:description "Genes and transcripts annotation with Isoseq using uLTRA and TAMA" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/993?version=6" ; + schema1:keywords "isoseq, isoseq-3, rna, tama, ultra" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/isoseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/993?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=17" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-08-05 10:22:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=17" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12056 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:40Z" ; + schema1:dateModified "2024-06-11T12:54:40Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=17" ; + schema1:version 17 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:description """**Assembly and quantification metatranscriptome using metagenome data**.\r +\r +Version: see VERSION\r +\r +## Introduction\r +\r +**MetaGT** is a bioinformatics analysis pipeline used for improving and quantification \r +metatranscriptome assembly using metagenome data. The pipeline supports Illumina sequencing \r +data and complete metagenome and metatranscriptome assemblies. The pipeline involves the \r +alignment of metatranscriprome assembly to the metagenome assembly with further extracting CDSs,\r +which are covered by transcripts.\r +\r +The pipeline is built using Nextflow, a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with docker containers making installation trivial and results highly reproducible. 
The Nextflow DSL2 implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies.\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow-%E2%89%A520.04.0-brightgreen.svg)](https://www.nextflow.io/)\r +\r +[![install with bioconda](https://img.shields.io/badge/install%20with-bioconda-brightgreen.svg)](https://bioconda.github.io/)\r +\r +## Quick Start\r +\r +1. Install [`nextflow`](https://nf-co.re/usage/installation)\r +\r +2. Install any of [`Conda`](https://conda.io/miniconda.html) for full pipeline reproducibility \r +\r +3. Download the pipeline, e.g. by cloning metaGT GitHub repository:\r +\r + ```bash\r + git clone git@github.com:ablab/metaGT.git\r + ```\r + \r +4. Test it on a minimal dataset by running:\r +\r + ```bash\r + nextflow run metaGT -profile test,conda\r + ```\r + \r +5. Start running your own analysis!\r + > Typical command for analysis using reads:\r +\r + ```bash\r + nextflow run metaGT -profile --dna_reads '*_R{1,2}.fastq.gz' --rna_reads '*_R{1,2}.fastq.gz'\r + ```\r + > Typical command for analysis using multiple files with reads:\r +\r + ```bash\r + nextflow run metaGT -profile --dna_reads '*.yaml' --rna_reads '*.yaml' --yaml\r + ```\r + > Typical command for analysis using assemblies:\r +\r + ```bash\r + nextflow run metaGT -profile --genome '*.fasta' --transcriptome '*.fasta'\r + ```\r +## Pipeline Summary\r +Optionally, if raw reades are used:\r +\r +\r +\r +* Sequencing quality control (`FastQC`)\r +* Assembly metagenome or metatranscriptome (`metaSPAdes, rnaSPAdes `)\r +\r +By default, the pipeline currently performs the following:\r +\r +* Annotation metagenome (`Prokka`)\r +* Aligning metatranscriptome on metagenome (`minimap2`)\r +* Annotation unaligned transcripts (`TransDecoder`)\r +* Clustering covered CDS and CDS from unaligned transcripts (`MMseqs2`)\r +* Quantifying abundances of transcripts (`kallisto`)\r +\r +## Citation\r +\r +MetaGT was developed by 
Daria Shafranskaya and Andrey Prjibelski.\r +If you use it in your research please cite:\r +\r +[MetaGT: A pipeline for de novo assembly of metatranscriptomes with the aid of metagenomic data](https://doi.org/10.3389/fmicb.2022.981458)\r +\r +## Feedback and bug report\r +\r +If you have any questions, please leave an issue at out [GitHub page](https://github.com/ablab/metaGT/issues).\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/454?version=1" ; + schema1:isBasedOn "https://github.com/ablab/metaGT" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for MetaGT: A pipeline for de novo assembly of metatranscriptomes with the aid of metagenomic data" ; + schema1:sdDatePublished "2024-08-05 10:31:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/454/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12022 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-04-12T09:18:18Z" ; + schema1:dateModified "2023-04-13T06:50:58Z" ; + schema1:description """**Assembly and quantification metatranscriptome using metagenome data**.\r +\r +Version: see VERSION\r +\r +## Introduction\r +\r +**MetaGT** is a bioinformatics analysis pipeline used for improving and quantification \r +metatranscriptome assembly using metagenome data. The pipeline supports Illumina sequencing \r +data and complete metagenome and metatranscriptome assemblies. The pipeline involves the \r +alignment of metatranscriprome assembly to the metagenome assembly with further extracting CDSs,\r +which are covered by transcripts.\r +\r +The pipeline is built using Nextflow, a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with docker containers making installation trivial and results highly reproducible. 
The Nextflow DSL2 implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies.\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow-%E2%89%A520.04.0-brightgreen.svg)](https://www.nextflow.io/)\r +\r +[![install with bioconda](https://img.shields.io/badge/install%20with-bioconda-brightgreen.svg)](https://bioconda.github.io/)\r +\r +## Quick Start\r +\r +1. Install [`nextflow`](https://nf-co.re/usage/installation)\r +\r +2. Install any of [`Conda`](https://conda.io/miniconda.html) for full pipeline reproducibility \r +\r +3. Download the pipeline, e.g. by cloning metaGT GitHub repository:\r +\r + ```bash\r + git clone git@github.com:ablab/metaGT.git\r + ```\r + \r +4. Test it on a minimal dataset by running:\r +\r + ```bash\r + nextflow run metaGT -profile test,conda\r + ```\r + \r +5. Start running your own analysis!\r + > Typical command for analysis using reads:\r +\r + ```bash\r + nextflow run metaGT -profile --dna_reads '*_R{1,2}.fastq.gz' --rna_reads '*_R{1,2}.fastq.gz'\r + ```\r + > Typical command for analysis using multiple files with reads:\r +\r + ```bash\r + nextflow run metaGT -profile --dna_reads '*.yaml' --rna_reads '*.yaml' --yaml\r + ```\r + > Typical command for analysis using assemblies:\r +\r + ```bash\r + nextflow run metaGT -profile --genome '*.fasta' --transcriptome '*.fasta'\r + ```\r +## Pipeline Summary\r +Optionally, if raw reades are used:\r +\r +\r +\r +* Sequencing quality control (`FastQC`)\r +* Assembly metagenome or metatranscriptome (`metaSPAdes, rnaSPAdes `)\r +\r +By default, the pipeline currently performs the following:\r +\r +* Annotation metagenome (`Prokka`)\r +* Aligning metatranscriptome on metagenome (`minimap2`)\r +* Annotation unaligned transcripts (`TransDecoder`)\r +* Clustering covered CDS and CDS from unaligned transcripts (`MMseqs2`)\r +* Quantifying abundances of transcripts (`kallisto`)\r +\r +## Citation\r +\r +MetaGT was developed by 
Daria Shafranskaya and Andrey Prjibelski.\r +If you use it in your research please cite:\r +\r +[MetaGT: A pipeline for de novo assembly of metatranscriptomes with the aid of metagenomic data](https://doi.org/10.3389/fmicb.2022.981458)\r +\r +## Feedback and bug report\r +\r +If you have any questions, please leave an issue at out [GitHub page](https://github.com/ablab/metaGT/issues).\r +""" ; + schema1:keywords "Metagenomics, metatranscriptomics, expression, Multi-omics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "MetaGT: A pipeline for de novo assembly of metatranscriptomes with the aid of metagenomic data" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/454?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-05-30T07:39:05.349250" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "atacseq/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Workflow for Metagenomics from raw reads to annotated bins.\r +Steps:\r + - workflow_quality.cwl:\r + - FastQC (control)\r + - fastp (trimming)\r + - Kraken2 (Taxonomic Read Classification\r + - SPAdes (Assembly)\r + - QUAST (Assembly quality report)\r + - BBmap (Read mapping to assembly)\r + - sam_to_bam (sam to indexed bam)\r + - metabatContigDepths (jgi_summarize_bam_contig_depths)\r + - MetaBat2 (binning)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/64?version=8" ; + schema1:isBasedOn "https://gitlab.com/m-unlock/cwl/-/blob/master/cwl/workflows/workflow_metagenomics_binning.cwl" ; + schema1:license "notspecified" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Metagenomics workflow" ; + schema1:sdDatePublished "2024-08-05 10:31:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/64/ro_crate?version=8" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 51515 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11611 ; + schema1:creator , + ; + schema1:dateCreated "2021-05-06T06:03:42Z" ; + schema1:dateModified "2021-05-06T06:03:42Z" ; + schema1:description """Workflow for Metagenomics from raw reads to annotated bins.\r +Steps:\r + - workflow_quality.cwl:\r + - FastQC (control)\r + - fastp (trimming)\r + - Kraken2 (Taxonomic Read Classification\r + - SPAdes (Assembly)\r + - QUAST (Assembly quality report)\r + - BBmap (Read mapping to assembly)\r + - sam_to_bam (sam to indexed bam)\r + - metabatContigDepths (jgi_summarize_bam_contig_depths)\r + - MetaBat2 (binning)\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/64?version=10" ; + schema1:keywords "Metagenomics, microbial, metagenome, binning" ; + schema1:license "https://choosealicense.com/no-permission/" ; + schema1:name "Metagenomics workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/64?version=8" ; + schema1:version 8 ; + ns1:input , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2023-10-03T10:13:49.682562" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "atacseq/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.12" . 
+ + a schema1:Dataset ; + schema1:datePublished "2021-12-20T17:04:47.641102" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-variation-reporting" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-variation-reporting/COVID-19-VARIATION-REPORTING" ; + schema1:sdDatePublished "2021-12-21 03:00:59 +0000" ; + schema1:softwareVersion "v0.1.2" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 1287940 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 1423529 ; + schema1:dateModified "2024-07-05T08:47:41+00:00" ; + schema1:name "housing.csv" ; + schema1:sdDatePublished "2024-07-05T11:38:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1266413 ; + schema1:dateModified "2024-07-05T11:38:26+00:00" ; + schema1:name "housing_one_hot_encoded.csv" ; + schema1:sdDatePublished "2024-07-05T11:38:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 141018 ; + schema1:dateModified "2024-07-05T11:38:46+00:00" ; + schema1:name "lat_lon_plot.png" ; + schema1:sdDatePublished "2024-07-05T11:38:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 22381 ; + schema1:dateModified "2024-07-05T11:38:46+00:00" ; + schema1:name "median_income_plot.png" ; + schema1:sdDatePublished "2024-07-05T11:38:51+00:00" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=18" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-08-05 10:23:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=18" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8900 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:04Z" ; + schema1:dateModified "2024-06-11T12:55:04Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=18" ; + schema1:version 18 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=19" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=19" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10110 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:12Z" ; + schema1:dateModified "2024-06-11T12:55:12Z" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=19" ; + schema1:version 19 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.128.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (PDBe REST API)" ; + schema1:sdDatePublished "2024-08-05 10:30:55 
+0100" ; + schema1:url "https://workflowhub.eu/workflows/128/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 37193 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-09-15T11:18:43Z" ; + schema1:dateModified "2023-01-16T13:50:19Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in 
Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/128?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (PDBe REST API)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_virtual-screening/master/biobb_wf_virtual-screening/notebooks/ebi_api/wf_vs_ebi_api.ipynb" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.291.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python GMX OPLS/AA Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-08-05 10:32:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/291/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1758 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-17T14:08:25Z" ; + schema1:dateModified "2022-03-23T10:04:10Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/291?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python GMX OPLS/AA Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/291?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/963?version=11" ; + schema1:isBasedOn "https://github.com/nf-core/airrflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/airrflow" ; + schema1:sdDatePublished "2024-08-05 10:24:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/963/ro_crate?version=11" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12810 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:37Z" ; + schema1:dateModified "2024-06-11T12:54:37Z" ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/963?version=13" ; + schema1:keywords "airr, b-cell, immcantation, immunorepertoire, repseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/airrflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/963?version=11" ; + schema1:version 11 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.276.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:31:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/276/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6826 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-22T09:38:26Z" ; + schema1:dateModified "2023-01-16T13:57:38Z" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/276?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_md_setup/python/workflow.py" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Masking repeats in a genome using RepeatMasker" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/753?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Masking repeats with RepeatMasker" ; + schema1:sdDatePublished "2024-08-05 10:25:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/753/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7029 ; + schema1:creator ; + schema1:dateCreated "2024-02-15T11:38:35Z" ; + schema1:dateModified "2024-02-15T11:38:35Z" ; + schema1:description "Masking repeats in a genome using RepeatMasker" ; + schema1:isPartOf ; + schema1:keywords "genome-annotation" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Masking repeats with RepeatMasker" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/753?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/989?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/hic" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hic" ; + schema1:sdDatePublished "2024-08-05 10:23:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/989/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5332 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:54Z" ; + schema1:dateModified "2024-06-11T12:54:54Z" ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/989?version=7" ; + schema1:keywords "chromosome-conformation-capture, Hi-C" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hic" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/989?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=17" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-08-05 10:22:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=17" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13080 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:11Z" ; + schema1:dateModified "2024-06-11T12:55:11Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=17" ; + schema1:version 17 . + + a schema1:Dataset ; + schema1:datePublished "2023-10-26T10:12:46.371610" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Scaffolding-HiC-VGP8/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.14" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-08-05 10:23:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3978 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:02Z" ; + schema1:dateModified "2024-06-11T12:55:02Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """## Summary\r +\r +The validation process proposed has two pipelines for filtering PPIs predicted by some _IN SILICO_ detection method, both pipelines can be executed separately. The first pipeline (i) filter according to association rules of cellular locations extracted from HINT database. The second pipeline (ii) filter according to scientific papers where both proteins in the PPIs appear in interaction context in the sentences.\r +\r +The pipeline (i) starts extracting cellular component annotations from HINT PPIs building a dataset and then the Apriori algorithm is applied in this dataset in an iterative process that repeat the application of this algorithm till the rules cover 15 main locations in the cell. This process generate a database with association rules with two main columns: antecedent and consequent, meaning that a location that occurs in antecedent also occurs with the location in consequent. 
The filtering task evaluate the PPI checking if some location annotated for the first protein is in the antecedent column and if some location of the second protein is also in the same rule but in the consequent column. If so, the PPI passes according to the criteria.\r +\r +The pipeline (ii) starts getting all papers that mention both proteins in the PPIs and extrating their content using the NCBI [API](https://www.ncbi.nlm.nih.gov/home/develop/api/). These XML files are cleaned removing hypertext markup and references to figures, tables and supplementary materials. The paragraphs of the remaining articles content are processed by Natural language processing steps to extract sentences, tokens, stopwords removal to remove words extremely common in english language and do not help to identify the context of interest, prioritizing tokens using part-of-speech tagging to keep just nouns and verbs. Then the sentences filtered goes to the task that identifies the proteins of the PPI in evaluation among the tokens and also tries to identify tokens or set of tokens that mention experimental methods. The sentences that have the proteins of interest are filtered if the nouns and verbs have some of the items of the list of words indicating interaction relation (recruit, bind, interact, signaling, etc). Finally, a report is made by pair with the article identifiers, the sentences, the proteins and interacting words found.\r +\r +The figure below illustrates all the tasks of these pipelines.\r +\r +
\r + pipeline\r +
\r +\r +## Requirements:\r +* Python packages needed:\r + - pip3 install pandas\r + - pip3 install rdflib\r + - pip3 install mlxtend\r + - pip3 install inflect\r + - pip3 install nltk\r + - pip3 install biopython\r + - pip3 install lxml\r + - pip3 install bs4 (beautiful soup)\r +\r +## Usage Instructions\r +### Preparation:\r +1. ````git clone https://github.com/YasCoMa/ppi_validation_process.git````\r +2. `pip3 install -r requirements.txt`\r +3. ````cd ppi_validation_process/pipe_location_assocRules/````\r +4. ````unzip pygosemsim.zip````\r +5. ````cd ../````\r +\r +### Filtering by association rules of cellular locations (first filtering part) - File ````pipe_location_assocRules/find_pattern.py```` :\r +* Pipeline parameters:\r + - __-fo__ or __--folder__
\r + Folder to store the files (use the folder where the other required file can be found)\r + - __-if__ or __--interactome_file__
\r + File with the pairs (two columns with uniprot identifiers in tsv format)
\r +\r + Example of this file: pipe_location_assocRules/running_example/all_pairs.tsv\r +\r +\r +* Running modes examples:\r + 1. Go to the first filtering part folder:
\r + ````cd pipe_location_assocRules/````\r +\r + 2. Uncompress annotation_data.zip\r + \r + 3. Run:
\r + ````python3 find_pattern.py -fo running_example/ -if all_pairs.tsv````\r +\r +\r +### Filtering by text mining on scientific papers (second filtering part) - File ````ppi_pubminer/pubmed_pmc_literature_pipeline.py````:\r +\r +* Pipeline parameters:\r + - __-em__ or __--execution_mode__
\r + Use to indicate the execution mode desired:
\r + 1 - Mode using a list of protein pairs as bait
\r + 2 - Mode that tries to find sentences of PPI context for any protein pairs given a list of articles\r + \r + - __-fo__ or __--folder__
\r + Folder to store the files (use the folder where the other required file can be found)\r +\r + - __-rtm1__ or __--running_type_mode_1__
\r + Use to indicate which execution step you want to run for mode 1 (it is desirable following the order showed):
\r + 0 (default) - Run all steps
\r + 1 - Run step 1 (Get mentions of both proteins in PMC articles)
\r + 2 - Run step 2 (Get the PMC or Pubmed files, clean and store them)
\r + 3 - Run step 3 (Get the exact sentences where the proteins were found on interacting context)\r +\r + - __-rtm2__ or __--running_type_mode_2__
\r + Use to indicate which execution step you want to run for mode 2 (it is desirable following the order showed):
\r + 0 (default) - Run all steps
\r + 1 - Run step 1 (Get the PMC or Pubmed files from the given list, clean and store them)
\r + 2 - Run step 2 (Get the exact sentences where the proteins were found on an interacting context)\r +\r + - __-fp__ or __--file_pairs__
\r + (For mode 1) File with the pairs (two columns with uniprot identifiers in tsv format)
\r + \r + Example of this file: ppipubminer/running_example/mode_1/all_pairs.tsv\r +\r + - __-fe__ or __--file_evaluation__
\r + (For mode 1) File exported after step 1 execution in tsv format
\r +\r + - __-fa__ or __--file_articles__
\r + (For mode 2) File with the articles (First column indicating if it is from pmc or pubmed and the second one is the article id) in tsv format)
\r + \r + Example of this file: ppipubminer/running_example/mode_2/articles_info.tsv\r +\r +* Running modes examples:\r + - Go to the second filtering part folder:
\r + ````cd ppipubminer/````\r +\r + - Mode 1 - From protein pairs (PPIs) to sentences in articles\r + 1. Running all three steps of mode 1:
\r + ````python3 pubmed_pmc_literature_pipeline.py -em 1 -rtm1 0 -fo running_example/mode_1/ -fp all_pairs.tsv````\r +\r + 2. Running only step 1 of mode 1:
\r + ````python3 pubmed_pmc_literature_pipeline.py -em 1 -rtm1 1 -fo running_example/mode_1/ -fp all_pairs.tsv````\r +\r + 3. Running only step 2 of mode 1:
\r + ````python3 pubmed_pmc_literature_pipeline.py -em 1 -rtm1 2 -fo running_example/mode_1/ -fp all_pairs.tsv -fe literature_evaluation_pairs.tsv````\r +\r + 4. Running only step 3 of mode 1:
\r + ````python3 pubmed_pmc_literature_pipeline.py -em 1 -rtm1 3 -fo running_example/mode_1/ -fp all_pairs.tsv -fe literature_evaluation_pairs.tsv````\r +\r + - Mode 2 - From articles to report of sentences with any protein pairs (PPIs)\r + 1. Running all three steps of mode 2:
\r + ````python3 pubmed_pmc_literature_pipeline.py -em 2 -rtm1 0 -fo running_example/mode_2/ -fa articles_info.tsv````\r +\r + 2. Running only step 1 of mode 2:
\r + ````python3 pubmed_pmc_literature_pipeline.py -em 2 -rtm1 1 -fo running_example/mode_2/ -fa articles_info.tsv````\r +\r + 3. Running only step 2 of mode 2:
\r + ````python3 pubmed_pmc_literature_pipeline.py -em 2 -rtm1 2 -fo running_example/mode_2/ -fa articles_info.tsv ````\r +\r +## Reference\r +Martins YC, Ziviani A, Nicolás MF, de Vasconcelos AT. Large-Scale Protein Interactions Prediction by Multiple Evidence Analysis Associated With an In-Silico Curation Strategy. Frontiers in Bioinformatics. 2021:38.\r +https://www.frontiersin.org/articles/10.3389/fbinf.2021.731345/full\r +\r +## Bug Report\r +Please, use the [Issues](https://github.com/YasCoMa/ppi_validation_process/issues) tab to report any bug.""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/617?version=1" ; + schema1:isBasedOn "https://github.com/YasCoMa/ppi_validation_process" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PPIVPro - PPI Validation Process" ; + schema1:sdDatePublished "2024-08-05 10:27:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/617/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 54801 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 36713 ; + schema1:creator ; + schema1:dateCreated "2023-10-21T23:43:48Z" ; + schema1:dateModified "2023-10-21T23:45:17Z" ; + schema1:description """## Summary\r +\r +The validation process proposed has two pipelines for filtering PPIs predicted by some _IN SILICO_ detection method, both pipelines can be executed separately. The first pipeline (i) filter according to association rules of cellular locations extracted from HINT database. 
The second pipeline (ii) filter according to scientific papers where both proteins in the PPIs appear in interaction context in the sentences.\r +\r +The pipeline (i) starts extracting cellular component annotations from HINT PPIs building a dataset and then the Apriori algorithm is applied in this dataset in an iterative process that repeat the application of this algorithm till the rules cover 15 main locations in the cell. This process generate a database with association rules with two main columns: antecedent and consequent, meaning that a location that occurs in antecedent also occurs with the location in consequent. The filtering task evaluate the PPI checking if some location annotated for the first protein is in the antecedent column and if some location of the second protein is also in the same rule but in the consequent column. If so, the PPI passes according to the criteria.\r +\r +The pipeline (ii) starts getting all papers that mention both proteins in the PPIs and extrating their content using the NCBI [API](https://www.ncbi.nlm.nih.gov/home/develop/api/). These XML files are cleaned removing hypertext markup and references to figures, tables and supplementary materials. The paragraphs of the remaining articles content are processed by Natural language processing steps to extract sentences, tokens, stopwords removal to remove words extremely common in english language and do not help to identify the context of interest, prioritizing tokens using part-of-speech tagging to keep just nouns and verbs. Then the sentences filtered goes to the task that identifies the proteins of the PPI in evaluation among the tokens and also tries to identify tokens or set of tokens that mention experimental methods. The sentences that have the proteins of interest are filtered if the nouns and verbs have some of the items of the list of words indicating interaction relation (recruit, bind, interact, signaling, etc). 
Finally, a report is made by pair with the article identifiers, the sentences, the proteins and interacting words found.\r +\r +The figure below illustrates all the tasks of these pipelines.\r +\r +
\r + pipeline\r +
\r +\r +## Requirements:\r +* Python packages needed:\r + - pip3 install pandas\r + - pip3 install rdflib\r + - pip3 install mlxtend\r + - pip3 install inflect\r + - pip3 install nltk\r + - pip3 install biopython\r + - pip3 install lxml\r + - pip3 install bs4 (beautiful soup)\r +\r +## Usage Instructions\r +### Preparation:\r +1. ````git clone https://github.com/YasCoMa/ppi_validation_process.git````\r +2. `pip3 install -r requirements.txt`\r +3. ````cd ppi_validation_process/pipe_location_assocRules/````\r +4. ````unzip pygosemsim.zip````\r +5. ````cd ../````\r +\r +### Filtering by association rules of cellular locations (first filtering part) - File ````pipe_location_assocRules/find_pattern.py```` :\r +* Pipeline parameters:\r + - __-fo__ or __--folder__
\r + Folder to store the files (use the folder where the other required file can be found)\r + - __-if__ or __--interactome_file__
\r + File with the pairs (two columns with uniprot identifiers in tsv format)
\r +\r + Example of this file: pipe_location_assocRules/running_example/all_pairs.tsv\r +\r +\r +* Running modes examples:\r + 1. Go to the first filtering part folder:
\r + ````cd pipe_location_assocRules/````\r +\r + 2. Uncompress annotation_data.zip\r + \r + 3. Run:
\r + ````python3 find_pattern.py -fo running_example/ -if all_pairs.tsv````\r +\r +\r +### Filtering by text mining on scientific papers (second filtering part) - File ````ppi_pubminer/pubmed_pmc_literature_pipeline.py````:\r +\r +* Pipeline parameters:\r + - __-em__ or __--execution_mode__
\r + Use to indicate the execution mode desired:
\r + 1 - Mode using a list of protein pairs as bait
\r + 2 - Mode that tries to find sentences of PPI context for any protein pairs given a list of articles\r + \r + - __-fo__ or __--folder__
\r + Folder to store the files (use the folder where the other required file can be found)\r +\r + - __-rtm1__ or __--running_type_mode_1__
\r + Use to indicate which execution step you want to run for mode 1 (it is desirable following the order showed):
\r + 0 (default) - Run all steps
\r + 1 - Run step 1 (Get mentions of both proteins in PMC articles)
\r + 2 - Run step 2 (Get the PMC or Pubmed files, clean and store them)
\r + 3 - Run step 3 (Get the exact sentences where the proteins were found on interacting context)\r +\r + - __-rtm2__ or __--running_type_mode_2__
\r + Use to indicate which execution step you want to run for mode 2 (it is desirable following the order showed):
\r + 0 (default) - Run all steps
\r + 1 - Run step 1 (Get the PMC or Pubmed files from the given list, clean and store them)
\r + 2 - Run step 2 (Get the exact sentences where the proteins were found on an interacting context)\r +\r + - __-fp__ or __--file_pairs__
\r + (For mode 1) File with the pairs (two columns with uniprot identifiers in tsv format)
\r + \r + Example of this file: ppipubminer/running_example/mode_1/all_pairs.tsv\r +\r + - __-fe__ or __--file_evaluation__
\r + (For mode 1) File exported after step 1 execution in tsv format
\r +\r + - __-fa__ or __--file_articles__
\r + (For mode 2) File with the articles (First column indicating if it is from pmc or pubmed and the second one is the article id) in tsv format)
\r + \r + Example of this file: ppipubminer/running_example/mode_2/articles_info.tsv\r +\r +* Running modes examples:\r + - Go to the second filtering part folder:
\r + ````cd ppipubminer/````\r +\r + - Mode 1 - From protein pairs (PPIs) to sentences in articles\r + 1. Running all three steps of mode 1:
\r + ````python3 pubmed_pmc_literature_pipeline.py -em 1 -rtm1 0 -fo running_example/mode_1/ -fp all_pairs.tsv````\r +\r + 2. Running only step 1 of mode 1:
\r + ````python3 pubmed_pmc_literature_pipeline.py -em 1 -rtm1 1 -fo running_example/mode_1/ -fp all_pairs.tsv````\r +\r + 3. Running only step 2 of mode 1:
\r + ````python3 pubmed_pmc_literature_pipeline.py -em 1 -rtm1 2 -fo running_example/mode_1/ -fp all_pairs.tsv -fe literature_evaluation_pairs.tsv````\r +\r + 4. Running only step 3 of mode 1:
\r + ````python3 pubmed_pmc_literature_pipeline.py -em 1 -rtm1 3 -fo running_example/mode_1/ -fp all_pairs.tsv -fe literature_evaluation_pairs.tsv````\r +\r + - Mode 2 - From articles to report of sentences with any protein pairs (PPIs)\r + 1. Running all three steps of mode 2:
\r + ````python3 pubmed_pmc_literature_pipeline.py -em 2 -rtm1 0 -fo running_example/mode_2/ -fa articles_info.tsv````\r +\r + 2. Running only step 1 of mode 2:
\r + ````python3 pubmed_pmc_literature_pipeline.py -em 2 -rtm1 1 -fo running_example/mode_2/ -fa articles_info.tsv````\r +\r + 3. Running only step 2 of mode 2:
\r + ````python3 pubmed_pmc_literature_pipeline.py -em 2 -rtm1 2 -fo running_example/mode_2/ -fa articles_info.tsv ````\r +\r +## Reference\r +Martins YC, Ziviani A, Nicolás MF, de Vasconcelos AT. Large-Scale Protein Interactions Prediction by Multiple Evidence Analysis Associated With an In-Silico Curation Strategy. Frontiers in Bioinformatics. 2021:38.\r +https://www.frontiersin.org/articles/10.3389/fbinf.2021.731345/full\r +\r +## Bug Report\r +Please, use the [Issues](https://github.com/YasCoMa/ppi_validation_process/issues) tab to report any bug.""" ; + schema1:image ; + schema1:keywords "Bioinformatics, scientific publication text mining, validaiton o protein interaction predictions" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "PPIVPro - PPI Validation Process" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/617?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). 
\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.281.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_protein_complex_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/281/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8792 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-22T10:00:20Z" ; + schema1:dateModified "2023-01-16T13:58:31Z" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/281?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher 
; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_protein_complex_md_setup/python/workflow.py" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Automated quantitative analysis of DIA proteomics mass spectrometry measurements." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/980?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/diaproteomics" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/diaproteomics" ; + schema1:sdDatePublished "2024-08-05 10:24:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/980/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7318 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "Automated quantitative analysis of DIA proteomics mass spectrometry measurements." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/980?version=6" ; + schema1:keywords "data-independent-proteomics, dia-proteomics, openms, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/diaproteomics" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/980?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Analysis tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This workflow computes a set of Quality Control (QC) analyses on top of an uploaded MD trajectory. 
QC analyses include positional divergence (RMSd), change of shape (Radius of Gyration), identification of flexible regions (atomic/residue fluctuations), and identification of different molecular conformations (trajectory clustering).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.287.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_protein_md_analysis/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein MD Analysis tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/287/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5395 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-17T13:13:07Z" ; + schema1:dateModified "2022-04-11T09:29:43Z" ; + schema1:description """# Protein MD Analysis tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This workflow computes a set of Quality Control (QC) analyses on top of an uploaded MD trajectory. QC analyses include positional divergence (RMSd), change of shape (Radius of Gyration), identification of flexible regions (atomic/residue fluctuations), and identification of different molecular conformations (trajectory clustering).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/287?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein MD Analysis tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_protein_md_analysis/python/workflow.py" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Conversion of files from BAM to FASTQ, including FASTQC & CCS.BAM file quality control (QC) steps.\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/220?version=1" ; + schema1:isBasedOn "https://github.com/AustralianBioCommons/BAM-to-FASTQ-QC" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CCS.BAM to FASTQ + QC (HiFi genome assembly stage 1)" ; + schema1:sdDatePublished "2024-08-05 10:31:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/220/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10774 ; + schema1:creator ; + schema1:dateCreated "2021-10-21T05:52:36Z" ; + schema1:dateModified "2022-10-17T02:43:04Z" ; + schema1:description """Conversion of files from BAM to FASTQ, including FASTQC & CCS.BAM file quality control (QC) steps.\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/220?version=1" ; + schema1:isPartOf , + ; + schema1:keywords "BAM, FASTQ, Conversion, QC" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "CCS.BAM to FASTQ + QC (HiFi genome assembly stage 1)" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/220?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.293.2" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python CNS/XPLOR MD Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-08-05 10:32:26 +0100" ; + schema1:url 
"https://workflowhub.eu/workflows/293/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1728 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-24T14:22:19Z" ; + schema1:dateModified "2023-01-16T13:58:35Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/293?version=1" ; + schema1:keywords "" ; + schema1:license 
"https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python CNS/XPLOR MD Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/293?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=11" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-08-05 10:23:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=11" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5700 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:03Z" ; + schema1:dateModified "2024-06-11T12:55:03Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=11" ; + schema1:version 11 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# covid-sequence-analysis-workflow\r +\r +This is the official repository of the SARS-CoV-2 variant surveillance pipeline developed by Danish Technical University (DTU), Eotvos Lorand University (ELTE), EMBL-EBI, Erasmus Medical Center (EMC) under the [Versatile Emerging infectious disease Observatory (VEO)](https://www.globalsurveillance.eu/projects/veo-versatile-emerging-infectious-disease-observatory) project. The project consists of 20 European partners. It is funded by the European Commission.\r +\r +The pipeline has been integrated on EMBL-EBI infrastructure to automatically process raw SARS-CoV-2 read data, presenting in the COVID-19 Data Portal: https://www.covid19dataportal.org/sequences?db=sra-analysis-covid19&size=15&crossReferencesOption=all#search-content.\r +\r +## Architecture\r +\r +The pipeline supports sequence reads from both Illumina and Nanopore platforms. It is designed to be highly portable for both Google Cloud Platform and High Performance Computing cluster with IBM Spectrum LSF. We have performed secondary and tertiary analysis on millions of public samples. The pipeline shows good performance for large scale production. \r +\r +![Component diagram](doc/img/pipeline.components.png)\r +\r +The pipeline takes SRA from the public FTP from ENA. It submits analysis objects back to ENA on the fly. The intermediate results and logs are stored in the cloud storage buckets or high performance local POSIX file system. The metadata is stored in Google BigQuery for metadata and status tracking and analysis. The runtime is created with Docker / Singularity containers and NextFlow. \r +\r +## Process to run the pipelines\r +\r +The pipeline requires the Nextflow Tower for the application level monitoring. A free test account can be created for evaluation purposes at https://tower.nf/.\r +\r +### Preparation\r +\r +1. 
Store `export TOWER_ACCESS_TOKEN='...'` in `$HOME/.bash_profile`. Restart the current session or source the updated `$HOME/.bash_profile`.\r +2. Run `git clone https://github.com/enasequence/covid-sequence-analysis-workflow`.\r +3. Create `./covid-sequence-analysis-workflow/data/projects_accounts.csv` with submission_account_id and submission_passwor, for example:\r +> project_id,center_name,meta_key,submission_account_id,submission_password,ftp_password\r +> PRJEB45555,"European Bioinformatics Institute",public,,,\r +\r +### Running pipelines\r +\r +1. Run `./covid-sequence-analysis-workflow/init.sra_index.sh` to initialize or reinitialize the metadata in BigQuery.\r +2. Run `./covid-sequence-analysis-workflow/./start.lsf.jobs.sh` with proper parameters to start the batch jobs on LSF or `./covid-sequence-analysis-workflow/./start.gls.jobs.sh` with proper parameters to start the batch jobs on GCP.\r +\r +### Error handling\r +\r +If a job is killed or died, run the following to update the metadata to avoid reprocessing samples completed successfully.\r +\r +1. Run `./covid-sequence-analysis-workflow/update.receipt.sh ` to collect the submission receipts and to update submission metadata. The script can be run at anytime. It needs to be run if a batch job is killed instead of completed for any reason.\r +2. Run `./covid-sequence-analysis-workflow/set.archived.sh` to update stats for analyses submitted. The script can be run at anytime. 
It needs to be run at least once before ending a snapshot to make sure that the stats are up-to-date.\r +\r +To reprocess the samples failed, delete the record in `sra_processing`.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.664.1" ; + schema1:isBasedOn "https://github.com/enasequence/covid-sequence-analysis-workflow.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for covid-sequence-analysis-workflow" ; + schema1:sdDatePublished "2024-08-05 10:27:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/664/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 272 ; + schema1:creator ; + schema1:dateCreated "2023-11-14T09:42:17Z" ; + schema1:dateModified "2023-11-14T09:42:53Z" ; + schema1:description """# covid-sequence-analysis-workflow\r +\r +This is the official repository of the SARS-CoV-2 variant surveillance pipeline developed by Danish Technical University (DTU), Eotvos Lorand University (ELTE), EMBL-EBI, Erasmus Medical Center (EMC) under the [Versatile Emerging infectious disease Observatory (VEO)](https://www.globalsurveillance.eu/projects/veo-versatile-emerging-infectious-disease-observatory) project. The project consists of 20 European partners. It is funded by the European Commission.\r +\r +The pipeline has been integrated on EMBL-EBI infrastructure to automatically process raw SARS-CoV-2 read data, presenting in the COVID-19 Data Portal: https://www.covid19dataportal.org/sequences?db=sra-analysis-covid19&size=15&crossReferencesOption=all#search-content.\r +\r +## Architecture\r +\r +The pipeline supports sequence reads from both Illumina and Nanopore platforms. It is designed to be highly portable for both Google Cloud Platform and High Performance Computing cluster with IBM Spectrum LSF. 
We have performed secondary and tertiary analysis on millions of public samples. The pipeline shows good performance for large scale production. \r +\r +![Component diagram](doc/img/pipeline.components.png)\r +\r +The pipeline takes SRA from the public FTP from ENA. It submits analysis objects back to ENA on the fly. The intermediate results and logs are stored in the cloud storage buckets or high performance local POSIX file system. The metadata is stored in Google BigQuery for metadata and status tracking and analysis. The runtime is created with Docker / Singularity containers and NextFlow. \r +\r +## Process to run the pipelines\r +\r +The pipeline requires the Nextflow Tower for the application level monitoring. A free test account can be created for evaluation purposes at https://tower.nf/.\r +\r +### Preparation\r +\r +1. Store `export TOWER_ACCESS_TOKEN='...'` in `$HOME/.bash_profile`. Restart the current session or source the updated `$HOME/.bash_profile`.\r +2. Run `git clone https://github.com/enasequence/covid-sequence-analysis-workflow`.\r +3. Create `./covid-sequence-analysis-workflow/data/projects_accounts.csv` with submission_account_id and submission_passwor, for example:\r +> project_id,center_name,meta_key,submission_account_id,submission_password,ftp_password\r +> PRJEB45555,"European Bioinformatics Institute",public,,,\r +\r +### Running pipelines\r +\r +1. Run `./covid-sequence-analysis-workflow/init.sra_index.sh` to initialize or reinitialize the metadata in BigQuery.\r +2. Run `./covid-sequence-analysis-workflow/./start.lsf.jobs.sh` with proper parameters to start the batch jobs on LSF or `./covid-sequence-analysis-workflow/./start.gls.jobs.sh` with proper parameters to start the batch jobs on GCP.\r +\r +### Error handling\r +\r +If a job is killed or died, run the following to update the metadata to avoid reprocessing samples completed successfully.\r +\r +1. 
Run `./covid-sequence-analysis-workflow/update.receipt.sh ` to collect the submission receipts and to update submission metadata. The script can be run at anytime. It needs to be run if a batch job is killed instead of completed for any reason.\r +2. Run `./covid-sequence-analysis-workflow/set.archived.sh` to update stats for analyses submitted. The script can be run at anytime. It needs to be run at least once before ending a snapshot to make sure that the stats are up-to-date.\r +\r +To reprocess the samples failed, delete the record in `sra_processing`.\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "covid-sequence-analysis-workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/664?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1027?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/viralrecon" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/viralrecon" ; + schema1:sdDatePublished "2024-08-05 10:23:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1027/ro_crate?version=8" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10177 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:21Z" ; + schema1:dateModified "2024-06-11T12:55:21Z" ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1027?version=10" ; + schema1:keywords "Amplicon, ARTIC, Assembly, covid-19, covid19, illumina, long-read-sequencing, Metagenomics, nanopore, ONT, oxford-nanopore, SARS-CoV-2, variant-calling, viral, Virus" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/viralrecon" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1027?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.XXXXXXX-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.XXXXXXX)\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.10.1-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![Launch on Nextflow Tower](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Nextflow%20Tower-%234256e7)](https://tower.nf/launch?pipeline=https://github.com/sanger-tol/treeval)\r +\r +## Introduction\r +\r +**sanger-tol/treeval** is a bioinformatics best-practice analysis pipeline for the 
generation of data supplemental to the curation of reference quality genomes. This pipeline has been written to generate flat files compatible with [JBrowse2](https://jbrowse.org/jb2/).\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +The treeval pipeline has a sister pipeline currently named [curationpretext](https://github.com/sanger-tol/curationpretext) which acts to regenerate the pretext maps and accessory files during genomic curation in order to confirm interventions. This pipeline is sufficiently different to the treeval implementation that it is written as it's own pipeline.\r +\r +1. Parse input yaml ( YAML_INPUT )\r +2. Generate my.genome file ( GENERATE_GENOME )\r +3. Generate insilico digests of the input assembly ( INSILICO_DIGEST )\r +4. Generate gene alignments with high quality data against the input assembly ( GENE_ALIGNMENT )\r +5. Generate a repeat density graph ( REPEAT_DENSITY )\r +6. Generate a gap track ( GAP_FINDER )\r +7. Generate a map of self complementary sequence ( SELFCOMP )\r +8. Generate syntenic alignments with a closely related high quality assembly ( SYNTENY )\r +9. Generate a coverage track using PacBio data ( LONGREAD_COVERAGE )\r +10. Generate HiC maps, pretext and higlass using HiC cram files ( HIC_MAPPING )\r +11. 
Generate a telomere track based on input motif ( TELO_FINDER )\r +12. Run Busco and convert results into bed format ( BUSCO_ANNOTATION )\r +13. Ancestral Busco linkage if available for clade ( BUSCO_ANNOTATION:ANCESTRAL_GENE )\r +\r +## Usage\r +\r +> **Note**\r +> If you are new to Nextflow and nf-core, please refer to [this page](https://nf-co.re/docs/usage/installation) on how\r +> to set-up Nextflow. Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline)\r +> with `-profile test` before running the workflow on actual data.\r +\r +Currently, it is advised to run the pipeline with docker or singularity as a small number of major modules do not currently have a conda env associated with them.\r +\r +Now, you can run the pipeline using:\r +\r +```bash\r +# For the FULL pipeline\r +nextflow run main.nf -profile singularity --input treeval.yaml --outdir {OUTDIR}\r +\r +# For the RAPID subset\r +nextflow run main.nf -profile singularity --input treeval.yaml -entry RAPID --outdir {OUTDIR}\r +```\r +\r +An example treeval.yaml can be found [here](assets/local_testing/nxOscDF5033.yaml).\r +\r +Further documentation about the pipeline can be found in the following files: [usage](https://pipelines.tol.sanger.ac.uk/treeval/dev/usage), [parameters](https://pipelines.tol.sanger.ac.uk/treeval/dev/parameters) and [output](https://pipelines.tol.sanger.ac.uk/treeval/dev/output).\r +\r +> **Warning:**\r +> Please provide pipeline parameters via the CLI or Nextflow `-params-file` option. 
Custom config files including those\r +> provided by the `-c` Nextflow option can be used to provide any configuration _**except for parameters**_;\r +> see [docs](https://nf-co.re/usage/configuration#custom-configuration-files).\r +\r +## Credits\r +\r +sanger-tol/treeval has been written by Damon-Lee Pointon (@DLBPointon), Yumi Sims (@yumisims) and William Eagles (@weaglesBio).\r +\r +We thank the following people for their extensive assistance in the development of this pipeline:\r +\r +
    \r +
  • @gq1 - For building the infrastructure around TreeVal and helping with code review
  • \r +
  • @ksenia-krasheninnikova - For help with C code implementation and YAML parsing
  • \r +
  • @mcshane - For guidance on algorithms
  • \r +
  • @muffato - For code reviews and code support
  • \r +
  • @priyanka-surana - For help with the majority of code reviews and code support
  • \r +
\r +\r +## Contributions and Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +## Citations\r +\r +\r +\r +If you use sanger-tol/treeval for your analysis, please cite it using the following doi: [10.5281/zenodo.XXXXXX](https://doi.org/10.5281/zenodo.XXXXXX).\r +\r +### Tools\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +You can cite the `nf-core` publication as follows:\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/668?version=1" ; + schema1:isBasedOn "https://github.com/sanger-tol/treeval" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for sanger-tol/treeval v1.0 - Ancient Atlantis" ; + schema1:sdDatePublished "2024-08-05 10:27:09 +0100" ; + schema1:url "https://workflowhub.eu/workflows/668/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1736 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2023-11-14T12:10:54Z" ; + schema1:dateModified "2023-11-14T12:10:54Z" ; + schema1:description """[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.XXXXXXX-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.XXXXXXX)\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.10.1-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![Launch on Nextflow Tower](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Nextflow%20Tower-%234256e7)](https://tower.nf/launch?pipeline=https://github.com/sanger-tol/treeval)\r +\r +## Introduction\r +\r +**sanger-tol/treeval** is a bioinformatics best-practice analysis pipeline for the generation of data supplemental to the curation of reference quality genomes. This pipeline has been written to generate flat files compatible with [JBrowse2](https://jbrowse.org/jb2/).\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. 
Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +The treeval pipeline has a sister pipeline currently named [curationpretext](https://github.com/sanger-tol/curationpretext) which acts to regenerate the pretext maps and accessory files during genomic curation in order to confirm interventions. This pipeline is sufficiently different to the treeval implementation that it is written as it's own pipeline.\r +\r +1. Parse input yaml ( YAML_INPUT )\r +2. Generate my.genome file ( GENERATE_GENOME )\r +3. Generate insilico digests of the input assembly ( INSILICO_DIGEST )\r +4. Generate gene alignments with high quality data against the input assembly ( GENE_ALIGNMENT )\r +5. Generate a repeat density graph ( REPEAT_DENSITY )\r +6. Generate a gap track ( GAP_FINDER )\r +7. Generate a map of self complementary sequence ( SELFCOMP )\r +8. Generate syntenic alignments with a closely related high quality assembly ( SYNTENY )\r +9. Generate a coverage track using PacBio data ( LONGREAD_COVERAGE )\r +10. Generate HiC maps, pretext and higlass using HiC cram files ( HIC_MAPPING )\r +11. Generate a telomere track based on input motif ( TELO_FINDER )\r +12. Run Busco and convert results into bed format ( BUSCO_ANNOTATION )\r +13. Ancestral Busco linkage if available for clade ( BUSCO_ANNOTATION:ANCESTRAL_GENE )\r +\r +## Usage\r +\r +> **Note**\r +> If you are new to Nextflow and nf-core, please refer to [this page](https://nf-co.re/docs/usage/installation) on how\r +> to set-up Nextflow. 
Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline)\r +> with `-profile test` before running the workflow on actual data.\r +\r +Currently, it is advised to run the pipeline with docker or singularity as a small number of major modules do not currently have a conda env associated with them.\r +\r +Now, you can run the pipeline using:\r +\r +```bash\r +# For the FULL pipeline\r +nextflow run main.nf -profile singularity --input treeval.yaml --outdir {OUTDIR}\r +\r +# For the RAPID subset\r +nextflow run main.nf -profile singularity --input treeval.yaml -entry RAPID --outdir {OUTDIR}\r +```\r +\r +An example treeval.yaml can be found [here](assets/local_testing/nxOscDF5033.yaml).\r +\r +Further documentation about the pipeline can be found in the following files: [usage](https://pipelines.tol.sanger.ac.uk/treeval/dev/usage), [parameters](https://pipelines.tol.sanger.ac.uk/treeval/dev/parameters) and [output](https://pipelines.tol.sanger.ac.uk/treeval/dev/output).\r +\r +> **Warning:**\r +> Please provide pipeline parameters via the CLI or Nextflow `-params-file` option. Custom config files including those\r +> provided by the `-c` Nextflow option can be used to provide any configuration _**except for parameters**_;\r +> see [docs](https://nf-co.re/usage/configuration#custom-configuration-files).\r +\r +## Credits\r +\r +sanger-tol/treeval has been written by Damon-Lee Pointon (@DLBPointon), Yumi Sims (@yumisims) and William Eagles (@weaglesBio).\r +\r +We thank the following people for their extensive assistance in the development of this pipeline:\r +\r +
    \r +
  • @gq1 - For building the infrastructure around TreeVal and helping with code review
  • \r +
  • @ksenia-krasheninnikova - For help with C code implementation and YAML parsing
  • \r +
  • @mcshane - For guidance on algorithms
  • \r +
  • @muffato - For code reviews and code support
  • \r +
  • @priyanka-surana - For help with the majority of code reviews and code support
  • \r +
\r +\r +## Contributions and Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +## Citations\r +\r +\r +\r +If you use sanger-tol/treeval for your analysis, please cite it using the following doi: [10.5281/zenodo.XXXXXX](https://doi.org/10.5281/zenodo.XXXXXX).\r +\r +### Tools\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +You can cite the `nf-core` publication as follows:\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "sanger-tol/treeval v1.0 - Ancient Atlantis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/668?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Global Run-On sequencing analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1004?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/nascent" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nascent" ; + schema1:sdDatePublished "2024-08-05 10:23:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1004/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8566 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:06Z" ; + schema1:dateModified "2024-06-11T12:55:06Z" ; + schema1:description "Global Run-On sequencing analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1004?version=4" ; + schema1:keywords "gro-seq, nascent, pro-seq, rna, transcription, tss" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nascent" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1004?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Pre-processing of mass spectrometry-based metabolomics data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/57?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/metaboigniter" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/metaboigniter" ; + schema1:sdDatePublished "2024-08-05 10:24:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/57/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 23675 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:00Z" ; + schema1:dateModified "2024-06-11T12:55:00Z" ; + schema1:description "Pre-processing of mass spectrometry-based metabolomics data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/57?version=3" ; + schema1:keywords "Metabolomics, identification, quantification, mass-spectrometry, ms1, ms2" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/metaboigniter" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/57?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# RASflow: RNA-Seq Analysis Snakemake Workflow\r +RASflow is a modular, flexible and user-friendly RNA-Seq analysis workflow. \r +\r +RASflow can be applied to both model and non-model organisms. It supports mapping RNA-Seq raw reads to both genome and transcriptome (can be downloaded from public database or can be homemade by users) and it can do both transcript- and gene-level Differential Expression Analysis (DEA) when transcriptome is used as mapping reference. It requires little programming skill for basic use. If you're good at programming, you can do more magic with RASflow!\r +\r +You can help support RASflow by citing our publication:\r +\r +**Zhang, X., Jonassen, I. RASflow: an RNA-Seq analysis workflow with Snakemake. BMC Bioinformatics 21, 110 (2020). 
https://doi.org/10.1186/s12859-020-3433-x**\r +""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/451?version=1" ; + schema1:isBasedOn "https://github.com/zhxiaokang/RASflow.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for RASflow: RNA-Seq Analysis Snakemake Workflow" ; + schema1:sdDatePublished "2024-08-05 10:31:09 +0100" ; + schema1:url "https://workflowhub.eu/workflows/451/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 2091 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5359 ; + schema1:creator ; + schema1:dateCreated "2023-04-06T07:54:59Z" ; + schema1:dateModified "2023-04-06T07:54:59Z" ; + schema1:description """# RASflow: RNA-Seq Analysis Snakemake Workflow\r +RASflow is a modular, flexible and user-friendly RNA-Seq analysis workflow. \r +\r +RASflow can be applied to both model and non-model organisms. It supports mapping RNA-Seq raw reads to both genome and transcriptome (can be downloaded from public database or can be homemade by users) and it can do both transcript- and gene-level Differential Expression Analysis (DEA) when transcriptome is used as mapping reference. It requires little programming skill for basic use. If you're good at programming, you can do more magic with RASflow!\r +\r +You can help support RASflow by citing our publication:\r +\r +**Zhang, X., Jonassen, I. RASflow: an RNA-Seq analysis workflow with Snakemake. BMC Bioinformatics 21, 110 (2020). 
https://doi.org/10.1186/s12859-020-3433-x**\r +""" ; + schema1:image ; + schema1:keywords "Transcriptomics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "RASflow: RNA-Seq Analysis Snakemake Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/451?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 733943 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Integration of viral sequences in genomic data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1026?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/viralintegration" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/viralintegration" ; + schema1:sdDatePublished "2024-08-05 10:23:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1026/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9792 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:20Z" ; + schema1:dateModified "2024-06-11T12:55:20Z" ; + schema1:description "Integration of viral sequences in genomic data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1026?version=1" ; + schema1:keywords "chimeric-alignment, ctat, viral-integration, Virus, virusintegrationfinder" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/viralintegration" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1026?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-03-13T15:58:39.020374" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Analysis tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This workflow computes a set of Quality Control (QC) analyses on top of an uploaded MD trajectory. QC analyses include positional divergence (RMSd), change of shape (Radius of Gyration), identification of flexible regions (atomic/residue fluctuations), and identification of different molecular conformations (trajectory clustering).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.287.2" ; + schema1:isBasedOn 
"https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_protein_md_analysis/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein MD Analysis tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/287/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5395 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-23T08:37:12Z" ; + schema1:dateModified "2023-01-16T13:58:32Z" ; + schema1:description """# Protein MD Analysis tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This workflow computes a set of Quality Control (QC) analyses on top of an uploaded MD trajectory. QC analyses include positional divergence (RMSd), change of shape (Radius of Gyration), identification of flexible regions (atomic/residue fluctuations), and identification of different molecular conformations (trajectory clustering).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/287?version=2" ; + 
schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein MD Analysis tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_protein_md_analysis/python/workflow.py" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """## Summary\r +\r +This pipeline contains the following functions: \r +(1) Data processing to handle the tansformations needed to obtain the original pathway scores of the samples according to single sample analysis GSEA\r +(2) Model training based on the disease and healthy sample pathway scores, to classify them\r +(3) Scoring matrix weights optimization according to a gold standard list of drugs (those that went on clinical trials or are approved for the disease).It tests the weights in a range of 0 to 30 (you may change as you want). 
The evaluation function tests and try to maximize the number of approved drugs whose modified pathway scores for disease samples is changed from disease to healthy sample classification, according to the trained model.\r +(4) Computation of the calibrated disease samples pathwa scores according to the interaction among drug and targets found in the sample pathways & Drug ranking based on the disease samples whose calibrated matrix were responsible to change the trained model decision from disease to healthy state.\r +(5) Drug combination ranking evaluated the same way as in option (4) but adding the effects of multiple drugs in each sample while calculating the calibrated scoring matrix\r + \r +## Input configuration file:\r +* The pipeline only needs a configuration file and the step number you want to run.\r +- Configuration file keys (see also the example in config.json):\r + - **identifier**: project identifier to be used in the result files\r + - **type_normalization**: normalization type (possible values: tpm, fpkm, tmm, cpm or fpkm_uq)\r + - **genome_assembly**: the supported assemblies are the 37 and 38 (values may be: g37 or g38)\r + - **pathway_geneset**: pathway-based gene sets, choose one identifier from the list in [genesets_available.txt](https://github.com/YasCoMa/caliscoma_pipeline/blob/master/genesets_available.txt)\r + - **folder**: working directory\r + - **expression_file**: compressed gene expression file for the desired icgc project, it must be separated by tabulation. The following columns are mandatory: submitted_file_id (sample names), raw_read_count (the read counts without normalization) and gene_id (genes in ensembl or hgnc symbol). File expected to be in {folder}.\r + - **labels_file** (optional for function 1): file with two columns, one named 'sample' corresponding to the unique values of submitted_sample_id; the second named 'label' corresponding to a disease (or confirmed tumour) (1) or a healthy (0) case. 
File expected to be in {folder}.\r + - **trained_model** (optional for function 1): file with the trained model to separate healthy and disease cases. Full path is expected.\r + - **means_table_file** (optional for function 1): file with the means table calculated when the model is trained by the function 3. Full path is expected.\r + - **samples_pathway_scores** (optional for function 1): file with the original model calculated pathway scores by function 1, in order to check the number of features expected by the original model. Full path is expected.\r + - **optimized_weights_file**: tab separated table file with two columns representing the weights (w1, w2, w3) and their respective values.\r + - **drug_list_file** (only mandatory for function 3): file with the gold standard drug list (one drugbank id per line), this file is expected to be in the in the experiment item folder results ({folder}/{identifier})\r + - **drug_combination_file** (only mandatory for function 5): file with the drug combination candidates list (drugbank ids concatenated with comma in each line). Full path is expected.\r +\r +- Observation: \r + * The "labels_file" parameter is mandatory for the weights optimization, scoring matrix calculation, model traning and drug (or drug combination) ranking \r + * In case of transfer learning, "labels_file" may be ignored only if both "trained_model", "means_table_file" and "samples_pathway_scores" are present. This is only possible for the functions 2, 4 and 5. For weights optimization, only labels file is accepted.\r + * If type_normalization and/or genome_assembly are missing or empty, it will switch to the default fpkm_uq\r + * If pathway_geneset is missing or empty, it will switch to the default KEGG_2021_HUMAN\r + * If optimized_weights_file is missing or empty, it will switch to the default values (w1: 20, w2: 5, w3: 10)\r + \r +## Usage Instructions\r +### Preparation:\r +1. 
````git clone https://github.com/YasCoMa/caliscoma_pipeline.git````\r +2. ````cd caliscoma_pipeline````\r +3. Create conda environment to handle dependencies: ````conda env create -f drugresponse_env.yml````\r +4. ````conda activate drugresponse_env````\r +5. Setup an environment variable named "path_workflow" with the full path to this workflow folder\r +\r +### Getting data for the running example in the LICA-FR and LIRI-JP projects from ICGC\r +1. Download the [expression file for LICA-FR](https://dcc.icgc.org/api/v1/download?fn=/current/Projects/LICA-FR/exp_seq.LICA-FR.tsv.gz) and put it in data_icgc folder\r +2. Download the [expression file for LIRI-JP](https://dcc.icgc.org/api/v1/download?fn=/current/Projects/LIRI-JP/exp_seq.LIRI-JP.tsv.gz) and put it in data_icgc folder\r +3. For the liri-jp project, the labels file is already processed, to given an example of a project that run all steps proposed by this workflow\r +\r +### Run analysis\r +- Run all steps: ````python3 main.py -rt 0 -cf config.json````\r +- Run all steps: ````python3 main.py -rt 0 -cf config_transfer_options.json````\r +\r +- Run only data processing: ````python3 main.py -rt 1 -cf config.json````\r +- Run only data processing: ````python3 main.py -rt 1 -cf config_transfer_options.json````\r +\r +- Run only model training & modified pathway score matrix: ````python3 main.py -rt 2 -cf config.json````\r +- Run only model training & modified pathway score matrix: ````python3 main.py -rt 2 -cf config_transfer_options.json````\r +\r +- Run only weights optimization: ````python3 main.py -rt 3 -cf config.json````\r +\r +- Run only drug ranking: ````python3 main.py -rt 4 -cf config.json````\r +- Run only drug ranking: ````python3 main.py -rt 4 -cf config_transfer_options.json````\r +\r +- Run only drug combination evaluation: ````python3 main.py -rt 5 -cf config.json````\r +- Run only drug combination evaluation: ````python3 main.py -rt 5 -cf config_transfer_options.json````\r +\r +## Reference\r 
+Martins, Y. C. (2023). Multi-task analysis of gene expression data on cancer public datasets. medRxiv, 2023-09.\r +\r +## Bug Report\r +Please, use the [Issues](https://github.com/YasCoMa/caliscoma_pipeline/issues) tab to report any bug.""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/620?version=1" ; + schema1:isBasedOn "https://github.com/YasCoMa/caliscoma_pipeline" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for DReCaS - Pipeline for drug ranking based on computed pathway scores of disease and healthy samples" ; + schema1:sdDatePublished "2024-08-05 10:27:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/620/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16450 ; + schema1:creator ; + schema1:dateCreated "2023-10-22T00:11:33Z" ; + schema1:dateModified "2023-10-22T00:11:33Z" ; + schema1:description """## Summary\r +\r +This pipeline contains the following functions: \r +(1) Data processing to handle the tansformations needed to obtain the original pathway scores of the samples according to single sample analysis GSEA\r +(2) Model training based on the disease and healthy sample pathway scores, to classify them\r +(3) Scoring matrix weights optimization according to a gold standard list of drugs (those that went on clinical trials or are approved for the disease).It tests the weights in a range of 0 to 30 (you may change as you want). 
The evaluation function tests and try to maximize the number of approved drugs whose modified pathway scores for disease samples is changed from disease to healthy sample classification, according to the trained model.\r +(4) Computation of the calibrated disease samples pathwa scores according to the interaction among drug and targets found in the sample pathways & Drug ranking based on the disease samples whose calibrated matrix were responsible to change the trained model decision from disease to healthy state.\r +(5) Drug combination ranking evaluated the same way as in option (4) but adding the effects of multiple drugs in each sample while calculating the calibrated scoring matrix\r + \r +## Input configuration file:\r +* The pipeline only needs a configuration file and the step number you want to run.\r +- Configuration file keys (see also the example in config.json):\r + - **identifier**: project identifier to be used in the result files\r + - **type_normalization**: normalization type (possible values: tpm, fpkm, tmm, cpm or fpkm_uq)\r + - **genome_assembly**: the supported assemblies are the 37 and 38 (values may be: g37 or g38)\r + - **pathway_geneset**: pathway-based gene sets, choose one identifier from the list in [genesets_available.txt](https://github.com/YasCoMa/caliscoma_pipeline/blob/master/genesets_available.txt)\r + - **folder**: working directory\r + - **expression_file**: compressed gene expression file for the desired icgc project, it must be separated by tabulation. The following columns are mandatory: submitted_file_id (sample names), raw_read_count (the read counts without normalization) and gene_id (genes in ensembl or hgnc symbol). File expected to be in {folder}.\r + - **labels_file** (optional for function 1): file with two columns, one named 'sample' corresponding to the unique values of submitted_sample_id; the second named 'label' corresponding to a disease (or confirmed tumour) (1) or a healthy (0) case. 
File expected to be in {folder}.\r + - **trained_model** (optional for function 1): file with the trained model to separate healthy and disease cases. Full path is expected.\r + - **means_table_file** (optional for function 1): file with the means table calculated when the model is trained by the function 3. Full path is expected.\r + - **samples_pathway_scores** (optional for function 1): file with the original model calculated pathway scores by function 1, in order to check the number of features expected by the original model. Full path is expected.\r + - **optimized_weights_file**: tab separated table file with two columns representing the weights (w1, w2, w3) and their respective values.\r + - **drug_list_file** (only mandatory for function 3): file with the gold standard drug list (one drugbank id per line), this file is expected to be in the in the experiment item folder results ({folder}/{identifier})\r + - **drug_combination_file** (only mandatory for function 5): file with the drug combination candidates list (drugbank ids concatenated with comma in each line). Full path is expected.\r +\r +- Observation: \r + * The "labels_file" parameter is mandatory for the weights optimization, scoring matrix calculation, model traning and drug (or drug combination) ranking \r + * In case of transfer learning, "labels_file" may be ignored only if both "trained_model", "means_table_file" and "samples_pathway_scores" are present. This is only possible for the functions 2, 4 and 5. For weights optimization, only labels file is accepted.\r + * If type_normalization and/or genome_assembly are missing or empty, it will switch to the default fpkm_uq\r + * If pathway_geneset is missing or empty, it will switch to the default KEGG_2021_HUMAN\r + * If optimized_weights_file is missing or empty, it will switch to the default values (w1: 20, w2: 5, w3: 10)\r + \r +## Usage Instructions\r +### Preparation:\r +1. 
````git clone https://github.com/YasCoMa/caliscoma_pipeline.git````\r +2. ````cd caliscoma_pipeline````\r +3. Create conda environment to handle dependencies: ````conda env create -f drugresponse_env.yml````\r +4. ````conda activate drugresponse_env````\r +5. Setup an environment variable named "path_workflow" with the full path to this workflow folder\r +\r +### Getting data for the running example in the LICA-FR and LIRI-JP projects from ICGC\r +1. Download the [expression file for LICA-FR](https://dcc.icgc.org/api/v1/download?fn=/current/Projects/LICA-FR/exp_seq.LICA-FR.tsv.gz) and put it in data_icgc folder\r +2. Download the [expression file for LIRI-JP](https://dcc.icgc.org/api/v1/download?fn=/current/Projects/LIRI-JP/exp_seq.LIRI-JP.tsv.gz) and put it in data_icgc folder\r +3. For the liri-jp project, the labels file is already processed, to given an example of a project that run all steps proposed by this workflow\r +\r +### Run analysis\r +- Run all steps: ````python3 main.py -rt 0 -cf config.json````\r +- Run all steps: ````python3 main.py -rt 0 -cf config_transfer_options.json````\r +\r +- Run only data processing: ````python3 main.py -rt 1 -cf config.json````\r +- Run only data processing: ````python3 main.py -rt 1 -cf config_transfer_options.json````\r +\r +- Run only model training & modified pathway score matrix: ````python3 main.py -rt 2 -cf config.json````\r +- Run only model training & modified pathway score matrix: ````python3 main.py -rt 2 -cf config_transfer_options.json````\r +\r +- Run only weights optimization: ````python3 main.py -rt 3 -cf config.json````\r +\r +- Run only drug ranking: ````python3 main.py -rt 4 -cf config.json````\r +- Run only drug ranking: ````python3 main.py -rt 4 -cf config_transfer_options.json````\r +\r +- Run only drug combination evaluation: ````python3 main.py -rt 5 -cf config.json````\r +- Run only drug combination evaluation: ````python3 main.py -rt 5 -cf config_transfer_options.json````\r +\r +## Reference\r 
+Martins, Y. C. (2023). Multi-task analysis of gene expression data on cancer public datasets. medRxiv, 2023-09.\r +\r +## Bug Report\r +Please, use the [Issues](https://github.com/YasCoMa/caliscoma_pipeline/issues) tab to report any bug.""" ; + schema1:keywords "Workflows, durg response simulation, gene set enrichment analysis, personalized medicine, data retrieval and transformation" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "DReCaS - Pipeline for drug ranking based on computed pathway scores of disease and healthy samples" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/620?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-15T16:30:18.151248" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Purge-duplicate-contigs-VGP6/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicate-contigs-VGP6/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:version "0.3.1" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# Pangenome databases provide superior host removal and mycobacteria classification from clinical metagenomic data\r +\r +> Hall, M, Coin, L., Pangenome databases provide superior host removal and mycobacteria classification from clinical metagenomic data. bioRxiv 2023. doi: [10.1101/2023.09.18.558339][doi]\r +\r +Benchmarking different ways of doing read (taxonomic) classification, with a focus on\r +removal of contamination and classification of _M. 
tuberculosis_ reads.\r +\r +This repository contains the code and snakemake pipeline to build/download the\r +databases, obtain all results from [the paper][doi], along with accompanying configuration\r +files.\r +\r +Custom databases have all been uploaded to Zenodo, along with the simulated reads:\r +\r +- Nanopore simulated metagenomic reads - \r +- Illumina simulated metagenomic reads - \r +- Nanopore and Illumina artificial real reads - \r +- Kraken2 database built from the Human Pangenome Reference Consortium\r + genomes - \r +- Kraken2 database built from the kraken2 Human\r + library - \r +- Kraken2 database built from a *Mycobacterium* representative set of\r + genomes - \r +- A (fasta) database of representative genomes from the *Mycobacterium*\r + genus - \r +- A (fasta) database of *M. tuberculosis* genomes from a variety of\r + lineages - \r +- The fasta file built from the [Clockwork](https://github.com/iqbal-lab-org/clockwork)\r + decontamination pipeline - \r +\r +## Example usage\r +\r +We provide some usage examples showing how to download the databases and then use them\r +on your reads.\r +\r +### Human read removal\r +\r +The method we found to give the best balance of runtime, memory usage, and precision and\r +recall was kraken2 with a database built from the Human Pangenome Reference Consortium\r +genomes.\r +\r +This example has been wrapped into a standalone tool called [`nohuman`](https://github.com/mbhall88/nohuman/) which takes a fastq as input and returns a fastq with human reads removed.\r +\r +#### Download human database\r +\r +```\r +mkdir HPRC_db/\r +cd HPRC_db\r +URL="https://zenodo.org/record/8339732/files/k2_HPRC_20230810.tar.gz"\r +wget "$URL"\r +tar -xzf k2_HPRC_20230810.tar.gz\r +rm k2_HPRC_20230810.tar.gz\r +```\r +\r +#### Run kraken2 with HPRC database\r +\r +You'll need [kraken2](https://github.com/DerrickWood/kraken2) installed for this step.\r +\r +```\r +kraken2 --threads 4 --db HPRC_db/ --output classifications.tsv 
reads.fq\r +```\r +\r +If you are using Illumina reads, a slight adjustment is needed\r +\r +```\r +kraken2 --paired --threads 4 --db HPRC_db/ --output classifications.tsv reads_1.fq reads_2.fq\r +```\r +\r +#### Extract non-human reads\r +\r +You'll need [seqkit](https://github.com/shenwei356/seqkit) installed for this step\r +\r +For Nanopore data\r +\r +```\r +awk -F'\\t' '$1=="U" {print $2}' classifications.tsv | \\\r + seqkit grep -f - -o reads.depleted.fq reads.fq\r +```\r +\r +For Illumina data\r +\r +```\r +awk -F'\\t' '$1=="U" {print $2}' classifications.tsv > ids.txt\r +seqkit grep --id-regexp '^(\\S+)/[12]' -f ids.txt -o reads_1.depleted.fq reads_1.fq\r +seqkit grep --id-regexp '^(\\S+)/[12]' -f ids.txt -o reads_2.depleted.fq reads_2.fq\r +```\r +\r +### *M. tuberculosis* classification/enrichment\r +\r +For this step we recommend either [minimap2](https://github.com/lh3/minimap2) or kraken2\r +with a *Mycobacterium* genus database. We leave it to the user to decide which approach\r +they prefer based on the results in our manuscript.\r +\r +#### Download databases\r +\r +```\r +mkdir Mycobacterium_db\r +cd Mycobacterium_db\r +# download database for use with minimap2\r +URL="https://zenodo.org/record/8339941/files/Mycobacterium.rep.fna.gz"\r +wget "$URL"\r +IDS_URL="https://zenodo.org/record/8343322/files/mtb.ids"\r +wget "$IDS_URL"\r +# download kraken database\r +URL="https://zenodo.org/record/8339822/files/k2_Mycobacterium_20230817.tar.gz"\r +wget "$URL"\r +tar -xzf k2_Mycobacterium_20230817.tar.gz\r +rm k2_Mycobacterium_20230817.tar.gz\r +```\r +\r +#### Classify reads\r +\r +**minimap2**\r +\r +```\r +# nanopore\r +minimap2 --secondary=no -c -t 4 -x map-ont -o reads.aln.paf Mycobacterium_db/Mycobacterium.rep.fna.gz reads.depleted.fq\r +# illumina\r +minimap2 --secondary=no -c -t 4 -x sr -o reads.aln.paf Mycobacterium_db/Mycobacterium.rep.fna.gz reads_1.depleted.fq reads_2.depleted.fq\r +```\r +\r +**kraken2**\r +\r +```\r +# nanopore\r +kraken2 
--db Mycobacterium_db --threads 4 --report myco.kreport --output classifications.myco.tsv reads.depleted.fq\r +# illumina\r +kraken2 --db Mycobacterium_db --paired --threads 4 --report myco.kreport --output classifications.myco.tsv reads_1.depleted.fq reads_2.depleted.fq\r +```\r +\r +#### Extract *M. tuberculosis* reads\r +\r +**minimap2**\r +\r +```\r +# nanopore\r +grep -Ff Mycobacterium_db/mtb.ids reads.aln.paf | cut -f1 | \\\r + seqkit grep -f - -o reads.enriched.fq reads.depleted.fq\r +# illumina\r +grep -Ff Mycobacterium_db/mtb.ids reads.aln.paf | cut -f1 > keep.ids\r +seqkit grep -f keep.ids -o reads_1.enriched.fq reads_1.depleted.fq\r +seqkit grep -f keep.ids -o reads_2.enriched.fq reads_2.depleted.fq\r +```\r +\r +**kraken2**\r +\r +We'll use\r +the [`extract_kraken_reads.py` script](https://github.com/jenniferlu717/KrakenTools#extract_kraken_readspy)\r +for this\r +\r +```\r +# nanopore\r +python extract_kraken_reads.py -k classifications.myco.tsv -1 reads.depleted.fq -o reads.enriched.fq -t 1773 -r myco.kreport --include-children\r +# illumina\r +python extract_kraken_reads.py -k classifications.myco.tsv -1 reads_1.depleted.fq -2 reads_2.depleted.fq -o reads_1.enriched.fq -o2 reads_2.enriched.fq -t 1773 -r myco.kreport --include-children\r +```\r +\r +[doi]: https://doi.org/10.1101/2023.09.18.558339 \r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.700.2" ; + schema1:isBasedOn "https://github.com/mbhall88/classification_benchmark.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Pangenome databases provide superior host removal and mycobacteria classification from clinical metagenomic data" ; + schema1:sdDatePublished "2024-08-05 10:26:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/700/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1531 ; + schema1:creator ; + schema1:dateCreated "2024-01-10T01:19:39Z" ; + schema1:dateModified "2024-01-10T01:20:12Z" ; + schema1:description """# Pangenome databases provide superior host removal and mycobacteria classification from clinical metagenomic data\r +\r +> Hall, M, Coin, L., Pangenome databases provide superior host removal and mycobacteria classification from clinical metagenomic data. bioRxiv 2023. doi: [10.1101/2023.09.18.558339][doi]\r +\r +Benchmarking different ways of doing read (taxonomic) classification, with a focus on\r +removal of contamination and classification of _M. tuberculosis_ reads.\r +\r +This repository contains the code and snakemake pipeline to build/download the\r +databases, obtain all results from [the paper][doi], along with accompanying configuration\r +files.\r +\r +Custom databases have all been uploaded to Zenodo, along with the simulated reads:\r +\r +- Nanopore simulated metagenomic reads - \r +- Illumina simulated metagenomic reads - \r +- Nanopore and Illumina artificial real reads - \r +- Kraken2 database built from the Human Pangenome Reference Consortium\r + genomes - \r +- Kraken2 database built from the kraken2 Human\r + library - \r +- Kraken2 database built from a *Mycobacterium* representative set of\r + genomes - \r +- A (fasta) database of representative genomes from the *Mycobacterium*\r + genus - \r +- A (fasta) database of *M. 
tuberculosis* genomes from a variety of\r + lineages - \r +- The fasta file built from the [Clockwork](https://github.com/iqbal-lab-org/clockwork)\r + decontamination pipeline - \r +\r +## Example usage\r +\r +We provide some usage examples showing how to download the databases and then use them\r +on your reads.\r +\r +### Human read removal\r +\r +The method we found to give the best balance of runtime, memory usage, and precision and\r +recall was kraken2 with a database built from the Human Pangenome Reference Consortium\r +genomes.\r +\r +This example has been wrapped into a standalone tool called [`nohuman`](https://github.com/mbhall88/nohuman/) which takes a fastq as input and returns a fastq with human reads removed.\r +\r +#### Download human database\r +\r +```\r +mkdir HPRC_db/\r +cd HPRC_db\r +URL="https://zenodo.org/record/8339732/files/k2_HPRC_20230810.tar.gz"\r +wget "$URL"\r +tar -xzf k2_HPRC_20230810.tar.gz\r +rm k2_HPRC_20230810.tar.gz\r +```\r +\r +#### Run kraken2 with HPRC database\r +\r +You'll need [kraken2](https://github.com/DerrickWood/kraken2) installed for this step.\r +\r +```\r +kraken2 --threads 4 --db HPRC_db/ --output classifications.tsv reads.fq\r +```\r +\r +If you are using Illumina reads, a slight adjustment is needed\r +\r +```\r +kraken2 --paired --threads 4 --db HPRC_db/ --output classifications.tsv reads_1.fq reads_2.fq\r +```\r +\r +#### Extract non-human reads\r +\r +You'll need [seqkit](https://github.com/shenwei356/seqkit) installed for this step\r +\r +For Nanopore data\r +\r +```\r +awk -F'\\t' '$1=="U" {print $2}' classifications.tsv | \\\r + seqkit grep -f - -o reads.depleted.fq reads.fq\r +```\r +\r +For Illumina data\r +\r +```\r +awk -F'\\t' '$1=="U" {print $2}' classifications.tsv > ids.txt\r +seqkit grep --id-regexp '^(\\S+)/[12]' -f ids.txt -o reads_1.depleted.fq reads_1.fq\r +seqkit grep --id-regexp '^(\\S+)/[12]' -f ids.txt -o reads_2.depleted.fq reads_2.fq\r +```\r +\r +### *M. 
tuberculosis* classification/enrichment\r +\r +For this step we recommend either [minimap2](https://github.com/lh3/minimap2) or kraken2\r +with a *Mycobacterium* genus database. We leave it to the user to decide which approach\r +they prefer based on the results in our manuscript.\r +\r +#### Download databases\r +\r +```\r +mkdir Mycobacterium_db\r +cd Mycobacterium_db\r +# download database for use with minimap2\r +URL="https://zenodo.org/record/8339941/files/Mycobacterium.rep.fna.gz"\r +wget "$URL"\r +IDS_URL="https://zenodo.org/record/8343322/files/mtb.ids"\r +wget "$IDS_URL"\r +# download kraken database\r +URL="https://zenodo.org/record/8339822/files/k2_Mycobacterium_20230817.tar.gz"\r +wget "$URL"\r +tar -xzf k2_Mycobacterium_20230817.tar.gz\r +rm k2_Mycobacterium_20230817.tar.gz\r +```\r +\r +#### Classify reads\r +\r +**minimap2**\r +\r +```\r +# nanopore\r +minimap2 --secondary=no -c -t 4 -x map-ont -o reads.aln.paf Mycobacterium_db/Mycobacterium.rep.fna.gz reads.depleted.fq\r +# illumina\r +minimap2 --secondary=no -c -t 4 -x sr -o reads.aln.paf Mycobacterium_db/Mycobacterium.rep.fna.gz reads_1.depleted.fq reads_2.depleted.fq\r +```\r +\r +**kraken2**\r +\r +```\r +# nanopore\r +kraken2 --db Mycobacterium_db --threads 4 --report myco.kreport --output classifications.myco.tsv reads.depleted.fq\r +# illumina\r +kraken2 --db Mycobacterium_db --paired --threads 4 --report myco.kreport --output classifications.myco.tsv reads_1.depleted.fq reads_2.depleted.fq\r +```\r +\r +#### Extract *M. 
tuberculosis* reads\r +\r +**minimap2**\r +\r +```\r +# nanopore\r +grep -Ff Mycobacterium_db/mtb.ids reads.aln.paf | cut -f1 | \\\r + seqkit grep -f - -o reads.enriched.fq reads.depleted.fq\r +# illumina\r +grep -Ff Mycobacterium_db/mtb.ids reads.aln.paf | cut -f1 > keep.ids\r +seqkit grep -f keep.ids -o reads_1.enriched.fq reads_1.depleted.fq\r +seqkit grep -f keep.ids -o reads_2.enriched.fq reads_2.depleted.fq\r +```\r +\r +**kraken2**\r +\r +We'll use\r +the [`extract_kraken_reads.py` script](https://github.com/jenniferlu717/KrakenTools#extract_kraken_readspy)\r +for this\r +\r +```\r +# nanopore\r +python extract_kraken_reads.py -k classifications.myco.tsv -1 reads.depleted.fq -o reads.enriched.fq -t 1773 -r myco.kreport --include-children\r +# illumina\r +python extract_kraken_reads.py -k classifications.myco.tsv -1 reads_1.depleted.fq -2 reads_2.depleted.fq -o reads_1.enriched.fq -o2 reads_2.enriched.fq -t 1773 -r myco.kreport --include-children\r +```\r +\r +[doi]: https://doi.org/10.1101/2023.09.18.558339 \r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/700?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Pangenome databases provide superior host removal and mycobacteria classification from clinical metagenomic data" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/700?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """The workflows in this collection are from the '16S Microbial Analysis with mothur' tutorial for analysis of 16S data (Saskia Hiltemann, Bérénice Batut, Dave Clements), adapted for pipeline use on galaxy australia (Ahmed Mehdi). The workflows developed in galaxy use mothur software package developed by Schloss et al https://pubmed.ncbi.nlm.nih.gov/19801464/. 
\r +\r +Please also refer to the 16S tutorials available at Galaxy https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html and [https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop/tutorial.html\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/650?version=1" ; + schema1:isBasedOn "https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Workflow 3: Classification [Galaxy Training: 16S Microbial Analysis With Mothur]" ; + schema1:sdDatePublished "2024-08-05 10:27:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/650/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12146 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2023-11-09T05:16:29Z" ; + schema1:dateModified "2023-11-09T05:16:29Z" ; + schema1:description """The workflows in this collection are from the '16S Microbial Analysis with mothur' tutorial for analysis of 16S data (Saskia Hiltemann, Bérénice Batut, Dave Clements), adapted for pipeline use on galaxy australia (Ahmed Mehdi). The workflows developed in galaxy use mothur software package developed by Schloss et al https://pubmed.ncbi.nlm.nih.gov/19801464/. 
\r +\r +Please also refer to the 16S tutorials available at Galaxy https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html and [https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop/tutorial.html\r +""" ; + schema1:isPartOf ; + schema1:keywords "Metagenomics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Workflow 3: Classification [Galaxy Training: 16S Microbial Analysis With Mothur]" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/650?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + ; + ns1:output , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for 
details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.131.5" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/131/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 76457 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-04T15:31:10Z" ; + schema1:dateModified "2024-05-14T10:16:26Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see 
the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/131?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_amber_md_setup/blob/main/biobb_wf_amber_md_setup/notebooks/mdsetup_lig/biobb_amber_complex_setup_notebook.ipynb" ; + schema1:version 5 . + + a schema1:Dataset ; + schema1:datePublished "2024-07-19T19:43:37.062597" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/bacterial_genome_annotation" ; + schema1:license "GPL-3.0-or-later" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "bacterial_genome_annotation/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . + + a schema1:Dataset ; + schema1:datePublished "2023-11-23T06:34:49+00:00" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/AustralianBioCommons/Genome-assessment-post-assembly.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Genome-assessment-post-assembly" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Genome-assessment-post-assembly" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/AustralianBioCommons/Genome-assessment-post-assembly.git" ; + schema1:version "main" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.818.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_amber_complex_md_setup/docker" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Docker Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:24:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/818/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 780 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-22T10:11:25Z" ; + schema1:dateModified "2024-05-22T13:49:10Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Docker Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_amber_complex_md_setup/docker/Dockerfile" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. 
Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. 
\r +- At the **biological level**, it is important to link observed **conformational ensembles**, to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDKe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.486.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_flexdyn" ; + schema1:license "other-open" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein conformational ensembles generation" ; + schema1:sdDatePublished "2024-08-05 10:30:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/486/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 108623 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-04T15:18:56Z" ; + schema1:dateModified "2024-04-22T10:40:33Z" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. 
Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. 
\r +- At the **biological level**, it is important to link observed **conformational ensembles**, to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDKe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/486?version=2" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "" ; + schema1:name "Jupyter Notebook Protein conformational ensembles generation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_flexdyn/blob/main/biobb_wf_flexdyn/notebooks/biobb_wf_flexdyn.ipynb" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:description """# RNA-seq Scientific Workflow\r +Workflow for RNA sequencing using the Parallel Scripting Library - Parsl.\r +\r +**Reference:** Cruz, L., Coelho, M., Terra, R., Carvalho, D., Gadelha, L., Osthoff, C., & Ocaña, K. (2021). *Workflows* Científicos de RNA-Seq em Ambientes Distribuídos de Alto Desempenho: Otimização de Desempenho e Análises de Dados de Expressão Diferencial de Genes. In *Anais do XV Brazilian e-Science Workshop*, p. 57-64. Porto Alegre: SBC. 
DOI: https://doi.org/10.5753/bresci.2021.15789\r +\r +## Requirements\r +\r +In order to use RNA-seq Workflow the following tools must be available:\r +\r +- [Bowtie2](http://bowtie-bio.sourceforge.net/bowtie2/index.shtml)\r +\r +You can install Bowtie2 by running:\r +\r +> bowtie2-2.3.5.1-linux-x86_64.zip\r +\r +Or\r +\r +> sudo yum install bowtie2-2.3.5-linux-x86_64\r +\r +- [Samtools](http://www.htslib.org/)\r +\r +Samtools is a suite of programs for interacting with high-throughput sequencing data.\r +\r +- [Picard](https://github.com/broadinstitute/picard)\r +\r +Picard is a set of Java command line tools for manipulating high-throughput sequencing (HTS) data and formats.\r +\r +- [HTSeq](https://htseq.readthedocs.io/en/master/)\r +\r +HTSeq is a native Python library that folows conventions of many Python packages. You can install it by running:\r +\r +> pip install HTSeq\r +\r +HTSeq uses [NumPy](https://numpy.org/), [Pysam](https://github.com/pysam-developers/pysam) and [matplotlib](https://matplotlib.org/). Be sure this tools are installed.\r +\r +- [R](https://www.r-project.org/)\r +\r +To use [DESEq2](https://bioconductor.org/packages/release/bioc/html/DESeq2.html) script make sure R language is also installed. You can install it by running:\r +\r +\r +> sudo apt install r-base\r +\r +- [Parsl - Parallel Scripting Library](https://parsl.readthedocs.io/en/stable/index.html)\r +\r +The recommended way to install Parsl is the suggest approach from Parsl's documentation:\r +\r +\r +> python3 -m pip install parsl\r +\r +- [Python (version >= 3.5)](https://www.python.org/)\r +\r +To use Parsl, you need Python 3.5 or above. You also need Python to use HTSeq, so you should load only one Python version.\r +\r +## Workflow invocation\r +\r +First of all, make a Comma Separated Values (CSV) file. So, onto the first line type: ``sampleName,fileName,condition``. **Remember, there must be no spaces between items**. 
You can use the file *"table.csv"* in this repository as an example. Your CSV file will be like this:\r +\r + | sampleName | fileName |condition|\r + |------------------|------------------|---------|\r + | tissue control 1 | SRR5445794.merge.count | control |\r + | tissue control 2 | SRR5445795.merge.count | control |\r + | tissue control 3 | SRR5445796.merge.count | control |\r + | tissue wntup 1 | SRR5445797.merge.count | wntup |\r + | tissue wntup 2 | SRR5445798.merge.count | wntup |\r + | tissue wntup 3 | SRR5445799.merge.count | wntup |\r +\r +The list of command line arguments passed to Python script, beyond the script's name, must be: \r +\r + 1. The indexed genome; \r + 2. The number of threads for bowtie task, sort task, number of splitted files for split_picard task and number of CPU running in htseq task; \r + 3. Path to read fastaq file, which is the path of the input files; \r + 4. Directory's name where the output files must be placed; \r + 5. GTF file;\r + 7. and, lastly the DESeq script. \r + \r +Make sure all the files necessary to run the workflow are in the same directory and the fastaq files in a dedicated folder, as a input directory. The command line will be like this:\r +\r +> python3 rna-seq.py ../mm9/mm9 24 ../inputs/ ../outputs ../Mus_musculus.NCBIM37.67.gtf ../DESeq.R\r +\r +**Remember to adjust the parameter multithreaded and multicore according with your computational environment.** \r +Example: If your machine has 8 cores, you should set the parameter on 8.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.411.1" ; + schema1:isBasedOn "https://github.com/lucruzz/RNA-seq" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ParslRNA-seq Scientific Workflow" ; + schema1:sdDatePublished "2024-08-05 10:31:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/411/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4579 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-12-06T19:17:17Z" ; + schema1:dateModified "2023-01-16T14:04:54Z" ; + schema1:description """# RNA-seq Scientific Workflow\r +Workflow for RNA sequencing using the Parallel Scripting Library - Parsl.\r +\r +**Reference:** Cruz, L., Coelho, M., Terra, R., Carvalho, D., Gadelha, L., Osthoff, C., & Ocaña, K. (2021). *Workflows* Científicos de RNA-Seq em Ambientes Distribuídos de Alto Desempenho: Otimização de Desempenho e Análises de Dados de Expressão Diferencial de Genes. In *Anais do XV Brazilian e-Science Workshop*, p. 57-64. Porto Alegre: SBC. DOI: https://doi.org/10.5753/bresci.2021.15789\r +\r +## Requirements\r +\r +In order to use RNA-seq Workflow the following tools must be available:\r +\r +- [Bowtie2](http://bowtie-bio.sourceforge.net/bowtie2/index.shtml)\r +\r +You can install Bowtie2 by running:\r +\r +> bowtie2-2.3.5.1-linux-x86_64.zip\r +\r +Or\r +\r +> sudo yum install bowtie2-2.3.5-linux-x86_64\r +\r +- [Samtools](http://www.htslib.org/)\r +\r +Samtools is a suite of programs for interacting with high-throughput sequencing data.\r +\r +- [Picard](https://github.com/broadinstitute/picard)\r +\r +Picard is a set of Java command line tools for manipulating high-throughput sequencing (HTS) data and formats.\r +\r +- [HTSeq](https://htseq.readthedocs.io/en/master/)\r +\r +HTSeq is a native Python library that folows conventions of many Python packages. You can install it by running:\r +\r +> pip install HTSeq\r +\r +HTSeq uses [NumPy](https://numpy.org/), [Pysam](https://github.com/pysam-developers/pysam) and [matplotlib](https://matplotlib.org/). 
Be sure this tools are installed.\r +\r +- [R](https://www.r-project.org/)\r +\r +To use [DESEq2](https://bioconductor.org/packages/release/bioc/html/DESeq2.html) script make sure R language is also installed. You can install it by running:\r +\r +\r +> sudo apt install r-base\r +\r +- [Parsl - Parallel Scripting Library](https://parsl.readthedocs.io/en/stable/index.html)\r +\r +The recommended way to install Parsl is the suggest approach from Parsl's documentation:\r +\r +\r +> python3 -m pip install parsl\r +\r +- [Python (version >= 3.5)](https://www.python.org/)\r +\r +To use Parsl, you need Python 3.5 or above. You also need Python to use HTSeq, so you should load only one Python version.\r +\r +## Workflow invocation\r +\r +First of all, make a Comma Separated Values (CSV) file. So, onto the first line type: ``sampleName,fileName,condition``. **Remember, there must be no spaces between items**. You can use the file *"table.csv"* in this repository as an example. Your CSV file will be like this:\r +\r + | sampleName | fileName |condition|\r + |------------------|------------------|---------|\r + | tissue control 1 | SRR5445794.merge.count | control |\r + | tissue control 2 | SRR5445795.merge.count | control |\r + | tissue control 3 | SRR5445796.merge.count | control |\r + | tissue wntup 1 | SRR5445797.merge.count | wntup |\r + | tissue wntup 2 | SRR5445798.merge.count | wntup |\r + | tissue wntup 3 | SRR5445799.merge.count | wntup |\r +\r +The list of command line arguments passed to Python script, beyond the script's name, must be: \r +\r + 1. The indexed genome; \r + 2. The number of threads for bowtie task, sort task, number of splitted files for split_picard task and number of CPU running in htseq task; \r + 3. Path to read fastaq file, which is the path of the input files; \r + 4. Directory's name where the output files must be placed; \r + 5. GTF file;\r + 7. and, lastly the DESeq script. 
\r + \r +Make sure all the files necessary to run the workflow are in the same directory and the fastaq files in a dedicated folder, as a input directory. The command line will be like this:\r +\r +> python3 rna-seq.py ../mm9/mm9 24 ../inputs/ ../outputs ../Mus_musculus.NCBIM37.67.gtf ../DESeq.R\r +\r +**Remember to adjust the parameter multithreaded and multicore according with your computational environment.** \r +Example: If your machine has 8 cores, you should set the parameter on 8.\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "ParslRNA-seq Scientific Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/411?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 19053 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Automated quantitative analysis of DIA proteomics mass spectrometry measurements." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/980?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/diaproteomics" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/diaproteomics" ; + schema1:sdDatePublished "2024-08-05 10:24:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/980/ro_crate?version=7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7318 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "Automated quantitative analysis of DIA proteomics mass spectrometry measurements." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/980?version=6" ; + schema1:keywords "data-independent-proteomics, dia-proteomics, openms, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/diaproteomics" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/980?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/972?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/circdna" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/circdna" ; + schema1:sdDatePublished "2024-08-05 10:24:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/972/ro_crate?version=7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10158 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:45Z" ; + schema1:dateModified "2024-06-11T12:54:45Z" ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/972?version=7" ; + schema1:keywords "ampliconarchitect, ampliconsuite, circle-seq, circular, DNA, eccdna, ecdna, extrachromosomal-circular-dna, Genomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/circdna" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/972?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """\r +Author: AMBARISH KUMAR er.ambarish@gmail.com & ambari73_sit@jnu.ac.in\r +\r +This is a proposed standard operating procedure for genomic variant detection using GATK4.\r +\r +It is hoped to be effective and useful for getting SARS-CoV-2 genome variants.\r +\r +\r +\r +It uses Illumina RNASEQ reads and genome sequence.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/33?version=1" ; + schema1:isBasedOn "https://github.com/ambarishK/bio-cwl-tools/blob/release/gatk4W-spark.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genomic variants - SNPs and INDELs detection using GATK4 spark based tools." ; + schema1:sdDatePublished "2024-08-05 10:33:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/33/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4305 ; + schema1:creator ; + schema1:dateCreated "2020-06-17T06:38:32Z" ; + schema1:dateModified "2023-01-16T13:42:33Z" ; + schema1:description """\r +Author: AMBARISH KUMAR er.ambarish@gmail.com & ambari73_sit@jnu.ac.in\r +\r +This is a proposed standard operating procedure for genomic variant detection using GATK4.\r +\r +It is hoped to be effective and useful for getting SARS-CoV-2 genome variants.\r +\r +\r +\r +It uses Illumina RNASEQ reads and genome sequence.\r +""" ; + schema1:image ; + schema1:keywords "CWL, GATK4, SNPs, INDELs, SPARK" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Genomic variants - SNPs and INDELs detection using GATK4 spark based tools." ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/33?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + ; + ns1:output , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 72307 . + + a schema1:Dataset ; + schema1:datePublished "2024-03-06T13:06:49.225296" ; + schema1:hasPart , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/fastq-to-matrix-10x" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + ; + schema1:name "fastq-to-matrix-10x/scrna-seq-fastq-to-matrix-10x-cellplex" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + , + ; + schema1:name "fastq-to-matrix-10x/scrna-seq-fastq-to-matrix-10x-v3" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/fastq-to-matrix-10x" ; + schema1:version "0.3" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# ![sanger-tol/ensemblrepeatdownload](docs/images/sanger-tol-ensemblrepeatdownload_logo.png)\r +\r +[![GitHub Actions CI Status](https://github.com/sanger-tol/ensemblrepeatdownload/workflows/nf-core%20CI/badge.svg)](https://github.com/sanger-tol/ensemblrepeatdownload/actions?query=workflow%3A%22nf-core+CI%22)\r +\r +\r +\r +[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.7183380-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.7183380)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.04.0-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +\r +[![Get help on Slack](http://img.shields.io/badge/slack-SangerTreeofLife%20%23pipelines-4A154B?labelColor=000000&logo=slack)](https://SangerTreeofLife.slack.com/channels/pipelines)\r +[![Follow on Twitter](http://img.shields.io/badge/twitter-%40sangertol-1DA1F2?labelColor=000000&logo=twitter)](https://twitter.com/sangertol)\r +[![Watch on YouTube](http://img.shields.io/badge/youtube-tree--of--life-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/channel/UCFeDpvjU58SA9V0ycRXejhA)\r +\r +## Introduction\r +\r +**sanger-tol/ensemblrepeatdownload** is a pipeline 
that downloads repeat annotations from Ensembl into a Tree of Life directory structure.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +On release, automated continuous integration tests run the pipeline on a full-sized dataset on the GitHub CI infrastructure. This ensures that the pipeline runs in a third-party environment, and has sensible resource allocation defaults set to run on real-world datasets.\r +\r +## Pipeline summary\r +\r +## Overview\r +\r +The pipeline takes a CSV file that contains assembly accession number, Ensembl species names (as they may differ from Tree of Life ones !), output directories.\r +Assembly accession numbers are optional too. If missing, the pipeline assumes it can be retrieved from files named `ACCESSION` in the standard location on disk.\r +The pipeline downloads the repeat annotation as the masked Fasta file and a BED file.\r +All files are compressed with `bgzip`, and indexed with `samtools faidx` or `tabix`.\r +\r +Steps involved:\r +\r +- Download the masked fasta file from Ensembl.\r +- Extract the coordinates of the masked regions into a BED file.\r +- Compress and index the BED file with `bgzip` and `tabix`.\r +\r +## Quick Start\r +\r +1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=22.04.0`)\r +\r +2. 
Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) (you can follow [this tutorial](https://singularity-tutorial.github.io/01-installation/)), [`Podman`](https://podman.io/), [`Shifter`](https://nersc.gitlab.io/development/shifter/how-to-use/) or [`Charliecloud`](https://hpc.github.io/charliecloud/) for full pipeline reproducibility _(you can use [`Conda`](https://conda.io/miniconda.html) both to install Nextflow itself and also to manage software within pipelines. Please only use it within pipelines as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_.\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```bash\r + nextflow run sanger-tol/ensemblrepeatdownload -profile test,YOURPROFILE --outdir \r + ```\r +\r + Note that some form of configuration will be needed so that Nextflow knows how to fetch the required software. This is usually done in the form of a config profile (`YOURPROFILE` in the example command above). You can chain multiple config profiles in a comma-separated string.\r +\r + > - The pipeline comes with config profiles called `docker`, `singularity`, `podman`, `shifter`, `charliecloud` and `conda` which instruct the pipeline to use the named tool for software management. For example, `-profile test,docker`.\r + > - Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment.\r + > - If you are using `singularity`, please use the [`nf-core download`](https://nf-co.re/tools/#downloading-pipelines-for-offline-use) command to download images first, before running the pipeline. 
Setting the [`NXF_SINGULARITY_CACHEDIR` or `singularity.cacheDir`](https://www.nextflow.io/docs/latest/singularity.html?#singularity-docker-hub) Nextflow options enables you to store and re-use the images from a central location for future pipeline runs.\r + > - If you are using `conda`, it is highly recommended to use the [`NXF_CONDA_CACHEDIR` or `conda.cacheDir`](https://www.nextflow.io/docs/latest/conda.html) settings to store the environments in a central location for future pipeline runs.\r +\r +4. Start running your own analysis!\r +\r + ```console\r + nextflow run sanger-tol/ensemblrepeatdownload --input $PWD/assets/samplesheet.csv --outdir -profile \r + ```\r +\r +## Documentation\r +\r +The sanger-tol/ensemblrepeatdownload pipeline comes with documentation about the pipeline [usage](docs/usage.md) and [output](docs/output.md).\r +\r +## Credits\r +\r +sanger-tol/ensemblrepeatdownload was originally written by @muffato.\r +\r +## Contributions and Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +For further information or help, don't hesitate to get in touch on the [Slack `#pipelines` channel](https://sangertreeoflife.slack.com/channels/pipelines). 
Please [create an issue](https://github.com/sanger-tol/ensemblrepeatdownload/issues/new/choose) on GitHub if you are not on the Sanger slack channel.\r +\r +## Citations\r +\r +If you use sanger-tol/ensemblrepeatdownload for your analysis, please cite it using the following doi: [10.5281/zenodo.7183380](https://doi.org/10.5281/zenodo.7183380)\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +This pipeline uses code and infrastructure developed and maintained by the [nf-core](https://nf-co.re) community, reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/667?version=1" ; + schema1:isBasedOn "https://github.com/sanger-tol/ensemblrepeatdownload" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for sanger-tol/ensemblrepeatdownload v1.0.0 - Gwaihir the Windlord" ; + schema1:sdDatePublished "2024-08-05 10:27:10 +0100" ; + schema1:url "https://workflowhub.eu/workflows/667/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1721 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-14T12:06:10Z" ; + schema1:dateModified "2023-11-14T12:06:10Z" ; + schema1:description """# ![sanger-tol/ensemblrepeatdownload](docs/images/sanger-tol-ensemblrepeatdownload_logo.png)\r +\r +[![GitHub Actions CI Status](https://github.com/sanger-tol/ensemblrepeatdownload/workflows/nf-core%20CI/badge.svg)](https://github.com/sanger-tol/ensemblrepeatdownload/actions?query=workflow%3A%22nf-core+CI%22)\r +\r +\r +\r +[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.7183380-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.7183380)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.04.0-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +\r +[![Get help on Slack](http://img.shields.io/badge/slack-SangerTreeofLife%20%23pipelines-4A154B?labelColor=000000&logo=slack)](https://SangerTreeofLife.slack.com/channels/pipelines)\r +[![Follow on Twitter](http://img.shields.io/badge/twitter-%40sangertol-1DA1F2?labelColor=000000&logo=twitter)](https://twitter.com/sangertol)\r +[![Watch on YouTube](http://img.shields.io/badge/youtube-tree--of--life-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/channel/UCFeDpvjU58SA9V0ycRXejhA)\r +\r +## Introduction\r +\r +**sanger-tol/ensemblrepeatdownload** is a pipeline that downloads repeat annotations from Ensembl into a Tree of Life directory 
structure.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +On release, automated continuous integration tests run the pipeline on a full-sized dataset on the GitHub CI infrastructure. This ensures that the pipeline runs in a third-party environment, and has sensible resource allocation defaults set to run on real-world datasets.\r +\r +## Pipeline summary\r +\r +## Overview\r +\r +The pipeline takes a CSV file that contains assembly accession number, Ensembl species names (as they may differ from Tree of Life ones !), output directories.\r +Assembly accession numbers are optional too. If missing, the pipeline assumes it can be retrieved from files named `ACCESSION` in the standard location on disk.\r +The pipeline downloads the repeat annotation as the masked Fasta file and a BED file.\r +All files are compressed with `bgzip`, and indexed with `samtools faidx` or `tabix`.\r +\r +Steps involved:\r +\r +- Download the masked fasta file from Ensembl.\r +- Extract the coordinates of the masked regions into a BED file.\r +- Compress and index the BED file with `bgzip` and `tabix`.\r +\r +## Quick Start\r +\r +1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=22.04.0`)\r +\r +2. 
Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) (you can follow [this tutorial](https://singularity-tutorial.github.io/01-installation/)), [`Podman`](https://podman.io/), [`Shifter`](https://nersc.gitlab.io/development/shifter/how-to-use/) or [`Charliecloud`](https://hpc.github.io/charliecloud/) for full pipeline reproducibility _(you can use [`Conda`](https://conda.io/miniconda.html) both to install Nextflow itself and also to manage software within pipelines. Please only use it within pipelines as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_.\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```bash\r + nextflow run sanger-tol/ensemblrepeatdownload -profile test,YOURPROFILE --outdir \r + ```\r +\r + Note that some form of configuration will be needed so that Nextflow knows how to fetch the required software. This is usually done in the form of a config profile (`YOURPROFILE` in the example command above). You can chain multiple config profiles in a comma-separated string.\r +\r + > - The pipeline comes with config profiles called `docker`, `singularity`, `podman`, `shifter`, `charliecloud` and `conda` which instruct the pipeline to use the named tool for software management. For example, `-profile test,docker`.\r + > - Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment.\r + > - If you are using `singularity`, please use the [`nf-core download`](https://nf-co.re/tools/#downloading-pipelines-for-offline-use) command to download images first, before running the pipeline. 
Setting the [`NXF_SINGULARITY_CACHEDIR` or `singularity.cacheDir`](https://www.nextflow.io/docs/latest/singularity.html?#singularity-docker-hub) Nextflow options enables you to store and re-use the images from a central location for future pipeline runs.\r + > - If you are using `conda`, it is highly recommended to use the [`NXF_CONDA_CACHEDIR` or `conda.cacheDir`](https://www.nextflow.io/docs/latest/conda.html) settings to store the environments in a central location for future pipeline runs.\r +\r +4. Start running your own analysis!\r +\r + ```console\r + nextflow run sanger-tol/ensemblrepeatdownload --input $PWD/assets/samplesheet.csv --outdir -profile \r + ```\r +\r +## Documentation\r +\r +The sanger-tol/ensemblrepeatdownload pipeline comes with documentation about the pipeline [usage](docs/usage.md) and [output](docs/output.md).\r +\r +## Credits\r +\r +sanger-tol/ensemblrepeatdownload was originally written by @muffato.\r +\r +## Contributions and Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +For further information or help, don't hesitate to get in touch on the [Slack `#pipelines` channel](https://sangertreeoflife.slack.com/channels/pipelines). 
Please [create an issue](https://github.com/sanger-tol/ensemblrepeatdownload/issues/new/choose) on GitHub if you are not on the Sanger slack channel.\r +\r +## Citations\r +\r +If you use sanger-tol/ensemblrepeatdownload for your analysis, please cite it using the following doi: [10.5281/zenodo.7183380](https://doi.org/10.5281/zenodo.7183380)\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +This pipeline uses code and infrastructure developed and maintained by the [nf-core](https://nf-co.re) community, reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "sanger-tol/ensemblrepeatdownload v1.0.0 - Gwaihir the Windlord" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/667?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. 
The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. 
With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.55.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_pmx_tutorial" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)" ; + schema1:sdDatePublished "2024-08-05 10:25:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/55/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 48363 ; + schema1:creator , + ; + schema1:dateCreated "2021-07-01T10:33:20Z" ; + schema1:dateModified "2022-06-10T09:43:14Z" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. 
With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/55?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/55?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. 
\r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +For this workflow:\r +\r +Inputs:\r +* assembled-genome.fasta\r +* hard-repeat-masked-genome.fasta\r +* If using the mRNAs option, the additional inputs required are .cdna, .pro and .dat files. \r +\r +What it does:\r +* This workflow splits the input genomes into single sequences (to decrease computation time), annotates using FgenesH++, and merges the output. \r +\r +Outputs:\r +* genome annotation in gff3 format\r +* fasta files of mRNAs, cDNAs and proteins\r +* Busco report\r +\r +\r +\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.881.4" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Fgenesh annotation -TSI" ; + schema1:sdDatePublished "2024-08-05 10:22:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/881/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 18063 ; + schema1:creator ; + schema1:dateCreated "2024-06-18T09:46:52Z" ; + schema1:dateModified "2024-06-18T09:49:24Z" ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. 
\r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +For this workflow:\r +\r +Inputs:\r +* assembled-genome.fasta\r +* hard-repeat-masked-genome.fasta\r +* If using the mRNAs option, the additional inputs required are .cdna, .pro and .dat files. \r +\r +What it does:\r +* This workflow splits the input genomes into single sequences (to decrease computation time), annotates using FgenesH++, and merges the output. \r +\r +Outputs:\r +* genome annotation in gff3 format\r +* fasta files of mRNAs, cDNAs and proteins\r +* Busco report\r +\r +\r +\r +\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/881?version=3" ; + schema1:isPartOf ; + schema1:keywords "TSI-annotation" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Fgenesh annotation -TSI" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/881?version=4" ; + schema1:version 4 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 189968 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.283.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Amber Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/283/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6933 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-17T10:27:30Z" ; + schema1:dateModified "2022-04-11T09:29:43Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/283?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Amber Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_md_setup/python/workflow.py" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + schema1:datePublished "2022-10-19T12:24:27.604420" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "cutandrun/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.11" . + + a schema1:Dataset ; + schema1:datePublished "2023-12-21T09:59:05.410152" ; + schema1:hasPart , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/fastq-to-matrix-10x" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + ; + schema1:name "fastq-to-matrix-10x/scrna-seq-fastq-to-matrix-10x-cellplex" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.19" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.19" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + , + ; + schema1:name "fastq-to-matrix-10x/scrna-seq-fastq-to-matrix-10x-v3" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/fastq-to-matrix-10x" ; + schema1:version "0.1" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """\r +\r +![Logo](https://cbg-ethz.github.io/V-pipe/img/logo.svg)\r +\r +[![bio.tools](https://img.shields.io/badge/bio-tools-blue.svg)](https://bio.tools/V-Pipe)\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥6.8.1-blue.svg)](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe)\r +[![Deploy Docker image](https://github.com/cbg-ethz/V-pipe/actions/workflows/deploy-docker.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe)\r +[![Tests](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml)\r +[![Mega-Linter](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml)\r +[![License: Apache-2.0](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\r +\r +V-pipe is a workflow designed for the analysis of next generation sequencing (NGS) data from viral pathogens. It produces a number of results in a curated format (e.g., consensus sequences, SNV calls, local/global haplotypes).\r +V-pipe is written using the Snakemake workflow management system.\r +\r +## Usage\r +\r +Different ways of initializing V-pipe are presented below. We strongly encourage you to deploy it [using the quick install script](#using-quick-install-script), as this is our preferred method.\r +\r +To configure V-pipe refer to the documentation present in [config/README.md](config/README.md).\r +\r +V-pipe expects the input samples to be organized in a [two-level](config/README.md#samples) directory hierarchy,\r +and the sequencing reads must be provided in a sub-folder named `raw_data`. 
Further details can be found on the [website](https://cbg-ethz.github.io/V-pipe/usage/).\r +Check the utils subdirectory for [mass-importers tools](utils/README.md#samples-mass-importers) that can assist you in generating this hierarchy.\r +\r +We provide [virus-specific base configuration files](config/README.md#virus-base-config) which contain handy defaults for, e.g., HIV and SARS-CoV-2. Set the virus in the general section of the configuration file:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +```\r +\r +Also see [snakemake's documentation](https://snakemake.readthedocs.io/en/stable/executing/cli.html) to learn more about the command-line options available when executing the workflow.\r +\r +### Using quick install script\r +\r +To deploy V-pipe, use the [installation script](utils/README.md#quick-installer) with the following parameters:\r +\r +```bash\r +curl -O 'https://raw.githubusercontent.com/cbg-ethz/V-pipe/master/utils/quick_install.sh'\r +./quick_install.sh -w work\r +```\r +\r +This script will download and install miniconda, checkout the V-pipe git repository (use `-b` to specify which branch/tag) and setup a work directory (specified with `-w`) with an executable script that will execute the workflow:\r +\r +```bash\r +cd work\r +# edit config.yaml and provide samples/ directory\r +./vpipe --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Docker\r +\r +Note: the [docker image](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe) is only setup with components to run the workflow for HIV and SARS-CoV-2 virus base configurations.\r +Using V-pipe with other viruses or configurations might require internet connectivity for additional software components.\r +\r +Create `config.yaml` or `vpipe.config` and then populate the `samples/` directory.\r +For example, the following config file could be used:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +\r +output:\r + snv: true\r + local: true\r + global: false\r + 
visualization: true\r + QA: true\r +```\r +\r +Then execute:\r +\r +```bash\r +docker run --rm -it -v $PWD:/work ghcr.io/cbg-ethz/v-pipe:master --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Snakedeploy\r +\r +First install [mamba](https://github.com/conda-forge/miniforge#mambaforge), then create and activate an environment with Snakemake and Snakedeploy:\r +\r +```bash\r +mamba create -c bioconda -c conda-forge --name snakemake snakemake snakedeploy\r +conda activate snakemake\r +```\r +\r +Snakemake's [official workflow installer Snakedeploy](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe) can now be used:\r +\r +```bash\r +snakedeploy deploy-workflow https://github.com/cbg-ethz/V-pipe --tag master .\r +# edit config/config.yaml and provide samples/ directory\r +snakemake --use-conda --jobs 4 --printshellcmds --dry-run\r +```\r +\r +## Dependencies\r +\r +- **[Conda](https://conda.io/docs/index.html)**\r +\r + Conda is a cross-platform package management system and an environment manager application. Snakemake uses mamba as a package manager.\r +\r +- **[Snakemake](https://snakemake.readthedocs.io/)**\r +\r + Snakemake is the central workflow and dependency manager of V-pipe. It determines the order in which individual tools are invoked and checks that programs do not exit unexpectedly.\r +\r +- **[VICUNA](https://www.broadinstitute.org/viral-genomics/vicuna)**\r +\r + VICUNA is a *de novo* assembly software designed for populations with high mutation rates. It is used to build an initial reference for mapping reads with ngshmmalign aligner when a `references/cohort_consensus.fasta` file is not provided. 
Further details can be found in the [wiki](https://github.com/cbg-ethz/V-pipe/wiki/getting-started#input-files) pages.\r +\r +### Computational tools\r +\r +Other dependencies are managed by using isolated conda environments per rule, and below we list some of the computational tools integrated in V-pipe:\r +\r +- **[FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)**\r +\r + FastQC gives an overview of the raw sequencing data. Flowcells that have been overloaded or otherwise fail during sequencing can easily be determined with FastQC.\r +\r +- **[PRINSEQ](http://prinseq.sourceforge.net/)**\r +\r + Trimming and clipping of reads is performed by PRINSEQ. It is currently the most versatile raw read processor with many customization options.\r +\r +- **[ngshmmalign](https://github.com/cbg-ethz/ngshmmalign)**\r +\r + We perform the alignment of the curated NGS data using our custom ngshmmalign that takes structural variants into account. It produces multiple consensus sequences that include either majority bases or ambiguous bases.\r +\r +- **[bwa](https://github.com/lh3/bwa)**\r +\r + In order to detect specific cross-contaminations with other probes, the Burrows-Wheeler aligner is used. It quickly yields estimates for foreign genomic material in an experiment.\r + Additionally, It can be used as an alternative aligner to ngshmmalign.\r +\r +- **[MAFFT](http://mafft.cbrc.jp/alignment/software/)**\r +\r + To standardise multiple samples to the same reference genome (say HXB2 for HIV-1), the multiple sequence aligner MAFFT is employed. The multiple sequence alignment helps in determining regions of low conservation and thus makes standardisation of alignments more robust.\r +\r +- **[Samtools and bcftools](https://www.htslib.org/)**\r +\r + The Swiss Army knife of alignment postprocessing and diagnostics. 
bcftools is also used to generate consensus sequence with indels.\r +\r +- **[SmallGenomeUtilities](https://github.com/cbg-ethz/smallgenomeutilities)**\r +\r + We perform genomic liftovers to standardised reference genomes using our in-house developed python library of utilities for rewriting alignments.\r +\r +- **[ShoRAH](https://github.com/cbg-ethz/shorah)**\r +\r + ShoRAh performs SNV calling and local haplotype reconstruction by using bayesian clustering.\r +\r +- **[LoFreq](https://csb5.github.io/lofreq/)**\r +\r + LoFreq (version 2) is SNVs and indels caller from next-generation sequencing data, and can be used as an alternative engine for SNV calling.\r +\r +- **[SAVAGE](https://bitbucket.org/jbaaijens/savage) and [Haploclique](https://github.com/cbg-ethz/haploclique)**\r +\r + We use HaploClique or SAVAGE to perform global haplotype reconstruction for heterogeneous viral populations by using an overlap graph.\r +\r +## Citation\r +\r +If you use this software in your research, please cite:\r +\r +Posada-Céspedes S., Seifert D., Topolsky I., Jablonski K.P., Metzner K.J., and Beerenwinkel N. 2021.\r +"V-pipe: a computational pipeline for assessing viral genetic diversity from high-throughput sequencing data."\r +_Bioinformatics_, January. 
doi:[10.1093/bioinformatics/btab015](https://doi.org/10.1093/bioinformatics/btab015).\r +\r +## Contributions\r +\r +- [Ivan Topolsky\\* ![orcid]](https://orcid.org/0000-0002-7561-0810), [![github]](https://github.com/dryak)\r +- [Kim Philipp Jablonski ![orcid]](https://orcid.org/0000-0002-4166-4343), [![github]](https://github.com/kpj)\r +- [Lara Fuhrmann ![orcid]](https://orcid.org/0000-0001-6405-0654), [![github]](https://github.com/LaraFuhrmann)\r +- [Uwe Schmitt ![orcid]](https://orcid.org/0000-0002-4658-0616), [![github]](https://github.com/uweschmitt)\r +- [Monica Dragan ![orcid]](https://orcid.org/0000-0002-7719-5892), [![github]](https://github.com/monicadragan)\r +- [Susana Posada Céspedes ![orcid]](https://orcid.org/0000-0002-7459-8186), [![github]](https://github.com/sposadac)\r +- [David Seifert ![orcid]](https://orcid.org/0000-0003-4739-5110), [![github]](https://github.com/SoapZA)\r +- Tobias Marschall\r +- [Niko Beerenwinkel\\*\\* ![orcid]](https://orcid.org/0000-0002-0573-6119)\r +\r +\\* software maintainer ;\r +\\** group leader\r +\r +[github]: https://cbg-ethz.github.io/V-pipe/img/mark-github.svg\r +[orcid]: https://cbg-ethz.github.io/V-pipe/img/ORCIDiD_iconvector.svg\r +\r +## Contact\r +\r +We encourage users to use the [issue tracker](https://github.com/cbg-ethz/V-pipe/issues). For further enquiries, you can also contact the V-pipe Dev Team .\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/301?version=1" ; + schema1:isBasedOn "https://github.com/cbg-ethz/V-pipe.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for V-pipe (main multi-virus version)" ; + schema1:sdDatePublished "2024-08-05 10:23:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/301/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1308 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-30T16:50:14Z" ; + schema1:dateModified "2022-03-30T16:50:14Z" ; + schema1:description """\r +\r +![Logo](https://cbg-ethz.github.io/V-pipe/img/logo.svg)\r +\r +[![bio.tools](https://img.shields.io/badge/bio-tools-blue.svg)](https://bio.tools/V-Pipe)\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥6.8.1-blue.svg)](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe)\r +[![Deploy Docker image](https://github.com/cbg-ethz/V-pipe/actions/workflows/deploy-docker.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe)\r +[![Tests](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml)\r +[![Mega-Linter](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml)\r +[![License: Apache-2.0](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\r +\r +V-pipe is a workflow designed for the analysis of next generation sequencing (NGS) data from viral pathogens. It produces a number of results in a curated format (e.g., consensus sequences, SNV calls, local/global haplotypes).\r +V-pipe is written using the Snakemake workflow management system.\r +\r +## Usage\r +\r +Different ways of initializing V-pipe are presented below. 
We strongly encourage you to deploy it [using the quick install script](#using-quick-install-script), as this is our preferred method.\r +\r +To configure V-pipe refer to the documentation present in [config/README.md](config/README.md).\r +\r +V-pipe expects the input samples to be organized in a [two-level](config/README.md#samples) directory hierarchy,\r +and the sequencing reads must be provided in a sub-folder named `raw_data`. Further details can be found on the [website](https://cbg-ethz.github.io/V-pipe/usage/).\r +Check the utils subdirectory for [mass-importers tools](utils/README.md#samples-mass-importers) that can assist you in generating this hierarchy.\r +\r +We provide [virus-specific base configuration files](config/README.md#virus-base-config) which contain handy defaults for, e.g., HIV and SARS-CoV-2. Set the virus in the general section of the configuration file:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +```\r +\r +Also see [snakemake's documentation](https://snakemake.readthedocs.io/en/stable/executing/cli.html) to learn more about the command-line options available when executing the workflow.\r +\r +### Using quick install script\r +\r +To deploy V-pipe, use the [installation script](utils/README.md#quick-installer) with the following parameters:\r +\r +```bash\r +curl -O 'https://raw.githubusercontent.com/cbg-ethz/V-pipe/master/utils/quick_install.sh'\r +./quick_install.sh -w work\r +```\r +\r +This script will download and install miniconda, checkout the V-pipe git repository (use `-b` to specify which branch/tag) and setup a work directory (specified with `-w`) with an executable script that will execute the workflow:\r +\r +```bash\r +cd work\r +# edit config.yaml and provide samples/ directory\r +./vpipe --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Docker\r +\r +Note: the [docker image](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe) is only setup with components to run the workflow for HIV and 
SARS-CoV-2 virus base configurations.\r +Using V-pipe with other viruses or configurations might require internet connectivity for additional software components.\r +\r +Create `config.yaml` or `vpipe.config` and then populate the `samples/` directory.\r +For example, the following config file could be used:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +\r +output:\r + snv: true\r + local: true\r + global: false\r + visualization: true\r + QA: true\r +```\r +\r +Then execute:\r +\r +```bash\r +docker run --rm -it -v $PWD:/work ghcr.io/cbg-ethz/v-pipe:master --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Snakedeploy\r +\r +First install [mamba](https://github.com/conda-forge/miniforge#mambaforge), then create and activate an environment with Snakemake and Snakedeploy:\r +\r +```bash\r +mamba create -c bioconda -c conda-forge --name snakemake snakemake snakedeploy\r +conda activate snakemake\r +```\r +\r +Snakemake's [official workflow installer Snakedeploy](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe) can now be used:\r +\r +```bash\r +snakedeploy deploy-workflow https://github.com/cbg-ethz/V-pipe --tag master .\r +# edit config/config.yaml and provide samples/ directory\r +snakemake --use-conda --jobs 4 --printshellcmds --dry-run\r +```\r +\r +## Dependencies\r +\r +- **[Conda](https://conda.io/docs/index.html)**\r +\r + Conda is a cross-platform package management system and an environment manager application. Snakemake uses mamba as a package manager.\r +\r +- **[Snakemake](https://snakemake.readthedocs.io/)**\r +\r + Snakemake is the central workflow and dependency manager of V-pipe. It determines the order in which individual tools are invoked and checks that programs do not exit unexpectedly.\r +\r +- **[VICUNA](https://www.broadinstitute.org/viral-genomics/vicuna)**\r +\r + VICUNA is a *de novo* assembly software designed for populations with high mutation rates. 
It is used to build an initial reference for mapping reads with ngshmmalign aligner when a `references/cohort_consensus.fasta` file is not provided. Further details can be found in the [wiki](https://github.com/cbg-ethz/V-pipe/wiki/getting-started#input-files) pages.\r +\r +### Computational tools\r +\r +Other dependencies are managed by using isolated conda environments per rule, and below we list some of the computational tools integrated in V-pipe:\r +\r +- **[FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)**\r +\r + FastQC gives an overview of the raw sequencing data. Flowcells that have been overloaded or otherwise fail during sequencing can easily be determined with FastQC.\r +\r +- **[PRINSEQ](http://prinseq.sourceforge.net/)**\r +\r + Trimming and clipping of reads is performed by PRINSEQ. It is currently the most versatile raw read processor with many customization options.\r +\r +- **[ngshmmalign](https://github.com/cbg-ethz/ngshmmalign)**\r +\r + We perform the alignment of the curated NGS data using our custom ngshmmalign that takes structural variants into account. It produces multiple consensus sequences that include either majority bases or ambiguous bases.\r +\r +- **[bwa](https://github.com/lh3/bwa)**\r +\r + In order to detect specific cross-contaminations with other probes, the Burrows-Wheeler aligner is used. It quickly yields estimates for foreign genomic material in an experiment.\r + Additionally, It can be used as an alternative aligner to ngshmmalign.\r +\r +- **[MAFFT](http://mafft.cbrc.jp/alignment/software/)**\r +\r + To standardise multiple samples to the same reference genome (say HXB2 for HIV-1), the multiple sequence aligner MAFFT is employed. The multiple sequence alignment helps in determining regions of low conservation and thus makes standardisation of alignments more robust.\r +\r +- **[Samtools and bcftools](https://www.htslib.org/)**\r +\r + The Swiss Army knife of alignment postprocessing and diagnostics. 
bcftools is also used to generate consensus sequence with indels.\r +\r +- **[SmallGenomeUtilities](https://github.com/cbg-ethz/smallgenomeutilities)**\r +\r + We perform genomic liftovers to standardised reference genomes using our in-house developed python library of utilities for rewriting alignments.\r +\r +- **[ShoRAH](https://github.com/cbg-ethz/shorah)**\r +\r + ShoRAh performs SNV calling and local haplotype reconstruction by using bayesian clustering.\r +\r +- **[LoFreq](https://csb5.github.io/lofreq/)**\r +\r + LoFreq (version 2) is SNVs and indels caller from next-generation sequencing data, and can be used as an alternative engine for SNV calling.\r +\r +- **[SAVAGE](https://bitbucket.org/jbaaijens/savage) and [Haploclique](https://github.com/cbg-ethz/haploclique)**\r +\r + We use HaploClique or SAVAGE to perform global haplotype reconstruction for heterogeneous viral populations by using an overlap graph.\r +\r +## Citation\r +\r +If you use this software in your research, please cite:\r +\r +Posada-Céspedes S., Seifert D., Topolsky I., Jablonski K.P., Metzner K.J., and Beerenwinkel N. 2021.\r +"V-pipe: a computational pipeline for assessing viral genetic diversity from high-throughput sequencing data."\r +_Bioinformatics_, January. 
doi:[10.1093/bioinformatics/btab015](https://doi.org/10.1093/bioinformatics/btab015).\r +\r +## Contributions\r +\r +- [Ivan Topolsky\\* ![orcid]](https://orcid.org/0000-0002-7561-0810), [![github]](https://github.com/dryak)\r +- [Kim Philipp Jablonski ![orcid]](https://orcid.org/0000-0002-4166-4343), [![github]](https://github.com/kpj)\r +- [Lara Fuhrmann ![orcid]](https://orcid.org/0000-0001-6405-0654), [![github]](https://github.com/LaraFuhrmann)\r +- [Uwe Schmitt ![orcid]](https://orcid.org/0000-0002-4658-0616), [![github]](https://github.com/uweschmitt)\r +- [Monica Dragan ![orcid]](https://orcid.org/0000-0002-7719-5892), [![github]](https://github.com/monicadragan)\r +- [Susana Posada Céspedes ![orcid]](https://orcid.org/0000-0002-7459-8186), [![github]](https://github.com/sposadac)\r +- [David Seifert ![orcid]](https://orcid.org/0000-0003-4739-5110), [![github]](https://github.com/SoapZA)\r +- Tobias Marschall\r +- [Niko Beerenwinkel\\*\\* ![orcid]](https://orcid.org/0000-0002-0573-6119)\r +\r +\\* software maintainer ;\r +\\** group leader\r +\r +[github]: https://cbg-ethz.github.io/V-pipe/img/mark-github.svg\r +[orcid]: https://cbg-ethz.github.io/V-pipe/img/ORCIDiD_iconvector.svg\r +\r +## Contact\r +\r +We encourage users to use the [issue tracker](https://github.com/cbg-ethz/V-pipe/issues). For further enquiries, you can also contact the V-pipe Dev Team .\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/301?version=4" ; + schema1:keywords "Alignment, Assembly, covid-19, Genomics, INDELs, rna, SNPs, variant_calling, workflow" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "V-pipe (main multi-virus version)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/301?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=18" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-08-05 10:23:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=18" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17485 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:16Z" ; + schema1:dateModified "2024-06-11T12:55:16Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=18" ; + schema1:version 18 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/976?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/cutandrun" ; + schema1:sdDatePublished "2024-08-05 10:24:09 +0100" ; + schema1:url "https://workflowhub.eu/workflows/976/ro_crate?version=7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15235 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:47Z" ; + schema1:dateModified "2024-06-11T12:54:47Z" ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/976?version=7" ; + schema1:keywords "cutandrun, cutandrun-seq, cutandtag, cutandtag-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/cutandrun" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/976?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """This workflow demonstrates the usage of EODIE, a toolkit to extract object based timeseries information from Earth Observation data.\r +\r +EODIE is a toolkit to extract object based timeseries information from Earth Observation data.\r +\r +The EODIE code can be found on [Gitlab](https://gitlab.com/fgi_nls/public/EODIE) .\r +\r +The goal of EODIE is to ease the extraction of time series information at object level. Today, vast amounts of Earth Observation data are available to the users via for example earth explorer or scihub. 
Often, not the whole images are needed for exploitation, but only the timeseries of a certain feature on object level. Objects may be polygons depicting agricultural field parcels, forest plots, or areas of a certain land cover type.\r +\r +EODIE takes the objects in as polygons in a shapefile as well as the timeframe of interest and the features (eg vegetation indices) to be extracted. The output is a per polygon timeseries of the selected features over the timeframe of interest.\r +\r +**Online documentation**\r +EODIE documentation can be found [here](https://eodie.readthedocs.io/en/latest/).\r +\r +**Abstract CWL**\r +Automatically generated from the Galaxy workflow file: Workflow constructed from history 'EODIE Sentinel'""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.274.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy workflow demonstrating the usage of EODIE Galaxy Tool" ; + schema1:sdDatePublished "2024-08-05 10:32:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/274/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1581 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8775 ; + schema1:creator ; + schema1:dateCreated "2022-03-11T12:32:55Z" ; + schema1:dateModified "2023-01-16T13:57:33Z" ; + schema1:description """This workflow demonstrates the usage of EODIE, a toolkit to extract object based timeseries information from Earth Observation data.\r +\r +EODIE is a toolkit to extract object based timeseries information from Earth Observation data.\r +\r +The EODIE code can be found on [Gitlab](https://gitlab.com/fgi_nls/public/EODIE) .\r +\r +The goal of EODIE is to ease the extraction of time series information at object level. Today, vast amounts of Earth Observation data are available to the users via for example earth explorer or scihub. Often, not the whole images are needed for exploitation, but only the timeseries of a certain feature on object level. Objects may be polygons depicting agricultural field parcels, forest plots, or areas of a certain land cover type.\r +\r +EODIE takes the objects in as polygons in a shapefile as well as the timeframe of interest and the features (eg vegetation indices) to be extracted. 
The output is a per polygon timeseries of the selected features over the timeframe of interest.\r +\r +**Online documentation**\r +EODIE documentation can be found [here](https://eodie.readthedocs.io/en/latest/).\r +\r +**Abstract CWL**\r +Automatically generated from the Galaxy workflow file: Workflow constructed from history 'EODIE Sentinel'""" ; + schema1:image ; + schema1:keywords "earth observation, copernicus, ndvi, sentinel-2 data" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy workflow demonstrating the usage of EODIE Galaxy Tool" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/274?version=1" ; + schema1:version 1 ; + ns1:input , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 4128 . + + a schema1:Dataset ; + schema1:datePublished "2021-12-06T14:19:09.191770" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/fragment-based-docking-scoring" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "fragment-based-docking-scoring/main" ; + schema1:sdDatePublished "2021-12-07 03:00:58 +0000" ; + schema1:softwareVersion "v0.1.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "RNA-RNA interactome analysis using ChiRA tools suite. The aligner used is CLAN." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/67?version=1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for RNA-RNA interactome analysis using CLAN" ; + schema1:sdDatePublished "2024-08-05 10:33:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/67/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 31282 ; + schema1:creator ; + schema1:dateCreated "2020-11-03T19:49:27Z" ; + schema1:dateModified "2023-02-13T14:06:45Z" ; + schema1:description "RNA-RNA interactome analysis using ChiRA tools suite. The aligner used is CLAN." ; + schema1:keywords "rna, Transcriptomics" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "RNA-RNA interactome analysis using CLAN" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/67?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 28082 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 3 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "energy.selection" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "genion.group" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1044 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "ions.mdp" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 971 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "minim.mdp" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 109917 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1aki.pdb" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2998215 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1u3m.pdb" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2918025 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1xyw.pdb" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88246 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1aki.gro" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577586 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1aki.top" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1280044 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1aki_em.tpr" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1aki_em_energy.edr" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1279304 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1aki_ions.tpr" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88246 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1aki_newbox.gro" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2142 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1aki_potential.xvg" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1525186 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1aki_solv.gro" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1524475 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1aki_solv_ions.gro" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 82046 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1u3m.gro" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 537920 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1u3m.top" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1416272 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1u3m_em.tpr" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1u3m_em_energy.edr" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1415196 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1u3m_ions.tpr" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 82046 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1u3m_newbox.gro" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2160 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1u3m_potential.xvg" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1837046 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1u3m_solv.gro" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1836965 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1u3m_solv_ions.gro" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 80112 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1xyw.gro" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 523996 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1xyw.top" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1408872 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1xyw_em.tpr" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1xyw_em_energy.edr" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1407844 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1xyw_ions.tpr" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 80112 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1xyw_newbox.gro" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2144 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1xyw_potential.xvg" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1845237 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1xyw_solv.gro" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1845066 ; + schema1:dateModified "2024-01-24T14:27:52" ; + schema1:name "1xyw_solv_ions.gro" ; + schema1:sdDatePublished "2024-01-24T14:28:04+00:00" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Workflow converts one or multiple bam files back to the fastq format" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/968?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/bamtofastq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for qbic-pipelines/bamtofastq" ; + schema1:sdDatePublished "2024-08-05 10:24:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/968/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4134 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:44Z" ; + schema1:dateModified "2024-06-11T12:54:44Z" ; + schema1:description "Workflow converts one or multiple bam files back to the fastq format" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/968?version=5" ; + schema1:keywords "bamtofastq, Conversion, cramtofastq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "qbic-pipelines/bamtofastq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/968?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/963?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/airrflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/airrflow" ; + schema1:sdDatePublished "2024-08-05 10:24:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/963/ro_crate?version=10" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11664 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:37Z" ; + schema1:dateModified "2024-06-11T12:54:37Z" ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/963?version=13" ; + schema1:keywords "airr, b-cell, immcantation, immunorepertoire, repseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/airrflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/963?version=10" ; + schema1:version 10 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1023?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/smrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/smrnaseq" ; + schema1:sdDatePublished "2024-08-05 10:23:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1023/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8315 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:18Z" ; + schema1:dateModified "2024-06-11T12:55:18Z" ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/1023?version=10" ; + schema1:keywords "small-rna, smrna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/smrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1023?version=3" ; + schema1:version 3 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 2317017 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 140884 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.0.0" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.0.1" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.0.2" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.0.3" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.0.4" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.0.5" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.1.0" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.1.1" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.1.2" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.1.3" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.1.4" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.1.5" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.2.0" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.2.1" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.2.2" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.2.3" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.2.4" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.2.5" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.3.0" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.3.1" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.3.2" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.3.3" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.3.4" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.3.5" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.4.0" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.4.1" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.4.2" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.4.3" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.4.4" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.4.5" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.5.0" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.5.1" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.5.2" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.5.3" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.5.4" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.5.5" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.0.0" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.0.1" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.0.2" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.0.3" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.0.4" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.0.5" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.1.0" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.1.1" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.1.2" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.1.3" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.1.4" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.1.5" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.2.0" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.2.1" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.2.2" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.2.3" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.2.4" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.2.5" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.3.0" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.3.1" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.3.2" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.3.3" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.3.4" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.3.5" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.4.0" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.4.1" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.4.2" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.4.3" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.4.4" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.4.5" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.5.0" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.5.1" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.5.2" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.5.3" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.5.4" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.5.5" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description "Basic workflows inspired by the Nanopolish tutorials" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/50?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ONT --Tutorial-Nanopolish-variants" ; + schema1:sdDatePublished "2024-08-05 10:33:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/50/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9930 ; + schema1:creator ; + schema1:dateCreated "2020-08-05T12:52:50Z" ; + schema1:dateModified "2023-02-13T14:06:46Z" ; + schema1:description "Basic workflows inspired by the Nanopolish tutorials" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ONT --Tutorial-Nanopolish-variants" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/50?version=1" ; + schema1:version 1 ; + ns1:input , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=12" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-08-05 10:22:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=12" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10551 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:10Z" ; + schema1:dateModified "2024-06-11T12:55:10Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=12" ; + schema1:version 12 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1021?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/scrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/scrnaseq" ; + schema1:sdDatePublished "2024-08-05 10:23:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1021/ro_crate?version=7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9948 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:17Z" ; + schema1:dateModified "2024-06-11T12:55:17Z" ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:keywords "10x-genomics, 10xgenomics, alevin, bustools, Cellranger, kallisto, rna-seq, single-cell, star-solo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/scrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1021?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """CWL version of the md_list.py workflow for HPC. 
This performs a system setup and runs a molecular dynamics simulation on the structure passed to this workflow. This workflow uses the md\\_gather.cwl sub-workflow to gather the outputs together to return these.\r +To work with more than one structure this workflow can be called from either the md\\_launch.cwl workflow, or the md\\_launch\\_mutate.cwl workflow. These use scatter for parallelising the workflow. md\\_launch.cwl operates on a list of individual input molecule files. md\\_launch\\_mutate.cwl operates on a single input molecule file, and a list of mutations to apply to that molecule. Within that list of mutations, a value of 'WT' will indicate that the molecule should be simulated without any mutation being applied.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.121.1" ; + schema1:isBasedOn "https://github.com/douglowe/biobb_hpc_cwl_md_list" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Molecular Dynamics Simulation" ; + schema1:sdDatePublished "2024-08-05 10:33:16 +0100" ; + schema1:url "https://workflowhub.eu/workflows/121/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9242 ; + schema1:dateCreated "2021-05-20T14:41:19Z" ; + schema1:dateModified "2023-01-16T13:49:52Z" ; + schema1:description """CWL version of the md_list.py workflow for HPC. This performs a system setup and runs a molecular dynamics simulation on the structure passed to this workflow. This workflow uses the md\\_gather.cwl sub-workflow to gather the outputs together to return these.\r +To work with more than one structure this workflow can be called from either the md\\_launch.cwl workflow, or the md\\_launch\\_mutate.cwl workflow. These use scatter for parallelising the workflow. 
md\\_launch.cwl operates on a list of individual input molecule files. md\\_launch\\_mutate.cwl operates on a single input molecule file, and a list of mutations to apply to that molecule. Within that list of mutations, a value of 'WT' will indicate that the molecule should be simulated without any mutation being applied.\r +""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Molecular Dynamics Simulation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/121?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + ; + ns1:output . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 34463 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Pre-processing of mass spectrometry-based metabolomics data with quantification and identification based on MS1 and MS2 data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/57?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/metaboigniter" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/metaboigniter" ; + schema1:sdDatePublished "2024-08-05 10:24:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/57/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5375 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:00Z" ; + schema1:dateModified "2024-06-11T12:55:00Z" ; + schema1:description "Pre-processing of mass spectrometry-based metabolomics data with quantification and identification based on MS1 and MS2 data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/57?version=3" ; + schema1:keywords "Metabolomics, identification, quantification, mass-spectrometry, ms1, ms2" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/metaboigniter" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/57?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + schema1:datePublished "2021-09-14T16:21:52.393735" ; + schema1:description "Cryo-EM processing workflow" ; + schema1:hasPart , + , + ; + schema1:image "workflow.svg" ; + schema1:keywords "cryoem" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "testEntryTitleNew" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:image ; + schema1:name "workflow" ; + schema1:programmingLanguage . + + a schema1:MediaObject . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/989?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/hic" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hic" ; + schema1:sdDatePublished "2024-08-05 10:23:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/989/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3924 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:54Z" ; + schema1:dateModified "2024-06-11T12:54:54Z" ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/989?version=7" ; + schema1:keywords "chromosome-conformation-capture, Hi-C" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hic" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/989?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Scipion is a workflow engine mostly for Cryo-Electron Microscopy image processing. In this extremely simple workflow, we load the Relion 3.0 tutorial data and process it to 2.9A resolution." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/69?version=1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Scipion Tutorial example reaching 2.9A resolution" ; + schema1:sdDatePublished "2024-08-05 10:33:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/69/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16475 ; + schema1:dateCreated "2020-11-19T13:29:51Z" ; + schema1:dateModified "2023-06-27T12:39:11Z" ; + schema1:description "Scipion is a workflow engine mostly for Cryo-Electron Microscopy image processing. In this extremely simple workflow, we load the Relion 3.0 tutorial data and process it to 2.9A resolution." 
; + schema1:keywords "Electron microscopy, image processing, single particle analysis" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Scipion Tutorial example reaching 2.9A resolution" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/69?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for the analysis of crispr data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/975?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/crisprseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/crisprseq" ; + schema1:sdDatePublished "2024-08-05 10:22:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/975/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10090 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:46Z" ; + schema1:dateModified "2024-06-11T12:54:46Z" ; + schema1:description "Pipeline for the analysis of crispr data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/975?version=5" ; + schema1:keywords "crispr, crispr-analysis, crispr-cas, NGS" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/crisprseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/975?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1027?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/viralrecon" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/viralrecon" ; + schema1:sdDatePublished "2024-08-05 10:23:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1027/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9965 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:20Z" ; + schema1:dateModified "2024-06-11T12:55:20Z" ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1027?version=10" ; + schema1:keywords "Amplicon, ARTIC, Assembly, covid-19, covid19, illumina, long-read-sequencing, Metagenomics, nanopore, ONT, oxford-nanopore, SARS-CoV-2, variant-calling, viral, Virus" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/viralrecon" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1027?version=5" ; + schema1:version 5 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# TronFlow BAM preprocessing pipeline\r +\r +![GitHub tag (latest SemVer)](https://img.shields.io/github/v/release/tron-bioinformatics/tronflow-bam-preprocessing?sort=semver)\r +[![Automated tests](https://github.com/TRON-Bioinformatics/tronflow-bam-preprocessing/actions/workflows/automated_tests.yml/badge.svg)](https://github.com/TRON-Bioinformatics/tronflow-bam-preprocessing/actions/workflows/automated_tests.yml)\r +[![DOI](https://zenodo.org/badge/358400957.svg)](https://zenodo.org/badge/latestdoi/358400957)\r +[![License](https://img.shields.io/badge/license-MIT-green)](https://opensource.org/licenses/MIT)\r +[![Powered by Nextflow](https://img.shields.io/badge/powered%20by-Nextflow-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://www.nextflow.io/)\r +\r +The TronFlow BAM preprocessing pipeline is part of a collection of computational workflows for tumor-normal pair \r +somatic variant calling. These workflows are implemented in the Nextflow (Di Tommaso, 2017) framework.\r +\r +Find the documentation here [![Documentation Status](https://readthedocs.org/projects/tronflow-docs/badge/?version=latest)](https://tronflow-docs.readthedocs.io/en/latest/?badge=latest)\r +\r +\r +The aim of this workflow is to preprocess BAM files based on Picard and GATK (DePristo, 2011) best practices.\r +\r +\r +## Background\r +\r +In order to have a variant calling ready BAM file there are a number of operations that need to be applied on the BAM. 
\r +This pipeline depends on the particular variant caller, but there are some common operations.\r +\r +GATK has been providing a well known best practices document on BAM preprocessing, the latest best practices for \r +GATK4 (https://software.broadinstitute.org/gatk/best-practices/workflow?id=11165) does not perform anymore realignment around indels as opposed to best practices for GATK3 (https://software.broadinstitute.org/gatk/documentation/article?id=3238). This pipeline is based on both Picard and GATK. These best practices have been implemented a number of times, see for instance this implementation in Workflow Definition Language https://github.com/gatk-workflows/gatk4-data-processing/blob/master/processing-for-variant-discovery-gatk4.wdl.\r +\r +\r +## Objectives\r +\r +We aim at providing a single implementation of the BAM preprocessing pipeline that can be used across different \r +use cases. \r +For this purpose there are some required steps and some optional steps. \r +\r +The input can be either a tab-separated values file (`--input_files`) where each line corresponds to one input BAM or a single BAM (`--input_bam` and `--input_name`).\r +\r +## Implementation\r +\r +Steps:\r +\r +* **Clean BAM**. Sets the mapping quality to 0 for all unmapped reads and avoids soft clipping going beyond the reference genome boundaries. Implemented in Picard\r +* **Reorder chromosomes**. Makes the chromosomes in the BAM follow the same order as the reference genome. Implemented in Picard\r +* **Add read groups**. GATK requires that some headers are adde to the BAM, also we want to flag somehow the normal and tumor BAMs in the header as some callers, such as Mutect2 require it. Implemented in Picard.\r +* **Mark duplicates** (optional). Identify the PCR and the optical duplications and marks those reads. This uses the parallelized version on Spark, it is reported to scale linearly up to 16 CPUs.\r +* **Realignment around indels** (optional). 
This procedure is important for locus based variant callers, but for any variant caller doing haplotype assembly it is not needed. This is computing intensive as it first finds regions for realignment where there are indication of indels and then it performs a local realignment over those regions. Implemented in GATK3, deprecated in GATK4\r +* **Base Quality Score Recalibration (BQSR)** (optional). It aims at correcting systematic errors in the sequencer when assigning the base call quality errors, as these scores are used by variant callers it improves variant calling in some situations. Implemented in GATK4\r +* **Metrics** (optional). A number of metrics are obtained from the BAM file with Picard's CollectMetrics, CollectHsMetrics and samtools' coverage and depth.\r +\r +![Pipeline](figures/bam_preprocessing2.png)\r +\r +\r +## How to run it\r +\r +```\r +$ nextflow run tron-bioinformatics/tronflow-bam-preprocessing --help\r +\r +N E X T F L O W ~ version 19.07.0\r +Launching `main.nf` [intergalactic_shannon] - revision: e707c77d7b\r +\r +Usage:\r + main.nf --input_files input_files\r +\r +Input:\r + * --input_bam: the path to a single BAM (this option is not compatible with --input_files)\r + * --input_files: the path to a tab-separated values file containing in each row the sample name, sample type (eg: tumor or normal) and path to the BAM file (this option is not compatible with --input_bam)\r + Sample type will be added to the BAM header @SN sample name\r + The input file does not have header!\r + Example input file:\r + name1 tumor tumor.1.bam\r + name1 normal normal.1.bam\r + name2 tumor tumor.2.bam\r + * --reference: path to the FASTA genome reference (indexes expected *.fai, *.dict)\r +\r +Optional input:\r + * --input_name: the name of the sample. 
Only used when --input_bam is provided (default: normal)\r + * --dbsnp: path to the dbSNP VCF (required to perform BQSR)\r + * --known_indels1: path to a VCF of known indels (optional to perform realignment around indels)\r + * --known_indels2: path to a second VCF of known indels (optional to perform realignment around indels)\r + * --intervals: path to a BED file to collect coverage and HS metrics from (default: None)\r + * --collect_hs_minimum_base_quality: minimum base quality for a base to contribute coverage (default: 20).\r + * --collect_hs_minimum_mapping_quality: minimum mapping quality for a read to contribute coverage (default: 20).\r + * --skip_bqsr: optionally skip BQSR (default: false)\r + * --skip_realignment: optionally skip realignment (default: false)\r + * --skip_deduplication: optionally skip deduplication (default: false)\r + * --remove_duplicates: removes duplicate reads from output BAM instead of flagging them (default: true)\r + * --skip_metrics: optionally skip metrics (default: false)\r + * --output: the folder where to publish output (default: ./output)\r + * --platform: the platform to be added to the BAM header. 
Valid values: [ILLUMINA, SOLID, LS454, HELICOS and PACBIO] (default: ILLUMINA)\r +\r +Computational resources:\r + * --prepare_bam_cpus: (default: 3)\r + * --prepare_bam_memory: (default: 8g)\r + * --mark_duplicates_cpus: (default: 16)\r + * --mark_duplicates_memory: (default: 64g)\r + * --realignment_around_indels_cpus: (default: 2)\r + * --realignment_around_indels_memory: (default: 31g)\r + * --bqsr_cpus: (default: 3)\r + * --bqsr_memory: (default: 4g)\r + * --metrics_cpus: (default: 1)\r + * --metrics_memory: (default: 8g)\r +\r + Output:\r + * Preprocessed and indexed BAMs\r + * Tab-separated values file with the absolute paths to the preprocessed BAMs, preprocessed_bams.txt\r +\r +Optional output:\r + * Recalibration report\r + * Deduplication metrics\r + * Realignment intervals\r + * GATK multiple metrics\r + * HS metrics\r + * Horizontal and vertical coverage metrics\r +```\r +\r +### Input table\r +\r +The table with FASTQ files expects two tab-separated columns **without a header**\r +\r +| Sample name | Sample type | BAM |\r +|----------------------|---------------------------------|------------------------------|\r +| sample_1 | normal | /path/to/sample_1.normal.bam |\r +| sample_1 | tumor | /path/to/sample_1.tumor.bam |\r +| sample_2 | normal | /path/to/sample_2.normal.bam |\r +| sample_2 | tumor | /path/to/sample_2.tumor.bam |\r +\r +The values used in `sample type` are arbitrary. These will be set in the BAM header tag @RG:SM for sample. 
There may be some downstream constraints, eg: Mutect2 pipeline requires that the sample type between normal and tumor samples of the same pair are not the same.\r +\r +### References\r +\r +The BAM preprocessing workflow requires the human reference genome (`--reference`)\r +Base Quality Score Recalibration (BQSR) requires dbSNP to avoid extracting error metrics from polymorphic sites (`--dbsnp`)\r +Realignment around indels requires a set of known indels (`--known_indels1` and `--known_indels2`).\r +These resources can be fetched from the GATK bundle https://gatk.broadinstitute.org/hc/en-us/articles/360035890811-Resource-bundle.\r +\r +Optionally, in order to run Picard's CollectHsMetrics a BED file will need to be provided (`--intervals`).\r +This BED file will also be used for `samtools coverage`.\r +\r +## Troubleshooting\r +\r +### Too new Java version for MarkDuplicatesSpark\r +\r +When using Java 11 the cryptic error messsage `java.lang.IllegalArgumentException: Unsupported class file major version 55` has been observed.\r +This issue is described here and the solution is to use Java 8 https://gatk.broadinstitute.org/hc/en-us/community/posts/360056174592-MarkDuplicatesSpark-crash.\r +\r +\r +\r +## Bibliography\r +\r +* DePristo M, Banks E, Poplin R, Garimella K, Maguire J, Hartl C, Philippakis A, del Angel G, Rivas MA, Hanna M, McKenna A, Fennell T, Kernytsky A, Sivachenko A, Cibulskis K, Gabriel S, Altshuler D, Daly M. (2011). A framework for variation discovery and genotyping using next-generation DNA sequencing data. Nat Genet, 43:491-498. DOI: 10.1038/ng.806.\r +* Di Tommaso, P., Chatzou, M., Floden, E. W., Barja, P. P., Palumbo, E., & Notredame, C. (2017). Nextflow enables reproducible computational workflows. Nature Biotechnology, 35(4), 316–319. 
10.1038/nbt.3820\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/419?version=1" ; + schema1:isBasedOn "https://github.com/TRON-Bioinformatics/tronflow-bam-preprocessing" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for TronFlow BAM preprocessing pipeline" ; + schema1:sdDatePublished "2024-08-05 10:31:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/419/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4865 ; + schema1:dateCreated "2023-01-17T16:54:14Z" ; + schema1:dateModified "2023-01-17T16:54:14Z" ; + schema1:description """# TronFlow BAM preprocessing pipeline\r +\r +![GitHub tag (latest SemVer)](https://img.shields.io/github/v/release/tron-bioinformatics/tronflow-bam-preprocessing?sort=semver)\r +[![Automated tests](https://github.com/TRON-Bioinformatics/tronflow-bam-preprocessing/actions/workflows/automated_tests.yml/badge.svg)](https://github.com/TRON-Bioinformatics/tronflow-bam-preprocessing/actions/workflows/automated_tests.yml)\r +[![DOI](https://zenodo.org/badge/358400957.svg)](https://zenodo.org/badge/latestdoi/358400957)\r +[![License](https://img.shields.io/badge/license-MIT-green)](https://opensource.org/licenses/MIT)\r +[![Powered by Nextflow](https://img.shields.io/badge/powered%20by-Nextflow-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://www.nextflow.io/)\r +\r +The TronFlow BAM preprocessing pipeline is part of a collection of computational workflows for tumor-normal pair \r +somatic variant calling. 
These workflows are implemented in the Nextflow (Di Tommaso, 2017) framework.\r +\r +Find the documentation here [![Documentation Status](https://readthedocs.org/projects/tronflow-docs/badge/?version=latest)](https://tronflow-docs.readthedocs.io/en/latest/?badge=latest)\r +\r +\r +The aim of this workflow is to preprocess BAM files based on Picard and GATK (DePristo, 2011) best practices.\r +\r +\r +## Background\r +\r +In order to have a variant calling ready BAM file there are a number of operations that need to be applied on the BAM. \r +This pipeline depends on the particular variant caller, but there are some common operations.\r +\r +GATK has been providing a well known best practices document on BAM preprocessing, the latest best practices for \r +GATK4 (https://software.broadinstitute.org/gatk/best-practices/workflow?id=11165) does not perform anymore realignment around indels as opposed to best practices for GATK3 (https://software.broadinstitute.org/gatk/documentation/article?id=3238). This pipeline is based on both Picard and GATK. These best practices have been implemented a number of times, see for instance this implementation in Workflow Definition Language https://github.com/gatk-workflows/gatk4-data-processing/blob/master/processing-for-variant-discovery-gatk4.wdl.\r +\r +\r +## Objectives\r +\r +We aim at providing a single implementation of the BAM preprocessing pipeline that can be used across different \r +use cases. \r +For this purpose there are some required steps and some optional steps. \r +\r +The input can be either a tab-separated values file (`--input_files`) where each line corresponds to one input BAM or a single BAM (`--input_bam` and `--input_name`).\r +\r +## Implementation\r +\r +Steps:\r +\r +* **Clean BAM**. Sets the mapping quality to 0 for all unmapped reads and avoids soft clipping going beyond the reference genome boundaries. Implemented in Picard\r +* **Reorder chromosomes**. 
Makes the chromosomes in the BAM follow the same order as the reference genome. Implemented in Picard\r +* **Add read groups**. GATK requires that some headers are adde to the BAM, also we want to flag somehow the normal and tumor BAMs in the header as some callers, such as Mutect2 require it. Implemented in Picard.\r +* **Mark duplicates** (optional). Identify the PCR and the optical duplications and marks those reads. This uses the parallelized version on Spark, it is reported to scale linearly up to 16 CPUs.\r +* **Realignment around indels** (optional). This procedure is important for locus based variant callers, but for any variant caller doing haplotype assembly it is not needed. This is computing intensive as it first finds regions for realignment where there are indication of indels and then it performs a local realignment over those regions. Implemented in GATK3, deprecated in GATK4\r +* **Base Quality Score Recalibration (BQSR)** (optional). It aims at correcting systematic errors in the sequencer when assigning the base call quality errors, as these scores are used by variant callers it improves variant calling in some situations. Implemented in GATK4\r +* **Metrics** (optional). 
A number of metrics are obtained from the BAM file with Picard's CollectMetrics, CollectHsMetrics and samtools' coverage and depth.\r +\r +![Pipeline](figures/bam_preprocessing2.png)\r +\r +\r +## How to run it\r +\r +```\r +$ nextflow run tron-bioinformatics/tronflow-bam-preprocessing --help\r +\r +N E X T F L O W ~ version 19.07.0\r +Launching `main.nf` [intergalactic_shannon] - revision: e707c77d7b\r +\r +Usage:\r + main.nf --input_files input_files\r +\r +Input:\r + * --input_bam: the path to a single BAM (this option is not compatible with --input_files)\r + * --input_files: the path to a tab-separated values file containing in each row the sample name, sample type (eg: tumor or normal) and path to the BAM file (this option is not compatible with --input_bam)\r + Sample type will be added to the BAM header @SN sample name\r + The input file does not have header!\r + Example input file:\r + name1 tumor tumor.1.bam\r + name1 normal normal.1.bam\r + name2 tumor tumor.2.bam\r + * --reference: path to the FASTA genome reference (indexes expected *.fai, *.dict)\r +\r +Optional input:\r + * --input_name: the name of the sample. 
Only used when --input_bam is provided (default: normal)\r + * --dbsnp: path to the dbSNP VCF (required to perform BQSR)\r + * --known_indels1: path to a VCF of known indels (optional to perform realignment around indels)\r + * --known_indels2: path to a second VCF of known indels (optional to perform realignment around indels)\r + * --intervals: path to a BED file to collect coverage and HS metrics from (default: None)\r + * --collect_hs_minimum_base_quality: minimum base quality for a base to contribute coverage (default: 20).\r + * --collect_hs_minimum_mapping_quality: minimum mapping quality for a read to contribute coverage (default: 20).\r + * --skip_bqsr: optionally skip BQSR (default: false)\r + * --skip_realignment: optionally skip realignment (default: false)\r + * --skip_deduplication: optionally skip deduplication (default: false)\r + * --remove_duplicates: removes duplicate reads from output BAM instead of flagging them (default: true)\r + * --skip_metrics: optionally skip metrics (default: false)\r + * --output: the folder where to publish output (default: ./output)\r + * --platform: the platform to be added to the BAM header. 
Valid values: [ILLUMINA, SOLID, LS454, HELICOS and PACBIO] (default: ILLUMINA)\r +\r +Computational resources:\r + * --prepare_bam_cpus: (default: 3)\r + * --prepare_bam_memory: (default: 8g)\r + * --mark_duplicates_cpus: (default: 16)\r + * --mark_duplicates_memory: (default: 64g)\r + * --realignment_around_indels_cpus: (default: 2)\r + * --realignment_around_indels_memory: (default: 31g)\r + * --bqsr_cpus: (default: 3)\r + * --bqsr_memory: (default: 4g)\r + * --metrics_cpus: (default: 1)\r + * --metrics_memory: (default: 8g)\r +\r + Output:\r + * Preprocessed and indexed BAMs\r + * Tab-separated values file with the absolute paths to the preprocessed BAMs, preprocessed_bams.txt\r +\r +Optional output:\r + * Recalibration report\r + * Deduplication metrics\r + * Realignment intervals\r + * GATK multiple metrics\r + * HS metrics\r + * Horizontal and vertical coverage metrics\r +```\r +\r +### Input table\r +\r +The table with FASTQ files expects two tab-separated columns **without a header**\r +\r +| Sample name | Sample type | BAM |\r +|----------------------|---------------------------------|------------------------------|\r +| sample_1 | normal | /path/to/sample_1.normal.bam |\r +| sample_1 | tumor | /path/to/sample_1.tumor.bam |\r +| sample_2 | normal | /path/to/sample_2.normal.bam |\r +| sample_2 | tumor | /path/to/sample_2.tumor.bam |\r +\r +The values used in `sample type` are arbitrary. These will be set in the BAM header tag @RG:SM for sample. 
There may be some downstream constraints, eg: Mutect2 pipeline requires that the sample type between normal and tumor samples of the same pair are not the same.\r +\r +### References\r +\r +The BAM preprocessing workflow requires the human reference genome (`--reference`)\r +Base Quality Score Recalibration (BQSR) requires dbSNP to avoid extracting error metrics from polymorphic sites (`--dbsnp`)\r +Realignment around indels requires a set of known indels (`--known_indels1` and `--known_indels2`).\r +These resources can be fetched from the GATK bundle https://gatk.broadinstitute.org/hc/en-us/articles/360035890811-Resource-bundle.\r +\r +Optionally, in order to run Picard's CollectHsMetrics a BED file will need to be provided (`--intervals`).\r +This BED file will also be used for `samtools coverage`.\r +\r +## Troubleshooting\r +\r +### Too new Java version for MarkDuplicatesSpark\r +\r +When using Java 11 the cryptic error messsage `java.lang.IllegalArgumentException: Unsupported class file major version 55` has been observed.\r +This issue is described here and the solution is to use Java 8 https://gatk.broadinstitute.org/hc/en-us/community/posts/360056174592-MarkDuplicatesSpark-crash.\r +\r +\r +\r +## Bibliography\r +\r +* DePristo M, Banks E, Poplin R, Garimella K, Maguire J, Hartl C, Philippakis A, del Angel G, Rivas MA, Hanna M, McKenna A, Fennell T, Kernytsky A, Sivachenko A, Cibulskis K, Gabriel S, Altshuler D, Daly M. (2011). A framework for variation discovery and genotyping using next-generation DNA sequencing data. Nat Genet, 43:491-498. DOI: 10.1038/ng.806.\r +* Di Tommaso, P., Chatzou, M., Floden, E. W., Barja, P. P., Palumbo, E., & Notredame, C. (2017). Nextflow enables reproducible computational workflows. Nature Biotechnology, 35(4), 316–319. 
10.1038/nbt.3820\r +""" ; + schema1:isPartOf ; + schema1:keywords "Bioinformatics, GATK4, sambamba" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "TronFlow BAM preprocessing pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/419?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """This workflow has been created as part of Demonstrator 6 of the project EOSC-Life (within WP3) and is focused on reusing publicly available RNAi screens to gain insights into the nucleolus biology. The workflow downloads images from the Image Data Resource (IDR), performs object segmentation (of nuclei and nucleoli) and feature extraction of the images and objects identified.\r +\r +Tutorial: https://training.galaxyproject.org/training-material/topics/imaging/tutorials/tutorial-CP/tutorial.html""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/41?version=2" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Nucleoli segmentation using CellProfiler (EOSC-Life D6)" ; + schema1:sdDatePublished "2024-08-05 10:30:08 +0100" ; + schema1:url "https://workflowhub.eu/workflows/41/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 56581 ; + schema1:dateCreated "2020-11-03T22:12:24Z" ; + schema1:dateModified "2023-07-03T10:15:31Z" ; + schema1:description """This workflow has been created as part of Demonstrator 6 of the project EOSC-Life (within WP3) and is focused on reusing publicly available RNAi screens to gain insights into the nucleolus biology. 
The workflow downloads images from the Image Data Resource (IDR), performs object segmentation (of nuclei and nucleoli) and feature extraction of the images and objects identified.\r +\r +Tutorial: https://training.galaxyproject.org/training-material/topics/imaging/tutorials/tutorial-CP/tutorial.html""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/41?version=1" ; + schema1:keywords "CellProfiler, Galaxy, image processing, imaging" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Nucleoli segmentation using CellProfiler (EOSC-Life D6)" ; + schema1:producer , + , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/41?version=2" ; + schema1:version 2 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """The aim of this workflow is to handle the routine part of shotgun metagenomics data processing on Galaxy Australia. \r +\r +The workflow is using the tools MetaPhlAn2 for taxonomy classification and HUMAnN2 for functional profiling of the metagenomes. The workflow is based on the Galaxy Training tutorial 'Analyses of metagenomics data - The global picture' (Saskia Hiltemann, Bérénice Batut) https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/general-tutorial/tutorial.html#shotgun-metagenomics-data. 
\r +\r +The how-to guide is available here: https://vmurigneu.github.io/shotgun_howto_ga_workflows/\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.624.1" ; + schema1:isBasedOn "https://github.com/vmurigneu/shotgun_howto_ga_workflows" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Analyses of shotgun metagenomics data with MetaPhlAn2" ; + schema1:sdDatePublished "2024-08-05 10:27:09 +0100" ; + schema1:url "https://workflowhub.eu/workflows/624/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 156545 ; + schema1:creator , + , + , + , + , + ; + schema1:dateCreated "2023-10-26T19:34:08Z" ; + schema1:dateModified "2024-04-05T05:20:07Z" ; + schema1:description """The aim of this workflow is to handle the routine part of shotgun metagenomics data processing on Galaxy Australia. \r +\r +The workflow is using the tools MetaPhlAn2 for taxonomy classification and HUMAnN2 for functional profiling of the metagenomes. The workflow is based on the Galaxy Training tutorial 'Analyses of metagenomics data - The global picture' (Saskia Hiltemann, Bérénice Batut) https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/general-tutorial/tutorial.html#shotgun-metagenomics-data. \r +\r +The how-to guide is available here: https://vmurigneu.github.io/shotgun_howto_ga_workflows/\r +""" ; + schema1:image ; + schema1:keywords "Metagenomics, GUCFG2galaxy, shotgun" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Analyses of shotgun metagenomics data with MetaPhlAn2" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/624?version=1" ; + schema1:version 1 . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 115054 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 6396 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + schema1:dateModified "2024-05-29T10:05:27+00:00" ; + schema1:hasPart , + , + , + ; + schema1:name "A" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16 ; + schema1:dateModified "2024-06-18T13:37:20+00:00" ; + schema1:name "A.0.0" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16 ; + schema1:dateModified "2024-06-18T13:37:20+00:00" ; + schema1:name "A.0.1" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16 ; + schema1:dateModified "2024-06-18T13:37:20+00:00" ; + schema1:name "A.1.0" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16 ; + schema1:dateModified "2024-06-18T13:37:20+00:00" ; + schema1:name "A.1.1" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . + + a schema1:Dataset ; + schema1:dateModified "2022-05-09T10:23:59+00:00" ; + schema1:hasPart , + , + , + ; + schema1:name "B" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16 ; + schema1:dateModified "2024-06-18T13:37:20+00:00" ; + schema1:name "B.0.0" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16 ; + schema1:dateModified "2024-06-18T13:37:20+00:00" ; + schema1:name "B.0.1" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16 ; + schema1:dateModified "2024-06-18T13:37:20+00:00" ; + schema1:name "B.1.0" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16 ; + schema1:dateModified "2024-06-18T13:37:20+00:00" ; + schema1:name "B.1.1" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 20 ; + schema1:dateModified "2024-06-18T13:37:24+00:00" ; + schema1:name "C.0.0" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 20 ; + schema1:dateModified "2024-06-18T13:37:24+00:00" ; + schema1:name "C.0.1" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 20 ; + schema1:dateModified "2024-06-18T13:37:24+00:00" ; + schema1:name "C.1.0" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 20 ; + schema1:dateModified "2024-06-18T13:37:24+00:00" ; + schema1:name "C.1.1" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1023?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/smrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/smrnaseq" ; + schema1:sdDatePublished "2024-08-05 10:23:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1023/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6277 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:18Z" ; + schema1:dateModified "2024-06-11T12:55:18Z" ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1023?version=10" ; + schema1:keywords "small-rna, smrna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/smrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1023?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# HiC contact map generation\r +\r +Snakemake pipeline for the generation of `.pretext` and `.mcool` files for visualisation of HiC contact maps with the softwares PretextView and HiGlass, respectively.\r +\r +## Prerequisites\r +\r +This pipeine has been tested using `Snakemake v7.32.4` and requires conda for installation of required tools. To run the pipline use the command:\r +\r +`snakemake --use-conda`\r +\r +There are provided a set of configuration and running scripts for exectution on a slurm queueing system. After configuring the `cluster.json` file run:\r +\r +`./run_cluster`\r +\r +## Before starting\r +\r +You need to create a temporary folder and specify the path in the `config.yaml` file. This should be able to hold the temporary files created when sorting the `.pairsam` file (100s of GB or even many TBs)\r +\r +The path to the genome assemly must be given in the `config.yaml`.\r +\r +The HiC reads should be paired and named as follows: `Library_1.fastq.gz Library_2.fastq.gz`. 
The pipeline can accept any number of paired HiC read files, but the naming must be consistent. The folder containing these files must be provided in the `config.yaml`.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.795.2" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for HiC contact map generation" ; + schema1:sdDatePublished "2024-08-05 10:25:08 +0100" ; + schema1:url "https://workflowhub.eu/workflows/795/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5790 ; + schema1:creator ; + schema1:dateCreated "2024-04-29T12:22:21Z" ; + schema1:dateModified "2024-04-29T12:25:04Z" ; + schema1:description """# HiC contact map generation\r +\r +Snakemake pipeline for the generation of `.pretext` and `.mcool` files for visualisation of HiC contact maps with the softwares PretextView and HiGlass, respectively.\r +\r +## Prerequisites\r +\r +This pipeine has been tested using `Snakemake v7.32.4` and requires conda for installation of required tools. To run the pipline use the command:\r +\r +`snakemake --use-conda`\r +\r +There are provided a set of configuration and running scripts for exectution on a slurm queueing system. After configuring the `cluster.json` file run:\r +\r +`./run_cluster`\r +\r +## Before starting\r +\r +You need to create a temporary folder and specify the path in the `config.yaml` file. This should be able to hold the temporary files created when sorting the `.pairsam` file (100s of GB or even many TBs)\r +\r +The path to the genome assemly must be given in the `config.yaml`.\r +\r +The HiC reads should be paired and named as follows: `Library_1.fastq.gz Library_2.fastq.gz`. The pipeline can accept any number of paired HiC read files, but the naming must be consistent. 
The folder containing these files must be provided in the `config.yaml`.\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/795?version=1" ; + schema1:isPartOf ; + schema1:keywords "Bioinformatics, Genomics, Snakemake, Genome assembly" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "HiC contact map generation" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/795?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/987?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/funcscan" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/funcscan" ; + schema1:sdDatePublished "2024-08-05 10:23:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/987/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14829 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:53Z" ; + schema1:dateModified "2024-06-11T12:54:53Z" ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/987?version=8" ; + schema1:keywords "amp, AMR, antibiotic-resistance, antimicrobial-peptides, antimicrobial-resistance-genes, arg, Assembly, bgc, biosynthetic-gene-clusters, contigs, function, Metagenomics, natural-products, screening, secondary-metabolites" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/funcscan" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/987?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 260152 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "application/pdf" ; + schema1:name "complete_graph.pdf" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """This workflow is used to process timeseries from meteorological stations in Finland but can be applied to any timeseries according it follows the same format.\r +\r +Take a temperature timeseries from any meteorological station. Input format is csv and it must be standardized with 6 columns:\r +\r +1. Year (ex: 2021)\r +2. month (ex: 1)\r +3. day (ex: 15) \r +4. Time (ex: 16:56)\r +5. Time zone (such as UTC)\r +6. 
Air temperature (degC)""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/123?version=1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Compute daily and monthly mean from meteorological station measurements" ; + schema1:sdDatePublished "2024-08-05 10:33:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/123/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 27186 ; + schema1:dateCreated "2021-05-23T19:28:06Z" ; + schema1:dateModified "2023-01-16T13:50:01Z" ; + schema1:description """This workflow is used to process timeseries from meteorological stations in Finland but can be applied to any timeseries according it follows the same format.\r +\r +Take a temperature timeseries from any meteorological station. Input format is csv and it must be standardized with 6 columns:\r +\r +1. Year (ex: 2021)\r +2. month (ex: 1)\r +3. day (ex: 15) \r +4. Time (ex: 16:56)\r +5. Time zone (such as UTC)\r +6. Air temperature (degC)""" ; + schema1:keywords "Climate, eosc-nordic, observation" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Compute daily and monthly mean from meteorological station measurements" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/123?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Abstract CWL Automatically generated from the Galaxy workflow file: Workflow with Copernicus Essential Climate Variable - select and plot" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/46?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Copernicus Essential Climate Variable - select and plot" ; + schema1:sdDatePublished "2024-08-05 10:33:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/46/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 2916 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15139 ; + schema1:dateCreated "2020-07-23T18:22:21Z" ; + schema1:dateModified "2023-01-16T13:44:09Z" ; + schema1:description "Abstract CWL Automatically generated from the Galaxy workflow file: Workflow with Copernicus Essential Climate Variable - select and plot" ; + schema1:image ; + schema1:keywords "Galaxy, Climate, copernicus" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Copernicus Essential Climate Variable - select and plot" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/46?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 7145 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Take a scRNAseq counts matrix from a single sample, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. 
Produces a processed AnnData \r +object.""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/514?version=2" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq Single Sample Processing Counts Matrix" ; + schema1:sdDatePublished "2024-08-05 10:24:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/514/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 105641 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-11-09T01:56:24Z" ; + schema1:dateModified "2023-11-09T03:52:51Z" ; + schema1:description """Take a scRNAseq counts matrix from a single sample, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. Produces a processed AnnData \r +object.""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/514?version=2" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "scRNAseq Single Sample Processing Counts Matrix" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/514?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/278?version=11" ; + schema1:isBasedOn "https://gitlab.bsc.es/lrodrig1/structuralvariants_poc/-/tree/1.1.3/structuralvariants/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNV_pipeline" ; + schema1:sdDatePublished "2024-08-05 10:32:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/278/ro_crate?version=11" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9694 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9474 ; + schema1:creator , + ; + schema1:dateCreated "2022-10-11T11:04:44Z" ; + schema1:dateModified "2023-01-16T13:57:59Z" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/278?version=10" ; + schema1:keywords "cancer, CODEX2, ExomeDepth, manta, TransBioNet, variant calling, GRIDSS, structural variants" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CNV_pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/278?version=11" ; + schema1:version 11 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 72914 . + + a schema1:Dataset ; + schema1:datePublished "2024-01-12T17:00:57.136582" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-Trio-phasing-VGP5/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.19" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# CMIP tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of computing **classical molecular interaction potentials** from **protein structures**, step by step, using the **BioExcel Building Blocks library (biobb)**. Examples shown are **Molecular Interaction Potentials (MIPs) grids, protein-protein/ligand interaction potentials, and protein titration**. The particular structures used are the **Lysozyme** protein (PDB code [1AKI](https://www.rcsb.org/structure/1aki)), and a MD simulation of the complex formed by the **SARS-CoV-2 Receptor Binding Domain and the human Angiotensin Converting Enzyme 2** (PDB code [6VW1](https://www.rcsb.org/structure/6vw1)).\r +\r +The code wrapped is the ***Classical Molecular Interaction Potentials (CMIP)*** code:\r +\r +**Classical molecular interaction potentials: Improved setup procedure in molecular dynamics simulations of proteins.**\r +*Gelpí, J.L., Kalko, S.G., Barril, X., Cirera, J., de la Cruz, X., Luque, F.J. and Orozco, M. (2001)*\r +*Proteins, 45: 428-437. 
[https://doi.org/10.1002/prot.1159](https://doi.org/10.1002/prot.1159)*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.820.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_cmip/docker" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Docker Classical Molecular Interaction Potentials" ; + schema1:sdDatePublished "2024-08-05 10:24:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/820/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 737 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-22T10:28:04Z" ; + schema1:dateModified "2024-04-22T10:32:32Z" ; + schema1:description """# CMIP tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of computing **classical molecular interaction potentials** from **protein structures**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
Examples shown are **Molecular Interaction Potentials (MIPs) grids, protein-protein/ligand interaction potentials, and protein titration**. The particular structures used are the **Lysozyme** protein (PDB code [1AKI](https://www.rcsb.org/structure/1aki)), and a MD simulation of the complex formed by the **SARS-CoV-2 Receptor Binding Domain and the human Angiotensin Converting Enzyme 2** (PDB code [6VW1](https://www.rcsb.org/structure/6vw1)).\r +\r +The code wrapped is the ***Classical Molecular Interaction Potentials (CMIP)*** code:\r +\r +**Classical molecular interaction potentials: Improved setup procedure in molecular dynamics simulations of proteins.**\r +*Gelpí, J.L., Kalko, S.G., Barril, X., Cirera, J., de la Cruz, X., Luque, F.J. and Orozco, M. (2001)*\r +*Proteins, 45: 428-437. [https://doi.org/10.1002/prot.1159](https://doi.org/10.1002/prot.1159)*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Docker Classical Molecular Interaction Potentials" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_cmip/docker/Dockerfile" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Workflow for Metagenomics from raw reads to annotated bins.\r +**Steps:**\r +* workflow_quality.cwl:\r + * FastQC (control)\r + * fastp (quality trimming)\r + * bbmap contamination filter\r +* SPAdes (Assembly)\r +* QUAST (Assembly quality report)\r +* BBmap (Read mapping to assembly)\r +* MetaBat2 (binning)\r +* CheckM (bin completeness and contamination)\r +* GTDB-Tk (bin taxonomic classification)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/64?version=9" ; + schema1:isBasedOn "https://gitlab.com/m-unlock/cwl/-/blob/master/cwl/workflows/workflow_metagenomics_binning.cwl" ; + schema1:license "CC0-1.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Metagenomics Binning Workflow" ; + schema1:sdDatePublished "2024-08-05 10:31:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/64/ro_crate?version=9" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 61045 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13780 ; + schema1:creator , + ; + schema1:dateCreated "2021-06-01T10:43:13Z" ; + schema1:dateModified "2021-06-07T17:04:02Z" ; + schema1:description """Workflow for Metagenomics from raw reads to annotated bins.\r +**Steps:**\r +* workflow_quality.cwl:\r + * FastQC (control)\r + * fastp (quality trimming)\r + * bbmap contamination filter\r +* SPAdes (Assembly)\r +* QUAST (Assembly quality report)\r +* BBmap (Read mapping to assembly)\r +* MetaBat2 (binning)\r +* CheckM (bin completeness and contamination)\r +* GTDB-Tk (bin taxonomic classification)\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/64?version=10" ; + schema1:keywords "Metagenomics, microbial, metagenome, binning" ; + schema1:license "https://spdx.org/licenses/CC0-1.0" ; + schema1:name "Metagenomics Binning Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/64?version=9" ; + schema1:version 9 ; + ns1:input , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Conformational Transitions calculations tutorial using BioExcel Building Blocks (biobb) and GOdMD\r +\r +This tutorial aims to illustrate the process of computing a conformational transition between two known structural conformations of a protein, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.548.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_godmd" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein Conformational Transitions calculations tutorial" ; + schema1:sdDatePublished "2024-08-05 10:29:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/548/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 20808 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-04T15:26:51Z" ; + schema1:dateModified "2024-05-14T10:15:26Z" ; + schema1:description """# Protein Conformational Transitions calculations tutorial using BioExcel Building Blocks (biobb) and GOdMD\r +\r +This tutorial aims to illustrate the process of computing a conformational transition between two known structural conformations of a protein, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/548?version=1" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein Conformational Transitions calculations tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_godmd/blob/main/biobb_wf_godmd/notebooks/biobb_wf_godmd.ipynb" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-08-05 10:23:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3843 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:02Z" ; + schema1:dateModified "2024-06-11T12:55:02Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2020-12-18T14:44:57.573950" ; + schema1:description "Continuous flexibility analysis of SARS-CoV-2 Spike prefusion structures" ; + schema1:hasPart , + ; + schema1:image "workflow.svg" ; + schema1:keywords "cryoem", + "scipion" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Cryo electron microscopy of SARS-CoV-2 spike in prefusion state" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:image ; + schema1:name "workflow" ; + schema1:programmingLanguage . + + a schema1:MediaObject . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=23" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-08-05 10:23:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=23" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15178 ; + schema1:creator , + ; + schema1:dateCreated "2024-07-02T03:02:47Z" ; + schema1:dateModified "2024-07-02T03:02:47Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=23" ; + schema1:version 23 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """A CWL-based pipeline for calling small germline variants, namely SNPs and small INDELs, by processing data from Whole-genome Sequencing (WGS) or Targeted Sequencing (e.g., Whole-exome sequencing; WES) experiments.\r +\r +On the respective GitHub folder are available:\r +\r +- The CWL wrappers and subworkflows for the workflow\r +- A pre-configured YAML template, based on validation analysis of publicly available HTS data\r +\r +Briefly, the workflow performs the following steps:\r +\r +1. Quality control of Illumina reads (FastQC)\r +2. Trimming of the reads (e.g., removal of adapter and/or low quality sequences) (Trim galore)\r +3. Mapping to reference genome (BWA-MEM)\r +4. Convertion of mapped reads from SAM (Sequence Alignment Map) to BAM (Binary Alignment Map) format (samtools)\r +5. Sorting mapped reads based on read names (samtools)\r +6. Adding information regarding paired end reads (e.g., CIGAR field information) (samtools)\r +7. Re-sorting mapped reads based on chromosomal coordinates (samtools)\r +8. Adding basic Read-Group information regarding sample name, platform unit, platform (e.g., ILLUMINA), library and identifier (picard AddOrReplaceReadGroups)\r +9. Marking PCR and/or optical duplicate reads (picard MarkDuplicates)\r +10. Collection of summary statistics (samtools) \r +11. Creation of indexes for coordinate-sorted BAM files to enable fast random access (samtools)\r +12. Splitting the reference genome into a predefined number of intervals for parallel processing (GATK SplitIntervals)\r +\r +At this point the application of single-sample workflow follows, during which multiple samples are accepted as input and they are not merged into a unified VCF file but are rather processed separately in each step of the workflow, leading to the production of a VCF file for each sample:\r +\r +13. 
Application of Base Quality Score Recalibration (BQSR) (GATK BaseRecalibrator, GatherBQSRReports and ApplyBQSR tools)\r +14. Variant calling (GATK HaplotypeCaller) \r +15. Merging of all genomic interval-split gVCF files for each sample (GATK MergeVCFs)\r +16. Separate annotation of SNPs and INDELs based on pretrained Convolutional Neural Network (CNN) models (GATK SelectVariants, CNNScoreVariants and FilterVariantTranches tools)\r +17. (Optional) Independent step of hard-filtering (GATK VariantFiltration)\r +18. Variant filtering based on the information added during VQSR and/or custom filters (bcftools)\r +19. Normalization of INDELs (split multiallelic sites) (bcftools)\r +20. Annotation of the final dataset of filtered variants with genomic, population-related and/or clinical information (ANNOVAR)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.527.1" ; + schema1:isBasedOn "https://github.com/BiodataAnalysisGroup/CWL_HTS_pipelines/tree/main/Germline_Variant_calling/single-sample_analysis/with_BQSR_CNN_%26_hard_filtering" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL-based (single-sample) workflow for germline variant calling" ; + schema1:sdDatePublished "2024-08-05 10:29:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/527/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 36026 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-07-05T09:48:09Z" ; + schema1:dateModified "2023-07-05T09:49:33Z" ; + schema1:description """A CWL-based pipeline for calling small germline variants, namely SNPs and small INDELs, by processing data from Whole-genome Sequencing (WGS) or Targeted Sequencing (e.g., Whole-exome sequencing; WES) experiments.\r +\r +On the respective GitHub folder are available:\r +\r +- The CWL wrappers and subworkflows for the workflow\r +- A pre-configured YAML template, based on validation analysis of publicly available HTS data\r +\r +Briefly, the workflow performs the following steps:\r +\r +1. Quality control of Illumina reads (FastQC)\r +2. Trimming of the reads (e.g., removal of adapter and/or low quality sequences) (Trim galore)\r +3. Mapping to reference genome (BWA-MEM)\r +4. Convertion of mapped reads from SAM (Sequence Alignment Map) to BAM (Binary Alignment Map) format (samtools)\r +5. Sorting mapped reads based on read names (samtools)\r +6. Adding information regarding paired end reads (e.g., CIGAR field information) (samtools)\r +7. Re-sorting mapped reads based on chromosomal coordinates (samtools)\r +8. Adding basic Read-Group information regarding sample name, platform unit, platform (e.g., ILLUMINA), library and identifier (picard AddOrReplaceReadGroups)\r +9. Marking PCR and/or optical duplicate reads (picard MarkDuplicates)\r +10. Collection of summary statistics (samtools) \r +11. Creation of indexes for coordinate-sorted BAM files to enable fast random access (samtools)\r +12. 
Splitting the reference genome into a predefined number of intervals for parallel processing (GATK SplitIntervals)\r +\r +At this point the application of single-sample workflow follows, during which multiple samples are accepted as input and they are not merged into a unified VCF file but are rather processed separately in each step of the workflow, leading to the production of a VCF file for each sample:\r +\r +13. Application of Base Quality Score Recalibration (BQSR) (GATK BaseRecalibrator, GatherBQSRReports and ApplyBQSR tools)\r +14. Variant calling (GATK HaplotypeCaller) \r +15. Merging of all genomic interval-split gVCF files for each sample (GATK MergeVCFs)\r +16. Separate annotation of SNPs and INDELs based on pretrained Convolutional Neural Network (CNN) models (GATK SelectVariants, CNNScoreVariants and FilterVariantTranches tools)\r +17. (Optional) Independent step of hard-filtering (GATK VariantFiltration)\r +18. Variant filtering based on the information added during VQSR and/or custom filters (bcftools)\r +19. Normalization of INDELs (split multiallelic sites) (bcftools)\r +20. 
Annotation of the final dataset of filtered variants with genomic, population-related and/or clinical information (ANNOVAR)\r +""" ; + schema1:image ; + schema1:keywords "CWL, workflow, Germline, variant calling, Genomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "CWL-based (single-sample) workflow for germline variant calling" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/527?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 221255 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """Phylogenetic reconstruction using genome-wide and single-gene alignment data. Here we use maximum likelihood reconstruction program IQTree. \r +Data can be prepared using the [phylogenetic data preparation workflow](http://workflowhub.eu/workflows/358) prior to phylogenetic reconstruction.\r +Resulting trees can be viewed interactively using Galaxy's 'Phyloviz' or 'Phylogenetic Tree Visualization'""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/359?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ML phylogenetic reconstruction" ; + schema1:sdDatePublished "2024-08-05 10:32:04 +0100" ; + schema1:url "https://workflowhub.eu/workflows/359/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 21956 ; + schema1:creator ; + schema1:dateCreated "2022-06-06T14:14:25Z" ; + schema1:dateModified "2023-02-13T14:06:44Z" ; + schema1:description """Phylogenetic reconstruction using genome-wide and single-gene alignment data. Here we use maximum likelihood reconstruction program IQTree. \r +Data can be prepared using the [phylogenetic data preparation workflow](http://workflowhub.eu/workflows/358) prior to phylogenetic reconstruction.\r +Resulting trees can be viewed interactively using Galaxy's 'Phyloviz' or 'Phylogenetic Tree Visualization'""" ; + schema1:keywords "phylogenetics, phylogenomics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "ML phylogenetic reconstruction" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/359?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# HiFi *de novo* genome assembly workflow\r +\r +HiFi-assembly-workflow is a bioinformatics pipeline that can be used to analyse Pacbio CCS reads for *de novo* genome assembly using PacBio Circular Consensus Sequencing (CCS) reads. This workflow is implemented in Nextflow and has 3 major sections. 
\r + \r +Please refer to the following documentation for detailed description of each workflow section:\r + \r +- [Adapter filtration and pre-assembly quality control (QC)](https://australianbiocommons.github.io/hifi-assembly-workflow/recommendations#stage-1-adapter-filtration-and-pre-assembly-quality-control)\r +- [Assembly](https://australianbiocommons.github.io/hifi-assembly-workflow/recommendations#stage-2-assembly)\r +- [Post-assembly QC](https://australianbiocommons.github.io/hifi-assembly-workflow/recommendations#stage-3-post-assembly-quality-control)\r +\r +\r +## General recommendations \r +\r +A more detailed module and workflow description as well as execution examples on Gadi and Setonix are [available here](https://australianbiocommons.github.io/hifi-assembly-workflow/workflows).\r +\r +\r +## Attributions\r +\r +This work was developed at AGRF and supported by the Australian BioCommons via Bioplatforms Australia funding, the Australian Research Data Commons (https://doi.org/10.47486/PL105) and the Queensland Government RICF programme. Bioplatforms Australia and the Australian Research Data Commons are enabled by the National Collaborative Research Infrastructure Strategy (NCRIS).\r +\r +The documentation in this repository is based on Australian BioCommons guidelines. \r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/560?version=1" ; + schema1:isBasedOn "https://github.com/AustralianBioCommons/hifi-assembly-workflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for HiFi de novo genome assembly workflow" ; + schema1:sdDatePublished "2024-08-05 10:27:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/560/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7193 ; + schema1:creator , + , + , + , + , + ; + schema1:dateCreated "2023-08-31T07:41:03Z" ; + schema1:dateModified "2024-02-05T03:12:14Z" ; + schema1:description """# HiFi *de novo* genome assembly workflow\r +\r +HiFi-assembly-workflow is a bioinformatics pipeline that can be used to analyse Pacbio CCS reads for *de novo* genome assembly using PacBio Circular Consensus Sequencing (CCS) reads. This workflow is implemented in Nextflow and has 3 major sections. \r + \r +Please refer to the following documentation for detailed description of each workflow section:\r + \r +- [Adapter filtration and pre-assembly quality control (QC)](https://australianbiocommons.github.io/hifi-assembly-workflow/recommendations#stage-1-adapter-filtration-and-pre-assembly-quality-control)\r +- [Assembly](https://australianbiocommons.github.io/hifi-assembly-workflow/recommendations#stage-2-assembly)\r +- [Post-assembly QC](https://australianbiocommons.github.io/hifi-assembly-workflow/recommendations#stage-3-post-assembly-quality-control)\r +\r +\r +## General recommendations \r +\r +A more detailed module and workflow description as well as execution examples on Gadi and Setonix are [available here](https://australianbiocommons.github.io/hifi-assembly-workflow/workflows).\r +\r +\r +## Attributions\r +\r +This work was developed at AGRF and supported by the Australian BioCommons via Bioplatforms Australia funding, the Australian Research Data Commons (https://doi.org/10.47486/PL105) and the Queensland Government RICF programme. Bioplatforms Australia and the Australian Research Data Commons are enabled by the National Collaborative Research Infrastructure Strategy (NCRIS).\r +\r +The documentation in this repository is based on Australian BioCommons guidelines. 
\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "HiFi de novo genome assembly workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/560?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Simple bacterial assembly and annotation" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/966?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/bacass" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bacass" ; + schema1:sdDatePublished "2024-08-05 10:24:18 +0100" ; + schema1:url "https://workflowhub.eu/workflows/966/ro_crate?version=8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12331 ; + schema1:creator ; + schema1:dateCreated "2024-06-25T03:02:47Z" ; + schema1:dateModified "2024-06-25T03:02:47Z" ; + schema1:description "Simple bacterial assembly and annotation" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/966?version=7" ; + schema1:keywords "Assembly, bacterial-genomes, denovo, denovo-assembly, genome-assembly, hybrid-assembly, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bacass" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/966?version=8" ; + schema1:version 8 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """ORSON combine state-of-the-art tools for annotation processes within a Nextflow pipeline: sequence similarity search (PLAST, BLAST or Diamond), functional annotation retrieval (BeeDeeM) and functional prediction (InterProScan). When required, BUSCO completness evaluation and eggNOG Orthogroup annotation can be activated. While ORSON results can be analyzed through the command-line, it also offers the possibility to be compatible with BlastViewer or Blast2GO graphical tools.\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.136.1" ; + schema1:isBasedOn "https://gitlab.ifremer.fr/bioinfo/orson" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ORSON: workflow for prOteome and tRanScriptome functiOnal aNnotation" ; + schema1:sdDatePublished "2024-08-05 10:33:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/136/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 480384 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 0 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2021-07-08T14:18:03Z" ; + schema1:dateModified "2023-01-16T13:50:20Z" ; + schema1:description """ORSON combine state-of-the-art tools for annotation processes within a Nextflow pipeline: sequence similarity search (PLAST, BLAST or Diamond), functional annotation retrieval (BeeDeeM) and functional prediction (InterProScan). When required, BUSCO completness evaluation and eggNOG Orthogroup annotation can be activated. 
While ORSON results can be analyzed through the command-line, it also offers the possibility to be compatible with BlastViewer or Blast2GO graphical tools.\r +\r +""" ; + schema1:image ; + schema1:keywords "Annotation, Transcriptomics, Genomics, Proteomics, Nextflow" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ORSON: workflow for prOteome and tRanScriptome functiOnal aNnotation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://gitlab.ifremer.fr/bioinfo/orson/-/raw/master/main.nf" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier 
"https://doi.org/10.48546/workflowhub.workflow.130.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Amber Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:25:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/130/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 64833 ; + schema1:creator , + ; + schema1:dateCreated "2021-06-30T11:47:18Z" ; + schema1:dateModified "2022-09-15T12:29:05Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn 
"https://workflowhub.eu/workflows/130?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Amber Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_amber_md_setup/8bcda75405183a84476acd7ba733e4cb666ce397/biobb_wf_amber_md_setup/notebooks/mdsetup/biobb_amber_setup_notebook.ipynb" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# COnSensus Interaction Network InFErence Service\r +Inference framework for reconstructing networks using a consensus approach between multiple methods and data sources.\r +\r +![alt text](https://raw.githubusercontent.com/PhosphorylatedRabbits/cosifer/master/docs/_static/logo.png)\r +\r +## Reference\r +[Manica, Matteo, Charlotte, Bunne, Roland, Mathis, Joris, Cadow, Mehmet Eren, Ahsen, Gustavo A, Stolovitzky, and María Rodríguez, Martínez. "COSIFER: a python package for the consensus inference of molecular interaction networks".Bioinformatics (2020)](https://doi.org/10.1093/bioinformatics/btaa942).""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/119?version=1" ; + schema1:isBasedOn "https://github.com/inab/ipc_workflows/tree/main/cosifer/nextflow" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for COSIFER" ; + schema1:sdDatePublished "2024-08-05 10:33:16 +0100" ; + schema1:url "https://workflowhub.eu/workflows/119/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1362 ; + schema1:creator , + ; + schema1:dateCreated "2021-05-05T15:53:28Z" ; + schema1:dateModified "2023-04-21T11:04:40Z" ; + schema1:description """# COnSensus Interaction Network InFErence Service\r +Inference framework for reconstructing networks using a consensus approach between multiple methods and data sources.\r +\r +![alt text](https://raw.githubusercontent.com/PhosphorylatedRabbits/cosifer/master/docs/_static/logo.png)\r +\r +## Reference\r +[Manica, Matteo, Charlotte, Bunne, Roland, Mathis, Joris, Cadow, Mehmet Eren, Ahsen, Gustavo A, Stolovitzky, and María Rodríguez, Martínez. "COSIFER: a python package for the consensus inference of molecular interaction networks".Bioinformatics (2020)](https://doi.org/10.1093/bioinformatics/btaa942).""" ; + schema1:keywords "cosifer, cancer, pediatric, rna-seq" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "COSIFER" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/119?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Summary\r +\r +This tutorial aims to illustrate how to compute a fast-growth mutation free energy calculation, step by step, using the BioExcel Building Blocks library (biobb). The particular example used is the Staphylococcal nuclease protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +Workflow engine is a jupyter notebook. Auxiliary libraries used are nb\\_conda\\_kernels, os, and plotly. Environment setup can be carried out using the environment.yml in the code repository. 
The tutorial uses docker for running pmx - a local setup can be used instead, see notes in the tutorial.\r +\r +# Parameters\r +\r +## Inputs\r +\r +Workflow Input files needed:\r +\r +* **stateA_traj**: Equilibrium trajectory for the WT protein.\r +\r +* **stateB_traj**: Equilibrium trajectory for the Mutated protein.\r +\r +* **stateA_tpr**: WT protein topology (GROMACS tpr format).\r +\r +* **stateB_tpr**: Mutated protein topology (GROMACS tpr format).\r +\r +Auxiliar force field libraries needed:\r +\r +* **mutff45 (folder)**: pmx mutation force field libraries. \r +\r +\r +## Outputs\r +\r +* **pmx.outputs**: Final free energy estimation. Summary of information got applying the different methods.\r +\r +* **pmx.plots.png**: Final free energy plot of the Mutation free energy pipeline.\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.55.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_pmx_tutorial" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Mutation Free Energy Calculations using BioExcel Building Blocks (biobb) (jupyter notebook)" ; + schema1:sdDatePublished "2024-08-05 10:25:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/55/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 203732 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 49901 ; + schema1:creator , + ; + schema1:dateCreated "2020-09-15T11:56:52Z" ; + schema1:dateModified "2021-05-07T13:30:53Z" ; + schema1:description """# Summary\r +\r +This tutorial aims to illustrate how to compute a fast-growth mutation free energy calculation, step by step, using the BioExcel Building Blocks library (biobb). 
The particular example used is the Staphylococcal nuclease protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +Workflow engine is a jupyter notebook. Auxiliary libraries used are nb\\_conda\\_kernels, os, and plotly. Environment setup can be carried out using the environment.yml in the code repository. The tutorial uses docker for running pmx - a local setup can be used instead, see notes in the tutorial.\r +\r +# Parameters\r +\r +## Inputs\r +\r +Workflow Input files needed:\r +\r +* **stateA_traj**: Equilibrium trajectory for the WT protein.\r +\r +* **stateB_traj**: Equilibrium trajectory for the Mutated protein.\r +\r +* **stateA_tpr**: WT protein topology (GROMACS tpr format).\r +\r +* **stateB_tpr**: Mutated protein topology (GROMACS tpr format).\r +\r +Auxiliar force field libraries needed:\r +\r +* **mutff45 (folder)**: pmx mutation force field libraries. \r +\r +\r +## Outputs\r +\r +* **pmx.outputs**: Final free energy estimation. Summary of information got applying the different methods.\r +\r +* **pmx.plots.png**: Final free energy plot of the Mutation free energy pipeline.\r +\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/55?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Mutation Free Energy Calculations using BioExcel Building Blocks (biobb) (jupyter notebook)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/55?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# COVID-19 Multiscale Modelling of the Virus and Patients’ Tissue Workflow\r +\r +## Table of Contents\r +\r +- [COVID-19 Multiscale Modelling of the Virus and Patients’ Tissue Workflow](#covid-19-multiscale-modelling-of-the-virus-and-patients-tissue-workflow)\r + - [Table of Contents](#table-of-contents)\r + - [Description](#description)\r + - [Contents](#contents)\r + - [Building Blocks](#building-blocks)\r + - [Workflows](#workflows)\r + - [Resources](#resources)\r + - [Tests](#tests)\r + - [Instructions](#instructions)\r + - [Local machine](#local-machine)\r + - [Requirements](#requirements)\r + - [Usage steps](#usage-steps)\r + - [MareNostrum 4](#marenostrum-4)\r + - [Requirements in MN4](#requirements-in-mn4)\r + - [Usage steps in MN4](#usage-steps-in-mn4)\r + - [Mahti or Puhti](#mahti-or-puhti)\r + - [Requirements](#requirements)\r + - [Steps](#steps)\r + - [License](#license)\r + - [Contact](#contact)\r +\r +## Description\r +\r +Uses multiscale simulations to predict patient-specific SARS‑CoV‑2 severity subtypes\r +(moderate, severe or control), using single-cell RNA-Seq data, MaBoSS and PhysiBoSS.\r +Boolean models are used to determine the behaviour of individual agents as a function\r +of extracellular conditions and the concentration of different substrates, including\r +the number of virions. Predictions of severity subtypes are based on a meta-analysis of\r +personalised model outputs simulating cellular apoptosis regulation in epithelial cells\r +infected by SARS‑CoV‑2.\r +\r +The workflow uses the following building blocks, described in order of execution:\r +\r +1. High-throughput mutant analysis\r +2. Single-cell processing\r +3. Personalise patient\r +4. PhysiBoSS\r +5. 
Analysis of all simulations\r +\r +For details on individual workflow steps, see the user documentation for each building block.\r +\r +[`GitHub repository`]()\r +\r +\r +## Contents\r +\r +### Building Blocks\r +\r +The ``BuildingBlocks`` folder contains the script to install the\r +Building Blocks used in the COVID-19 Workflow.\r +\r +### Workflows\r +\r +The ``Workflow`` folder contains the workflows implementations.\r +\r +Currently contains the implementation using PyCOMPSs and Snakemake (in progress).\r +\r +### Resources\r +\r +The ``Resources`` folder contains dataset files.\r +\r +### Tests\r +\r +The ``Tests`` folder contains the scripts that run each Building Block\r +used in the workflow for the given small dataset.\r +They can be executed individually for testing purposes.\r +\r +## Instructions\r +\r +### Local machine\r +\r +This section explains the requirements and usage for the COVID19 Workflow in a laptop or desktop computer.\r +\r +#### Requirements\r +\r +- [`permedcoe`](https://github.com/PerMedCoE/permedcoe) package\r +- [PyCOMPSs](https://pycompss.readthedocs.io/en/stable/Sections/00_Quickstart.html) / [Snakemake](https://snakemake.readthedocs.io/en/stable/)\r +- [Singularity](https://sylabs.io/guides/3.0/user-guide/installation.html)\r +\r +#### Usage steps\r +\r +1. Clone this repository:\r +\r + ```bash\r + git clone https://github.com/PerMedCoE/covid-19-workflow.git\r + ```\r +\r +2. Install the Building Blocks required for the COVID19 Workflow:\r +\r + ```bash\r + covid-19-workflow/BuildingBlocks/./install_BBs.sh\r + ```\r +\r +3. 
Get the required Building Block images from the project [B2DROP](https://b2drop.bsc.es/index.php/f/444350):\r +\r + - Required images:\r + - MaBoSS.singularity\r + - meta_analysis.singularity\r + - PhysiCell-COVID19.singularity\r + - single_cell.singularity\r +\r + The path where these files are stored **MUST be exported in the `PERMEDCOE_IMAGES`** environment variable.\r +\r + > :warning: **TIP**: These containers can be built manually as follows (be patient since some of them may take some time):\r + 1. Clone the `BuildingBlocks` repository\r + ```bash\r + git clone https://github.com/PerMedCoE/BuildingBlocks.git\r + ```\r + 2. Build the required Building Block images\r + ```bash\r + cd BuildingBlocks/Resources/images\r + sudo singularity build MaBoSS.sif MaBoSS.singularity\r + sudo singularity build meta_analysis.sif meta_analysis.singularity\r + sudo singularity build PhysiCell-COVID19.sif PhysiCell-COVID19.singularity\r + sudo singularity build single_cell.sif single_cell.singularity\r + cd ../../..\r + ```\r +\r +**If using PyCOMPSs in local PC** (make sure that PyCOMPSs in installed):\r +\r +4. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflows/PyCOMPSs\r + ```\r +\r +5. Execute `./run.sh`\r +\r +**If using Snakemake in local PC** (make sure that SnakeMake is installed):\r +\r +4. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflows/SnakeMake\r + ```\r +\r +5. Execute `./run.sh`\r + > **TIP**: If you want to run the workflow with a different dataset, please update the `run.sh` script setting the `dataset` variable to the new dataset folder and their file names.\r +\r +\r +### MareNostrum 4\r +\r +This section explains the requirements and usage for the COVID19 Workflow in the MareNostrum 4 supercomputer.\r +\r +#### Requirements in MN4\r +\r +- Access to MN4\r +\r +All Building Blocks are already installed in MN4, and the COVID19 Workflow available.\r +\r +#### Usage steps in MN4\r +\r +1. 
Load the `COMPSs`, `Singularity` and `permedcoe` modules\r +\r + ```bash\r + export COMPSS_PYTHON_VERSION=3\r + module load COMPSs/3.1\r + module load singularity/3.5.2\r + module use /apps/modules/modulefiles/tools/COMPSs/libraries\r + module load permedcoe\r + ```\r +\r + > **TIP**: Include the loading into your `${HOME}/.bashrc` file to load it automatically on the session start.\r +\r + This commands will load COMPSs and the permedcoe package which provides all necessary dependencies, as well as the path to the singularity container images (`PERMEDCOE_IMAGES` environment variable) and testing dataset (`COVID19WORKFLOW_DATASET` environment variable).\r +\r +2. Get a copy of the pilot workflow into your desired folder\r +\r + ```bash\r + mkdir desired_folder\r + cd desired_folder\r + get_covid19workflow\r + ```\r +\r +3. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflow/PyCOMPSs\r + ```\r +\r +4. Execute `./launch.sh`\r +\r + This command will launch a job into the job queuing system (SLURM) requesting 2 nodes (one node acting half master and half worker, and other full worker node) for 20 minutes, and is prepared to use the singularity images that are already deployed in MN4 (located into the `PERMEDCOE_IMAGES` environment variable). It uses the dataset located into `../../Resources/data` folder.\r +\r + > :warning: **TIP**: If you want to run the workflow with a different dataset, please edit the `launch.sh` script and define the appropriate dataset path.\r +\r + After the execution, a `results` folder will be available with with COVID19 Workflow results.\r +\r +### Mahti or Puhti\r +\r +This section explains how to run the COVID19 workflow on CSC supercomputers using SnakeMake.\r +\r +#### Requirements\r +\r +- Install snakemake (or check if there is a version installed using `module spider snakemake`)\r +- Install workflow, using the same steps as for the local machine. 
With the exception that containers have to be built elsewhere.\r +\r +#### Steps\r +\r +\r +1. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflow/SnakeMake\r + ```\r +\r +2. Edit `launch.sh` with the correct partition, account, and resource specifications. \r +\r +3. Execute `./launch.sh`\r +\r + > :warning: Snakemake provides a `--cluster` flag, but this functionality should be avoided as it's really not suited for HPC systems.\r +\r +## License\r +\r +[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\r +\r +## Contact\r +\r +\r +\r +This software has been developed for the [PerMedCoE project](https://permedcoe.eu/), funded by the European Commission (EU H2020 [951773](https://cordis.europa.eu/project/id/951773)).\r +\r +![](https://permedcoe.eu/wp-content/uploads/2020/11/logo_1.png "PerMedCoE")\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/481?version=1" ; + schema1:isBasedOn "https://github.com/PerMedCoE/covid-19-workflow.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PerMedCoE Covid19 Pilot workflow (Snakemake)" ; + schema1:sdDatePublished "2024-08-05 10:30:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/481/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3913 ; + schema1:creator ; + schema1:dateCreated "2023-05-23T13:24:53Z" ; + schema1:dateModified "2023-05-23T13:24:53Z" ; + schema1:description """# COVID-19 Multiscale Modelling of the Virus and Patients’ Tissue Workflow\r +\r +## Table of Contents\r +\r +- [COVID-19 Multiscale Modelling of the Virus and Patients’ Tissue Workflow](#covid-19-multiscale-modelling-of-the-virus-and-patients-tissue-workflow)\r + - [Table of Contents](#table-of-contents)\r + - [Description](#description)\r + - [Contents](#contents)\r + - [Building Blocks](#building-blocks)\r + - [Workflows](#workflows)\r + - [Resources](#resources)\r + - [Tests](#tests)\r + - [Instructions](#instructions)\r + - [Local machine](#local-machine)\r + - [Requirements](#requirements)\r + - [Usage steps](#usage-steps)\r + - [MareNostrum 4](#marenostrum-4)\r + - [Requirements in MN4](#requirements-in-mn4)\r + - [Usage steps in MN4](#usage-steps-in-mn4)\r + - [Mahti or Puhti](#mahti-or-puhti)\r + - [Requirements](#requirements)\r + - [Steps](#steps)\r + - [License](#license)\r + - [Contact](#contact)\r +\r +## Description\r +\r +Uses multiscale simulations to predict patient-specific SARS‑CoV‑2 severity subtypes\r +(moderate, severe or control), using single-cell RNA-Seq data, MaBoSS and PhysiBoSS.\r +Boolean models are used to determine the behaviour of individual agents as a function\r +of extracellular conditions and the concentration of different substrates, including\r +the number of virions. Predictions of severity subtypes are based on a meta-analysis of\r +personalised model outputs simulating cellular apoptosis regulation in epithelial cells\r +infected by SARS‑CoV‑2.\r +\r +The workflow uses the following building blocks, described in order of execution:\r +\r +1. High-throughput mutant analysis\r +2. Single-cell processing\r +3. 
Personalise patient\r +4. PhysiBoSS\r +5. Analysis of all simulations\r +\r +For details on individual workflow steps, see the user documentation for each building block.\r +\r +[`GitHub repository`]()\r +\r +\r +## Contents\r +\r +### Building Blocks\r +\r +The ``BuildingBlocks`` folder contains the script to install the\r +Building Blocks used in the COVID-19 Workflow.\r +\r +### Workflows\r +\r +The ``Workflow`` folder contains the workflows implementations.\r +\r +Currently contains the implementation using PyCOMPSs and Snakemake (in progress).\r +\r +### Resources\r +\r +The ``Resources`` folder contains dataset files.\r +\r +### Tests\r +\r +The ``Tests`` folder contains the scripts that run each Building Block\r +used in the workflow for the given small dataset.\r +They can be executed individually for testing purposes.\r +\r +## Instructions\r +\r +### Local machine\r +\r +This section explains the requirements and usage for the COVID19 Workflow in a laptop or desktop computer.\r +\r +#### Requirements\r +\r +- [`permedcoe`](https://github.com/PerMedCoE/permedcoe) package\r +- [PyCOMPSs](https://pycompss.readthedocs.io/en/stable/Sections/00_Quickstart.html) / [Snakemake](https://snakemake.readthedocs.io/en/stable/)\r +- [Singularity](https://sylabs.io/guides/3.0/user-guide/installation.html)\r +\r +#### Usage steps\r +\r +1. Clone this repository:\r +\r + ```bash\r + git clone https://github.com/PerMedCoE/covid-19-workflow.git\r + ```\r +\r +2. Install the Building Blocks required for the COVID19 Workflow:\r +\r + ```bash\r + covid-19-workflow/BuildingBlocks/./install_BBs.sh\r + ```\r +\r +3. 
Get the required Building Block images from the project [B2DROP](https://b2drop.bsc.es/index.php/f/444350):\r +\r + - Required images:\r + - MaBoSS.singularity\r + - meta_analysis.singularity\r + - PhysiCell-COVID19.singularity\r + - single_cell.singularity\r +\r + The path where these files are stored **MUST be exported in the `PERMEDCOE_IMAGES`** environment variable.\r +\r + > :warning: **TIP**: These containers can be built manually as follows (be patient since some of them may take some time):\r + 1. Clone the `BuildingBlocks` repository\r + ```bash\r + git clone https://github.com/PerMedCoE/BuildingBlocks.git\r + ```\r + 2. Build the required Building Block images\r + ```bash\r + cd BuildingBlocks/Resources/images\r + sudo singularity build MaBoSS.sif MaBoSS.singularity\r + sudo singularity build meta_analysis.sif meta_analysis.singularity\r + sudo singularity build PhysiCell-COVID19.sif PhysiCell-COVID19.singularity\r + sudo singularity build single_cell.sif single_cell.singularity\r + cd ../../..\r + ```\r +\r +**If using PyCOMPSs in local PC** (make sure that PyCOMPSs in installed):\r +\r +4. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflows/PyCOMPSs\r + ```\r +\r +5. Execute `./run.sh`\r +\r +**If using Snakemake in local PC** (make sure that SnakeMake is installed):\r +\r +4. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflows/SnakeMake\r + ```\r +\r +5. Execute `./run.sh`\r + > **TIP**: If you want to run the workflow with a different dataset, please update the `run.sh` script setting the `dataset` variable to the new dataset folder and their file names.\r +\r +\r +### MareNostrum 4\r +\r +This section explains the requirements and usage for the COVID19 Workflow in the MareNostrum 4 supercomputer.\r +\r +#### Requirements in MN4\r +\r +- Access to MN4\r +\r +All Building Blocks are already installed in MN4, and the COVID19 Workflow available.\r +\r +#### Usage steps in MN4\r +\r +1. 
Load the `COMPSs`, `Singularity` and `permedcoe` modules\r +\r + ```bash\r + export COMPSS_PYTHON_VERSION=3\r + module load COMPSs/3.1\r + module load singularity/3.5.2\r + module use /apps/modules/modulefiles/tools/COMPSs/libraries\r + module load permedcoe\r + ```\r +\r + > **TIP**: Include the loading into your `${HOME}/.bashrc` file to load it automatically on the session start.\r +\r + This commands will load COMPSs and the permedcoe package which provides all necessary dependencies, as well as the path to the singularity container images (`PERMEDCOE_IMAGES` environment variable) and testing dataset (`COVID19WORKFLOW_DATASET` environment variable).\r +\r +2. Get a copy of the pilot workflow into your desired folder\r +\r + ```bash\r + mkdir desired_folder\r + cd desired_folder\r + get_covid19workflow\r + ```\r +\r +3. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflow/PyCOMPSs\r + ```\r +\r +4. Execute `./launch.sh`\r +\r + This command will launch a job into the job queuing system (SLURM) requesting 2 nodes (one node acting half master and half worker, and other full worker node) for 20 minutes, and is prepared to use the singularity images that are already deployed in MN4 (located into the `PERMEDCOE_IMAGES` environment variable). It uses the dataset located into `../../Resources/data` folder.\r +\r + > :warning: **TIP**: If you want to run the workflow with a different dataset, please edit the `launch.sh` script and define the appropriate dataset path.\r +\r + After the execution, a `results` folder will be available with with COVID19 Workflow results.\r +\r +### Mahti or Puhti\r +\r +This section explains how to run the COVID19 workflow on CSC supercomputers using SnakeMake.\r +\r +#### Requirements\r +\r +- Install snakemake (or check if there is a version installed using `module spider snakemake`)\r +- Install workflow, using the same steps as for the local machine. 
With the exception that containers have to be built elsewhere.\r +\r +#### Steps\r +\r +\r +1. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflow/SnakeMake\r + ```\r +\r +2. Edit `launch.sh` with the correct partition, account, and resource specifications. \r +\r +3. Execute `./launch.sh`\r +\r + > :warning: Snakemake provides a `--cluster` flag, but this functionality should be avoided as it's really not suited for HPC systems.\r +\r +## License\r +\r +[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\r +\r +## Contact\r +\r +\r +\r +This software has been developed for the [PerMedCoE project](https://permedcoe.eu/), funded by the European Commission (EU H2020 [951773](https://cordis.europa.eu/project/id/951773)).\r +\r +![](https://permedcoe.eu/wp-content/uploads/2020/11/logo_1.png "PerMedCoE")\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "PerMedCoE Covid19 Pilot workflow (Snakemake)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/481?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2023-09-04T13:04:29.526383" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + , + ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.10" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-atac-cutandrun" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.10" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-pe" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.10" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/278?version=2" ; + schema1:isBasedOn "https://gitlab.bsc.es/lrodrig1/structuralvariants_poc/-/tree/1.1.3/structuralvariants/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNV_pipeline" ; + schema1:sdDatePublished "2024-08-05 10:31:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/278/ro_crate?version=2" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 8862 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8665 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-14T15:00:04Z" ; + schema1:dateModified "2022-03-14T15:01:57Z" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/278?version=10" ; + schema1:keywords "cancer, CODEX2, ExomeDepth, manta, TransBioNet, variant calling, GRIDSS, structural variants" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CNV_pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/278?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 66395 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/965?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/atacseq" ; + schema1:sdDatePublished "2024-08-05 10:24:18 +0100" ; + schema1:url "https://workflowhub.eu/workflows/965/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5510 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:41Z" ; + schema1:dateModified "2024-06-11T12:54:41Z" ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/965?version=8" ; + schema1:keywords "ATAC-seq, chromatin-accessibiity" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/atacseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/965?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Alignment, assembly RNASEQ reads and annotation of generated transcripts." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/38?version=1" ; + schema1:isBasedOn "https://usegalaxy.eu/u/ambarishk/w/covid-19-unicycler-assembly-and-annotation" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Unicycler assembly and annotation" ; + schema1:sdDatePublished "2024-08-05 10:33:30 +0100" ; + schema1:url "https://workflowhub.eu/workflows/38/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 37964 ; + schema1:dateCreated "2020-06-18T23:07:23Z" ; + schema1:dateModified "2023-02-13T14:06:46Z" ; + schema1:description "Alignment, assembly RNASEQ reads and annotation of generated transcripts." 
; + schema1:image ; + schema1:keywords "Unicycler, Alignment, Assembly, Annotation, RNASEQ, covid-19" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Unicycler assembly and annotation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/38?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 192982 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1025?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/taxprofiler" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/taxprofiler" ; + schema1:sdDatePublished "2024-08-05 10:23:09 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1025/ro_crate?version=8" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15728 ; + schema1:creator , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:19Z" ; + schema1:dateModified "2024-06-11T12:55:19Z" ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1025?version=10" ; + schema1:keywords "Classification, illumina, long-reads, Metagenomics, microbiome, nanopore, pathogen, Profiling, shotgun, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/taxprofiler" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1025?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """microPIPE was developed to automate high-quality complete bacterial genome assembly using Oxford Nanopore Sequencing in combination with Illumina sequencing.\r +\r +To build microPIPE we evaluated the performance of several tools at each step of bacterial genome assembly, including basecalling, assembly, and polishing. Results at each step were validated using the high-quality ST131 Escherichia coli strain EC958 (GenBank: HG941718.1). After appraisal of each step, we selected the best combination of tools to achieve the most consistent and best quality bacterial genome assemblies.\r +\r +The workflow below summarises the different steps of the pipeline (with each selected tool) and the approximate run time (using GPU basecalling, averaged over 12 E. coli isolates sequenced on a R9.4 MinION flow cell). 
Dashed boxes correspond to optional steps in the pipeline.\r +\r +Micropipe has been written in Nextflow and uses Singularity containers. It can use both GPU and CPU resources.\r +\r +For more information please see our publication here: https://bmcgenomics.biomedcentral.com/articles/10.1186/s12864-021-07767-z\r +\r +Infrastructure\\_deployment\\_metadata: Zeus (Pawsey)""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.140.1" ; + schema1:isBasedOn "https://github.com/BeatsonLab-MicrobialGenomics/micropipe" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for microPIPE: a pipeline for high-quality bacterial genome construction using ONT and Illumina sequencing" ; + schema1:sdDatePublished "2024-08-05 10:33:09 +0100" ; + schema1:url "https://workflowhub.eu/workflows/140/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 82123 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 39326 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2021-08-09T00:17:36Z" ; + schema1:dateModified "2023-01-16T13:51:29Z" ; + schema1:description """microPIPE was developed to automate high-quality complete bacterial genome assembly using Oxford Nanopore Sequencing in combination with Illumina sequencing.\r +\r +To build microPIPE we evaluated the performance of several tools at each step of bacterial genome assembly, including basecalling, assembly, and polishing. Results at each step were validated using the high-quality ST131 Escherichia coli strain EC958 (GenBank: HG941718.1). 
After appraisal of each step, we selected the best combination of tools to achieve the most consistent and best quality bacterial genome assemblies.\r +\r +The workflow below summarises the different steps of the pipeline (with each selected tool) and the approximate run time (using GPU basecalling, averaged over 12 E. coli isolates sequenced on a R9.4 MinION flow cell). Dashed boxes correspond to optional steps in the pipeline.\r +\r +Micropipe has been written in Nextflow and uses Singularity containers. It can use both GPU and CPU resources.\r +\r +For more information please see our publication here: https://bmcgenomics.biomedcentral.com/articles/10.1186/s12864-021-07767-z\r +\r +Infrastructure\\_deployment\\_metadata: Zeus (Pawsey)""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "ONT, bacterial-genomics, Assembly, Nextflow, workflow" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "microPIPE: a pipeline for high-quality bacterial genome construction using ONT and Illumina sequencing" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/140?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Simple bacterial assembly and annotation pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/966?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/bacass" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bacass" ; + schema1:sdDatePublished "2024-08-05 10:24:17 +0100" ; + schema1:url "https://workflowhub.eu/workflows/966/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5135 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:42Z" ; + schema1:dateModified "2024-06-11T12:54:42Z" ; + schema1:description "Simple bacterial assembly and annotation pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/966?version=7" ; + schema1:keywords "Assembly, bacterial-genomes, denovo, denovo-assembly, genome-assembly, hybrid-assembly, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bacass" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/966?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.195.4" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_dna_helparms" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Structural DNA helical parameters tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/195/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 74219 ; + schema1:creator , + ; + schema1:dateCreated "2023-07-26T13:28:37Z" ; + schema1:dateModified "2023-07-26T13:32:13Z" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/195?version=4" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Structural DNA helical parameters tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_dna_helparms/master/biobb_wf_dna_helparms/notebooks/biobb_dna_helparms_tutorial.ipynb" ; + schema1:version 4 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# CroMaSt: A workflow for assessing protein domain classification by cross-mapping of structural instances between domain databases and structural alignment\r +\r +CroMaSt (**Cro**ss **Ma**pper of domain **St**ructural instances) is an automated iterative workflow to clarify the assignment of protein domains to a given domain type of interest, based on their 3D structure and by cross-mapping of domain structural instances between domain databases. CroMaSt (for Cross-Mapper of domain Structural instances) will classify all structural instances of a given domain type into 4 different categories (**Core**, **True**, **Domain-like**, and **Failed**). \r +\r +\r +## Requirements\r +1. [Conda](https://docs.conda.io/projects/conda/en/latest/) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html)\r +2. [Kpax](http://kpax.loria.fr/download.php) \r +Download and install conda (or Miniconda) and Kpax by following the instructions from their official site.\r +\r +\r +## Get it running \r +(Considering the requirements are already met)\r +\r +1. Clone the repository and change the directory\r +\r +```\r +git clone https://gitlab.inria.fr/capsid.public_codes/CroMaSt.git\r +cd CroMaSt\r +```\r +\r +2. Create the conda environment for the workflow\r +```\r +conda env create --file yml/environment.yml\r +conda activate CroMaSt\r +```\r +\r +3. Change the path of variables in paramter file\r +```\r +sed -i 's/\\/home\\/hdhondge\\/CroMaSt\\//\\/YOUR\\/PATH\\/TO_CroMaSt\\//g' yml/CroMaSt_input.yml \r +```\r +\r +4. Create the directory to store files from PDB and SIFTS (if not already)\r +```\r +mkdir PDB_files SIFTS\r +```\r +\r +5. Download the source input data\r +```\r +cwl-runner Tools/download_data.cwl yml/download_data.yml\r +```\r +\r +## Basic example\r +\r +### 1. 
First, we will run the workflow for the KH domain with family identifiers `RRM_1` and `RRM` in Pfam and CATH, respectively.\r +Run the workflow -\r +\r +```\r +cwl-runner --parallel --outdir=Results/ CroMaSt.cwl yml/CroMaSt_input.yml\r +```\r +\r +### 2. Once the iteration is complete, check the `new_param.yml` file from the `outputdir` (Results), if there is any family identifier in either `pfam` or `cath`; run the next iteration using following command (Until there is no new families explored by workflow) -\r +\r +```\r +cwl-runner --parallel --outdir=Results/ CroMaSt.cwl Results/new_param.yml\r +```\r + \r +### **Extra:** Start the workflow with multiple families from one or both databases \r +If you would like to start the workflow with multiple families from one or both databases, then simply add a comma in between two family identifiers. \r +```\r +pfam: ['PF00076', 'PF08777']\r +cath: ['3.30.70.330']\r +```\r +\r +- **Pro Tip**: Don't forget to give different path to `--outdir` option while running the workflow multiple times or at least move the results to some other location after first run.\r +\r +## Run the workflow for protein domain of your choice \r +### 1. You can run the workflow for the domain of your choice by simply changing the family identifers in `yml/CroMaSt_input.yml` file.\r +\r +Simply replace the following values of family identifiers (for pfam and cath) with the family identifiers of your choice in `yml/CroMaSt_input.yml` file. \r +```\r +pfam: ['PF00076']\r +cath: ['3.30.70.330']\r +```\r +\r +\r +\r +## Data files used in current version are as follows:\r +**Files in Data directory can be downloaded as follows**:\r +\r +1. File used from Pfam database: [pdbmap.gz](http://ftp.ebi.ac.uk/pub/databases/Pfam/releases/Pfam35.0/pdbmap.gz)\r +\r +2. 
File used from CATH database: [cath-domain-description-file.txt](ftp://orengoftp.biochem.ucl.ac.uk:21/cath/releases/latest-release/cath-classification-data/cath-domain-description-file.txt) \r +\r +3. Obsolete entries from RCSB PDB\r +[obsolete_PDB_entry_ids.txt](https://data.rcsb.org/rest/v1/holdings/removed/entry_ids) \r +\r +\r +CATH Version - 4.3.0 (Ver_Date - 11-Sep-2019) [FTP site](ftp://orengoftp.biochem.ucl.ac.uk/cath/releases/latest-release/cath-classification-data/)\r +Pfam Version - 35.0 (Ver_Date - November-2021) [FTP site](http://ftp.ebi.ac.uk/pub/databases/Pfam/releases/Pfam35.0/)\r +\r +## Reference\r +```\r +Poster - \r +1. Hrishikesh Dhondge, Isaure Chauvot de Beauchêne, Marie-Dominique Devignes. CroMaSt: A workflow for domain family curation through cross-mapping of structural instances between protein domain databases. 21st European Conference on Computational Biology, Sep 2022, Sitges, Spain. ⟨hal-03789541⟩\r +\r +```\r +\r +## Acknowledgements\r +This project has received funding from the Marie Skłodowska-Curie Innovative Training Network (MSCA-ITN) RNAct supported by European Union’s Horizon 2020 research and innovation programme under granta greement No 813239.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.390.2" ; + schema1:isBasedOn "https://github.com/HrishiDhondge/CroMaSt.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CroMaSt: A workflow for assessing protein domain classification by cross-mapping of structural instances between domain databases and structural alignment" ; + schema1:sdDatePublished "2024-08-05 10:30:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/390/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 21410 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-06-20T12:06:49Z" ; + schema1:dateModified "2023-06-20T12:08:47Z" ; + schema1:description """# CroMaSt: A workflow for assessing protein domain classification by cross-mapping of structural instances between domain databases and structural alignment\r +\r +CroMaSt (**Cro**ss **Ma**pper of domain **St**ructural instances) is an automated iterative workflow to clarify the assignment of protein domains to a given domain type of interest, based on their 3D structure and by cross-mapping of domain structural instances between domain databases. CroMaSt (for Cross-Mapper of domain Structural instances) will classify all structural instances of a given domain type into 4 different categories (**Core**, **True**, **Domain-like**, and **Failed**). \r +\r +\r +## Requirements\r +1. [Conda](https://docs.conda.io/projects/conda/en/latest/) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html)\r +2. [Kpax](http://kpax.loria.fr/download.php) \r +Download and install conda (or Miniconda) and Kpax by following the instructions from their official site.\r +\r +\r +## Get it running \r +(Considering the requirements are already met)\r +\r +1. Clone the repository and change the directory\r +\r +```\r +git clone https://gitlab.inria.fr/capsid.public_codes/CroMaSt.git\r +cd CroMaSt\r +```\r +\r +2. Create the conda environment for the workflow\r +```\r +conda env create --file yml/environment.yml\r +conda activate CroMaSt\r +```\r +\r +3. Change the path of variables in paramter file\r +```\r +sed -i 's/\\/home\\/hdhondge\\/CroMaSt\\//\\/YOUR\\/PATH\\/TO_CroMaSt\\//g' yml/CroMaSt_input.yml \r +```\r +\r +4. Create the directory to store files from PDB and SIFTS (if not already)\r +```\r +mkdir PDB_files SIFTS\r +```\r +\r +5. 
Download the source input data\r +```\r +cwl-runner Tools/download_data.cwl yml/download_data.yml\r +```\r +\r +## Basic example\r +\r +### 1. First, we will run the workflow for the KH domain with family identifiers `RRM_1` and `RRM` in Pfam and CATH, respectively.\r +Run the workflow -\r +\r +```\r +cwl-runner --parallel --outdir=Results/ CroMaSt.cwl yml/CroMaSt_input.yml\r +```\r +\r +### 2. Once the iteration is complete, check the `new_param.yml` file from the `outputdir` (Results), if there is any family identifier in either `pfam` or `cath`; run the next iteration using following command (Until there is no new families explored by workflow) -\r +\r +```\r +cwl-runner --parallel --outdir=Results/ CroMaSt.cwl Results/new_param.yml\r +```\r + \r +### **Extra:** Start the workflow with multiple families from one or both databases \r +If you would like to start the workflow with multiple families from one or both databases, then simply add a comma in between two family identifiers. \r +```\r +pfam: ['PF00076', 'PF08777']\r +cath: ['3.30.70.330']\r +```\r +\r +- **Pro Tip**: Don't forget to give different path to `--outdir` option while running the workflow multiple times or at least move the results to some other location after first run.\r +\r +## Run the workflow for protein domain of your choice \r +### 1. You can run the workflow for the domain of your choice by simply changing the family identifers in `yml/CroMaSt_input.yml` file.\r +\r +Simply replace the following values of family identifiers (for pfam and cath) with the family identifiers of your choice in `yml/CroMaSt_input.yml` file. \r +```\r +pfam: ['PF00076']\r +cath: ['3.30.70.330']\r +```\r +\r +\r +\r +## Data files used in current version are as follows:\r +**Files in Data directory can be downloaded as follows**:\r +\r +1. File used from Pfam database: [pdbmap.gz](http://ftp.ebi.ac.uk/pub/databases/Pfam/releases/Pfam35.0/pdbmap.gz)\r +\r +2. 
File used from CATH database: [cath-domain-description-file.txt](ftp://orengoftp.biochem.ucl.ac.uk:21/cath/releases/latest-release/cath-classification-data/cath-domain-description-file.txt) \r +\r +3. Obsolete entries from RCSB PDB\r +[obsolete_PDB_entry_ids.txt](https://data.rcsb.org/rest/v1/holdings/removed/entry_ids) \r +\r +\r +CATH Version - 4.3.0 (Ver_Date - 11-Sep-2019) [FTP site](ftp://orengoftp.biochem.ucl.ac.uk/cath/releases/latest-release/cath-classification-data/)\r +Pfam Version - 35.0 (Ver_Date - November-2021) [FTP site](http://ftp.ebi.ac.uk/pub/databases/Pfam/releases/Pfam35.0/)\r +\r +## Reference\r +```\r +Poster - \r +1. Hrishikesh Dhondge, Isaure Chauvot de Beauchêne, Marie-Dominique Devignes. CroMaSt: A workflow for domain family curation through cross-mapping of structural instances between protein domain databases. 21st European Conference on Computational Biology, Sep 2022, Sitges, Spain. ⟨hal-03789541⟩\r +\r +```\r +\r +## Acknowledgements\r +This project has received funding from the Marie Skłodowska-Curie Innovative Training Network (MSCA-ITN) RNAct supported by European Union’s Horizon 2020 research and innovation programme under granta greement No 813239.\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/390?version=1" ; + schema1:keywords "Pfam, CATH, Protein domains, data integration" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "CroMaSt: A workflow for assessing protein domain classification by cross-mapping of structural instances between domain databases and structural alignment" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/390?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 106918 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=19" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-08-05 10:23:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=19" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8900 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:04Z" ; + schema1:dateModified "2024-06-11T12:55:04Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=19" ; + schema1:version 19 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-09T11:15:18.484186" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/kmer-profiling-hifi-trio-VGP2" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "kmer-profiling-hifi-trio-VGP2/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.17" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-08-05 10:23:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3635 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:58Z" ; + schema1:dateModified "2024-06-11T12:54:58Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. 
The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. 
With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.827.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_pmx_tutorial" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Docker Mutation Free Energy Calculations" ; + schema1:sdDatePublished "2024-08-05 10:24:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/827/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 766 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-22T11:05:41Z" ; + schema1:dateModified "2024-05-22T13:43:56Z" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. 
With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Docker Mutation Free Energy Calculations" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_pmx_tutorial/docker/Dockerfile" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Optional workflow to purge duplicates from the contig assembly.\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/237?version=1" ; + schema1:isBasedOn "https://github.com/AustralianBioCommons/Purge-duplicates-from-hifiasm-assembly" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Purge duplicates from hifiasm assembly v1.0 (HiFi genome assembly stage 3)" ; + schema1:sdDatePublished "2024-08-05 10:31:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/237/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 23345 ; + schema1:creator ; + schema1:dateCreated "2021-11-15T01:39:22Z" ; + schema1:dateModified "2022-10-17T02:51:28Z" ; + schema1:description """Optional workflow to purge duplicates from the contig assembly.\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/237?version=1" ; + schema1:isPartOf , + ; + schema1:keywords "Assembly, purge_dups, HiFi" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Purge duplicates from hifiasm assembly v1.0 (HiFi genome assembly stage 3)" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/237?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 118673 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """This Workflow takes a dataset collection of single-cell ATAC-seq fragments and performs:\r +- preprocessing\r +- filtering\r +- concatenation\r +- dimension reduction\r +- batch correction (with Harmony and optionally Scanorama and MNC-correct)\r +- leiden clustering\r +\r +* new SnapATAC2 version: from 2.5.3 to 2.6.4""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1078?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Multisample Batch Correction with SnapATAC2 and Harmony" ; + schema1:sdDatePublished "2024-08-05 10:22:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1078/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 68999 ; + schema1:dateCreated "2024-07-17T16:09:58Z" ; + schema1:dateModified "2024-08-02T18:12:59Z" ; + schema1:description """This Workflow takes a dataset collection of single-cell ATAC-seq fragments and performs:\r +- preprocessing\r +- filtering\r +- concatenation\r +- dimension reduction\r +- batch correction (with Harmony and optionally Scanorama and MNC-correct)\r +- leiden clustering\r +\r +* new SnapATAC2 version: from 2.5.3 to 2.6.4""" ; + schema1:keywords "scATAC-seq, single-cell" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Multisample Batch Correction with SnapATAC2 and Harmony" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1078?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-06-10T13:19:59.881478" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + , + ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-atac-cutandrun" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "1.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-pe" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "1.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + schema1:datePublished "2022-11-30T12:34:43.169128" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "rnaseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.3" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Call somatic, germline and LoH event variants from PE Illumina sequencing data obtained from matched pairs of tumor and normal tissue samples.\r +\r +This workflow can be used with whole-genome and whole-exome sequencing data as input. 
For WES data, parts of the analysis can be restricted to the exome capture kits target regions by providing the optional "Regions of Interest" bed dataset.\r +\r +The current version uses bwa-mem for read mapping and varscan somatic for variant calling and somatic status classification.""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.628.1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Variant calling from matched tumor/normal sample pair (hg38 version)" ; + schema1:sdDatePublished "2024-08-05 10:27:30 +0100" ; + schema1:url "https://workflowhub.eu/workflows/628/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 129767 ; + schema1:creator ; + schema1:dateCreated "2023-10-27T13:48:44Z" ; + schema1:dateModified "2023-10-27T13:50:45Z" ; + schema1:description """Call somatic, germline and LoH event variants from PE Illumina sequencing data obtained from matched pairs of tumor and normal tissue samples.\r +\r +This workflow can be used with whole-genome and whole-exome sequencing data as input. 
For WES data, parts of the analysis can be restricted to the exome capture kits target regions by providing the optional "Regions of Interest" bed dataset.\r +\r +The current version uses bwa-mem for read mapping and varscan somatic for variant calling and somatic status classification.""" ; + schema1:keywords "EOSC4Cancer" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Variant calling from matched tumor/normal sample pair (hg38 version)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://usegalaxy.eu/api/workflows/af30010868c97316/download?format=json-download" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """\r +# Github: https://github.com/Lcornet/GENERA\r +\r +# BCCM GEN-ERA tools repository\r +\r +Please visit the wiki for tutorials and access to the tools:\r +https://github.com/Lcornet/GENERA/wiki \r +\r +# NEWS\r +Mantis is now installed in a singularity container for the Metabolic workflow (install is no longer necessary). \r +\r +# Information about the GEN-ERA project\r +Please visit \r +https://bccm.belspo.be/content/bccm-collections-genomic-era \r +\r +# Publications\r +1. ToRQuEMaDA: tool for retrieving queried Eubacteria, metadata and dereplicating assemblies. \r + Léonard, R. R., Leleu, M., Vlierberghe, M. V., Cornet, L., Kerff, F., and Baurain, D. (2021). \r + PeerJ 9, e11348. doi:10.7717/peerj.11348. \r + https://peerj.com/articles/11348/ \r +2. The taxonomy of the Trichophyton rubrum complex: a phylogenomic approach. \r + Cornet, L., D’hooge, E., Magain, N., Stubbe, D., Packeu, A., Baurain, D., and Becker P. (2021). \r + Microbial Genomics 7, 000707. doi:10.1099/mgen.0.000707. \r + https://www.microbiologyresearch.org/content/journal/mgen/10.1099/mgen.0.000707 \r +3. 
ORPER: A Workflow for Constrained SSU rRNA Phylogenies. \r + Cornet, L., Ahn, A.-C., Wilmotte, A., and Baurain, D. (2021). \r + Genes 12, 1741. doi:10.3390/genes12111741. \r + https://www.mdpi.com/2073-4425/12/11/1741/html \r +4. AMAW: automated gene annotation for non-model eukaryotic genomes. \r + Meunier, L., Baurain, D., Cornet, L. (2021) \r + https://www.biorxiv.org/content/10.1101/2021.12.07.471566v1 \r +5. Phylogenomic analyses of Snodgrassella isolates from honeybees and bumblebees reveals taxonomic and functional diversity. \r + Cornet, L., Cleenwerck, I., Praet, J., Leonard, R., Vereecken, N.J., Michez, D., Smagghe, G., Baurain, D., Vandamme, P. (2021) \r + https://www.biorxiv.org/content/10.1101/2021.12.10.472130v1 \r +6. Contamination detection in genomic data: more is not enough. \r + Cornet, L & Baurain, D (2022) \r + Genome Biology. 2022;23:60. \r + https://genomebiology.biomedcentral.com/articles/10.1186/s13059-022-02619-9 \r +7. The GEN-ERA toolbox: unified and reproducible workflows for research in microbial genomics \r + Cornet, L., Durieu, B., Baert, F., D’hooge, E., Colignon, D., Meunier, L., Lupo, V., Cleenwerck I.,\r + Daniel, HM., Rigouts, L., Sirjacobs, D., Declerck, D., Vandamme, P., Wilmotte, A., Baurain, D., Becker P (2022). \r + https://www.biorxiv.org/content/10.1101/2022.10.20.513017v1 \r +8. CRitical Assessment of genomic COntamination detection at several Taxonomic ranks (CRACOT) \r + Cornet, L., Lupo, V., Declerck, S., Baurain, D. (2022). 
\r + https://www.biorxiv.org/content/10.1101/2022.11.14.516442v1 \r +\r +# Copyright and License\r +\r +This softwares is copyright (c) 2017-2021 by University of Liege / Sciensano / BCCM collection by Luc CORNET\r +This is free softwares; you can redistribute it and/or modify.\r +\r +![BCCM](https://github.com/Lcornet/GENERA/blob/main/images/GENERA-logo.png) \r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.416.1" ; + schema1:isBasedOn "https://github.com/Lcornet/GENERA" ; + schema1:license "LGPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for GEN-ERA toolbox" ; + schema1:sdDatePublished "2024-08-05 10:31:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/416/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17141 ; + schema1:creator ; + schema1:dateCreated "2023-01-13T14:30:04Z" ; + schema1:dateModified "2023-03-14T14:35:27Z" ; + schema1:description """\r +# Github: https://github.com/Lcornet/GENERA\r +\r +# BCCM GEN-ERA tools repository\r +\r +Please visit the wiki for tutorials and access to the tools:\r +https://github.com/Lcornet/GENERA/wiki \r +\r +# NEWS\r +Mantis is now installed in a singularity container for the Metabolic workflow (install is no longer necessary). \r +\r +# Information about the GEN-ERA project\r +Please visit \r +https://bccm.belspo.be/content/bccm-collections-genomic-era \r +\r +# Publications\r +1. ToRQuEMaDA: tool for retrieving queried Eubacteria, metadata and dereplicating assemblies. \r + Léonard, R. R., Leleu, M., Vlierberghe, M. V., Cornet, L., Kerff, F., and Baurain, D. (2021). \r + PeerJ 9, e11348. doi:10.7717/peerj.11348. \r + https://peerj.com/articles/11348/ \r +2. The taxonomy of the Trichophyton rubrum complex: a phylogenomic approach. 
\r + Cornet, L., D’hooge, E., Magain, N., Stubbe, D., Packeu, A., Baurain, D., and Becker P. (2021). \r + Microbial Genomics 7, 000707. doi:10.1099/mgen.0.000707. \r + https://www.microbiologyresearch.org/content/journal/mgen/10.1099/mgen.0.000707 \r +3. ORPER: A Workflow for Constrained SSU rRNA Phylogenies. \r + Cornet, L., Ahn, A.-C., Wilmotte, A., and Baurain, D. (2021). \r + Genes 12, 1741. doi:10.3390/genes12111741. \r + https://www.mdpi.com/2073-4425/12/11/1741/html \r +4. AMAW: automated gene annotation for non-model eukaryotic genomes. \r + Meunier, L., Baurain, D., Cornet, L. (2021) \r + https://www.biorxiv.org/content/10.1101/2021.12.07.471566v1 \r +5. Phylogenomic analyses of Snodgrassella isolates from honeybees and bumblebees reveals taxonomic and functional diversity. \r + Cornet, L., Cleenwerck, I., Praet, J., Leonard, R., Vereecken, N.J., Michez, D., Smagghe, G., Baurain, D., Vandamme, P. (2021) \r + https://www.biorxiv.org/content/10.1101/2021.12.10.472130v1 \r +6. Contamination detection in genomic data: more is not enough. \r + Cornet, L & Baurain, D (2022) \r + Genome Biology. 2022;23:60. \r + https://genomebiology.biomedcentral.com/articles/10.1186/s13059-022-02619-9 \r +7. The GEN-ERA toolbox: unified and reproducible workflows for research in microbial genomics \r + Cornet, L., Durieu, B., Baert, F., D’hooge, E., Colignon, D., Meunier, L., Lupo, V., Cleenwerck I.,\r + Daniel, HM., Rigouts, L., Sirjacobs, D., Declerck, D., Vandamme, P., Wilmotte, A., Baurain, D., Becker P (2022). \r + https://www.biorxiv.org/content/10.1101/2022.10.20.513017v1 \r +8. CRitical Assessment of genomic COntamination detection at several Taxonomic ranks (CRACOT) \r + Cornet, L., Lupo, V., Declerck, S., Baurain, D. (2022). 
\r + https://www.biorxiv.org/content/10.1101/2022.11.14.516442v1 \r +\r +# Copyright and License\r +\r +This softwares is copyright (c) 2017-2021 by University of Liege / Sciensano / BCCM collection by Luc CORNET\r +This is free softwares; you can redistribute it and/or modify.\r +\r +![BCCM](https://github.com/Lcornet/GENERA/blob/main/images/GENERA-logo.png) \r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/LGPL-3.0" ; + schema1:name "GEN-ERA toolbox" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/416?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A workflow to simulate reads" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1015?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/readsimulator" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/readsimulator" ; + schema1:sdDatePublished "2024-08-05 10:23:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1015/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11824 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:09Z" ; + schema1:dateModified "2024-06-11T12:55:09Z" ; + schema1:description "A workflow to simulate reads" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1015?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/readsimulator" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1015?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# Galaxy Workflow Documentation: MS Finder Pipeline\r +\r +This document outlines a MSFinder Galaxy workflow designed for peak annotation. The workflow consists of several steps aimed at preprocessing MS data, filtering, enhancing, and running MSFinder.\r +\r +## Step 1: Data Collection and Preprocessing\r +Collect if the inchi and smiles are missing from the dataset, and subsequently filter out the spectra which are missing inchi and smiles.\r +\r +### 1.1 MSMetaEnhancer: Collect InChi, Isomeric_smiles, and Nominal_mass\r +- Utilizes MSMetaEnhancer to collect InChi and Isomeric_smiles using PubChem and IDSM databases.\r +- Utilizes MSMetaEnhancer to collect MW using RDkit (For GOLM).\r +\r +### 1.2 replace key\r +- replace isomeric_smiles key to smiles using replace text tool\r +- replace MW key to parent_mass using replace text tool (For GOLM)\r +\r +### 1.3 Matchms Filtering\r +- Filters out invalid SMILES and InChi from the dataset using Matchms filtering.\r +\r +## Step 2: Complex Removal and Subsetting Dataset\r +Removes coordination complexes from the dataset.\r +\r +### 2.1 Remove Complexes and Subset Data\r +- Removes complexes from the dataset.\r +- Exports metadata using Matchms metadata export, cuts the SMILES column, removes complexes using Rem_Complex tool, and updates the dataset using Matchms subsetting.\r +\r +## Step 3: Data Key Manipulation\r +Add missing metadata required by the MSFinder for annotation.\r +\r +### 3.1 Matchms Remove Key\r +- Removes existing keys such as adduct, charge, and ionmode from the dataset.\r +\r +### 3.2 Matchms Add Key\r +- Adds necessary keys like charge, ionmode, and adduct to the dataset.\r +\r +### 3.3 Matchms Filtering\r +- Derives precursor m/z using parent mass and adduct information using matchms filtering.\r +\r +### 3.4 Matchms Convert\r +- 
Converts the dataset to Riken format for compatibility with MSFinder using matchms convert.\r +\r +## Step 4: Peak Annotation\r +### 4.1 Recetox-MSFinder\r +- Executes MSFinder with a 0.5 Da tolerance for both MS1 and MS2, including all element checks and an extended range for peak annotation.\r +\r +## Step 5: Error Handling and Refinement\r +Check the MSFinder output to see if the output is the results or the log file. If the output is log file remove the smile from the dataset using matchms subsetting tool and rerun MSFinder.\r +\r +### 5.1 Error Handling\r +- Handles errors in peak annotation by removing SMILES that are not accepted by MSFinder.\r +- Reruns MSFinder after error correction or with different parameter (if applicable).\r +\r +## Step 6: High-res Annotation\r +### 6.1 High-Res Peak Overwriting\r +- Utilizes the Use_Theoretical_mz_Annotations tool to Overwrite experimentally measured mz values for peaks with theoretical values from peak comments.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.888.1" ; + schema1:isBasedOn "https://github.com/RECETOX/workflow-testing/blob/main/msfinder_workflow/Galaxy_Workflow_MsFinder_Workflow_GOLM_V2.ga" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Theoretical fragment substructure generation and in silico mass spectral library high-resolution upcycling workflow" ; + schema1:sdDatePublished "2024-08-05 10:23:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/888/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 47532 ; + schema1:creator , + , + , + , + , + , + ; + schema1:dateCreated "2024-05-20T10:05:05Z" ; + schema1:dateModified "2024-06-06T09:58:39Z" ; + schema1:description """# Galaxy Workflow Documentation: MS Finder Pipeline\r +\r +This document outlines a MSFinder Galaxy workflow designed for peak annotation. The workflow consists of several steps aimed at preprocessing MS data, filtering, enhancing, and running MSFinder.\r +\r +## Step 1: Data Collection and Preprocessing\r +Collect if the inchi and smiles are missing from the dataset, and subsequently filter out the spectra which are missing inchi and smiles.\r +\r +### 1.1 MSMetaEnhancer: Collect InChi, Isomeric_smiles, and Nominal_mass\r +- Utilizes MSMetaEnhancer to collect InChi and Isomeric_smiles using PubChem and IDSM databases.\r +- Utilizes MSMetaEnhancer to collect MW using RDkit (For GOLM).\r +\r +### 1.2 replace key\r +- replace isomeric_smiles key to smiles using replace text tool\r +- replace MW key to parent_mass using replace text tool (For GOLM)\r +\r +### 1.3 Matchms Filtering\r +- Filters out invalid SMILES and InChi from the dataset using Matchms filtering.\r +\r +## Step 2: Complex Removal and Subsetting Dataset\r +Removes coordination complexes from the dataset.\r +\r +### 2.1 Remove Complexes and Subset Data\r +- Removes complexes from the dataset.\r +- Exports metadata using Matchms metadata export, cuts the SMILES column, removes complexes using Rem_Complex tool, and updates the dataset using Matchms subsetting.\r +\r +## Step 3: Data Key Manipulation\r +Add missing metadata required by the MSFinder for annotation.\r +\r +### 3.1 Matchms Remove Key\r +- Removes existing keys such as adduct, charge, and ionmode from the dataset.\r +\r +### 3.2 Matchms Add Key\r +- Adds necessary keys like charge, ionmode, and adduct to 
the dataset.\r +\r +### 3.3 Matchms Filtering\r +- Derives precursor m/z using parent mass and adduct information using matchms filtering.\r +\r +### 3.4 Matchms Convert\r +- Converts the dataset to Riken format for compatibility with MSFinder using matchms convert.\r +\r +## Step 4: Peak Annotation\r +### 4.1 Recetox-MSFinder\r +- Executes MSFinder with a 0.5 Da tolerance for both MS1 and MS2, including all element checks and an extended range for peak annotation.\r +\r +## Step 5: Error Handling and Refinement\r +Check the MSFinder output to see if the output is the results or the log file. If the output is log file remove the smile from the dataset using matchms subsetting tool and rerun MSFinder.\r +\r +### 5.1 Error Handling\r +- Handles errors in peak annotation by removing SMILES that are not accepted by MSFinder.\r +- Reruns MSFinder after error correction or with different parameter (if applicable).\r +\r +## Step 6: High-res Annotation\r +### 6.1 High-Res Peak Overwriting\r +- Utilizes the Use_Theoretical_mz_Annotations tool to Overwrite experimentally measured mz values for peaks with theoretical values from peak comments.\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/888?version=1" ; + schema1:keywords "Bioinformatics, Cheminformatics, Metabolomics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Theoretical fragment substructure generation and in silico mass spectral library high-resolution upcycling workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/888?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/644?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Workflow 2: Sciensano" ; + schema1:sdDatePublished "2024-08-05 10:27:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/644/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2664 ; + schema1:dateCreated "2023-11-07T17:29:22Z" ; + schema1:dateModified "2023-11-07T18:20:09Z" ; + schema1:description "" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Workflow 2: Sciensano" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/644?version=1" ; + schema1:version 1 ; + ns1:output , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Simple bacterial assembly and annotation pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/966?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/bacass" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bacass" ; + schema1:sdDatePublished "2024-08-05 10:24:17 +0100" ; + schema1:url "https://workflowhub.eu/workflows/966/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4381 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:42Z" ; + schema1:dateModified "2024-06-11T12:54:42Z" ; + schema1:description "Simple bacterial assembly and annotation pipeline." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/966?version=7" ; + schema1:keywords "Assembly, bacterial-genomes, denovo, denovo-assembly, genome-assembly, hybrid-assembly, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bacass" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/966?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Rare disease researchers workflow is that they submit their raw data (fastq), run the mapping and variant calling RD-Connect pipeline and obtain unannotated gvcf files to further submit to the RD-Connect GPAP or analyse on their own.\r +\r +This demonstrator focuses on the variant calling pipeline. The raw genomic data is processed using the RD-Connect pipeline ([Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/27604516)) running on the standards (GA4GH) compliant, interoperable container orchestration platform.\r +\r +This demonstrator will be aligned with the current implementation study on [Development of Architecture for Software Containers at ELIXIR and its use by EXCELERATE use-case communities](docs/Appendix%201%20-%20Project%20Plan%202018-biocontainers%2020171117.pdf) \r +\r +For this implementation, different steps are required:\r +\r +1. Adapt the pipeline to CWL and dockerise elements \r +2. Align with IS efforts on software containers to package the different components (Nextflow) \r +3. Submit trio of Illumina NA12878 Platinum Genome or Exome to the GA4GH platform cloud (by Aspera or ftp server)\r +4. Run the RD-Connect pipeline on the container platform\r +5. Return corresponding gvcf files\r +6. OPTIONAL: annotate and update to RD-Connect playground instance\r +\r +N.B: The demonstrator might have some manual steps, which will not be in production. 
\r +\r +## RD-Connect pipeline\r +\r +Detailed information about the RD-Connect pipeline can be found in [Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/?term=27604516)\r +\r +![alt text](https://raw.githubusercontent.com/inab/Wetlab2Variations/eosc-life/docs/RD-Connect_pipeline.jpg)\r +\r +## The applications\r +\r +**1\\. Name of the application: Adaptor removal**\r +Function: remove sequencing adaptors \r +Container (readiness status, location, version): [cutadapt (v.1.18)](https://hub.docker.com/r/cnag/cutadapt) \r +Required resources in cores and RAM: current container size 169MB \r +Input data (amount, format, directory..): raw fastq \r +Output data: paired fastq without adaptors \r +\r +**2\\. Name of the application: Mapping and bam sorting**\r +Function: align data to reference genome \r +Container : [bwa-mem (v.0.7.17)](https://hub.docker.com/r/cnag/bwa) / [Sambamba (v. 0.6.8 )](https://hub.docker.com/r/cnag/sambamba)(or samtools) \r +Resources :current container size 111MB / 32MB \r +Input data: paired fastq without adaptors \r +Output data: sorted bam \r +\r +**3\\. Name of the application: MarkDuplicates** \r +Function: Mark (and remove) duplicates \r +Container: [Picard (v.2.18.25)](https://hub.docker.com/r/cnag/picard)\r +Resources: current container size 261MB \r +Input data:sorted bam \r +Output data: Sorted bam with marked (or removed) duplicates \r +\r +**4\\. Name of the application: Base quality recalibration (BQSR)** \r +Function: Base quality recalibration \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data: Sorted bam with marked (or removed) duplicates \r +Output data: Sorted bam with marked duplicates & base quality recalculated \r +\r +**5\\. 
Name of the application: Variant calling** \r +Function: variant calling \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data:Sorted bam with marked duplicates & base quality recalculated \r +Output data: unannotated gvcf per sample \r +\r +**6\\. (OPTIONAL)Name of the application: Quality of the fastq** \r +Function: report on the sequencing quality \r +Container: [fastqc 0.11.8](https://hub.docker.com/r/cnag/fastqc)\r +Resources: current container size 173MB \r +Input data: raw fastq \r +Output data: QC report \r +\r +## Licensing\r +\r +GATK declares that archived packages are made available for free to academic researchers under a limited license for non-commercial use. If you need to use one of these packages for commercial use. https://software.broadinstitute.org/gatk/download/archive """ ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/106?version=2" ; + schema1:isBasedOn "https://github.com/inab/Wetlab2Variations/tree/eosc-life/nextflow" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for VariantCaller_GATK3.6" ; + schema1:sdDatePublished "2024-08-05 10:33:15 +0100" ; + schema1:url "https://workflowhub.eu/workflows/106/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 22429 ; + schema1:creator ; + schema1:dateCreated "2021-05-21T08:43:08Z" ; + schema1:dateModified "2021-05-21T08:43:08Z" ; + schema1:description """Rare disease researchers workflow is that they submit their raw data (fastq), run the mapping and variant calling RD-Connect pipeline and obtain unannotated gvcf files to further submit to the RD-Connect GPAP or analyse on their own.\r +\r +This demonstrator focuses on the variant calling pipeline. 
The raw genomic data is processed using the RD-Connect pipeline ([Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/27604516)) running on the standards (GA4GH) compliant, interoperable container orchestration platform.\r +\r +This demonstrator will be aligned with the current implementation study on [Development of Architecture for Software Containers at ELIXIR and its use by EXCELERATE use-case communities](docs/Appendix%201%20-%20Project%20Plan%202018-biocontainers%2020171117.pdf) \r +\r +For this implementation, different steps are required:\r +\r +1. Adapt the pipeline to CWL and dockerise elements \r +2. Align with IS efforts on software containers to package the different components (Nextflow) \r +3. Submit trio of Illumina NA12878 Platinum Genome or Exome to the GA4GH platform cloud (by Aspera or ftp server)\r +4. Run the RD-Connect pipeline on the container platform\r +5. Return corresponding gvcf files\r +6. OPTIONAL: annotate and update to RD-Connect playground instance\r +\r +N.B: The demonstrator might have some manual steps, which will not be in production. \r +\r +## RD-Connect pipeline\r +\r +Detailed information about the RD-Connect pipeline can be found in [Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/?term=27604516)\r +\r +![alt text](https://raw.githubusercontent.com/inab/Wetlab2Variations/eosc-life/docs/RD-Connect_pipeline.jpg)\r +\r +## The applications\r +\r +**1\\. Name of the application: Adaptor removal**\r +Function: remove sequencing adaptors \r +Container (readiness status, location, version): [cutadapt (v.1.18)](https://hub.docker.com/r/cnag/cutadapt) \r +Required resources in cores and RAM: current container size 169MB \r +Input data (amount, format, directory..): raw fastq \r +Output data: paired fastq without adaptors \r +\r +**2\\. Name of the application: Mapping and bam sorting**\r +Function: align data to reference genome \r +Container : [bwa-mem (v.0.7.17)](https://hub.docker.com/r/cnag/bwa) / [Sambamba (v. 
0.6.8 )](https://hub.docker.com/r/cnag/sambamba)(or samtools) \r +Resources :current container size 111MB / 32MB \r +Input data: paired fastq without adaptors \r +Output data: sorted bam \r +\r +**3\\. Name of the application: MarkDuplicates** \r +Function: Mark (and remove) duplicates \r +Container: [Picard (v.2.18.25)](https://hub.docker.com/r/cnag/picard)\r +Resources: current container size 261MB \r +Input data:sorted bam \r +Output data: Sorted bam with marked (or removed) duplicates \r +\r +**4\\. Name of the application: Base quality recalibration (BQSR)** \r +Function: Base quality recalibration \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data: Sorted bam with marked (or removed) duplicates \r +Output data: Sorted bam with marked duplicates & base quality recalculated \r +\r +**5\\. Name of the application: Variant calling** \r +Function: variant calling \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data:Sorted bam with marked duplicates & base quality recalculated \r +Output data: unannotated gvcf per sample \r +\r +**6\\. (OPTIONAL)Name of the application: Quality of the fastq** \r +Function: report on the sequencing quality \r +Container: [fastqc 0.11.8](https://hub.docker.com/r/cnag/fastqc)\r +Resources: current container size 173MB \r +Input data: raw fastq \r +Output data: QC report \r +\r +## Licensing\r +\r +GATK declares that archived packages are made available for free to academic researchers under a limited license for non-commercial use. If you need to use one of these packages for commercial use. 
https://software.broadinstitute.org/gatk/download/archive """ ; + schema1:isBasedOn "https://workflowhub.eu/workflows/106?version=2" ; + schema1:keywords "Nextflow, variant_calling" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "VariantCaller_GATK3.6" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/106?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + schema1:datePublished "2024-07-01T18:43:44.016669" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Performs Long Read assembly using PacBio data and Hifiasm. Part of VGP assembly pipeline. This workflow generate a phased assembly." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/318?version=1" ; + schema1:isBasedOn "https://github.com/Delphine-L/iwc/tree/VGP/workflows/VGP-assembly-v2" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for VGP HiFi phased assembly with hifiasm and HiC data" ; + schema1:sdDatePublished "2024-08-05 10:32:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/318/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 11731 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 51495 ; + schema1:creator ; + schema1:dateCreated "2022-04-05T21:55:13Z" ; + schema1:dateModified "2023-01-16T13:59:26Z" ; + schema1:description "Performs Long Read assembly using PacBio data and Hifiasm. Part of VGP assembly pipeline. This workflow generate a phased assembly." ; + schema1:keywords "vgp, Assembly, Galaxy" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "VGP HiFi phased assembly with hifiasm and HiC data" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/318?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.415.1" ; + schema1:isBasedOn "https://renkulab.io/gitlab/dsavchenko/gw-backend/-/raw/master/notebooks/conesearch.ipynb" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Gravitational Wave source Cone Search" ; + schema1:sdDatePublished "2024-08-05 10:31:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/415/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1406 ; + schema1:creator ; + schema1:dateCreated "2023-01-11T12:22:37Z" ; + schema1:dateModified "2023-01-16T14:05:00Z" ; + schema1:description "" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "astronomy, Gravitational Waves, FAIR workflows" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Gravitational Wave source Cone Search" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/415?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + ; + ns1:output , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 4924 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Alternative splicing analysis using RNA-seq." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1018?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/rnasplice" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnasplice" ; + schema1:sdDatePublished "2024-08-05 10:23:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1018/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14104 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:14Z" ; + schema1:dateModified "2024-06-11T12:55:14Z" ; + schema1:description "Alternative splicing analysis using RNA-seq." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/1018?version=4" ; + schema1:keywords "alternative-splicing, rna, rna-seq, splicing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnasplice" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1018?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1021?version=11" ; + schema1:isBasedOn "https://github.com/nf-core/scrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/scrnaseq" ; + schema1:sdDatePublished "2024-08-05 10:23:15 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1021/ro_crate?version=11" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10731 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:17Z" ; + schema1:dateModified "2024-06-11T12:55:17Z" ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:keywords "10x-genomics, 10xgenomics, alevin, bustools, Cellranger, kallisto, rna-seq, single-cell, star-solo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/scrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1021?version=11" ; + schema1:version 11 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """

\r +

+VIRify

\r +

Sankey plot

\r +

VIRify is a recently developed pipeline for the detection, annotation, and taxonomic classification of viral contigs in metagenomic and metatranscriptomic assemblies. The pipeline is part of the repertoire of analysis services offered by MGnify. VIRify’s taxonomic classification relies on the detection of taxon-specific profile hidden Markov models (HMMs), built upon a set of 22,014 orthologous protein domains and referred to as ViPhOGs.

\r +

VIRify was implemented in CWL.

\r +

+What do I need?

\r +

The current implementation uses CWL version 1.2 dev+2. It was tested using Toil version 4.10 as the workflow engine and conda to manage the software dependencies.

\r +

+Docker - Singularity support

\r +

Soon…

\r +

+Setup environment

\r +
conda env create -f cwl/requirements/conda_env.yml\r
+conda activate viral_pipeline\r
+
\r +

+Basic execution

\r +
cd cwl/\r
+virify.sh -h\r
+
\r +

+A note about metatranscriptomes

\r +

Although VIRify has been benchmarked and validated with metagenomic data in mind, it is also possible to use this tool to detect RNA viruses in metatranscriptome assemblies (e.g. SARS-CoV-2). However, some additional considerations for this purpose are outlined below:
\r +1. Quality control: As for metagenomic data, a thorough quality control of the FASTQ sequence reads to remove low-quality bases, adapters and host contamination (if appropriate) is required prior to assembly. This is especially important for metatranscriptomes as small errors can further decrease the quality and contiguity of the assembly obtained. We have used TrimGalore for this purpose.

\r +

2. Assembly: There are many assemblers available that are appropriate for either metagenomic or single-species transcriptomic data. However, to our knowledge, there is no assembler currently available specifically for metatranscriptomic data. From our preliminary investigations, we have found that transcriptome-specific assemblers (e.g. rnaSPAdes) generate more contiguous and complete metatranscriptome assemblies compared to metagenomic alternatives (e.g. MEGAHIT and metaSPAdes).

\r +

3. Post-processing: Metatranscriptomes generate highly fragmented assemblies. Therefore, filtering contigs based on a set minimum length has a substantial impact in the number of contigs processed in VIRify. It has also been observed that the number of false-positive detections of VirFinder (one of the tools included in VIRify) is lower among larger contigs. The choice of a length threshold will depend on the complexity of the sample and the sequencing technology used, but in our experience any contigs <2 kb should be analysed with caution.

\r +

4. Classification: The classification module of VIRify depends on the presence of a minimum number and proportion of phylogenetically-informative genes within each contig in order to confidently assign a taxonomic lineage. Therefore, short contigs typically obtained from metatranscriptome assemblies remain generally unclassified. For targeted classification of RNA viruses (for instance, to search for Coronavirus-related sequences), alternative DNA- or protein-based classification methods can be used. Two of the possible options are: (i) using MashMap to screen the VIRify contigs against a database of RNA viruses (e.g. Coronaviridae) or (ii) using hmmsearch to screen the proteins obtained in the VIRify contigs against marker genes of the taxon of interest.

\r +

Contact us

\r +MGnify helpdesk""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.26.1" ; + schema1:isBasedOn "https://github.com/EBI-Metagenomics/emg-viral-pipeline/blob/master/cwl/src/pipeline.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for VIRify" ; + schema1:sdDatePublished "2024-08-05 10:33:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/26/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8472 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2020-06-08T10:21:08Z" ; + schema1:dateModified "2023-01-16T13:41:53Z" ; + schema1:description """

\r +

+VIRify

\r +

Sankey plot

\r +

VIRify is a recently developed pipeline for the detection, annotation, and taxonomic classification of viral contigs in metagenomic and metatranscriptomic assemblies. The pipeline is part of the repertoire of analysis services offered by MGnify. VIRify’s taxonomic classification relies on the detection of taxon-specific profile hidden Markov models (HMMs), built upon a set of 22,014 orthologous protein domains and referred to as ViPhOGs.

\r +

VIRify was implemented in CWL.

\r +

+What do I need?

\r +

The current implementation uses CWL version 1.2 dev+2. It was tested using Toil version 4.10 as the workflow engine and conda to manage the software dependencies.

\r +

+Docker - Singularity support

\r +

Soon…

\r +

+Setup environment

\r +
conda env create -f cwl/requirements/conda_env.yml\r
+conda activate viral_pipeline\r
+
\r +

+Basic execution

\r +
cd cwl/\r
+virify.sh -h\r
+
\r +

+A note about metatranscriptomes

\r +

Although VIRify has been benchmarked and validated with metagenomic data in mind, it is also possible to use this tool to detect RNA viruses in metatranscriptome assemblies (e.g. SARS-CoV-2). However, some additional considerations for this purpose are outlined below:
\r +1. Quality control: As for metagenomic data, a thorough quality control of the FASTQ sequence reads to remove low-quality bases, adapters and host contamination (if appropriate) is required prior to assembly. This is especially important for metatranscriptomes as small errors can further decrease the quality and contiguity of the assembly obtained. We have used TrimGalore for this purpose.

\r +

2. Assembly: There are many assemblers available that are appropriate for either metagenomic or single-species transcriptomic data. However, to our knowledge, there is no assembler currently available specifically for metatranscriptomic data. From our preliminary investigations, we have found that transcriptome-specific assemblers (e.g. rnaSPAdes) generate more contiguous and complete metatranscriptome assemblies compared to metagenomic alternatives (e.g. MEGAHIT and metaSPAdes).

\r +

3. Post-processing: Metatranscriptomes generate highly fragmented assemblies. Therefore, filtering contigs based on a set minimum length has a substantial impact in the number of contigs processed in VIRify. It has also been observed that the number of false-positive detections of VirFinder (one of the tools included in VIRify) is lower among larger contigs. The choice of a length threshold will depend on the complexity of the sample and the sequencing technology used, but in our experience any contigs <2 kb should be analysed with caution.

\r +

4. Classification: The classification module of VIRify depends on the presence of a minimum number and proportion of phylogenetically-informative genes within each contig in order to confidently assign a taxonomic lineage. Therefore, short contigs typically obtained from metatranscriptome assemblies remain generally unclassified. For targeted classification of RNA viruses (for instance, to search for Coronavirus-related sequences), alternative DNA- or protein-based classification methods can be used. Two of the possible options are: (i) using MashMap to screen the VIRify contigs against a database of RNA viruses (e.g. Coronaviridae) or (ii) using hmmsearch to screen the proteins obtained in the VIRify contigs against marker genes of the taxon of interest.

\r +

Contact us

\r +MGnify helpdesk""" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "VIRify" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/26?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 47487 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "A workflow for the quality assessment of mass spectrometry (MS) based proteomics analyses" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.343.1" ; + schema1:license "BSD-3-Clause" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for MaCProQC" ; + schema1:sdDatePublished "2024-08-05 10:32:09 +0100" ; + schema1:url "https://workflowhub.eu/workflows/343/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 199601 ; + schema1:creator ; + schema1:dateCreated "2022-05-20T09:38:29Z" ; + schema1:dateModified "2023-01-16T13:59:52Z" ; + schema1:description "A workflow for the quality assessment of mass spectrometry (MS) based proteomics analyses" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/BSD-3-Clause" ; + schema1:name "MaCProQC" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/343?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 136866 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Differential abundance analysis" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/981?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/differentialabundance" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/differentialabundance" ; + schema1:sdDatePublished "2024-08-05 10:24:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/981/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11181 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "Differential abundance analysis" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/981?version=8" ; + schema1:keywords "ATAC-seq, ChIP-seq, deseq2, differential-abundance, differential-expression, gsea, limma, microarray, rna-seq, shiny" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/differentialabundance" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/981?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-08-05 10:23:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=10" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5097 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:03Z" ; + schema1:dateModified "2024-06-11T12:55:03Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=10" ; + schema1:version 10 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 231159 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 1583 ; + schema1:dateModified "2024-03-13T10:40:28+00:00" ; + schema1:name "kmeans.csv" ; + schema1:sdDatePublished "2024-03-22T17:53:30+00:00" . + + a schema1:Dataset ; + schema1:datePublished "2022-10-20T13:35:37.242644" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "atacseq/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.11" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# HiC contact map generation\r +\r +Snakemake pipeline for the generation of `.pretext` and `.mcool` files for visualisation of HiC contact maps with the softwares PretextView and HiGlass, respectively.\r +\r +## Prerequisites\r +\r +This pipeine has been tested using `Snakemake v7.32.4` and requires conda for installation of required tools. To run the pipline use the command:\r +\r +`snakemake --use-conda`\r +\r +There are provided a set of configuration and running scripts for exectution on a slurm queueing system. After configuring the `cluster.json` file run:\r +\r +`./run_cluster`\r +\r +## Before starting\r +\r +You need to create a temporary folder and specify the path in the `config.yaml` file. This should be able to hold the temporary files created when sorting the `.pairsam` file (100s of GB or even many TBs)\r +\r +The path to the genome assemly must be given in the `config.yaml`.\r +\r +The HiC reads should be paired and named as follows: `Library_1.fastq.gz Library_2.fastq.gz`. The pipeline can accept any number of paired HiC read files, but the naming must be consistent. The folder containing these files must be provided in the `config.yaml`.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.795.1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for HiC contact map generation" ; + schema1:sdDatePublished "2024-08-05 10:25:08 +0100" ; + schema1:url "https://workflowhub.eu/workflows/795/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5789 ; + schema1:creator ; + schema1:dateCreated "2024-03-14T09:50:42Z" ; + schema1:dateModified "2024-03-14T09:54:40Z" ; + schema1:description """# HiC contact map generation\r +\r +Snakemake pipeline for the generation of `.pretext` and `.mcool` files for visualisation of HiC contact maps with the softwares PretextView and HiGlass, respectively.\r +\r +## Prerequisites\r +\r +This pipeine has been tested using `Snakemake v7.32.4` and requires conda for installation of required tools. To run the pipline use the command:\r +\r +`snakemake --use-conda`\r +\r +There are provided a set of configuration and running scripts for exectution on a slurm queueing system. After configuring the `cluster.json` file run:\r +\r +`./run_cluster`\r +\r +## Before starting\r +\r +You need to create a temporary folder and specify the path in the `config.yaml` file. This should be able to hold the temporary files created when sorting the `.pairsam` file (100s of GB or even many TBs)\r +\r +The path to the genome assemly must be given in the `config.yaml`.\r +\r +The HiC reads should be paired and named as follows: `Library_1.fastq.gz Library_2.fastq.gz`. The pipeline can accept any number of paired HiC read files, but the naming must be consistent. The folder containing these files must be provided in the `config.yaml`.\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/795?version=1" ; + schema1:isPartOf ; + schema1:keywords "Bioinformatics, Genomics, Snakemake, Genome assembly" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "HiC contact map generation" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/795?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# BatchConvert ![DOI:10.5281](https://zenodo.org/badge/doi/10.5281/zenodo.7955974.svg)\r +\r +A command line tool for converting image data into either of the standard file formats OME-TIFF or OME-Zarr. \r +\r +The tool wraps the dedicated file converters bfconvert and bioformats2raw to convert into OME-TIFF or OME-Zarr,\r +respectively. The workflow management system NextFlow is used to perform conversion in parallel for batches of images. \r +\r +The tool also wraps s3 and Aspera clients (go-mc and aspera-cli, respectively). Therefore, input and output locations can \r +be specified as local or remote storage and file transfer will be performed automatically. The conversion can be run on \r +HPC with Slurm. \r +\r +![](figures/diagram.png)\r +\r +## Installation & Dependencies\r +\r +**Important** note: The package has been so far only tested on Ubuntu 20.04.\r +\r +The minimal dependency to run the tool is NextFlow, which should be installed and made accessible from the command line.\r +\r +If conda exists on your system, you can install BatchConvert together with NextFlow using the following script:\r +```\r +git clone https://github.com/Euro-BioImaging/BatchConvert.git && \\ \r +source BatchConvert/installation/install_with_nextflow.sh\r +```\r +\r +\r +If you already have NextFlow installed and accessible from the command line (or if you prefer to install it manually \r +e.g., as shown [here](https://www.nextflow.io/docs/latest/getstarted.html)), you can also install BatchConvert alone, using the following script:\r +```\r +git clone https://github.com/Euro-BioImaging/BatchConvert.git && \\ \r +source BatchConvert/installation/install.sh\r +```\r +\r +\r +Other dependencies (which will be **automatically** installed):\r +- bioformats2raw (entrypoint bioformats2raw)\r +- bftools (entrypoint bfconvert)\r +- go-mc (entrypoint mc)\r +- 
aspera-cli (entrypoint ascp)\r +\r +These dependencies will be pulled and cached automatically at the first execution of the conversion command. \r +The mode of dependency management can be specified by using the command line option ``--profile`` or `-pf`. Depending \r +on how this option is specified, the dependencies will be acquired / run either via conda or via docker/singularity containers. \r +\r +Specifying ``--profile conda`` (default) will install the dependencies to an \r +environment at ``./.condaCache`` and use this environment to run the workflow. This option \r +requires that miniconda/anaconda is installed on your system. \r +\r +Alternatively, specifying ``--profile docker`` or ``--profile singularity`` will pull a docker or \r +singularity image with the dependencies, respectively, and use this image to run the workflow.\r +These options assume that the respective container runtime (docker or singularity) is available on \r +your system. If singularity is being used, a cache directory will be created at the path \r +``./.singularityCache`` where the singularity image is stored. \r +\r +Finally, you can still choose to install the dependencies manually and use your own installations to run\r +the workflow. In this case, you should specify ``--profile standard`` and make sure the entrypoints\r +specified above are recognised by your shell. \r +\r +\r +## Configuration\r +\r +BatchConvert can be configured to have default options for file conversion and transfer. Probably, the most important sets of parameters\r +to be configured include credentials for the remote ends. The easiest way to configure remote stores is by running the interactive \r +configuration command as indicated below.\r +\r +### Configuration of the s3 object store\r +\r +Run the interactive configuration command: \r +\r +`batchconvert configure_s3_remote`\r +\r +This will start a sequence of requests for s3 credentials such as name, url, access, etc. 
Provide each requested credential and click\r +enter. Continue this cycle until the process is finished. Upon completing the configuration, the sequence of commands should roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_s3_remote\r +enter remote name (for example s3)\r +s3\r +enter url:\r +https://s3.embl.de\r +enter access key:\r +"your-access-key"\r +enter secret key:\r +"your-secret-key"\r +enter bucket name:\r +"your-bucket"\r +Configuration of the default s3 credentials is complete\r +```\r +\r +\r +### Configuration of the BioStudies user space\r +\r +Run the interactive configuration command: \r +\r +`batchconvert configure_bia_remote`\r +\r +This will prompt a request for the secret directory to connect to. Enter the secret directory for your user space and click enter. \r +Upon completing the configuration, the sequence of commands should roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_bia_remote\r +enter the secret directory for BioImage Archive user space:\r +"your-secret-directory"\r +configuration of the default bia credentials is complete\r +```\r +\r +### Configuration of the slurm options\r +\r +BatchConvert can also run on slurm clusters. In order to configure the slurm parameters, run the interactive configuration command: \r +\r +`batchconvert configure_slurm`\r +\r +This will start a sequence of requests for slurm options. Provide each requested option and click enter. \r +Continue this cycle until the process is finished. 
Upon completing the configuration, the sequence of commands should \r +roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_slurm\r +Please enter value for queue_size\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´50´\r +s\r +Please enter value for submit_rate_limit\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´10/2min´\r +s\r +Please enter value for cluster_options\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´--mem-per-cpu=3140 --cpus-per-task=16´\r +s\r +Please enter value for time\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´6h´\r +s\r +configuration of the default slurm parameters is complete\r +```\r +\r +### Configuration of the default conversion parameters\r +\r +While all conversion parameters can be specified as command line arguments, it can\r +be useful for the users to set their own default parameters to avoid re-entering those\r +parameters for subsequent executions. BatchConvert allows for interactive configuration of \r +conversion in the same way as configuration of the remote stores described above.\r +\r +To configure the conversion into OME-TIFF, run the following command:\r +\r +`batchconvert configure_ometiff`\r +\r +This will prompt the user to enter a series of parameters, which will then be saved as the \r +default parameters to be passed to the `batchconvert ometiff` command. 
Upon completing the \r +configuration, the sequence of commands should look similar to:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_ometiff\r +Please enter value for noflat\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is "bfconvert defaults"\r +s\r +Please enter value for series\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is "bfconvert defaults"\r +s\r +Please enter value for timepoint\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is "bfconvert defaults"\r +s\r +...\r +...\r +...\r +...\r +...\r +...\r +Configuration of the default parameters for 'bfconvert' is complete\r +```\r +\r +\r +To configure the conversion into OME-Zarr, run the following command:\r +\r +`batchconvert configure_omezarr`\r +\r +Similarly, this will prompt the user to enter a series of parameters, which will then be saved as the \r +default parameters to be passed to the `batchconvert omezarr` command. 
Upon completing the configuration, \r +the sequence of commands should look similar to:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_omezarr\r +Please enter value for resolutions_zarr\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is "bioformats2raw defaults"\r +s\r +Please enter value for chunk_h\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is "bioformats2raw defaults"\r +s\r +Please enter value for chunk_w\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is "bioformats2raw defaults"\r +...\r +...\r +...\r +...\r +...\r +...\r +Configuration of the default parameters for 'bioformats2raw' is complete\r +```\r +\r +It is important to note that the initial defaults for the conversion parameters are the same as the defaults\r +of the backend tools bfconvert and bioformats2raw, as noted in the prompt excerpt above. Through interactive configuration, \r +the user is overriding these initial defaults and setting their own defaults. It is possible to reset the initial\r +defaults by running the following command.\r +\r +`batchconvert reset_defaults`\r +\r +Another important point is that any of these configured parameters can be overridden by passing a value to that\r +parameter in the commandline. For instance, in the following command, the value of 20 will be assigned to `chunk_h` parameter \r +even if the value for the same parameter might be different in the configuration file. \r +\r +`batchconvert omezarr --chunk_h 20 "path/to/input" "path/to/output"`\r +\r +\r +## Examples\r +\r +### Local conversion\r +\r +#### Parallel conversion of files to separate OME-TIFFs / OME-Zarrs:\r +Convert a batch of images on your local storage into OME-TIFF format. 
\r +Note that the `input_path` in the command given below is typically a \r +directory with multiple image files but a single image file can also be passed:\\\r +`batchconvert ometiff -pf conda "input_path" "output_path"` \r +\r +Note that if this is your first conversion with the profile `conda`, \r +it will take a while for a conda environment with the dependencies to be\r +created. All the subsequent conversion commands with the profile `conda`,\r +however, will use this environment, and thus show no such delay.\r +\r +Since conda is the default profile, it does not have to be \r +explicitly included in the command line. Thus, the command can be shortened to:\\\r +`batchconvert ometiff "input_path" "output_path"`\r +\r +Convert only the first channel of the images:\\\r +`batchconvert ometiff -chn 0 "input_path" "output_path"`\r +\r +Crop the images being converted along x and y axis by 150 pixels:\\\r +`batchconvert ometiff -cr 0,0,150,150 "input_path" "output_path"`\r +\r +Convert into OME-Zarr instead:\\\r +`batchconvert omezarr "input_path" "output_path"`\r +\r +Convert into OME-Zarr with 3 resolution levels:\\\r +`batchconvert omezarr -rz 3 "input_path" "output_path"`\r +\r +Select a subset of images with a matching string such as "mutation":\\\r +`batchconvert omezarr -p mutation "input_path" "output_path"`\r +\r +Select a subset of images using wildcards. 
Note that the use of "" around \r +the input path is necessary when using wildcards:\\\r +`batchconvert omezarr "input_path/*D3*.oir" "output_path"`\r +\r +Convert by using a singularity container instead of conda environment (requires\r +singularity to be installed on your system):\\\r +`batchconvert omezarr -pf singularity "input_path/*D3*.oir" "output_path"`\r +\r +Convert by using a docker container instead of conda environment (requires docker\r +to be installed on your system):\\\r +`batchconvert omezarr -pf docker "input_path/*D3*.oir" "output_path"`\r +\r +Note that similarly to the case with the profile `conda`, the first execution of\r +a conversion with the profile `singularity` or `docker` will take a while for the\r +container image to be pulled. All the subsequent conversion commands using a \r +container option will use this image, and thus show no such delay. \r +\r +Convert local data and upload the output to an s3 bucket. Note that the output \r +path is created relative to the bucket specified in your s3 configuration:\\\r +`batchconvert omezarr -dt s3 "input_path" "output_path"`\r +\r +Receive input files from an s3 bucket, convert locally and upload the output to \r +the same bucket. Note that wildcards cannot be used when the input is from s3. \r +Use pattern matching option `-p` for selecting a subset of input files:\\\r +`batchconvert omezarr -p mutation -st s3 -dt s3 "input_path" "output_path"`\r +\r +Receive input files from your private BioStudies user space and convert them locally.\r +Use pattern matching option `-p` for selecting a subset of input files:\\\r +`batchconvert omezarr -p mutation -st bia "input_path" "output_path"`\r +\r +Receive an input from an s3 bucket, convert locally and upload the output to your \r +private BioStudies user space. 
Use pattern matching option `-p` for selecting a subset \r +of input files:\\\r +`batchconvert omezarr -p mutation -st s3 -dt bia "input_path" "output_path"`\r +\r +Note that in all the examples shown above, BatchConvert treats each input file as separate,\r +standalone data point, disregarding the possibility that some of the input files might belong to \r +the same multidimensional array. Thus, each input file is converted to an independent \r +OME-TIFF / OME-Zarr and the number of outputs will thus equal the number of selected input files.\r +An alternative scenario is discussed below.\r +\r +#### Parallel conversion of file groups by stacking multiple files into single OME-TIFFs / OME-Zarrs:\r +\r +When the flag `--merge_files` is specified, BatchConvert tries to detect which input files might \r +belong to the same multidimensional array based on the patterns in the filenames. Then a "grouped conversion" \r +is performed, meaning that the files belonging to the same dataset will be incorporated into \r +a single OME-TIFF / OME-Zarr series, in that files will be concatenated along specific dimension(s) \r +during the conversion. Multiple file groups in the input directory can be detected and converted \r +in parallel. \r +\r +This feature uses Bio-Formats's pattern files as described [here](https://docs.openmicroscopy.org/bio-formats/6.6.0/formats/pattern-file.html).\r +However, BatchConvert generates pattern files automatically, allowing the user to directly use the \r +input directory in the conversion command. BatchConvert also has the option of specifying the \r +concatenation axes in the command line, which is especially useful in cases where the filenames \r +may not contain dimension information. \r +\r +To be able to use the `--merge files` flag, the input file names must obey certain rules:\r +1. File names in the same group must be uniform, except for one or more **numeric field(s)**, which\r +should show incremental change across the files. 
These so-called **variable fields** \r +will be detected and used as the dimension(s) of concatenation.\r +2. The length of variable fields must be uniform within the group. For instance, if the\r +variable field has values reaching multi-digit numbers, leading "0"s should be included where needed \r +in the file names to make the variable field length uniform within the group.\r +3. Typically, each variable field should follow a dimension specifier. What patterns can be used as \r +dimension specifiers are explained [here](https://docs.openmicroscopy.org/bio-formats/6.6.0/formats/pattern-file.html).\r +However, BatchConvert also has the option `--concatenation_order`, which allows the user to\r +specify from the command line, the dimension(s), along which the files must be concatenated.\r +4. File names that are unique and cannot be associated with any group will be assumed as\r +standalone images and converted accordingly. \r +\r +Below are some examples of grouped conversion commands in the context of different possible use-case scenarios:\r +\r +**Example 1:**\r +\r +This is an example of a folder with non-uniform filename lengths:\r +```\r +time-series/test_img_T2\r +time-series/test_img_T4\r +time-series/test_img_T6\r +time-series/test_img_T8\r +time-series/test_img_T10\r +time-series/test_img_T12\r +```\r +In this example, leading zeroes are missing in the variable fields of some filenames. \r +A typical command to convert this folder to a single OME-TIFF would look like: \\\r +`batchconvert --ometiff --merge_files "input_dir/time-series" "output_path"`\r +\r +However, this command would fail to create a single OME-Zarr folder due to the non-uniform \r +lengths of the filenames. 
Instead, the files would be split into two groups based on the\r +filename length, leading to two separate OME-Zarrs with names:\r +\r +`test_img_TRange{2-8-2}.ome.zarr` and `test_img_TRange{10-12-2}.ome.zarr`\r +\r +Here is the corrected version of the folder for the above example-\r +```\r +time-series/test_img_T02\r +time-series/test_img_T04\r +time-series/test_img_T06\r +time-series/test_img_T08\r +time-series/test_img_T10\r +time-series/test_img_T12\r +```\r +\r +Executing the same command on this folder would result in a single OME-Zarr with the name:\r +`test_img_TRange{02-12-2}.ome.zarr`\r +\r +**Example 2**- \r +\r +In this example, the filename lengths are uniform but the incrementation within the variable field is not.\r +```\r +time-series/test_img_T2\r +time-series/test_img_T4\r +time-series/test_img_T5\r +time-series/test_img_T7\r +```\r +\r +A typical command to convert this folder to a single OME-Zarr would look like: \\\r +`batchconvert --omezarr --merge_files "input_dir/time-series" "output_path"`\r +\r +However, the command would fail to assume these files as a single group due to the\r +non-uniform incrementation in the variable field of the filenames. 
Instead, the dataset \r +would be split into two groups, leading to two separate OME-Zarrs with the following names:\r +`test_img_TRange{2-4-2}.ome.zarr` and `test_img_TRange{5-7-2}.ome.zarr` \r +\r +\r +**Example 3**\r +\r +This is an example of a case where the conversion attempts to concatenate files along two\r +dimensions, channel and time.\r +```\r +multichannel_time-series/test_img_C1-T1\r +multichannel_time-series/test_img_C1-T2\r +multichannel_time-series/test_img_C1-T3\r +multichannel_time-series/test_img_C2-T1\r +multichannel_time-series/test_img_C2-T2\r +```\r +To convert this folder to a single OME-Zarr, one could try the following command: \\\r +`batchconvert --omezarr --merge_files "input_dir/multichannel_time-series" "output_path"`\r +\r +However, since the channel-2 does not have the same number of timeframes as the channel-1, \r +BatchConvert will fail to assume these two channels as part of the same series and\r +will instead split the two channels into two separate OME-Zarrs. 
\r +\r +The output would look like: \\\r +`test_img_C1-TRange{1-3-1}.ome.zarr` \\\r +`test_img_C2-TRange{1-2-1}.ome.zarr`\r +\r +To be able to really incorporate all files into a single OME-Zarr, the folder should have equal\r +number of images corresponding to both channels, as shown below:\r +```\r +multichannel_time-series/test_img_C1-T1\r +multichannel_time-series/test_img_C1-T2\r +multichannel_time-series/test_img_C1-T3\r +multichannel_time-series/test_img_C2-T1\r +multichannel_time-series/test_img_C2-T2\r +multichannel_time-series/test_img_C2-T3\r +```\r +The same conversion command on this version of the input folder would result in a single \r +OME-Zarr with the name: \\\r +`test_img_CRange{1-2-1}-TRange{1-3-1}.ome.zarr`\r +\r +\r +**Example 4**\r +\r +This is another example of a case, where there are multiple filename patterns in the input folder.\r +\r +```\r +folder_with_multiple_groups/test_img_C1-T1\r +folder_with_multiple_groups/test_img_C1-T2\r +folder_with_multiple_groups/test_img_C2-T1\r +folder_with_multiple_groups/test_img_C2-T2\r +folder_with_multiple_groups/test_img_T1-Z1\r +folder_with_multiple_groups/test_img_T1-Z2\r +folder_with_multiple_groups/test_img_T1-Z3\r +folder_with_multiple_groups/test_img_T2-Z1\r +folder_with_multiple_groups/test_img_T2-Z2\r +folder_with_multiple_groups/test_img_T2-Z3\r +```\r +\r +One can convert this folder with- \\\r +`batchconvert --omezarr --merge_files "input_dir/folder_with_multiple_groups" "output_path"`\r + \r +BatchConvert will detect the two patterns in this folder and perform two grouped conversions. \r +The output folders will be named as `test_img_CRange{1-2-1}-TRange{1-2-1}.ome.zarr` and \r +`test_img_TRange{1-2-1}-ZRange{1-3-1}.ome.zarr`. 
\r +\r +\r +**Example 5**\r +\r +Now imagine that we have the same files as in the example 4 but the filenames of the\r +first group lack any dimension specifier, so we have the following folder:\r +\r +```\r +folder_with_multiple_groups/test_img_1-1\r +folder_with_multiple_groups/test_img_1-2\r +folder_with_multiple_groups/test_img_2-1\r +folder_with_multiple_groups/test_img_2-2\r +folder_with_multiple_groups/test_img_T1-Z1\r +folder_with_multiple_groups/test_img_T1-Z2\r +folder_with_multiple_groups/test_img_T1-Z3\r +folder_with_multiple_groups/test_img_T2-Z1\r +folder_with_multiple_groups/test_img_T2-Z2\r +folder_with_multiple_groups/test_img_T2-Z3\r +```\r +\r +In such a scenario, BatchConvert allows the user to specify the concatenation axes \r +via `--concatenation_order` option. This option expects comma-separated strings of dimensions \r +for each group. In this example, the user must provide a string of 2 characters, such as `ct` for \r +channel and time, for group 1, since there are two variable fields for this group. Since group 2 \r +already has dimension specifiers (T and Z as specified in the filenames preceding the variable fields),\r +the user does not need to specify anything for this group, and can enter `auto` or `aa` for automatic\r +detection of the specifiers. \r +\r +So the following line can be used to convert this folder: \\\r +`batchconvert --omezarr --merge_files --concatenation_order ct,aa "input_dir/folder_with_multiple_groups" "output_path"`\r +\r +The resulting OME-Zarrs will have the names:\r +`test_img_CRange{1-2-1}-TRange{1-2-1}.ome.zarr` and\r +`test_img_TRange{1-2-1}-ZRange{1-3-1}.ome.zarr`\r +\r +Note that `--concatenation_order` will override any dimension specifiers already\r +existing in the filenames.\r +\r +\r +**Example 6**\r +\r +There can be scenarios where the user may want to have further control over the axes along \r +which to concatenate the images. 
For example, the filenames might contain the data acquisition\r +date, which can be recognised by BatchConvert as a concatenation axis in the automatic \r +detection mode. An example of such a fileset might look like:\r +\r +```\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T1\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T2\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T3\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T1\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T2\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T3\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T1\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T2\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T3\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T1\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T2\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T3\r +```\r +\r +One may try the following command to convert this folder:\r +\r +`batchconvert --omezarr --merge_files "input_dir/filenames_with_dates" "output_path"`\r +\r +Since the concatenation axes are not specified, this command would try to create\r +a single OME-Zarr with name: `test_data_dateRange{03-04-1}.03.2023_imageZRange{1-2-1}-TRange{1-3-1}`.\r +\r +In order to force BatchConvert to ignore the date field, the user can restrict the concatenation \r +axes to the last two numeric fields. This can be done by using a command such as: \\\r +`batchconvert --omezarr --merge_files --concatenation_order aa "input_dir/filenames_with_dates" "output_path"` \\\r +This command will avoid concatenation along the date field, and therefore, there will be two\r +OME-Zarrs corresponding to the two dates. The number of characters being passed to the \r +`--concatenation_order` option specifies the number of numeric fields (starting from the right \r +end of the filename) that are recognised by the BatchConvert as valid concatenation axes. 
\r +Passing `aa`, therefore, means that the last two numeric fields must be recognised as \r +concatenation axes and the dimension type should be automatically detected (`a` for automatic). \r +In the same logic, one could, for example, convert each Z section into a separate OME-Zarr by \r +specifying `--concatenation_order a`.\r +\r +\r +\r +### Conversion on slurm\r +\r +All the examples given above can also be run on slurm by specifying `-pf cluster` option. \r +Note that this option automatically uses the singularity profile:\\\r +`batchconvert omezarr -pf cluster -p .oir "input_path" "output_path"`\r +\r +\r +\r +\r +\r +\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.453.3" ; + schema1:isBasedOn "https://github.com/Euro-BioImaging/BatchConvert.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for BatchConvert" ; + schema1:sdDatePublished "2024-08-05 10:29:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/453/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 89 ; + schema1:creator ; + schema1:dateCreated "2023-07-05T13:47:29Z" ; + schema1:dateModified "2023-07-11T10:35:35Z" ; + schema1:description """# BatchConvert ![DOI:10.5281](https://zenodo.org/badge/doi/10.5281/zenodo.7955974.svg)\r +\r +A command line tool for converting image data into either of the standard file formats OME-TIFF or OME-Zarr. \r +\r +The tool wraps the dedicated file converters bfconvert and bioformats2raw to convert into OME-TIFF or OME-Zarr,\r +respectively. The workflow management system NextFlow is used to perform conversion in parallel for batches of images. \r +\r +The tool also wraps s3 and Aspera clients (go-mc and aspera-cli, respectively). 
Therefore, input and output locations can \r +be specified as local or remote storage and file transfer will be performed automatically. The conversion can be run on \r +HPC with Slurm. \r +\r +![](figures/diagram.png)\r +\r +## Installation & Dependencies\r +\r +**Important** note: The package has been so far only tested on Ubuntu 20.04.\r +\r +The minimal dependency to run the tool is NextFlow, which should be installed and made accessible from the command line.\r +\r +If conda exists on your system, you can install BatchConvert together with NextFlow using the following script:\r +```\r +git clone https://github.com/Euro-BioImaging/BatchConvert.git && \\ \r +source BatchConvert/installation/install_with_nextflow.sh\r +```\r +\r +\r +If you already have NextFlow installed and accessible from the command line (or if you prefer to install it manually \r +e.g., as shown [here](https://www.nextflow.io/docs/latest/getstarted.html)), you can also install BatchConvert alone, using the following script:\r +```\r +git clone https://github.com/Euro-BioImaging/BatchConvert.git && \\ \r +source BatchConvert/installation/install.sh\r +```\r +\r +\r +Other dependencies (which will be **automatically** installed):\r +- bioformats2raw (entrypoint bioformats2raw)\r +- bftools (entrypoint bfconvert)\r +- go-mc (entrypoint mc)\r +- aspera-cli (entrypoint ascp)\r +\r +These dependencies will be pulled and cached automatically at the first execution of the conversion command. \r +The mode of dependency management can be specified by using the command line option ``--profile`` or `-pf`. Depending \r +on how this option is specified, the dependencies will be acquired / run either via conda or via docker/singularity containers. \r +\r +Specifying ``--profile conda`` (default) will install the dependencies to an \r +environment at ``./.condaCache`` and use this environment to run the workflow. This option \r +requires that miniconda/anaconda is installed on your system. 
\r +\r +Alternatively, specifying ``--profile docker`` or ``--profile singularity`` will pull a docker or \r +singularity image with the dependencies, respectively, and use this image to run the workflow.\r +These options assume that the respective container runtime (docker or singularity) is available on \r +your system. If singularity is being used, a cache directory will be created at the path \r +``./.singularityCache`` where the singularity image is stored. \r +\r +Finally, you can still choose to install the dependencies manually and use your own installations to run\r +the workflow. In this case, you should specify ``--profile standard`` and make sure the entrypoints\r +specified above are recognised by your shell. \r +\r +\r +## Configuration\r +\r +BatchConvert can be configured to have default options for file conversion and transfer. Probably, the most important sets of parameters\r +to be configured include credentials for the remote ends. The easiest way to configure remote stores is by running the interactive \r +configuration command as indicated below.\r +\r +### Configuration of the s3 object store\r +\r +Run the interactive configuration command: \r +\r +`batchconvert configure_s3_remote`\r +\r +This will start a sequence of requests for s3 credentials such as name, url, access, etc. Provide each requested credential and click\r +enter. Continue this cycle until the process is finished. 
Upon completing the configuration, the sequence of commands should roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_s3_remote\r +enter remote name (for example s3)\r +s3\r +enter url:\r +https://s3.embl.de\r +enter access key:\r +"your-access-key"\r +enter secret key:\r +"your-secret-key"\r +enter bucket name:\r +"your-bucket"\r +Configuration of the default s3 credentials is complete\r +```\r +\r +\r +### Configuration of the BioStudies user space\r +\r +Run the interactive configuration command: \r +\r +`batchconvert configure_bia_remote`\r +\r +This will prompt a request for the secret directory to connect to. Enter the secret directory for your user space and click enter. \r +Upon completing the configuration, the sequence of commands should roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_bia_remote\r +enter the secret directory for BioImage Archive user space:\r +"your-secret-directory"\r +configuration of the default bia credentials is complete\r +```\r +\r +### Configuration of the slurm options\r +\r +BatchConvert can also run on slurm clusters. In order to configure the slurm parameters, run the interactive configuration command: \r +\r +`batchconvert configure_slurm`\r +\r +This will start a sequence of requests for slurm options. Provide each requested option and click enter. \r +Continue this cycle until the process is finished. 
Upon completing the configuration, the sequence of commands should \r +roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_slurm\r +Please enter value for queue_size\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´50´\r +s\r +Please enter value for submit_rate_limit\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´10/2min´\r +s\r +Please enter value for cluster_options\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´--mem-per-cpu=3140 --cpus-per-task=16´\r +s\r +Please enter value for time\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´6h´\r +s\r +configuration of the default slurm parameters is complete\r +```\r +\r +### Configuration of the default conversion parameters\r +\r +While all conversion parameters can be specified as command line arguments, it can\r +be useful for the users to set their own default parameters to avoid re-entering those\r +parameters for subsequent executions. BatchConvert allows for interactive configuration of \r +conversion in the same way as configuration of the remote stores described above.\r +\r +To configure the conversion into OME-TIFF, run the following command:\r +\r +`batchconvert configure_ometiff`\r +\r +This will prompt the user to enter a series of parameters, which will then be saved as the \r +default parameters to be passed to the `batchconvert ometiff` command. 
Upon completing the \r +configuration, the sequence of commands should look similar to:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_ometiff\r +Please enter value for noflat\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is "bfconvert defaults"\r +s\r +Please enter value for series\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is "bfconvert defaults"\r +s\r +Please enter value for timepoint\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is "bfconvert defaults"\r +s\r +...\r +...\r +...\r +...\r +...\r +...\r +Configuration of the default parameters for 'bfconvert' is complete\r +```\r +\r +\r +To configure the conversion into OME-Zarr, run the following command:\r +\r +`batchconvert configure_omezarr`\r +\r +Similarly, this will prompt the user to enter a series of parameters, which will then be saved as the \r +default parameters to be passed to the `batchconvert omezarr` command. 
Upon completing the configuration, \r +the sequence of commands should look similar to:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_omezarr\r +Please enter value for resolutions_zarr\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is "bioformats2raw defaults"\r +s\r +Please enter value for chunk_h\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is "bioformats2raw defaults"\r +s\r +Please enter value for chunk_w\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is "bioformats2raw defaults"\r +...\r +...\r +...\r +...\r +...\r +...\r +Configuration of the default parameters for 'bioformats2raw' is complete\r +```\r +\r +It is important to note that the initial defaults for the conversion parameters are the same as the defaults\r +of the backend tools bfconvert and bioformats2raw, as noted in the prompt excerpt above. Through interactive configuration, \r +the user is overriding these initial defaults and setting their own defaults. It is possible to reset the initial\r +defaults by running the following command.\r +\r +`batchconvert reset_defaults`\r +\r +Another important point is that any of these configured parameters can be overridden by passing a value to that\r +parameter in the commandline. For instance, in the following command, the value of 20 will be assigned to `chunk_h` parameter \r +even if the value for the same parameter might be different in the configuration file. \r +\r +`batchconvert omezarr --chunk_h 20 "path/to/input" "path/to/output"`\r +\r +\r +## Examples\r +\r +### Local conversion\r +\r +#### Parallel conversion of files to separate OME-TIFFs / OME-Zarrs:\r +Convert a batch of images on your local storage into OME-TIFF format. 
\r +Note that the `input_path` in the command given below is typically a \r +directory with multiple image files but a single image file can also be passed:\\\r +`batchconvert ometiff -pf conda "input_path" "output_path"` \r +\r +Note that if this is your first conversion with the profile `conda`, \r +it will take a while for a conda environment with the dependencies to be\r +created. All the subsequent conversion commands with the profile `conda`,\r +however, will use this environment, and thus show no such delay.\r +\r +Since conda is the default profile, it does not have to be \r +explicitly included in the command line. Thus, the command can be shortened to:\\\r +`batchconvert ometiff "input_path" "output_path"`\r +\r +Convert only the first channel of the images:\\\r +`batchconvert ometiff -chn 0 "input_path" "output_path"`\r +\r +Crop the images being converted along x and y axis by 150 pixels:\\\r +`batchconvert ometiff -cr 0,0,150,150 "input_path" "output_path"`\r +\r +Convert into OME-Zarr instead:\\\r +`batchconvert omezarr "input_path" "output_path"`\r +\r +Convert into OME-Zarr with 3 resolution levels:\\\r +`batchconvert omezarr -rz 3 "input_path" "output_path"`\r +\r +Select a subset of images with a matching string such as "mutation":\\\r +`batchconvert omezarr -p mutation "input_path" "output_path"`\r +\r +Select a subset of images using wildcards. 
Note that the use of "" around \r +the input path is necessary when using wildcards:\\\r +`batchconvert omezarr "input_path/*D3*.oir" "output_path"`\r +\r +Convert by using a singularity container instead of conda environment (requires\r +singularity to be installed on your system):\\\r +`batchconvert omezarr -pf singularity "input_path/*D3*.oir" "output_path"`\r +\r +Convert by using a docker container instead of conda environment (requires docker\r +to be installed on your system):\\\r +`batchconvert omezarr -pf docker "input_path/*D3*.oir" "output_path"`\r +\r +Note that similarly to the case with the profile `conda`, the first execution of\r +a conversion with the profile `singularity` or `docker` will take a while for the\r +container image to be pulled. All the subsequent conversion commands using a \r +container option will use this image, and thus show no such delay. \r +\r +Convert local data and upload the output to an s3 bucket. Note that the output \r +path is created relative to the bucket specified in your s3 configuration:\\\r +`batchconvert omezarr -dt s3 "input_path" "output_path"`\r +\r +Receive input files from an s3 bucket, convert locally and upload the output to \r +the same bucket. Note that wildcards cannot be used when the input is from s3. \r +Use pattern matching option `-p` for selecting a subset of input files:\\\r +`batchconvert omezarr -p mutation -st s3 -dt s3 "input_path" "output_path"`\r +\r +Receive input files from your private BioStudies user space and convert them locally.\r +Use pattern matching option `-p` for selecting a subset of input files:\\\r +`batchconvert omezarr -p mutation -st bia "input_path" "output_path"`\r +\r +Receive an input from an s3 bucket, convert locally and upload the output to your \r +private BioStudies user space. 
Use pattern matching option `-p` for selecting a subset \r +of input files:\\\r +`batchconvert omezarr -p mutation -st s3 -dt bia "input_path" "output_path"`\r +\r +Note that in all the examples shown above, BatchConvert treats each input file as separate,\r +standalone data point, disregarding the possibility that some of the input files might belong to \r +the same multidimensional array. Thus, each input file is converted to an independent \r +OME-TIFF / OME-Zarr and the number of outputs will thus equal the number of selected input files.\r +An alternative scenario is discussed below.\r +\r +#### Parallel conversion of file groups by stacking multiple files into single OME-TIFFs / OME-Zarrs:\r +\r +When the flag `--merge_files` is specified, BatchConvert tries to detect which input files might \r +belong to the same multidimensional array based on the patterns in the filenames. Then a "grouped conversion" \r +is performed, meaning that the files belonging to the same dataset will be incorporated into \r +a single OME-TIFF / OME-Zarr series, in that files will be concatenated along specific dimension(s) \r +during the conversion. Multiple file groups in the input directory can be detected and converted \r +in parallel. \r +\r +This feature uses Bio-Formats's pattern files as described [here](https://docs.openmicroscopy.org/bio-formats/6.6.0/formats/pattern-file.html).\r +However, BatchConvert generates pattern files automatically, allowing the user to directly use the \r +input directory in the conversion command. BatchConvert also has the option of specifying the \r +concatenation axes in the command line, which is especially useful in cases where the filenames \r +may not contain dimension information. \r +\r +To be able to use the `--merge files` flag, the input file names must obey certain rules:\r +1. File names in the same group must be uniform, except for one or more **numeric field(s)**, which\r +should show incremental change across the files. 
These so-called **variable fields** \r +will be detected and used as the dimension(s) of concatenation.\r +2. The length of variable fields must be uniform within the group. For instance, if the\r +variable field has values reaching multi-digit numbers, leading "0"s should be included where needed \r +in the file names to make the variable field length uniform within the group.\r +3. Typically, each variable field should follow a dimension specifier. What patterns can be used as \r +dimension specifiers are explained [here](https://docs.openmicroscopy.org/bio-formats/6.6.0/formats/pattern-file.html).\r +However, BatchConvert also has the option `--concatenation_order`, which allows the user to\r +specify from the command line, the dimension(s), along which the files must be concatenated.\r +4. File names that are unique and cannot be associated with any group will be assumed as\r +standalone images and converted accordingly. \r +\r +Below are some examples of grouped conversion commands in the context of different possible use-case scenarios:\r +\r +**Example 1:**\r +\r +This is an example of a folder with non-uniform filename lengths:\r +```\r +time-series/test_img_T2\r +time-series/test_img_T4\r +time-series/test_img_T6\r +time-series/test_img_T8\r +time-series/test_img_T10\r +time-series/test_img_T12\r +```\r +In this example, leading zeroes are missing in the variable fields of some filenames. \r +A typical command to convert this folder to a single OME-TIFF would look like: \\\r +`batchconvert --ometiff --merge_files "input_dir/time-series" "output_path"`\r +\r +However, this command would fail to create a single OME-Zarr folder due to the non-uniform \r +lengths of the filenames. 
Instead, the files would be split into two groups based on the\r +filename length, leading to two separate OME-Zarrs with names:\r +\r +`test_img_TRange{2-8-2}.ome.zarr` and `test_img_TRange{10-12-2}.ome.zarr`\r +\r +Here is the corrected version of the folder for the above example-\r +```\r +time-series/test_img_T02\r +time-series/test_img_T04\r +time-series/test_img_T06\r +time-series/test_img_T08\r +time-series/test_img_T10\r +time-series/test_img_T12\r +```\r +\r +Executing the same command on this folder would result in a single OME-Zarr with the name:\r +`test_img_TRange{02-12-2}.ome.zarr`\r +\r +**Example 2**- \r +\r +In this example, the filename lengths are uniform but the incrementation within the variable field is not.\r +```\r +time-series/test_img_T2\r +time-series/test_img_T4\r +time-series/test_img_T5\r +time-series/test_img_T7\r +```\r +\r +A typical command to convert this folder to a single OME-Zarr would look like: \\\r +`batchconvert --omezarr --merge_files "input_dir/time-series" "output_path"`\r +\r +However, the command would fail to assume these files as a single group due to the\r +non-uniform incrementation in the variable field of the filenames. 
Instead, the dataset \r +would be split into two groups, leading to two separate OME-Zarrs with the following names:\r +`test_img_TRange{2-4-2}.ome.zarr` and `test_img_TRange{5-7-2}.ome.zarr` \r +\r +\r +**Example 3**\r +\r +This is an example of a case where the conversion attempts to concatenate files along two\r +dimensions, channel and time.\r +```\r +multichannel_time-series/test_img_C1-T1\r +multichannel_time-series/test_img_C1-T2\r +multichannel_time-series/test_img_C1-T3\r +multichannel_time-series/test_img_C2-T1\r +multichannel_time-series/test_img_C2-T2\r +```\r +To convert this folder to a single OME-Zarr, one could try the following command: \\\r +`batchconvert --omezarr --merge_files "input_dir/multichannel_time-series" "output_path"`\r +\r +However, since the channel-2 does not have the same number of timeframes as the channel-1, \r +BatchConvert will fail to assume these two channels as part of the same series and\r +will instead split the two channels into two separate OME-Zarrs. 
\r +\r +The output would look like: \\\r +`test_img_C1-TRange{1-3-1}.ome.zarr` \\\r +`test_img_C2-TRange{1-2-1}.ome.zarr`\r +\r +To be able to really incorporate all files into a single OME-Zarr, the folder should have equal\r +number of images corresponding to both channels, as shown below:\r +```\r +multichannel_time-series/test_img_C1-T1\r +multichannel_time-series/test_img_C1-T2\r +multichannel_time-series/test_img_C1-T3\r +multichannel_time-series/test_img_C2-T1\r +multichannel_time-series/test_img_C2-T2\r +multichannel_time-series/test_img_C2-T3\r +```\r +The same conversion command on this version of the input folder would result in a single \r +OME-Zarr with the name: \\\r +`test_img_CRange{1-2-1}-TRange{1-3-1}.ome.zarr`\r +\r +\r +**Example 4**\r +\r +This is another example of a case, where there are multiple filename patterns in the input folder.\r +\r +```\r +folder_with_multiple_groups/test_img_C1-T1\r +folder_with_multiple_groups/test_img_C1-T2\r +folder_with_multiple_groups/test_img_C2-T1\r +folder_with_multiple_groups/test_img_C2-T2\r +folder_with_multiple_groups/test_img_T1-Z1\r +folder_with_multiple_groups/test_img_T1-Z2\r +folder_with_multiple_groups/test_img_T1-Z3\r +folder_with_multiple_groups/test_img_T2-Z1\r +folder_with_multiple_groups/test_img_T2-Z2\r +folder_with_multiple_groups/test_img_T2-Z3\r +```\r +\r +One can convert this folder with- \\\r +`batchconvert --omezarr --merge_files "input_dir/folder_with_multiple_groups" "output_path"`\r + \r +BatchConvert will detect the two patterns in this folder and perform two grouped conversions. \r +The output folders will be named as `test_img_CRange{1-2-1}-TRange{1-2-1}.ome.zarr` and \r +`test_img_TRange{1-2-1}-ZRange{1-3-1}.ome.zarr`. 
\r +\r +\r +**Example 5**\r +\r +Now imagine that we have the same files as in the example 4 but the filenames of the\r +first group lack any dimension specifier, so we have the following folder:\r +\r +```\r +folder_with_multiple_groups/test_img_1-1\r +folder_with_multiple_groups/test_img_1-2\r +folder_with_multiple_groups/test_img_2-1\r +folder_with_multiple_groups/test_img_2-2\r +folder_with_multiple_groups/test_img_T1-Z1\r +folder_with_multiple_groups/test_img_T1-Z2\r +folder_with_multiple_groups/test_img_T1-Z3\r +folder_with_multiple_groups/test_img_T2-Z1\r +folder_with_multiple_groups/test_img_T2-Z2\r +folder_with_multiple_groups/test_img_T2-Z3\r +```\r +\r +In such a scenario, BatchConvert allows the user to specify the concatenation axes \r +via `--concatenation_order` option. This option expects comma-separated strings of dimensions \r +for each group. In this example, the user must provide a string of 2 characters, such as `ct` for \r +channel and time, for group 1, since there are two variable fields for this group. Since group 2 \r +already has dimension specifiers (T and Z as specified in the filenames preceding the variable fields),\r +the user does not need to specify anything for this group, and can enter `auto` or `aa` for automatic\r +detection of the specifiers. \r +\r +So the following line can be used to convert this folder: \\\r +`batchconvert --omezarr --merge_files --concatenation_order ct,aa "input_dir/folder_with_multiple_groups" "output_path"`\r +\r +The resulting OME-Zarrs will have the names:\r +`test_img_CRange{1-2-1}-TRange{1-2-1}.ome.zarr` and\r +`test_img_TRange{1-2-1}-ZRange{1-3-1}.ome.zarr`\r +\r +Note that `--concatenation_order` will override any dimension specifiers already\r +existing in the filenames.\r +\r +\r +**Example 6**\r +\r +There can be scenarios where the user may want to have further control over the axes along \r +which to concatenate the images. 
For example, the filenames might contain the data acquisition\r +date, which can be recognised by BatchConvert as a concatenation axis in the automatic \r +detection mode. An example of such a fileset might look like:\r +\r +```\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T1\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T2\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T3\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T1\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T2\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T3\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T1\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T2\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T3\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T1\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T2\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T3\r +```\r +\r +One may try the following command to convert this folder:\r +\r +`batchconvert --omezarr --merge_files "input_dir/filenames_with_dates" "output_path"`\r +\r +Since the concatenation axes are not specified, this command would try to create\r +a single OME-Zarr with name: `test_data_dateRange{03-04-1}.03.2023_imageZRange{1-2-1}-TRange{1-3-1}`.\r +\r +In order to force BatchConvert to ignore the date field, the user can restrict the concatenation \r +axes to the last two numeric fields. This can be done by using a command such as: \\\r +`batchconvert --omezarr --merge_files --concatenation_order aa "input_dir/filenames_with_dates" "output_path"` \\\r +This command will avoid concatenation along the date field, and therefore, there will be two\r +OME-Zarrs corresponding to the two dates. The number of characters being passed to the \r +`--concatenation_order` option specifies the number of numeric fields (starting from the right \r +end of the filename) that are recognised by the BatchConvert as valid concatenation axes. 
\r +Passing `aa`, therefore, means that the last two numeric fields must be recognised as \r +concatenation axes and the dimension type should be automatically detected (`a` for automatic). \r +In the same logic, one could, for example, convert each Z section into a separate OME-Zarr by \r +specifying `--concatenation_order a`.\r +\r +\r +\r +### Conversion on slurm\r +\r +All the examples given above can also be run on slurm by specifying `-pf cluster` option. \r +Note that this option automatically uses the singularity profile:\\\r +`batchconvert omezarr -pf cluster -p .oir "input_path" "output_path"`\r +\r +\r +\r +\r +\r +\r +\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/453?version=2" ; + schema1:keywords "Nextflow, bash, Python, NGFF, OME-Zarr, Conversion, imaging, bioimaging, image file format, file conversion, OME-TIFF, S3, BioStudies, bioformats, bioformats2raw" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "BatchConvert" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/453?version=3" ; + schema1:version 3 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 151518 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Galaxy workflow example that illustrate the process of setting up a simulation system containing a protein, step by step, using the [BioExcel Building Blocks](/projects/11) library (biobb). The particular example used is the Lysozyme protein (PDB code 1AKI). 
This workflow returns a resulting protein structure and simulated 3D trajectories.\r +\r +Designed for running on the https://dev.usegalaxy.es Galaxy instance.""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.194.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Protein MD Setup tutorial using BioExcel Building Blocks (biobb) in Galaxy" ; + schema1:sdDatePublished "2024-08-05 10:32:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/194/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 60779 ; + schema1:dateCreated "2021-09-26T19:50:02Z" ; + schema1:dateModified "2023-01-16T13:53:08Z" ; + schema1:description """Galaxy workflow example that illustrate the process of setting up a simulation system containing a protein, step by step, using the [BioExcel Building Blocks](/projects/11) library (biobb). The particular example used is the Lysozyme protein (PDB code 1AKI). This workflow returns a resulting protein structure and simulated 3D trajectories.\r +\r +Designed for running on the https://dev.usegalaxy.es Galaxy instance.""" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Protein MD Setup tutorial using BioExcel Building Blocks (biobb) in Galaxy" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/194?version=1" ; + schema1:version 1 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ChIP-seq peak-calling and differential analysis pipeline." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/971?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/chipseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/chipseq" ; + schema1:sdDatePublished "2024-08-05 10:24:13 +0100" ; + schema1:url "https://workflowhub.eu/workflows/971/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5685 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:44Z" ; + schema1:dateModified "2024-06-11T12:54:44Z" ; + schema1:description "ChIP-seq peak-calling and differential analysis pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/971?version=5" ; + schema1:keywords "ChIP, ChIP-seq, chromatin-immunoprecipitation, macs2, peak-calling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/chipseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/971?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. 
The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. 
With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.328.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_pmx_tutorial/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)" ; + schema1:sdDatePublished "2024-08-05 10:32:13 +0100" ; + schema1:url "https://workflowhub.eu/workflows/328/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8378 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-22T10:07:20Z" ; + schema1:dateModified "2023-01-16T13:59:42Z" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. 
With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/328?version=3" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_pmx_tutorial/python/workflow.py" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.286.3" ; + schema1:isBasedOn 
"https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_dna_helparms/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Structural DNA helical parameters tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/286/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 22900 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T08:48:00Z" ; + schema1:dateModified "2023-04-14T08:48:53Z" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/286?version=3" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Structural DNA helical parameters tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_dna_helparms/python/workflow.py" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for analysis of Molecular Pixelation assays" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1010?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/pixelator" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/pixelator" ; + schema1:sdDatePublished "2024-08-05 10:23:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1010/ro_crate?version=8" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12801 ; + schema1:creator ; + schema1:dateCreated "2024-08-01T03:02:58Z" ; + schema1:dateModified "2024-08-01T03:02:58Z" ; + schema1:description "Pipeline for analysis of Molecular Pixelation assays" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1010?version=7" ; + schema1:keywords "molecular-pixelation, pixelator, pixelgen-technologies, proteins, single-cell, single-cell-omics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/pixelator" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1010?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 
2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.819.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_amber_md_setup/docker" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Docker Amber Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:24:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/819/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 764 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-22T10:22:47Z" ; + schema1:dateModified "2024-05-22T13:48:33Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r 
+Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Docker Amber Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_amber_md_setup/docker/Dockerfile" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), 
funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.259.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_virtual_screening/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Protein-ligand Docking tutorial (Fpocket)" ; + schema1:sdDatePublished "2024-08-05 10:32:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/259/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 29165 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5518 ; + schema1:creator , + ; + schema1:dateCreated "2022-01-10T11:55:05Z" ; + schema1:dateModified "2023-06-06T14:56:34Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/259?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Protein-ligand Docking tutorial (Fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/259?version=1" ; + 
schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """To discover causal mutations of inherited diseases it’s common practice to do a trio analysis. In a trio analysis DNA is sequenced of both the patient and parents. Using this method, it’s possible to identify multiple inheritance patterns. Some examples of these patterns are autosomal recessive, autosomal dominant, and de-novo variants, which are represented in the figure below. To elaborate, the most left tree shows an autosomal dominant inhertitance pattern where the offspring inherits a faulty copy of the gene from one of the parents.\r +\r +To discover these mutations either whole exome sequencing (WES) or whole genome sequencing (WGS) can be used. With these technologies it is possible to uncover the DNA of the parents and offspring to find (shared) mutations in the DNA. These mutations can include insertions/deletions (indels), loss of heterozygosity (LOH), single nucleotide variants (SNVs), copy number variations (CNVs), and fusion genes.\r +\r +In this workflow we will also make use of the HTSGET protocol, which is a program to download our data securely and savely. This protocol has been implemented in the EGA Download Client Tool: toolshed.g2.bx.psu.edu/repos/iuc/ega_download_client/pyega3/4.0.0+galaxy0 tool, so we don’t have to leave Galaxy to retrieve our data.\r +\r +We will not start our analysis from scratch, since the main goal of this tutorial is to use the HTSGET protocol to download variant information from an online archive and to find the causative variant from those variants. 
If you want to learn how to do the analysis from scratch, using the raw reads, you can have a look at the Exome sequencing data analysis for diagnosing a genetic disease tutorial.""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.363.2" ; + schema1:isBasedOn "https://github.com/galaxyproject/training-material/blob/main/topics/variant-analysis/tutorials/trio-analysis/workflows/main_workflow.ga" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Trio Analysis" ; + schema1:sdDatePublished "2024-08-05 10:31:15 +0100" ; + schema1:url "https://workflowhub.eu/workflows/363/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 29328 ; + schema1:creator ; + schema1:dateCreated "2023-03-01T15:35:32Z" ; + schema1:dateModified "2023-09-05T08:11:49Z" ; + schema1:description """To discover causal mutations of inherited diseases it’s common practice to do a trio analysis. In a trio analysis DNA is sequenced of both the patient and parents. Using this method, it’s possible to identify multiple inheritance patterns. Some examples of these patterns are autosomal recessive, autosomal dominant, and de-novo variants, which are represented in the figure below. To elaborate, the most left tree shows an autosomal dominant inhertitance pattern where the offspring inherits a faulty copy of the gene from one of the parents.\r +\r +To discover these mutations either whole exome sequencing (WES) or whole genome sequencing (WGS) can be used. With these technologies it is possible to uncover the DNA of the parents and offspring to find (shared) mutations in the DNA. 
These mutations can include insertions/deletions (indels), loss of heterozygosity (LOH), single nucleotide variants (SNVs), copy number variations (CNVs), and fusion genes.\r +\r +In this workflow we will also make use of the HTSGET protocol, which is a program to download our data securely and savely. This protocol has been implemented in the EGA Download Client Tool: toolshed.g2.bx.psu.edu/repos/iuc/ega_download_client/pyega3/4.0.0+galaxy0 tool, so we don’t have to leave Galaxy to retrieve our data.\r +\r +We will not start our analysis from scratch, since the main goal of this tutorial is to use the HTSGET protocol to download variant information from an online archive and to find the causative variant from those variants. If you want to learn how to do the analysis from scratch, using the raw reads, you can have a look at the Exome sequencing data analysis for diagnosing a genetic disease tutorial.""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/363?version=1" ; + schema1:keywords "variant-analysis" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Trio Analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/galaxyproject/training-material/blob/main/topics/variant-analysis/tutorials/trio-analysis/workflows/main_workflow.ga" ; + schema1:version 2 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=22" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-08-05 10:23:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=22" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14671 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:40Z" ; + schema1:dateModified "2024-06-11T12:54:40Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=22" ; + schema1:version 22 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Genome-wide alternative splicing analysis v.2" ; + schema1:hasPart , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.482.1" ; + schema1:license "CC-BY-NC-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genome-wide alternative splicing analysis v.2" ; + schema1:sdDatePublished "2024-08-05 10:30:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/482/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 214419 . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 30436 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 115942 ; + schema1:creator ; + schema1:dateCreated "2023-05-25T22:01:31Z" ; + schema1:dateModified "2023-06-07T16:01:06Z" ; + schema1:description "Genome-wide alternative splicing analysis v.2" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/482?version=6" ; + schema1:keywords "Transcriptomics, Alternative splicing, isoform switching" ; + schema1:license "https://spdx.org/licenses/CC-BY-NC-4.0" ; + schema1:name "Genome-wide alternative splicing analysis v.2" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/482?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/978?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/demultiplex" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/demultiplex" ; + schema1:sdDatePublished "2024-08-05 10:24:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/978/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9518 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:47Z" ; + schema1:dateModified "2024-06-11T12:54:47Z" ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/978?version=7" ; + schema1:keywords "bases2fastq, bcl2fastq, demultiplexing, elementbiosciences, illumina" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/demultiplex" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/978?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "An example workflow to allow users to run the Specimen Data Refinery tools on data provided in an input CSV file." ; + schema1:identifier "https://workflowhub.eu/workflows/373?version=2" ; + schema1:license "CC-BY-4.0" ; + schema1:name "Research Object Crate for De novo digitisation" ; + schema1:sdDatePublished "2024-08-05 10:31:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/373/ro_crate?version=2" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Macromolecular Coarse-Grained Flexibility (FlexServ) tutorial using BioExcel Building Blocks (biobb)\r +\r +This tutorial aims to illustrate the process of generating protein conformational ensembles from 3D structures and analysing its molecular flexibility, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.557.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_flexserv/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Macromolecular Coarse-Grained Flexibility tutorial" ; + schema1:sdDatePublished "2024-08-05 10:28:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/557/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 56921 ; + schema1:creator , + ; + schema1:dateCreated "2023-08-11T08:32:23Z" ; + schema1:dateModified "2023-08-11T08:34:07Z" ; + schema1:description """# Macromolecular Coarse-Grained Flexibility (FlexServ) tutorial using BioExcel Building Blocks (biobb)\r +\r +This tutorial aims to illustrate the process of generating protein conformational ensembles from 3D structures and analysing its molecular flexibility, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Macromolecular Coarse-Grained Flexibility tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_flexserv/galaxy/biobb_wf_flexserv.ga" ; + schema1:version 1 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """From the R1 and R2 fastq files of a single samples, make a scRNAseq counts matrix, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. Produces a processed AnnData \r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/465?version=2" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq Single Sample Processing STARSolo" ; + schema1:sdDatePublished "2024-08-05 10:24:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/465/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 126308 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-06-22T06:36:33Z" ; + schema1:dateModified "2023-06-23T06:46:18Z" ; + schema1:description """From the R1 and R2 fastq files of a single samples, make a scRNAseq counts matrix, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. Produces a processed AnnData \r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/465?version=3" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "scRNAseq Single Sample Processing STARSolo" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/465?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-09-05T08:29:48.601016" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "parallel-accession-download/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.10" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-08-05 10:23:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4227 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:03Z" ; + schema1:dateModified "2024-06-11T12:55:03Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=5" ; + schema1:version 5 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """[![Cite with Zenodo](https://zenodo.org/badge/509096312.svg)](https://zenodo.org/doi/10.5281/zenodo.10047653)\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.10.1-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![Launch on Nextflow Tower](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Nextflow%20Tower-%234256e7)](https://tower.nf/launch?pipeline=https://github.com/sanger-tol/treeval)\r +\r +## Introduction\r +\r +**sanger-tol/treeval [1.1.0 - Ancient Aurora]** is a bioinformatics best-practice analysis pipeline for the generation of data supplemental to the curation of reference quality genomes. This pipeline has been written to generate flat files compatible with [JBrowse2](https://jbrowse.org/jb2/) as well as HiC maps for use in Juicebox, PretextView and HiGlass.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. 
Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +You can also set up and attempt to run the pipeline here: https://gitpod.io/#https://github.com/BGAcademy23/treeval-curation\r +This is a gitpod set up for BGA23 with a version of TreeVal, although for now gitpod will not run a nextflow pipeline die to issues with using singularity. We will be replacing this with an AWS instance soon.\r +\r +The treeval pipeline has a sister pipeline currently named [curationpretext](https://github.com/sanger-tol/curationpretext) which acts to regenerate the pretext maps and accessory files during genomic curation in order to confirm interventions. This pipeline is sufficiently different to the treeval implementation that it is written as it's own pipeline.\r +\r +1. Parse input yaml ( YAML_INPUT )\r +2. Generate my.genome file ( GENERATE_GENOME )\r +3. Generate insilico digests of the input assembly ( INSILICO_DIGEST )\r +4. Generate gene alignments with high quality data against the input assembly ( GENE_ALIGNMENT )\r +5. Generate a repeat density graph ( REPEAT_DENSITY )\r +6. Generate a gap track ( GAP_FINDER )\r +7. Generate a map of self complementary sequence ( SELFCOMP )\r +8. Generate syntenic alignments with a closely related high quality assembly ( SYNTENY )\r +9. Generate a coverage track using PacBio data ( LONGREAD_COVERAGE )\r +10. Generate HiC maps, pretext and higlass using HiC cram files ( HIC_MAPPING )\r +11. Generate a telomere track based on input motif ( TELO_FINDER )\r +12. Run Busco and convert results into bed format ( BUSCO_ANNOTATION )\r +13. Ancestral Busco linkage if available for clade ( BUSCO_ANNOTATION:ANCESTRAL_GENE )\r +14. Count KMERs with FastK and plot the spectra using MerquryFK ( KMER )\r +15. 
Generate a coverge track using KMER data ( KMER_READ_COVERAGE )\r +\r +## Usage\r +\r +> **Note**\r +> If you are new to Nextflow and nf-core, please refer to [this page](https://nf-co.re/docs/usage/installation) on how\r +> to set-up Nextflow. Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline)\r +> with `-profile test` before running the workflow on actual data.\r +\r +Currently, it is advised to run the pipeline with docker or singularity as a small number of major modules do not currently have a conda env associated with them.\r +\r +Now, you can run the pipeline using:\r +\r +```bash\r +# For the FULL pipeline\r +nextflow run main.nf -profile singularity --input treeval.yaml --outdir {OUTDIR}\r +\r +# For the RAPID subset\r +nextflow run main.nf -profile singularity --input treeval.yaml -entry RAPID --outdir {OUTDIR}\r +```\r +\r +An example treeval.yaml can be found [here](assets/local_testing/nxOscDF5033.yaml).\r +\r +Further documentation about the pipeline can be found in the following files: [usage](https://pipelines.tol.sanger.ac.uk/treeval/dev/usage), [parameters](https://pipelines.tol.sanger.ac.uk/treeval/dev/parameters) and [output](https://pipelines.tol.sanger.ac.uk/treeval/dev/output).\r +\r +> **Warning:**\r +> Please provide pipeline parameters via the CLI or Nextflow `-params-file` option. Custom config files including those\r +> provided by the `-c` Nextflow option can be used to provide any configuration _**except for parameters**_;\r +> see [docs](https://nf-co.re/usage/configuration#custom-configuration-files).\r +\r +## Credits\r +\r +sanger-tol/treeval has been written by Damon-Lee Pointon (@DLBPointon), Yumi Sims (@yumisims) and William Eagles (@weaglesBio).\r +\r +We thank the following people for their extensive assistance in the development of this pipeline:\r +\r +
    \r +
  • @gq1 - For building the infrastructure around TreeVal and helping with code review
  • \r +
  • @ksenia-krasheninnikova - For help with C code implementation and YAML parsing
  • \r +
  • @mcshane - For guidance on algorithms
  • \r +
  • @muffato - For code reviews and code support
  • \r +
  • @priyanka-surana - For help with the majority of code reviews and code support
  • \r +
\r +\r +## Contributions and Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +## Citations\r +\r +\r +\r +If you use sanger-tol/treeval for your analysis, please cite it using the following doi: [10.5281/zenodo.10047653](https://doi.org/10.5281/zenodo.10047653).\r +\r +### Tools\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +You can cite the `nf-core` publication as follows:\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/813?version=1" ; + schema1:isBasedOn "https://github.com/sanger-tol/treeval.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for sanger-tol/treeval v1.1.0 - Ancient Aurora" ; + schema1:sdDatePublished "2024-08-05 10:25:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/813/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2029 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-04-09T09:22:28Z" ; + schema1:dateModified "2024-04-09T09:22:28Z" ; + schema1:description """[![Cite with Zenodo](https://zenodo.org/badge/509096312.svg)](https://zenodo.org/doi/10.5281/zenodo.10047653)\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.10.1-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![Launch on Nextflow Tower](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Nextflow%20Tower-%234256e7)](https://tower.nf/launch?pipeline=https://github.com/sanger-tol/treeval)\r +\r +## Introduction\r +\r +**sanger-tol/treeval [1.1.0 - Ancient Aurora]** is a bioinformatics best-practice analysis pipeline for the generation of data supplemental to the curation of reference quality genomes. This pipeline has been written to generate flat files compatible with [JBrowse2](https://jbrowse.org/jb2/) as well as HiC maps for use in Juicebox, PretextView and HiGlass.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. 
The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +You can also set up and attempt to run the pipeline here: https://gitpod.io/#https://github.com/BGAcademy23/treeval-curation\r +This is a gitpod set up for BGA23 with a version of TreeVal, although for now gitpod will not run a nextflow pipeline die to issues with using singularity. We will be replacing this with an AWS instance soon.\r +\r +The treeval pipeline has a sister pipeline currently named [curationpretext](https://github.com/sanger-tol/curationpretext) which acts to regenerate the pretext maps and accessory files during genomic curation in order to confirm interventions. This pipeline is sufficiently different to the treeval implementation that it is written as it's own pipeline.\r +\r +1. Parse input yaml ( YAML_INPUT )\r +2. Generate my.genome file ( GENERATE_GENOME )\r +3. Generate insilico digests of the input assembly ( INSILICO_DIGEST )\r +4. Generate gene alignments with high quality data against the input assembly ( GENE_ALIGNMENT )\r +5. Generate a repeat density graph ( REPEAT_DENSITY )\r +6. Generate a gap track ( GAP_FINDER )\r +7. Generate a map of self complementary sequence ( SELFCOMP )\r +8. Generate syntenic alignments with a closely related high quality assembly ( SYNTENY )\r +9. Generate a coverage track using PacBio data ( LONGREAD_COVERAGE )\r +10. Generate HiC maps, pretext and higlass using HiC cram files ( HIC_MAPPING )\r +11. Generate a telomere track based on input motif ( TELO_FINDER )\r +12. Run Busco and convert results into bed format ( BUSCO_ANNOTATION )\r +13. 
Ancestral Busco linkage if available for clade ( BUSCO_ANNOTATION:ANCESTRAL_GENE )\r +14. Count KMERs with FastK and plot the spectra using MerquryFK ( KMER )\r +15. Generate a coverge track using KMER data ( KMER_READ_COVERAGE )\r +\r +## Usage\r +\r +> **Note**\r +> If you are new to Nextflow and nf-core, please refer to [this page](https://nf-co.re/docs/usage/installation) on how\r +> to set-up Nextflow. Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline)\r +> with `-profile test` before running the workflow on actual data.\r +\r +Currently, it is advised to run the pipeline with docker or singularity as a small number of major modules do not currently have a conda env associated with them.\r +\r +Now, you can run the pipeline using:\r +\r +```bash\r +# For the FULL pipeline\r +nextflow run main.nf -profile singularity --input treeval.yaml --outdir {OUTDIR}\r +\r +# For the RAPID subset\r +nextflow run main.nf -profile singularity --input treeval.yaml -entry RAPID --outdir {OUTDIR}\r +```\r +\r +An example treeval.yaml can be found [here](assets/local_testing/nxOscDF5033.yaml).\r +\r +Further documentation about the pipeline can be found in the following files: [usage](https://pipelines.tol.sanger.ac.uk/treeval/dev/usage), [parameters](https://pipelines.tol.sanger.ac.uk/treeval/dev/parameters) and [output](https://pipelines.tol.sanger.ac.uk/treeval/dev/output).\r +\r +> **Warning:**\r +> Please provide pipeline parameters via the CLI or Nextflow `-params-file` option. 
Custom config files including those\r +> provided by the `-c` Nextflow option can be used to provide any configuration _**except for parameters**_;\r +> see [docs](https://nf-co.re/usage/configuration#custom-configuration-files).\r +\r +## Credits\r +\r +sanger-tol/treeval has been written by Damon-Lee Pointon (@DLBPointon), Yumi Sims (@yumisims) and William Eagles (@weaglesBio).\r +\r +We thank the following people for their extensive assistance in the development of this pipeline:\r +\r +
    \r +
  • @gq1 - For building the infrastructure around TreeVal and helping with code review
  • \r +
  • @ksenia-krasheninnikova - For help with C code implementation and YAML parsing
  • \r +
  • @mcshane - For guidance on algorithms
  • \r +
  • @muffato - For code reviews and code support
  • \r +
  • @priyanka-surana - For help with the majority of code reviews and code support
  • \r +
\r +\r +## Contributions and Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +## Citations\r +\r +\r +\r +If you use sanger-tol/treeval for your analysis, please cite it using the following doi: [10.5281/zenodo.10047653](https://doi.org/10.5281/zenodo.10047653).\r +\r +### Tools\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +You can cite the `nf-core` publication as follows:\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:keywords "Bioinformatics, Genomics, genome_assembly" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "sanger-tol/treeval v1.1.0 - Ancient Aurora" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/813?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/303?version=1" ; + schema1:isBasedOn "https://github.com/DimitraPanou/scRNAseq-cwl.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for seurat scRNA-seq" ; + schema1:sdDatePublished "2024-08-05 10:32:12 +0100" ; + schema1:url "https://workflowhub.eu/workflows/303/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 84591 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2798 ; + schema1:dateCreated "2022-04-04T04:43:31Z" ; + schema1:dateModified "2022-04-14T13:26:32Z" ; + schema1:description "" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/303?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "seurat scRNA-seq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/DimitraPanou/scRNAseq-cwl/blob/master/steps.cwl" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 31230 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 3 ; + schema1:dateModified "2024-04-26T13:10:25+00:00" ; + schema1:name "energy.selection" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3 ; + schema1:dateModified "2024-04-26T13:10:25+00:00" ; + schema1:name "genion.group" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1044 ; + schema1:dateModified "2024-04-26T13:10:25+00:00" ; + schema1:name "ions.mdp" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 971 ; + schema1:dateModified "2024-04-26T13:10:25+00:00" ; + schema1:name "minim.mdp" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 109917 ; + schema1:dateModified "2024-04-26T13:10:27+00:00" ; + schema1:name "1aki.pdb" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2998215 ; + schema1:dateModified "2024-04-26T13:10:27+00:00" ; + schema1:name "1u3m.pdb" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2918025 ; + schema1:dateModified "2024-04-26T13:10:27+00:00" ; + schema1:name "1xyw.pdb" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88246 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1aki.gro" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577594 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1aki.top" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1280044 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1aki_em.tpr" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1aki_em_energy.edr" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1279304 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1aki_ions.tpr" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88246 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1aki_newbox.gro" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 7939 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1aki_potential.png" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2181 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1aki_potential.xvg" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1525186 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1aki_solv.gro" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1524475 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1aki_solv_ions.gro" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 82046 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1u3m.gro" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 537928 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1u3m.top" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1416272 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1u3m_em.tpr" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1u3m_em_energy.edr" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1415196 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1u3m_ions.tpr" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 82046 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1u3m_newbox.gro" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 7471 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1u3m_potential.png" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2167 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1u3m_potential.xvg" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1837046 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1u3m_solv.gro" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1836965 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1u3m_solv_ions.gro" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 80112 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1xyw.gro" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 524004 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1xyw.top" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1408872 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1xyw_em.tpr" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1xyw_em_energy.edr" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1407844 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1xyw_ions.tpr" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 80112 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1xyw_newbox.gro" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8523 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1xyw_potential.png" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2142 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1xyw_potential.xvg" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1845237 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1xyw_solv.gro" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1845066 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "1xyw_solv_ions.gro" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4308 ; + schema1:dateModified "2024-07-18T11:10:46+00:00" ; + schema1:name "POTENTIAL_RESULTS.png" ; + schema1:sdDatePublished "2024-07-18T11:10:51+00:00" . + + a schema1:Dataset ; + schema1:datePublished "2023-01-16T18:45:24.648437" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "rnaseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.3" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# GSC (Genotype Sparse Compression)\r +Genotype Sparse Compression (GSC) is an advanced tool for lossless compression of VCF files, designed to efficiently store and manage VCF files in a compressed format. 
It accepts VCF/BCF files as input and utilizes advanced compression techniques to significantly reduce storage requirements while ensuring fast query capabilities. In our study, we successfully compressed the VCF files from the 1000 Genomes Project (1000Gpip3), consisting of 2504 samples and 80 million variants, from an uncompressed VCF file of 803.70GB to approximately 1GB.\r +\r +## Requirements \r +### GSC requires:\r +\r +- **Compiler Compatibility**: GSC requires a modern C++14-ready compiler, such as:\r + - g++ version 10.1.0 or higher\r +\r +- **Build System**: Make build system is necessary for compiling GSC.\r +\r +- **Operating System**: GSC supports 64-bit operating systems, including:\r + - Linux (Ubuntu 18.04)\r + \r +## Installation\r +To download, build and install GSC use the following commands.\r +```bash\r +git clone https://github.com/luo-xiaolong/GSC.git\r +cd GSC\r +make\r +```\r +To clean the GSC build use:\r +```bash\r +make clean\r +```\r +## Usage\r +```bash\r +Usage: gsc [option] [arguments] \r +Available options: \r + compress - compress VCF/BCF file\r + decompress - query and decompress to VCF/BCF file\r +```\r +- Compress the input VCF/BCF file\r +```bash\r +Usage of gsc compress:\r +\r + gsc compress [options] [--in [in_file]] [--out [out_file]]\r +\r +Where:\r +\r + [options] Optional flags and parameters for compression.\r + -i, --in [in_file] Specify the input file (default: VCF or VCF.GZ). If omitted, input is taken from standard input (stdin).\r + -o, --out [out_file] Specify the output file. 
If omitted, output is sent to standard output (stdout).\r +\r +Options:\r +\r + -M, --mode_lossly Choose lossy compression mode (lossless by default).\r + -b, --bcf Input is a BCF file (default: VCF or VCF.GZ).\r + -p, --ploidy [X] Set ploidy of samples in input VCF to [X] (default: 2).\r + -t, --threads [X] Set number of threads to [X] (default: 1).\r + -d, --depth [X] Set maximum replication depth to [X] (default: 100, 0 means no matches).\r + -m, --merge [X] Specify files to merge, separated by commas (e.g., -m chr1.vcf,chr2.vcf), or '@' followed by a file containing a list of VCF files (e.g., -m @file_with_IDs.txt). By default, all VCF files are compressed.\r +```\r +- Decompress / Query\r +```bash\r +Usage of gsc decompress and query:\r +\r + gsc decompress [options] --in [in_file] --out [out_file]\r +\r +Where:\r + [options] Optional flags and parameters for compression.\r + -i, --in [in_file] Specify the input file . If omitted, input is taken from standard input (stdin).\r + -o, --out [out_file] Specify the output file (default: VCF). If omitted, output is sent to standard output (stdout).\r +\r +Options:\r +\r + General Options:\r +\r + -M, --mode_lossly Choose lossy compression mode (default: lossless).\r + -b, --bcf Output a BCF file (default: VCF).\r +\r + Filter options (applicable in lossy compression mode only): \r +\r + -r, --range [X] Specify range in format [start],[end] (e.g., -r 4999756,4999852).\r + -s, --samples [X] Samples separated by comms (e.g., -s HG03861,NA18639) OR '@' sign followed by the name of a file with sample name(s) separated by whitespaces (for exaple: -s @file_with_IDs.txt). By default all samples/individuals are decompressed. 
\r + --header-only Output only the header of the VCF/BCF.\r + --no-header Output without the VCF/BCF header (only genotypes).\r + -G, --no-genotype Don't output sample genotypes (only #CHROM, POS, ID, REF, ALT, QUAL, FILTER, and INFO columns).\r + -C, --out-ac-an Write AC/AN to the INFO field.\r + -S, --split Split output into multiple files (one per chromosome).\r + -I, [ID=^] Include only sites with specified ID (e.g., -I "ID=rs6040355").\r + --minAC [X] Include only sites with AC <= X.\r + --maxAC [X] Include only sites with AC >= X.\r + --minAF [X] Include only sites with AF >= X (X: 0 to 1).\r + --maxAF [X] Include only sites with AF <= X (X: 0 to 1).\r + --min-qual [X] Include only sites with QUAL >= X.\r + --max-qual [X] Include only sites with QUAL <= X.\r +```\r +## Example\r +There is an example VCF/VCF.gz/BCF file, `toy.vcf`/`toy.vcf.gz`/`toy.bcf`, in the toy folder, which can be used to test GSC\r +### compress\r +\r +#### lossless compression:\r +The input file format is VCF. You can compress a VCF file in lossless mode using one of the following methods:\r +1. **Explicit input and output file parameters**:\r + \r + Use the `--in` option to specify the input VCF file and the `--out` option for the output compressed file.\r + ```bash\r + ./gsc compress --in toy/toy.vcf --out toy/toy_lossless.gsc\r + ```\r +2. **Input file parameter and output redirection**:\r + \r + Use the `--out` option for the output compressed file and redirect the input VCF file into the command.\r + ```bash\r + ./gsc compress --out toy/toy_lossless.gsc < toy/toy.vcf\r + ```\r +3. **Output file redirection and input file parameter**:\r + \r + Specify the input VCF file with the `--in` option and redirect the output to create the compressed file.\r + ```bash\r + ./gsc compress --in toy/toy.vcf > toy/toy_lossless.gsc\r + ```\r +4. **Input and output redirection**:\r + \r + Use shell redirection for both input and output. 
This method does not use the `--in` and `--out` options.\r + ```bash\r + ./gsc compress < toy/toy.vcf > toy/toy_lossless.gsc\r + ```\r +This will create a file:\r +* `toy_lossless.gsc` - The compressed archive of the entire VCF file.\r +\r +#### lossy compression:\r +\r +The input file format is VCF. The commands are similar to those used for lossless compression, with the addition of the `-M` parameter to enable lossy compression.\r +\r + For example, to compress a VCF file in lossy mode:\r +\r + ```bash\r + ./gsc compress -M --in toy/toy.vcf --out toy/toy_lossy.gsc\r + ```\r + Or using redirection:\r + ```bash\r + ./gsc compress -M --out toy/toy_lossy.gsc < toy/toy.vcf\r + ``` \r + This will create a file:\r + * `toy_lossy.gsc` - The compressed archive of the entire VCF file is implemented with lossy compression. It only retains the 'GT' subfield within the INFO and FORMAT fields, and excludes all other subfields..\r + \r +### Decompress (The commands are similar to those used for compression)\r +lossless decompression:\r +\r +To decompress the compressed toy_lossless.gsc into a VCF file named toy_lossless.vcf:\r +```bash\r +./gsc decompress --in toy/toy_lossless.gsc --out toy/toy_lossless.vcf\r +```\r +lossy decompression:\r +\r +To decompress the compressed toy_lossy.gsc into a VCF file named toy_lossy.vcf:\r +```bash\r +./gsc decompress -M --in toy/toy_lossy.gsc --out toy/toy_lossy.vcf\r +```\r +## Dockerfile\r +Dockerfile can be used to build a Docker image with all necessary dependencies and GSC compressor. The image is based on Ubuntu 18.04. To build a Docker image and run a Docker container, you need Docker Desktop (https://www.docker.com). 
Example commands (run it within a directory with Dockerfile):\r +```bash\r +docker build -t gsc_project .\r +docker run -it gsc_project\r +```\r +## Citations\r +- **bio.tools ID**: `gsc_genotype_sparse_compression`\r +- **Research Resource Identifier (RRID)**: `SCR_025071`\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/885?version=1" ; + schema1:isBasedOn "https://github.com/luo-xiaolong/GSC.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for GSC (Genotype Sparse Compression)" ; + schema1:sdDatePublished "2024-08-05 10:24:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/885/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7413 ; + schema1:dateCreated "2024-05-17T16:51:00Z" ; + schema1:dateModified "2024-05-17T16:51:00Z" ; + schema1:description """# GSC (Genotype Sparse Compression)\r +Genotype Sparse Compression (GSC) is an advanced tool for lossless compression of VCF files, designed to efficiently store and manage VCF files in a compressed format. It accepts VCF/BCF files as input and utilizes advanced compression techniques to significantly reduce storage requirements while ensuring fast query capabilities. 
In our study, we successfully compressed the VCF files from the 1000 Genomes Project (1000Gpip3), consisting of 2504 samples and 80 million variants, from an uncompressed VCF file of 803.70GB to approximately 1GB.\r +\r +## Requirements \r +### GSC requires:\r +\r +- **Compiler Compatibility**: GSC requires a modern C++14-ready compiler, such as:\r + - g++ version 10.1.0 or higher\r +\r +- **Build System**: Make build system is necessary for compiling GSC.\r +\r +- **Operating System**: GSC supports 64-bit operating systems, including:\r + - Linux (Ubuntu 18.04)\r + \r +## Installation\r +To download, build and install GSC use the following commands.\r +```bash\r +git clone https://github.com/luo-xiaolong/GSC.git\r +cd GSC\r +make\r +```\r +To clean the GSC build use:\r +```bash\r +make clean\r +```\r +## Usage\r +```bash\r +Usage: gsc [option] [arguments] \r +Available options: \r + compress - compress VCF/BCF file\r + decompress - query and decompress to VCF/BCF file\r +```\r +- Compress the input VCF/BCF file\r +```bash\r +Usage of gsc compress:\r +\r + gsc compress [options] [--in [in_file]] [--out [out_file]]\r +\r +Where:\r +\r + [options] Optional flags and parameters for compression.\r + -i, --in [in_file] Specify the input file (default: VCF or VCF.GZ). If omitted, input is taken from standard input (stdin).\r + -o, --out [out_file] Specify the output file. 
If omitted, output is sent to standard output (stdout).\r +\r +Options:\r +\r + -M, --mode_lossly Choose lossy compression mode (lossless by default).\r + -b, --bcf Input is a BCF file (default: VCF or VCF.GZ).\r + -p, --ploidy [X] Set ploidy of samples in input VCF to [X] (default: 2).\r + -t, --threads [X] Set number of threads to [X] (default: 1).\r + -d, --depth [X] Set maximum replication depth to [X] (default: 100, 0 means no matches).\r + -m, --merge [X] Specify files to merge, separated by commas (e.g., -m chr1.vcf,chr2.vcf), or '@' followed by a file containing a list of VCF files (e.g., -m @file_with_IDs.txt). By default, all VCF files are compressed.\r +```\r +- Decompress / Query\r +```bash\r +Usage of gsc decompress and query:\r +\r + gsc decompress [options] --in [in_file] --out [out_file]\r +\r +Where:\r + [options] Optional flags and parameters for compression.\r + -i, --in [in_file] Specify the input file . If omitted, input is taken from standard input (stdin).\r + -o, --out [out_file] Specify the output file (default: VCF). If omitted, output is sent to standard output (stdout).\r +\r +Options:\r +\r + General Options:\r +\r + -M, --mode_lossly Choose lossy compression mode (default: lossless).\r + -b, --bcf Output a BCF file (default: VCF).\r +\r + Filter options (applicable in lossy compression mode only): \r +\r + -r, --range [X] Specify range in format [start],[end] (e.g., -r 4999756,4999852).\r + -s, --samples [X] Samples separated by comms (e.g., -s HG03861,NA18639) OR '@' sign followed by the name of a file with sample name(s) separated by whitespaces (for exaple: -s @file_with_IDs.txt). By default all samples/individuals are decompressed. 
\r + --header-only Output only the header of the VCF/BCF.\r + --no-header Output without the VCF/BCF header (only genotypes).\r + -G, --no-genotype Don't output sample genotypes (only #CHROM, POS, ID, REF, ALT, QUAL, FILTER, and INFO columns).\r + -C, --out-ac-an Write AC/AN to the INFO field.\r + -S, --split Split output into multiple files (one per chromosome).\r + -I, [ID=^] Include only sites with specified ID (e.g., -I "ID=rs6040355").\r + --minAC [X] Include only sites with AC <= X.\r + --maxAC [X] Include only sites with AC >= X.\r + --minAF [X] Include only sites with AF >= X (X: 0 to 1).\r + --maxAF [X] Include only sites with AF <= X (X: 0 to 1).\r + --min-qual [X] Include only sites with QUAL >= X.\r + --max-qual [X] Include only sites with QUAL <= X.\r +```\r +## Example\r +There is an example VCF/VCF.gz/BCF file, `toy.vcf`/`toy.vcf.gz`/`toy.bcf`, in the toy folder, which can be used to test GSC\r +### compress\r +\r +#### lossless compression:\r +The input file format is VCF. You can compress a VCF file in lossless mode using one of the following methods:\r +1. **Explicit input and output file parameters**:\r + \r + Use the `--in` option to specify the input VCF file and the `--out` option for the output compressed file.\r + ```bash\r + ./gsc compress --in toy/toy.vcf --out toy/toy_lossless.gsc\r + ```\r +2. **Input file parameter and output redirection**:\r + \r + Use the `--out` option for the output compressed file and redirect the input VCF file into the command.\r + ```bash\r + ./gsc compress --out toy/toy_lossless.gsc < toy/toy.vcf\r + ```\r +3. **Output file redirection and input file parameter**:\r + \r + Specify the input VCF file with the `--in` option and redirect the output to create the compressed file.\r + ```bash\r + ./gsc compress --in toy/toy.vcf > toy/toy_lossless.gsc\r + ```\r +4. **Input and output redirection**:\r + \r + Use shell redirection for both input and output. 
This method does not use the `--in` and `--out` options.\r + ```bash\r + ./gsc compress < toy/toy.vcf > toy/toy_lossless.gsc\r + ```\r +This will create a file:\r +* `toy_lossless.gsc` - The compressed archive of the entire VCF file.\r +\r +#### lossy compression:\r +\r +The input file format is VCF. The commands are similar to those used for lossless compression, with the addition of the `-M` parameter to enable lossy compression.\r +\r + For example, to compress a VCF file in lossy mode:\r +\r + ```bash\r + ./gsc compress -M --in toy/toy.vcf --out toy/toy_lossy.gsc\r + ```\r + Or using redirection:\r + ```bash\r + ./gsc compress -M --out toy/toy_lossy.gsc < toy/toy.vcf\r + ``` \r + This will create a file:\r + * `toy_lossy.gsc` - The compressed archive of the entire VCF file is implemented with lossy compression. It only retains the 'GT' subfield within the INFO and FORMAT fields, and excludes all other subfields..\r + \r +### Decompress (The commands are similar to those used for compression)\r +lossless decompression:\r +\r +To decompress the compressed toy_lossless.gsc into a VCF file named toy_lossless.vcf:\r +```bash\r +./gsc decompress --in toy/toy_lossless.gsc --out toy/toy_lossless.vcf\r +```\r +lossy decompression:\r +\r +To decompress the compressed toy_lossy.gsc into a VCF file named toy_lossy.vcf:\r +```bash\r +./gsc decompress -M --in toy/toy_lossy.gsc --out toy/toy_lossy.vcf\r +```\r +## Dockerfile\r +Dockerfile can be used to build a Docker image with all necessary dependencies and GSC compressor. The image is based on Ubuntu 18.04. To build a Docker image and run a Docker container, you need Docker Desktop (https://www.docker.com). 
Example commands (run it within a directory with Dockerfile):\r +```bash\r +docker build -t gsc_project .\r +docker run -it gsc_project\r +```\r +## Citations\r +- **bio.tools ID**: `gsc_genotype_sparse_compression`\r +- **Research Resource Identifier (RRID)**: `SCR_025071`\r +""" ; + schema1:keywords "Bioinformatics, Genomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "GSC (Genotype Sparse Compression)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/885?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/963?version=14" ; + schema1:isBasedOn "https://github.com/nf-core/airrflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/airrflow" ; + schema1:sdDatePublished "2024-08-05 10:24:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/963/ro_crate?version=14" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14400 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:37Z" ; + schema1:dateModified "2024-06-11T12:54:37Z" ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/963?version=13" ; + schema1:keywords "airr, b-cell, immcantation, immunorepertoire, repseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/airrflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/963?version=14" ; + schema1:version 14 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Structural and functional genome annotation with Funannotate" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/754?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genome annotation with Funannotate" ; + schema1:sdDatePublished "2024-08-05 10:25:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/754/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 46764 ; + schema1:creator ; + schema1:dateCreated "2024-02-15T11:39:50Z" ; + schema1:dateModified "2024-02-15T11:39:50Z" ; + schema1:description "Structural and functional genome annotation with Funannotate" ; + schema1:isPartOf ; + schema1:keywords "genome-annotation" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Genome annotation with Funannotate" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/754?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1023?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/smrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/smrnaseq" ; + schema1:sdDatePublished "2024-08-05 10:23:12 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1023/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10530 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:18Z" ; + schema1:dateModified "2024-06-11T12:55:18Z" ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1023?version=10" ; + schema1:keywords "small-rna, smrna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/smrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1023?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Summary\r +\r +This tutorial aims to illustrate the process of setting up a simulation system containing a protein, step by step, using the BioExcel Building Blocks library (biobb). The particular example used is the Lysozyme protein (PDB code 1AKI).\r +\r +Workflow engine is a jupyter notebook. It can be run in binder, following the link given, or locally. Auxiliar libraries used are: nb\\_conda_kernels, nglview, ipywidgets, plotly, and simpletraj. Environment can be setup using the included environment.yml file.\r +\r +\r +# Parameters\r +\r +## Inputs\r +\r +Parameters needed to configure the workflow:\r +\r +* **pdbCode**: PDB code of the protein-ligand complex structure (e.g. 
1AKI)\r +\r +## Outputs\r +\r +Output files generated (named according to the input parameters given above):\r +\r +* **output\\_md\\_gro**: final structure of the MD setup protocol\r +\r +* **output\\_md\\_trr**: final trajectory of the MD setup protocol\r +\r +* **output\\_md\\_cpt**: final checkpoint file\r +\r +* **output\\_gppmd\\_tpr**: final tpr file\r +\r +* **output\\_genion\\_top\\_zip**: final topology of the MD system""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.120.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Protein MD Setup tutorial using BioExcel Building Blocks (biobb) (jupyter notebook)" ; + schema1:sdDatePublished "2024-08-05 10:31:04 +0100" ; + schema1:url "https://workflowhub.eu/workflows/120/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 53057 ; + schema1:creator , + ; + schema1:dateCreated "2021-05-07T13:51:35Z" ; + schema1:dateModified "2021-05-13T08:14:02Z" ; + schema1:description """# Summary\r +\r +This tutorial aims to illustrate the process of setting up a simulation system containing a protein, step by step, using the BioExcel Building Blocks library (biobb). The particular example used is the Lysozyme protein (PDB code 1AKI).\r +\r +Workflow engine is a jupyter notebook. It can be run in binder, following the link given, or locally. Auxiliar libraries used are: nb\\_conda_kernels, nglview, ipywidgets, plotly, and simpletraj. Environment can be setup using the included environment.yml file.\r +\r +\r +# Parameters\r +\r +## Inputs\r +\r +Parameters needed to configure the workflow:\r +\r +* **pdbCode**: PDB code of the protein-ligand complex structure (e.g. 
1AKI)\r +\r +## Outputs\r +\r +Output files generated (named according to the input parameters given above):\r +\r +* **output\\_md\\_gro**: final structure of the MD setup protocol\r +\r +* **output\\_md\\_trr**: final trajectory of the MD setup protocol\r +\r +* **output\\_md\\_cpt**: final checkpoint file\r +\r +* **output\\_gppmd\\_tpr**: final tpr file\r +\r +* **output\\_genion\\_top\\_zip**: final topology of the MD system""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/120?version=5" ; + schema1:isPartOf , + , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Protein MD Setup tutorial using BioExcel Building Blocks (biobb) (jupyter notebook)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/120?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "NDVI data with OpenEO to time series visualisation with HoloViz" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/759?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Visualizing NDVI time-series data with HoloViz" ; + schema1:sdDatePublished "2024-08-05 10:25:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/759/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3065 ; + schema1:creator ; + schema1:dateCreated "2024-02-15T11:59:36Z" ; + schema1:dateModified "2024-02-15T13:42:10Z" ; + schema1:description "NDVI data with OpenEO to time series visualisation with HoloViz" ; + schema1:isPartOf ; + schema1:keywords "Ecology" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Visualizing NDVI time-series data with HoloViz" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/759?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Abstract CWL Automatically generated from the Galaxy workflow file: GTN 'Pangeo 101 for everyone - Introduction to Xarray'.\r +\r +In this tutorial, we analyze particle matter < 2.5 μm/m3 data from Copernicus Atmosphere Monitoring Service to understand Xarray Galaxy Tools:\r +- Understand how an Xarray dataset is organized;\r +- Get metadata from Xarray dataset such as variable names, units, coordinates (latitude, longitude, level), etc;\r +- Plot an Xarray dataset on a geographical map and learn to customize it;\r +- Select/Subset an Xarray dataset from coordinates values such as time selection or a subset over a geographical area;\r +- Mask an Xarray dataset with a Where statement, for instance to only see PM2.5 > 30 μm/m and highlight on a map regions with "high" values;\r +- Convert an Xarray dataset to Tabular data (pandas dataframe);\r +- Plot tabular data to visualize the forecast PM2.5 over a single point (here Naples) using a scatterplot and/or climate stripes.""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/252?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name 
"Research Object Crate for Pangeo 101 for everyone - introduction to Xarray" ; + schema1:sdDatePublished "2024-08-05 10:32:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/252/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 27957 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 124667 ; + schema1:dateCreated "2021-12-29T07:57:46Z" ; + schema1:dateModified "2023-01-16T13:56:30Z" ; + schema1:description """Abstract CWL Automatically generated from the Galaxy workflow file: GTN 'Pangeo 101 for everyone - Introduction to Xarray'.\r +\r +In this tutorial, we analyze particle matter < 2.5 μm/m3 data from Copernicus Atmosphere Monitoring Service to understand Xarray Galaxy Tools:\r +- Understand how an Xarray dataset is organized;\r +- Get metadata from Xarray dataset such as variable names, units, coordinates (latitude, longitude, level), etc;\r +- Plot an Xarray dataset on a geographical map and learn to customize it;\r +- Select/Subset an Xarray dataset from coordinates values such as time selection or a subset over a geographical area;\r +- Mask an Xarray dataset with a Where statement, for instance to only see PM2.5 > 30 μm/m and highlight on a map regions with "high" values;\r +- Convert an Xarray dataset to Tabular data (pandas dataframe);\r +- Plot tabular data to visualize the forecast PM2.5 over a single point (here Naples) using a scatterplot and/or climate stripes.""" ; + schema1:image ; + schema1:keywords "GTN, Climate, copernicus, pangeo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Pangeo 101 for everyone - introduction to Xarray" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/252?version=1" ; + 
schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 101858 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=19" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-08-05 10:23:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=19" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13111 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:50Z" ; + schema1:dateModified "2024-06-11T12:54:50Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=19" ; + schema1:version 19 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=29" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-08-05 10:24:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=29" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13588 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-07-02T03:02:47Z" ; + schema1:dateModified "2024-07-02T03:02:47Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=29" ; + schema1:version 29 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.830.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Docker Protein-ligand Docking tutorial (Fpocket)" ; + schema1:sdDatePublished "2024-08-05 
10:24:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/830/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 968 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-22T11:19:34Z" ; + schema1:dateModified "2024-05-22T13:44:48Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in 
Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Docker Protein-ligand Docking tutorial (Fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_virtual_screening/docker/Dockerfile" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Alternative splicing analysis using RNA-seq." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1018?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/rnasplice" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnasplice" ; + schema1:sdDatePublished "2024-08-05 10:23:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1018/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14104 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:14Z" ; + schema1:dateModified "2024-06-11T12:55:14Z" ; + schema1:description "Alternative splicing analysis using RNA-seq." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1018?version=4" ; + schema1:keywords "alternative-splicing, rna, rna-seq, splicing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnasplice" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1018?version=3" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# COVID-19 Multiscale Modelling of the Virus and Patients’ Tissue Workflow\r +\r +## Table of Contents\r +\r +- [COVID-19 Multiscale Modelling of the Virus and Patients’ Tissue Workflow](#covid-19-multiscale-modelling-of-the-virus-and-patients-tissue-workflow)\r + - [Table of Contents](#table-of-contents)\r + - [Description](#description)\r + - [Contents](#contents)\r + - [Building Blocks](#building-blocks)\r + - [Workflows](#workflows)\r + - [Resources](#resources)\r + - [Tests](#tests)\r + - [Instructions](#instructions)\r + - [Local machine](#local-machine)\r + - [Requirements](#requirements)\r + - [Usage steps](#usage-steps)\r + - [MareNostrum 4](#marenostrum-4)\r + - [Requirements in MN4](#requirements-in-mn4)\r + - [Usage steps in MN4](#usage-steps-in-mn4)\r + - [Mahti or Puhti](#mahti-or-puhti)\r + - [Requirements](#requirements)\r + - [Steps](#steps)\r + - [License](#license)\r + - [Contact](#contact)\r +\r +## Description\r +\r +Uses multiscale simulations to predict patient-specific SARS‑CoV‑2 severity subtypes\r +(moderate, severe or control), using single-cell RNA-Seq data, MaBoSS and PhysiBoSS.\r +Boolean models are used to determine the behaviour of individual agents as a function\r +of extracellular conditions and the concentration of different substrates, including\r +the number of virions. Predictions of severity subtypes are based on a meta-analysis of\r +personalised model outputs simulating cellular apoptosis regulation in epithelial cells\r +infected by SARS‑CoV‑2.\r +\r +The workflow uses the following building blocks, described in order of execution:\r +\r +1. High-throughput mutant analysis\r +2. Single-cell processing\r +3. Personalise patient\r +4. PhysiBoSS\r +5. 
Analysis of all simulations\r +\r +For details on individual workflow steps, see the user documentation for each building block.\r +\r +[`GitHub repository`]()\r +\r +\r +## Contents\r +\r +### Building Blocks\r +\r +The ``BuildingBlocks`` folder contains the script to install the\r +Building Blocks used in the COVID-19 Workflow.\r +\r +### Workflows\r +\r +The ``Workflow`` folder contains the workflows implementations.\r +\r +Currently contains the implementation using PyCOMPSs and Snakemake (in progress).\r +\r +### Resources\r +\r +The ``Resources`` folder contains dataset files.\r +\r +### Tests\r +\r +The ``Tests`` folder contains the scripts that run each Building Block\r +used in the workflow for the given small dataset.\r +They can be executed individually for testing purposes.\r +\r +## Instructions\r +\r +### Local machine\r +\r +This section explains the requirements and usage for the COVID19 Workflow in a laptop or desktop computer.\r +\r +#### Requirements\r +\r +- [`permedcoe`](https://github.com/PerMedCoE/permedcoe) package\r +- [PyCOMPSs](https://pycompss.readthedocs.io/en/stable/Sections/00_Quickstart.html) / [Snakemake](https://snakemake.readthedocs.io/en/stable/)\r +- [Singularity](https://sylabs.io/guides/3.0/user-guide/installation.html)\r +\r +#### Usage steps\r +\r +1. Clone this repository:\r +\r + ```bash\r + git clone https://github.com/PerMedCoE/covid-19-workflow.git\r + ```\r +\r +2. Install the Building Blocks required for the COVID19 Workflow:\r +\r + ```bash\r + covid-19-workflow/BuildingBlocks/./install_BBs.sh\r + ```\r +\r +3. 
Get the required Building Block images from the project [B2DROP](https://b2drop.bsc.es/index.php/f/444350):\r +\r + - Required images:\r + - MaBoSS.singularity\r + - meta_analysis.singularity\r + - PhysiCell-COVID19.singularity\r + - single_cell.singularity\r +\r + The path where these files are stored **MUST be exported in the `PERMEDCOE_IMAGES`** environment variable.\r +\r + > :warning: **TIP**: These containers can be built manually as follows (be patient since some of them may take some time):\r + 1. Clone the `BuildingBlocks` repository\r + ```bash\r + git clone https://github.com/PerMedCoE/BuildingBlocks.git\r + ```\r + 2. Build the required Building Block images\r + ```bash\r + cd BuildingBlocks/Resources/images\r + sudo singularity build MaBoSS.sif MaBoSS.singularity\r + sudo singularity build meta_analysis.sif meta_analysis.singularity\r + sudo singularity build PhysiCell-COVID19.sif PhysiCell-COVID19.singularity\r + sudo singularity build single_cell.sif single_cell.singularity\r + cd ../../..\r + ```\r +\r +**If using PyCOMPSs in local PC** (make sure that PyCOMPSs in installed):\r +\r +4. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflows/PyCOMPSs\r + ```\r +\r +5. Execute `./run.sh`\r +\r +**If using Snakemake in local PC** (make sure that SnakeMake is installed):\r +\r +4. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflows/SnakeMake\r + ```\r +\r +5. Execute `./run.sh`\r + > **TIP**: If you want to run the workflow with a different dataset, please update the `run.sh` script setting the `dataset` variable to the new dataset folder and their file names.\r +\r +\r +### MareNostrum 4\r +\r +This section explains the requirements and usage for the COVID19 Workflow in the MareNostrum 4 supercomputer.\r +\r +#### Requirements in MN4\r +\r +- Access to MN4\r +\r +All Building Blocks are already installed in MN4, and the COVID19 Workflow available.\r +\r +#### Usage steps in MN4\r +\r +1. 
Load the `COMPSs`, `Singularity` and `permedcoe` modules\r +\r + ```bash\r + export COMPSS_PYTHON_VERSION=3\r + module load COMPSs/3.1\r + module load singularity/3.5.2\r + module use /apps/modules/modulefiles/tools/COMPSs/libraries\r + module load permedcoe\r + ```\r +\r + > **TIP**: Include the loading into your `${HOME}/.bashrc` file to load it automatically on the session start.\r +\r + This commands will load COMPSs and the permedcoe package which provides all necessary dependencies, as well as the path to the singularity container images (`PERMEDCOE_IMAGES` environment variable) and testing dataset (`COVID19WORKFLOW_DATASET` environment variable).\r +\r +2. Get a copy of the pilot workflow into your desired folder\r +\r + ```bash\r + mkdir desired_folder\r + cd desired_folder\r + get_covid19workflow\r + ```\r +\r +3. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflow/PyCOMPSs\r + ```\r +\r +4. Execute `./launch.sh`\r +\r + This command will launch a job into the job queuing system (SLURM) requesting 2 nodes (one node acting half master and half worker, and other full worker node) for 20 minutes, and is prepared to use the singularity images that are already deployed in MN4 (located into the `PERMEDCOE_IMAGES` environment variable). It uses the dataset located into `../../Resources/data` folder.\r +\r + > :warning: **TIP**: If you want to run the workflow with a different dataset, please edit the `launch.sh` script and define the appropriate dataset path.\r +\r + After the execution, a `results` folder will be available with with COVID19 Workflow results.\r +\r +### Mahti or Puhti\r +\r +This section explains how to run the COVID19 workflow on CSC supercomputers using SnakeMake.\r +\r +#### Requirements\r +\r +- Install snakemake (or check if there is a version installed using `module spider snakemake`)\r +- Install workflow, using the same steps as for the local machine. 
With the exception that containers have to be built elsewhere.\r +\r +#### Steps\r +\r +\r +1. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflow/SnakeMake\r + ```\r +\r +2. Edit `launch.sh` with the correct partition, account, and resource specifications. \r +\r +3. Execute `./launch.sh`\r +\r + > :warning: Snakemake provides a `--cluster` flag, but this functionality should be avoided as it's really not suited for HPC systems.\r +\r +## License\r +\r +[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\r +\r +## Contact\r +\r +\r +\r +This software has been developed for the [PerMedCoE project](https://permedcoe.eu/), funded by the European Commission (EU H2020 [951773](https://cordis.europa.eu/project/id/951773)).\r +\r +![](https://permedcoe.eu/wp-content/uploads/2020/11/logo_1.png "PerMedCoE")\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/476?version=1" ; + schema1:isBasedOn "https://github.com/PerMedCoE/covid-19-workflow/" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PerMedCoE Covid19 Pilot workflow (PyCOMPSs)" ; + schema1:sdDatePublished "2024-08-05 10:30:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/476/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1029 ; + schema1:dateCreated "2023-05-23T12:07:56Z" ; + schema1:dateModified "2023-05-23T12:33:23Z" ; + schema1:description """# COVID-19 Multiscale Modelling of the Virus and Patients’ Tissue Workflow\r +\r +## Table of Contents\r +\r +- [COVID-19 Multiscale Modelling of the Virus and Patients’ Tissue Workflow](#covid-19-multiscale-modelling-of-the-virus-and-patients-tissue-workflow)\r + - [Table of Contents](#table-of-contents)\r + - [Description](#description)\r + - [Contents](#contents)\r + - [Building Blocks](#building-blocks)\r + - [Workflows](#workflows)\r + - [Resources](#resources)\r + - [Tests](#tests)\r + - [Instructions](#instructions)\r + - [Local machine](#local-machine)\r + - [Requirements](#requirements)\r + - [Usage steps](#usage-steps)\r + - [MareNostrum 4](#marenostrum-4)\r + - [Requirements in MN4](#requirements-in-mn4)\r + - [Usage steps in MN4](#usage-steps-in-mn4)\r + - [Mahti or Puhti](#mahti-or-puhti)\r + - [Requirements](#requirements)\r + - [Steps](#steps)\r + - [License](#license)\r + - [Contact](#contact)\r +\r +## Description\r +\r +Uses multiscale simulations to predict patient-specific SARS‑CoV‑2 severity subtypes\r +(moderate, severe or control), using single-cell RNA-Seq data, MaBoSS and PhysiBoSS.\r +Boolean models are used to determine the behaviour of individual agents as a function\r +of extracellular conditions and the concentration of different substrates, including\r +the number of virions. Predictions of severity subtypes are based on a meta-analysis of\r +personalised model outputs simulating cellular apoptosis regulation in epithelial cells\r +infected by SARS‑CoV‑2.\r +\r +The workflow uses the following building blocks, described in order of execution:\r +\r +1. High-throughput mutant analysis\r +2. Single-cell processing\r +3. 
Personalise patient\r +4. PhysiBoSS\r +5. Analysis of all simulations\r +\r +For details on individual workflow steps, see the user documentation for each building block.\r +\r +[`GitHub repository`]()\r +\r +\r +## Contents\r +\r +### Building Blocks\r +\r +The ``BuildingBlocks`` folder contains the script to install the\r +Building Blocks used in the COVID-19 Workflow.\r +\r +### Workflows\r +\r +The ``Workflow`` folder contains the workflows implementations.\r +\r +Currently contains the implementation using PyCOMPSs and Snakemake (in progress).\r +\r +### Resources\r +\r +The ``Resources`` folder contains dataset files.\r +\r +### Tests\r +\r +The ``Tests`` folder contains the scripts that run each Building Block\r +used in the workflow for the given small dataset.\r +They can be executed individually for testing purposes.\r +\r +## Instructions\r +\r +### Local machine\r +\r +This section explains the requirements and usage for the COVID19 Workflow in a laptop or desktop computer.\r +\r +#### Requirements\r +\r +- [`permedcoe`](https://github.com/PerMedCoE/permedcoe) package\r +- [PyCOMPSs](https://pycompss.readthedocs.io/en/stable/Sections/00_Quickstart.html) / [Snakemake](https://snakemake.readthedocs.io/en/stable/)\r +- [Singularity](https://sylabs.io/guides/3.0/user-guide/installation.html)\r +\r +#### Usage steps\r +\r +1. Clone this repository:\r +\r + ```bash\r + git clone https://github.com/PerMedCoE/covid-19-workflow.git\r + ```\r +\r +2. Install the Building Blocks required for the COVID19 Workflow:\r +\r + ```bash\r + covid-19-workflow/BuildingBlocks/./install_BBs.sh\r + ```\r +\r +3. 
Get the required Building Block images from the project [B2DROP](https://b2drop.bsc.es/index.php/f/444350):\r +\r + - Required images:\r + - MaBoSS.singularity\r + - meta_analysis.singularity\r + - PhysiCell-COVID19.singularity\r + - single_cell.singularity\r +\r + The path where these files are stored **MUST be exported in the `PERMEDCOE_IMAGES`** environment variable.\r +\r + > :warning: **TIP**: These containers can be built manually as follows (be patient since some of them may take some time):\r + 1. Clone the `BuildingBlocks` repository\r + ```bash\r + git clone https://github.com/PerMedCoE/BuildingBlocks.git\r + ```\r + 2. Build the required Building Block images\r + ```bash\r + cd BuildingBlocks/Resources/images\r + sudo singularity build MaBoSS.sif MaBoSS.singularity\r + sudo singularity build meta_analysis.sif meta_analysis.singularity\r + sudo singularity build PhysiCell-COVID19.sif PhysiCell-COVID19.singularity\r + sudo singularity build single_cell.sif single_cell.singularity\r + cd ../../..\r + ```\r +\r +**If using PyCOMPSs in local PC** (make sure that PyCOMPSs in installed):\r +\r +4. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflows/PyCOMPSs\r + ```\r +\r +5. Execute `./run.sh`\r +\r +**If using Snakemake in local PC** (make sure that SnakeMake is installed):\r +\r +4. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflows/SnakeMake\r + ```\r +\r +5. Execute `./run.sh`\r + > **TIP**: If you want to run the workflow with a different dataset, please update the `run.sh` script setting the `dataset` variable to the new dataset folder and their file names.\r +\r +\r +### MareNostrum 4\r +\r +This section explains the requirements and usage for the COVID19 Workflow in the MareNostrum 4 supercomputer.\r +\r +#### Requirements in MN4\r +\r +- Access to MN4\r +\r +All Building Blocks are already installed in MN4, and the COVID19 Workflow available.\r +\r +#### Usage steps in MN4\r +\r +1. 
Load the `COMPSs`, `Singularity` and `permedcoe` modules\r +\r + ```bash\r + export COMPSS_PYTHON_VERSION=3\r + module load COMPSs/3.1\r + module load singularity/3.5.2\r + module use /apps/modules/modulefiles/tools/COMPSs/libraries\r + module load permedcoe\r + ```\r +\r + > **TIP**: Include the loading into your `${HOME}/.bashrc` file to load it automatically on the session start.\r +\r + This commands will load COMPSs and the permedcoe package which provides all necessary dependencies, as well as the path to the singularity container images (`PERMEDCOE_IMAGES` environment variable) and testing dataset (`COVID19WORKFLOW_DATASET` environment variable).\r +\r +2. Get a copy of the pilot workflow into your desired folder\r +\r + ```bash\r + mkdir desired_folder\r + cd desired_folder\r + get_covid19workflow\r + ```\r +\r +3. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflow/PyCOMPSs\r + ```\r +\r +4. Execute `./launch.sh`\r +\r + This command will launch a job into the job queuing system (SLURM) requesting 2 nodes (one node acting half master and half worker, and other full worker node) for 20 minutes, and is prepared to use the singularity images that are already deployed in MN4 (located into the `PERMEDCOE_IMAGES` environment variable). It uses the dataset located into `../../Resources/data` folder.\r +\r + > :warning: **TIP**: If you want to run the workflow with a different dataset, please edit the `launch.sh` script and define the appropriate dataset path.\r +\r + After the execution, a `results` folder will be available with with COVID19 Workflow results.\r +\r +### Mahti or Puhti\r +\r +This section explains how to run the COVID19 workflow on CSC supercomputers using SnakeMake.\r +\r +#### Requirements\r +\r +- Install snakemake (or check if there is a version installed using `module spider snakemake`)\r +- Install workflow, using the same steps as for the local machine. 
With the exception that containers have to be built elsewhere.\r +\r +#### Steps\r +\r +\r +1. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflow/SnakeMake\r + ```\r +\r +2. Edit `launch.sh` with the correct partition, account, and resource specifications. \r +\r +3. Execute `./launch.sh`\r +\r + > :warning: Snakemake provides a `--cluster` flag, but this functionality should be avoided as it's really not suited for HPC systems.\r +\r +## License\r +\r +[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\r +\r +## Contact\r +\r +\r +\r +This software has been developed for the [PerMedCoE project](https://permedcoe.eu/), funded by the European Commission (EU H2020 [951773](https://cordis.europa.eu/project/id/951773)).\r +\r +![](https://permedcoe.eu/wp-content/uploads/2020/11/logo_1.png "PerMedCoE")\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "PerMedCoE Covid19 Pilot workflow (PyCOMPSs)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/476?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1027?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/viralrecon" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/viralrecon" ; + schema1:sdDatePublished "2024-08-05 10:23:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1027/ro_crate?version=10" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10178 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:21Z" ; + schema1:dateModified "2024-06-11T12:55:21Z" ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1027?version=10" ; + schema1:keywords "Amplicon, ARTIC, Assembly, covid-19, covid19, illumina, long-read-sequencing, Metagenomics, nanopore, ONT, oxford-nanopore, SARS-CoV-2, variant-calling, viral, Virus" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/viralrecon" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1027?version=10" ; + schema1:version 10 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.255.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_ligand_parameterization/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL GMX Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-08-05 10:32:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/255/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 12338 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2368 ; + schema1:creator , + ; + schema1:dateCreated "2022-01-10T10:39:09Z" ; + schema1:dateModified "2023-06-06T12:23:52Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/255?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name 
"CWL GMX Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/255?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + ; + ns1:output , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Performs scaffolding using HiC Data. Part of VGP assembly pipeline. The scaffolding can be performed on long read assembly contigs or on scaffolds (e.g.: Bionano scaffolds)." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/324?version=1" ; + schema1:isBasedOn "https://github.com/Delphine-L/iwc/tree/VGP/workflows/VGP-assembly-v2" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for VGP hybrid scaffolding with HiC data" ; + schema1:sdDatePublished "2024-08-05 10:32:18 +0100" ; + schema1:url "https://workflowhub.eu/workflows/324/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9690 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 52589 ; + schema1:creator ; + schema1:dateCreated "2022-04-05T23:33:36Z" ; + schema1:dateModified "2023-01-16T13:59:36Z" ; + schema1:description "Performs scaffolding using HiC Data. Part of VGP assembly pipeline. The scaffolding can be performed on long read assembly contigs or on scaffolds (e.g.: Bionano scaffolds)." 
; + schema1:isPartOf ; + schema1:keywords "vgp, Assembly, Galaxy" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "VGP hybrid scaffolding with HiC data" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/324?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=16" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-08-05 10:23:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=16" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17642 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:16Z" ; + schema1:dateModified "2024-06-11T12:55:16Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=16" ; + schema1:version 16 . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-04-09T15:37:12.177589" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:description "Preprocessing of raw SARS-CoV-2 reads. This workflow contains an alternate starting point to avoid the data to be downloaded from the NCBI SRA. More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/4?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genomics - Read pre-processing without downloading from SRA" ; + schema1:sdDatePublished "2024-08-05 10:33:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/4/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 6333 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 35202 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T12:06:00Z" ; + schema1:dateModified "2023-01-16T13:39:49Z" ; + schema1:description "Preprocessing of raw SARS-CoV-2 reads. This workflow contains an alternate starting point to avoid the data to be downloaded from the NCBI SRA. 
More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Genomics - Read pre-processing without downloading from SRA" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/4?version=1" ; + schema1:version 1 ; + ns1:input , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 10958 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """MGnify (http://www.ebi.ac.uk/metagenomics) provides a free to use platform for the assembly, analysis and archiving of microbiome data derived from sequencing microbial populations that are present in particular environments. Over the past 2 years, MGnify (formerly EBI Metagenomics) has more than doubled the number of publicly available analysed datasets held within the resource. Recently, an updated approach to data analysis has been unveiled (version 5.0), replacing the previous single pipeline with multiple analysis pipelines that are tailored according to the input data, and that are formally described using the Common Workflow Language, enabling greater provenance, reusability, and reproducibility. MGnify's new analysis pipelines offer additional approaches for taxonomic assertions based on ribosomal internal transcribed spacer regions (ITS1/2) and expanded protein functional annotations. Biochemical pathways and systems predictions have also been added for assembled contigs. MGnify's growing focus on the assembly of metagenomic data has also seen the number of datasets it has assembled and analysed increase six-fold. 
The non-redundant protein database constructed from the proteins encoded by these assemblies now exceeds 1 billion sequences. Meanwhile, a newly developed contig viewer provides fine-grained visualisation of the assembled contigs and their enriched annotations.\r +\r +Documentation: https://docs.mgnify.org/en/latest/analysis.html#assembly-analysis-pipeline\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/360?version=2" ; + schema1:isBasedOn "https://github.com/EBI-Metagenomics/pipeline-v5.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for MGnify - assembly analysis pipeline" ; + schema1:sdDatePublished "2024-08-05 10:32:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/360/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 67287 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7995 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2022-06-07T08:03:35Z" ; + schema1:dateModified "2023-04-28T10:09:02Z" ; + schema1:description """MGnify (http://www.ebi.ac.uk/metagenomics) provides a free to use platform for the assembly, analysis and archiving of microbiome data derived from sequencing microbial populations that are present in particular environments. Over the past 2 years, MGnify (formerly EBI Metagenomics) has more than doubled the number of publicly available analysed datasets held within the resource. Recently, an updated approach to data analysis has been unveiled (version 5.0), replacing the previous single pipeline with multiple analysis pipelines that are tailored according to the input data, and that are formally described using the Common Workflow Language, enabling greater provenance, reusability, and reproducibility. 
MGnify's new analysis pipelines offer additional approaches for taxonomic assertions based on ribosomal internal transcribed spacer regions (ITS1/2) and expanded protein functional annotations. Biochemical pathways and systems predictions have also been added for assembled contigs. MGnify's growing focus on the assembly of metagenomic data has also seen the number of datasets it has assembled and analysed increase six-fold. The non-redundant protein database constructed from the proteins encoded by these assemblies now exceeds 1 billion sequences. Meanwhile, a newly developed contig viewer provides fine-grained visualisation of the assembled contigs and their enriched annotations.\r +\r +Documentation: https://docs.mgnify.org/en/latest/analysis.html#assembly-analysis-pipeline\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/360?version=1" ; + schema1:keywords "Metagenomics, Annotation, workflow, CWL" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "MGnify - assembly analysis pipeline" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/360?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """[![GitHub Actions CI Status](https://github.com/plant-food-research-open/assemblyqc/actions/workflows/ci.yml/badge.svg)](https://github.com/plant-food-research-open/assemblyqc/actions/workflows/ci.yml)\r +[![GitHub Actions Linting Status](https://github.com/plant-food-research-open/assemblyqc/actions/workflows/linting.yml/badge.svg)](https://github.com/plant-food-research-open/assemblyqc/actions/workflows/linting.yml)[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.10647870-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.10647870)\r +[![nf-test](https://img.shields.io/badge/unit_tests-nf--test-337ab7.svg)](https://www.nf-test.com)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A523.04.0-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda ❌](http://img.shields.io/badge/run%20with-conda%20❌-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![Launch on Seqera Platform](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Seqera%20Platform-%234256e7)](https://cloud.seqera.io/launch?pipeline=https://github.com/plant-food-research-open/assemblyqc)\r +\r +## Introduction\r +\r +**plant-food-research-open/assemblyqc** is a [NextFlow](https://www.nextflow.io/docs/latest/index.html) pipeline which evaluates assembly quality with multiple QC tools and presents the results in a unified html report. 
The tools are shown in the [Pipeline Flowchart](#pipeline-flowchart) and their references are listed in [CITATIONS.md](./CITATIONS.md).\r +\r +## Pipeline Flowchart\r +\r +```mermaid\r +%%{init: {\r + 'theme': 'base',\r + 'themeVariables': {\r + 'fontSize': '52px",\r + 'primaryColor': '#9A6421',\r + 'primaryTextColor': '#ffffff',\r + 'primaryBorderColor': '#9A6421',\r + 'lineColor': '#B180A8',\r + 'secondaryColor': '#455C58',\r + 'tertiaryColor': '#ffffff'\r + }\r +}}%%\r +flowchart LR\r + forEachTag(Assembly) ==> VALIDATE_FORMAT[VALIDATE FORMAT]\r +\r + VALIDATE_FORMAT ==> ncbiFCS[NCBI FCS\\nADAPTOR]\r + ncbiFCS ==> Check{Check}\r +\r + VALIDATE_FORMAT ==> ncbiGX[NCBI FCS GX]\r + ncbiGX ==> Check\r + Check ==> |Clean|Run(Run)\r +\r + Check ==> |Contamination|Skip(Skip All)\r + Skip ==> REPORT\r +\r + VALIDATE_FORMAT ==> GFF_STATS[GENOMETOOLS GT STAT]\r +\r + Run ==> ASS_STATS[ASSEMBLATHON STATS]\r + Run ==> BUSCO\r + Run ==> TIDK\r + Run ==> LAI\r + Run ==> KRAKEN2\r + Run ==> HIC_CONTACT_MAP[HIC CONTACT MAP]\r + Run ==> MUMMER\r + Run ==> MINIMAP2\r + Run ==> MERQURY\r +\r + MUMMER ==> CIRCOS\r + MUMMER ==> DOTPLOT\r +\r + MINIMAP2 ==> PLOTSR\r +\r + ASS_STATS ==> REPORT\r + GFF_STATS ==> REPORT\r + BUSCO ==> REPORT\r + TIDK ==> REPORT\r + LAI ==> REPORT\r + KRAKEN2 ==> REPORT\r + HIC_CONTACT_MAP ==> REPORT\r + CIRCOS ==> REPORT\r + DOTPLOT ==> REPORT\r + PLOTSR ==> REPORT\r + MERQURY ==> REPORT\r +```\r +\r +- [FASTA VALIDATOR](https://github.com/linsalrob/fasta_validator) + [SEQKIT RMDUP](https://github.com/shenwei356/seqkit): FASTA validation\r +- [GENOMETOOLS GT GFF3VALIDATOR](https://genometools.org/tools/gt_gff3validator.html): GFF3 validation\r +- [ASSEMBLATHON STATS](https://github.com/PlantandFoodResearch/assemblathon2-analysis/blob/a93cba25d847434f7eadc04e63b58c567c46a56d/assemblathon_stats.pl): Assembly statistics\r +- [GENOMETOOLS GT STAT](https://genometools.org/tools/gt_stat.html): Annotation statistics\r +- [NCBI FCS 
ADAPTOR](https://github.com/ncbi/fcs): Adaptor contamination pass/fail\r +- [NCBI FCS GX](https://github.com/ncbi/fcs): Foreign organism contamination pass/fail\r +- [BUSCO](https://gitlab.com/ezlab/busco): Gene-space completeness estimation\r +- [TIDK](https://github.com/tolkit/telomeric-identifier): Telomere repeat identification\r +- [LAI](https://github.com/oushujun/LTR_retriever/blob/master/LAI): Continuity of repetitive sequences\r +- [KRAKEN2](https://github.com/DerrickWood/kraken2): Taxonomy classification\r +- [HIC CONTACT MAP](https://github.com/igvteam/juicebox.js): Alignment and visualisation of HiC data\r +- [MUMMER](https://github.com/mummer4/mummer) → [CIRCOS](http://circos.ca/documentation/) + [DOTPLOT](https://plotly.com) & [MINIMAP2](https://github.com/lh3/minimap2) → [PLOTSR](https://github.com/schneebergerlab/plotsr): Synteny analysis\r +- [MERQURY](https://github.com/marbl/merqury): K-mer completeness, consensus quality and phasing assessment\r +\r +## Usage\r +\r +Refer to [usage](./docs/usage.md), [parameters](./docs/parameters.md) and [output](./docs/output.md) documents for details.\r +\r +> [!NOTE]\r +> If you are new to Nextflow and nf-core, please refer to [this page](https://nf-co.re/docs/usage/installation) on how to set-up Nextflow. Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline) with `-profile test` before running the workflow on actual data.\r +\r +Prepare an `assemblysheet.csv` file with following columns representing target assemblies and associated meta-data.\r +\r +- `tag:` A unique tag which represents the target assembly throughout the pipeline and in the final report\r +- `fasta:` FASTA file\r +\r +Now, you can run the pipeline using:\r +\r +```bash\r +nextflow run plant-food-research-open/assemblyqc \\\r + -profile \\\r + --input assemblysheet.csv \\\r + --outdir \r +```\r +\r +> [!WARNING]\r +> Please provide pipeline parameters via the CLI or Nextflow `-params-file` option. 
Custom config files including those provided by the `-c` Nextflow option can be used to provide any configuration _**except for parameters**_;\r +> see [docs](https://nf-co.re/usage/configuration#custom-configuration-files).\r +\r +### Plant&Food Users\r +\r +Download the pipeline to your `/workspace/$USER` folder. Change the parameters defined in the [pfr/params.json](./pfr/params.json) file. Submit the pipeline to SLURM for execution.\r +\r +```bash\r +sbatch ./pfr_assemblyqc\r +```\r +\r +## Credits\r +\r +plant-food-research-open/assemblyqc was originally written by Usman Rashid ([@gallvp](https://github.com/gallvp)) and Ken Smith ([@hzlnutspread](https://github.com/hzlnutspread)).\r +\r +Ross Crowhurst ([@rosscrowhurst](https://github.com/rosscrowhurst)), Chen Wu ([@christinawu2008](https://github.com/christinawu2008)) and Marcus Davy ([@mdavy86](https://github.com/mdavy86)) generously contributed their QC scripts.\r +\r +Mahesh Binzer-Panchal ([@mahesh-panchal](https://github.com/mahesh-panchal)) helped port the pipeline modules and sub-workflows to [nf-core](https://nf-co.re) schema.\r +\r +We thank the following people for their extensive assistance in the development of this pipeline:\r +\r +- [Cecilia Deng](https://github.com/CeciliaDeng)\r +- [Ignacio Carvajal](https://github.com/ignacio3437)\r +- [Jason Shiller](https://github.com/jasonshiller)\r +- [Sarah Bailey](https://github.com/SarahBailey1998)\r +- [Susan Thomson](https://github.com/cflsjt)\r +- [Ting-Hsuan Chen](https://github.com/ting-hsuan-chen)\r +\r +The pipeline uses nf-core modules contributed by following authors:\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +## Contributions and Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +## Citations\r +\r +If you use plant-food-research-open/assemblyqc for your analysis, please cite 
it as:\r +\r +> Rashid, U., Wu, C., Shiller, J., Smith, K., Crowhurst, R., Davy, M., Chen, T.-H., Thomson, S., & Deng, C. (2024). AssemblyQC: A NextFlow pipeline for evaluating assembly quality (2.0.0). Zenodo. https://doi.org/10.5281/zenodo.10647870\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +This pipeline uses code and infrastructure developed and maintained by the [nf-core](https://nf-co.re) community, reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1058?version=1" ; + schema1:isBasedOn "https://github.com/Plant-Food-Research-Open/assemblyqc" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for AssemblyQC: A NextFlow pipeline for evaluating assembly quality" ; + schema1:sdDatePublished "2024-08-05 10:22:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1058/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3163 ; + schema1:creator , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-25T01:40:30Z" ; + schema1:dateModified "2024-06-25T01:40:30Z" ; + schema1:description """[![GitHub Actions CI Status](https://github.com/plant-food-research-open/assemblyqc/actions/workflows/ci.yml/badge.svg)](https://github.com/plant-food-research-open/assemblyqc/actions/workflows/ci.yml)\r +[![GitHub Actions Linting Status](https://github.com/plant-food-research-open/assemblyqc/actions/workflows/linting.yml/badge.svg)](https://github.com/plant-food-research-open/assemblyqc/actions/workflows/linting.yml)[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.10647870-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.10647870)\r +[![nf-test](https://img.shields.io/badge/unit_tests-nf--test-337ab7.svg)](https://www.nf-test.com)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A523.04.0-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda ❌](http://img.shields.io/badge/run%20with-conda%20❌-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![Launch on Seqera Platform](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Seqera%20Platform-%234256e7)](https://cloud.seqera.io/launch?pipeline=https://github.com/plant-food-research-open/assemblyqc)\r +\r +## Introduction\r +\r +**plant-food-research-open/assemblyqc** is a [NextFlow](https://www.nextflow.io/docs/latest/index.html) pipeline which evaluates assembly quality with multiple QC tools and presents the results in a 
unified html report. The tools are shown in the [Pipeline Flowchart](#pipeline-flowchart) and their references are listed in [CITATIONS.md](./CITATIONS.md).\r +\r +## Pipeline Flowchart\r +\r +```mermaid\r +%%{init: {\r + 'theme': 'base',\r + 'themeVariables': {\r + 'fontSize': '52px",\r + 'primaryColor': '#9A6421',\r + 'primaryTextColor': '#ffffff',\r + 'primaryBorderColor': '#9A6421',\r + 'lineColor': '#B180A8',\r + 'secondaryColor': '#455C58',\r + 'tertiaryColor': '#ffffff'\r + }\r +}}%%\r +flowchart LR\r + forEachTag(Assembly) ==> VALIDATE_FORMAT[VALIDATE FORMAT]\r +\r + VALIDATE_FORMAT ==> ncbiFCS[NCBI FCS\\nADAPTOR]\r + ncbiFCS ==> Check{Check}\r +\r + VALIDATE_FORMAT ==> ncbiGX[NCBI FCS GX]\r + ncbiGX ==> Check\r + Check ==> |Clean|Run(Run)\r +\r + Check ==> |Contamination|Skip(Skip All)\r + Skip ==> REPORT\r +\r + VALIDATE_FORMAT ==> GFF_STATS[GENOMETOOLS GT STAT]\r +\r + Run ==> ASS_STATS[ASSEMBLATHON STATS]\r + Run ==> BUSCO\r + Run ==> TIDK\r + Run ==> LAI\r + Run ==> KRAKEN2\r + Run ==> HIC_CONTACT_MAP[HIC CONTACT MAP]\r + Run ==> MUMMER\r + Run ==> MINIMAP2\r + Run ==> MERQURY\r +\r + MUMMER ==> CIRCOS\r + MUMMER ==> DOTPLOT\r +\r + MINIMAP2 ==> PLOTSR\r +\r + ASS_STATS ==> REPORT\r + GFF_STATS ==> REPORT\r + BUSCO ==> REPORT\r + TIDK ==> REPORT\r + LAI ==> REPORT\r + KRAKEN2 ==> REPORT\r + HIC_CONTACT_MAP ==> REPORT\r + CIRCOS ==> REPORT\r + DOTPLOT ==> REPORT\r + PLOTSR ==> REPORT\r + MERQURY ==> REPORT\r +```\r +\r +- [FASTA VALIDATOR](https://github.com/linsalrob/fasta_validator) + [SEQKIT RMDUP](https://github.com/shenwei356/seqkit): FASTA validation\r +- [GENOMETOOLS GT GFF3VALIDATOR](https://genometools.org/tools/gt_gff3validator.html): GFF3 validation\r +- [ASSEMBLATHON STATS](https://github.com/PlantandFoodResearch/assemblathon2-analysis/blob/a93cba25d847434f7eadc04e63b58c567c46a56d/assemblathon_stats.pl): Assembly statistics\r +- [GENOMETOOLS GT STAT](https://genometools.org/tools/gt_stat.html): Annotation statistics\r +- [NCBI FCS 
ADAPTOR](https://github.com/ncbi/fcs): Adaptor contamination pass/fail\r +- [NCBI FCS GX](https://github.com/ncbi/fcs): Foreign organism contamination pass/fail\r +- [BUSCO](https://gitlab.com/ezlab/busco): Gene-space completeness estimation\r +- [TIDK](https://github.com/tolkit/telomeric-identifier): Telomere repeat identification\r +- [LAI](https://github.com/oushujun/LTR_retriever/blob/master/LAI): Continuity of repetitive sequences\r +- [KRAKEN2](https://github.com/DerrickWood/kraken2): Taxonomy classification\r +- [HIC CONTACT MAP](https://github.com/igvteam/juicebox.js): Alignment and visualisation of HiC data\r +- [MUMMER](https://github.com/mummer4/mummer) → [CIRCOS](http://circos.ca/documentation/) + [DOTPLOT](https://plotly.com) & [MINIMAP2](https://github.com/lh3/minimap2) → [PLOTSR](https://github.com/schneebergerlab/plotsr): Synteny analysis\r +- [MERQURY](https://github.com/marbl/merqury): K-mer completeness, consensus quality and phasing assessment\r +\r +## Usage\r +\r +Refer to [usage](./docs/usage.md), [parameters](./docs/parameters.md) and [output](./docs/output.md) documents for details.\r +\r +> [!NOTE]\r +> If you are new to Nextflow and nf-core, please refer to [this page](https://nf-co.re/docs/usage/installation) on how to set-up Nextflow. Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline) with `-profile test` before running the workflow on actual data.\r +\r +Prepare an `assemblysheet.csv` file with following columns representing target assemblies and associated meta-data.\r +\r +- `tag:` A unique tag which represents the target assembly throughout the pipeline and in the final report\r +- `fasta:` FASTA file\r +\r +Now, you can run the pipeline using:\r +\r +```bash\r +nextflow run plant-food-research-open/assemblyqc \\\r + -profile \\\r + --input assemblysheet.csv \\\r + --outdir \r +```\r +\r +> [!WARNING]\r +> Please provide pipeline parameters via the CLI or Nextflow `-params-file` option. 
Custom config files including those provided by the `-c` Nextflow option can be used to provide any configuration _**except for parameters**_;\r +> see [docs](https://nf-co.re/usage/configuration#custom-configuration-files).\r +\r +### Plant&Food Users\r +\r +Download the pipeline to your `/workspace/$USER` folder. Change the parameters defined in the [pfr/params.json](./pfr/params.json) file. Submit the pipeline to SLURM for execution.\r +\r +```bash\r +sbatch ./pfr_assemblyqc\r +```\r +\r +## Credits\r +\r +plant-food-research-open/assemblyqc was originally written by Usman Rashid ([@gallvp](https://github.com/gallvp)) and Ken Smith ([@hzlnutspread](https://github.com/hzlnutspread)).\r +\r +Ross Crowhurst ([@rosscrowhurst](https://github.com/rosscrowhurst)), Chen Wu ([@christinawu2008](https://github.com/christinawu2008)) and Marcus Davy ([@mdavy86](https://github.com/mdavy86)) generously contributed their QC scripts.\r +\r +Mahesh Binzer-Panchal ([@mahesh-panchal](https://github.com/mahesh-panchal)) helped port the pipeline modules and sub-workflows to [nf-core](https://nf-co.re) schema.\r +\r +We thank the following people for their extensive assistance in the development of this pipeline:\r +\r +- [Cecilia Deng](https://github.com/CeciliaDeng)\r +- [Ignacio Carvajal](https://github.com/ignacio3437)\r +- [Jason Shiller](https://github.com/jasonshiller)\r +- [Sarah Bailey](https://github.com/SarahBailey1998)\r +- [Susan Thomson](https://github.com/cflsjt)\r +- [Ting-Hsuan Chen](https://github.com/ting-hsuan-chen)\r +\r +The pipeline uses nf-core modules contributed by following authors:\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +\r +## Contributions and Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +## Citations\r +\r +If you use plant-food-research-open/assemblyqc for your analysis, please cite 
it as:\r +\r +> Rashid, U., Wu, C., Shiller, J., Smith, K., Crowhurst, R., Davy, M., Chen, T.-H., Thomson, S., & Deng, C. (2024). AssemblyQC: A NextFlow pipeline for evaluating assembly quality (2.0.0). Zenodo. https://doi.org/10.5281/zenodo.10647870\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +This pipeline uses code and infrastructure developed and maintained by the [nf-core](https://nf-co.re) community, reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:keywords "quality control, Statistics, genome, taxonomy, Assembly, repeat, Hi-C, Report, k-mer, synteny, adaptor, fcs, contamination, phasing, BUSCO, telomere, n50, Merqury" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "AssemblyQC: A NextFlow pipeline for evaluating assembly quality" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1058?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2021-12-20T17:04:47.638855" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-se-illumina-wgs-variant-calling" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-se-illumina-wgs-variant-calling/COVID-19-SE-WGS-ILLUMINA" ; + schema1:sdDatePublished "2021-12-21 03:01:00 +0000" ; + schema1:softwareVersion "v0.1.3" . 
+ + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=25" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=25" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12300 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:13Z" ; + schema1:dateModified "2024-06-11T12:55:13Z" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=25" ; + schema1:version 25 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.128.4" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (PDBe REST API)" ; + schema1:sdDatePublished "2024-08-05 
10:30:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/128/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 36987 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-07-26T09:29:17Z" ; + schema1:dateModified "2023-07-26T09:29:56Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in 
Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/128?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (PDBe REST API)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_virtual-screening/blob/master/biobb_wf_virtual-screening/notebooks/ebi_api/wf_vs_ebi_api.ipynb" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# sqtlseeker2-nf\r +\r +[![nextflow](https://img.shields.io/badge/nextflow-%E2%89%A50.27.0-blue.svg)](http://nextflow.io)\r +[![CI-checks](https://github.com/guigolab/sqtlseeker2-nf/actions/workflows/ci.yaml/badge.svg)](https://github.com/guigolab/sqtlseeker2-nf/actions/workflows/ci.yaml)\r +\r +A pipeline for splicing quantitative trait loci (sQTL) mapping.\r +\r +The pipeline performs the following analysis steps:\r +\r +* Index the genotype file\r +* Preprocess the transcript expression data\r +* Test for association between splicing ratios and genetic variants in *cis* (nominal pass)\r +* Obtain an empirical P-value for each phenotype (permutation pass, optional)\r +* Control for multiple testing \r +\r +For details on each step, please read [sQTLseekeR2](https://github.com/guigolab/sQTLseekeR2) documentation.\r +\r +The pipeline uses [Nextflow](http://www.nextflow.io) as the execution backend. 
Please check [Nextflow documentation](http://www.nextflow.io/docs/latest/index.html) for more information.\r +\r +## Requirements\r +\r +- Unix-like operating system (Linux, MacOS, etc.)\r +- Java 8 or later \r +- [Docker](https://www.docker.com/) (v1.10.0 or later) or [Singularity](http://singularity.lbl.gov) (v2.5.0 or later)\r +\r +## Quickstart (~2 min)\r +\r +1. Install Nextflow:\r + ```\r + curl -fsSL get.nextflow.io | bash\r + ```\r +\r +2. Make a test run:\r + ```\r + ./nextflow run guigolab/sqtlseeker2-nf -with-docker\r + ```\r +\r + **Note**: set `-with-singularity` to use Singularity instead of Docker. \r +\r +## Pipeline usage\r +\r +Launching the pipeline with the `--help` parameter shows the help message:\r +\r +```\r +nextflow run sqtlseeker2-nf --help\r +```\r +\r +```\r +N E X T F L O W ~ version 0.27.2\r +Launching `sqtlseeker2.nf` [admiring_lichterman] - revision: 28c86caf1c\r +\r +sqtlseeker2-nf ~ A pipeline for splicing QTL mapping\r +----------------------------------------------------\r +Run sQTLseekeR2 on a set of data.\r +\r +Usage: \r + sqtlseeker2-nf [options]\r +\r +Options:\r +--genotype GENOTYPE_FILE the genotype file\r +--trexp EXPRESSION_FILE the transcript expression file\r +--metadata METADATA_FILE the metadata file\r +--genes GENES_FILE the gene location file\r +--dir DIRECTORY the output directory\r +--mode MODE the run mode: nominal or permuted (default: nominal)\r +--win WINDOW the cis window in bp (default: 5000)\r +--covariates COVARIATES include covariates in the model (default: false)\r +--fdr FDR false discovery rate level (default: 0.05)\r +--min_md MIN_MD minimum effect size reported (default: 0.05)\r +--svqtl SVQTLS report svQTLs (default: false)\r +\r +Additional parameters for mode = nominal:\r +--ld LD threshold for LD-based variant clustering (default: 0, no clustering)\r +--kn KN number of genes per batch in nominal pass (default: 10)\r +\r +Additional parameters for mode = permuted:\r +--kp KP number of genes per 
batch in permuted pass (default: 10)\r +--max_perm MAX_PERM maximum number of permutations (default: 1000)\r +```\r +\r +## Input files and format\r +\r +`sqtlseeker2-nf` takes as input files the following:\r +\r +* **Genotype file.**\r +Contains the genotype of each sample, coded as follows: 0 for REF/REF, 1 for REF/ALT, 2 for ALT/ALT, -1 for missing value.\r +The first four columns should be: `chr`, `start`, `end` and `snpId`. This file needs to be sorted by coordinate.\r +\r +* **Transcript expression file.**\r +Contains the expression of each transcript in each sample (e.g. read counts, RPKM, TPM).\r +It is not recommended to use transformed (log, quantile, or any non-linear transformation) expression.\r +Columns `trId` and `geneId`, corresponding to the transcript and gene IDs, are required. \r +\r +* **Metadata file.** Contains the covariate information for each sample. \r +In addition, it defines the groups or conditions for which sQTL mapping will be performed.\r +The first columns should be: `indId`, `sampleId`, `group`, followed by the covariates.\r +This file defines which samples will be tested.\r +\r +* **Gene location file.**\r +Contains the location of each gene. Columns `chr`, `start`, `end` and `geneId` are required. \r +This file defines which genes will be tested.\r +\r +Example [data](data) is available for the test run.\r +\r +## Pipeline results\r +\r +sQTL mapping results are saved into the folder specified with the `--dir` parameter. 
By default it is the `result` folder within the current working directory.\r +\r +Output files are organinzed into subfolders corresponding to the different `groups` specified in the metadata file: \r +\r +```\r +result\r +└── groups\r + ├── group1 \r + │   ├── all-tests.nominal.tsv \r + │   ├── all-tests.permuted.tsv \r + │   ├── sqtls-${level}fdr.nominal.tsv \r + │   └── sqtls-${level}fdr.permuted.tsv \r + ├── group2\r + ...\r +```\r +\r +Note: if only a nominal pass was run, files `*.permuted.tsv` will not be present.\r +\r +Output files contain the following information:\r +\r +`all-tests.nominal.tsv`\r +\r +* geneId: gene name \r +* snpId: variant name\r +* F: test statistic\r +* nb.groups: number of genotype groups\r +* md: maximum difference in relative expression between genotype groups (sQTL effect size)\r +* tr.first/tr.second: the transcript IDs of the two transcripts that change the most, in opposite directions\r +* info: number of individuals in each genotype group, including missing values (-1,0,1,2)\r +* pv: nominal P-value\r +\r +if `--svqtl true`\r +* F.svQTL: svQTL test statistic\r +* nb.perms.svQTL: number of permutations for svQTL test\r +* pv.svQTL: svQTL nominal P-value \r +\r +if `--ld ${r2}`\r +* LD: other variants in linkage disequilibrium with snpId above a given r2 threshold > 0\r +\r +`sqtls-${level}fdr.nominal.tsv` (in addition to the previous)\r +\r +* fdr: false discovery rate (computed across all nominal tests)\r +* fdr.svQTL: svQTL FDR\r +\r +`all-tests.permuted.tsv`\r +\r +* geneId: gene name\r +* variants.cis: number of variants tested in *cis*\r +* LD: median linkage disequilibrium in the region (r2)\r +* best.snp: ID of the top variant\r +* best.nominal.pv: P-value of the top variant\r +* shape1: first parameter value of the fitted beta distribution\r +* shape2: second parameter value of the fitted beta distribution (effective number of independent tests in the region)\r +* nb.perm: number of permutations\r +* pv.emp.perm: 
empirical P-value, computed based on permutations\r +* pv.emp.beta: empirical P-value, computed based on the fitted beta distribution\r +* runtime: run time in minutes\r +\r +`sqtls-${level}fdr.nominal.tsv` (in addition to the previous)\r +\r +* fdr: false discovery rate (computed across empirical P-values)\r +* p_tn: gene-level threshold for nominal P-values\r +\r +## Cite sqtlseeker2-nf\r +\r +If you find `sqtlseeker2-nf` useful in your research please cite the related publication:\r +\r +Garrido-Martín, D., Borsari, B., Calvo, M., Reverter, F., Guigó, R. Identification and analysis of splicing quantitative trait loci across multiple tissues in the human genome. *Nat Commun* 12, 727 (2021). [https://doi.org/10.1038/s41467-020-20578-2](https://doi.org/10.1038/s41467-020-20578-2)\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/435?version=1" ; + schema1:isBasedOn "https://github.com/guigolab/sqtlseeker2-nf.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for sqtlseeker2-nf" ; + schema1:sdDatePublished "2024-08-05 10:31:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/435/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9703 ; + schema1:creator , + ; + schema1:dateCreated "2023-02-15T11:54:54Z" ; + schema1:dateModified "2023-02-15T12:02:21Z" ; + schema1:description """# sqtlseeker2-nf\r +\r +[![nextflow](https://img.shields.io/badge/nextflow-%E2%89%A50.27.0-blue.svg)](http://nextflow.io)\r +[![CI-checks](https://github.com/guigolab/sqtlseeker2-nf/actions/workflows/ci.yaml/badge.svg)](https://github.com/guigolab/sqtlseeker2-nf/actions/workflows/ci.yaml)\r +\r +A pipeline for splicing quantitative trait loci (sQTL) mapping.\r +\r +The pipeline performs the following analysis steps:\r +\r +* Index the genotype file\r +* Preprocess the transcript expression data\r +* Test for association between splicing ratios and genetic variants in *cis* (nominal pass)\r +* Obtain an empirical P-value for each phenotype (permutation pass, optional)\r +* Control for multiple testing \r +\r +For details on each step, please read [sQTLseekeR2](https://github.com/guigolab/sQTLseekeR2) documentation.\r +\r +The pipeline uses [Nextflow](http://www.nextflow.io) as the execution backend. Please check [Nextflow documentation](http://www.nextflow.io/docs/latest/index.html) for more information.\r +\r +## Requirements\r +\r +- Unix-like operating system (Linux, MacOS, etc.)\r +- Java 8 or later \r +- [Docker](https://www.docker.com/) (v1.10.0 or later) or [Singularity](http://singularity.lbl.gov) (v2.5.0 or later)\r +\r +## Quickstart (~2 min)\r +\r +1. Install Nextflow:\r + ```\r + curl -fsSL get.nextflow.io | bash\r + ```\r +\r +2. Make a test run:\r + ```\r + ./nextflow run guigolab/sqtlseeker2-nf -with-docker\r + ```\r +\r + **Note**: set `-with-singularity` to use Singularity instead of Docker. 
\r +\r +## Pipeline usage\r +\r +Launching the pipeline with the `--help` parameter shows the help message:\r +\r +```\r +nextflow run sqtlseeker2-nf --help\r +```\r +\r +```\r +N E X T F L O W ~ version 0.27.2\r +Launching `sqtlseeker2.nf` [admiring_lichterman] - revision: 28c86caf1c\r +\r +sqtlseeker2-nf ~ A pipeline for splicing QTL mapping\r +----------------------------------------------------\r +Run sQTLseekeR2 on a set of data.\r +\r +Usage: \r + sqtlseeker2-nf [options]\r +\r +Options:\r +--genotype GENOTYPE_FILE the genotype file\r +--trexp EXPRESSION_FILE the transcript expression file\r +--metadata METADATA_FILE the metadata file\r +--genes GENES_FILE the gene location file\r +--dir DIRECTORY the output directory\r +--mode MODE the run mode: nominal or permuted (default: nominal)\r +--win WINDOW the cis window in bp (default: 5000)\r +--covariates COVARIATES include covariates in the model (default: false)\r +--fdr FDR false discovery rate level (default: 0.05)\r +--min_md MIN_MD minimum effect size reported (default: 0.05)\r +--svqtl SVQTLS report svQTLs (default: false)\r +\r +Additional parameters for mode = nominal:\r +--ld LD threshold for LD-based variant clustering (default: 0, no clustering)\r +--kn KN number of genes per batch in nominal pass (default: 10)\r +\r +Additional parameters for mode = permuted:\r +--kp KP number of genes per batch in permuted pass (default: 10)\r +--max_perm MAX_PERM maximum number of permutations (default: 1000)\r +```\r +\r +## Input files and format\r +\r +`sqtlseeker2-nf` takes as input files the following:\r +\r +* **Genotype file.**\r +Contains the genotype of each sample, coded as follows: 0 for REF/REF, 1 for REF/ALT, 2 for ALT/ALT, -1 for missing value.\r +The first four columns should be: `chr`, `start`, `end` and `snpId`. This file needs to be sorted by coordinate.\r +\r +* **Transcript expression file.**\r +Contains the expression of each transcript in each sample (e.g. 
read counts, RPKM, TPM).\r +It is not recommended to use transformed (log, quantile, or any non-linear transformation) expression.\r +Columns `trId` and `geneId`, corresponding to the transcript and gene IDs, are required. \r +\r +* **Metadata file.** Contains the covariate information for each sample. \r +In addition, it defines the groups or conditions for which sQTL mapping will be performed.\r +The first columns should be: `indId`, `sampleId`, `group`, followed by the covariates.\r +This file defines which samples will be tested.\r +\r +* **Gene location file.**\r +Contains the location of each gene. Columns `chr`, `start`, `end` and `geneId` are required. \r +This file defines which genes will be tested.\r +\r +Example [data](data) is available for the test run.\r +\r +## Pipeline results\r +\r +sQTL mapping results are saved into the folder specified with the `--dir` parameter. By default it is the `result` folder within the current working directory.\r +\r +Output files are organinzed into subfolders corresponding to the different `groups` specified in the metadata file: \r +\r +```\r +result\r +└── groups\r + ├── group1 \r + │   ├── all-tests.nominal.tsv \r + │   ├── all-tests.permuted.tsv \r + │   ├── sqtls-${level}fdr.nominal.tsv \r + │   └── sqtls-${level}fdr.permuted.tsv \r + ├── group2\r + ...\r +```\r +\r +Note: if only a nominal pass was run, files `*.permuted.tsv` will not be present.\r +\r +Output files contain the following information:\r +\r +`all-tests.nominal.tsv`\r +\r +* geneId: gene name \r +* snpId: variant name\r +* F: test statistic\r +* nb.groups: number of genotype groups\r +* md: maximum difference in relative expression between genotype groups (sQTL effect size)\r +* tr.first/tr.second: the transcript IDs of the two transcripts that change the most, in opposite directions\r +* info: number of individuals in each genotype group, including missing values (-1,0,1,2)\r +* pv: nominal P-value\r +\r +if `--svqtl true`\r +* F.svQTL: svQTL 
test statistic\r +* nb.perms.svQTL: number of permutations for svQTL test\r +* pv.svQTL: svQTL nominal P-value \r +\r +if `--ld ${r2}`\r +* LD: other variants in linkage disequilibrium with snpId above a given r2 threshold > 0\r +\r +`sqtls-${level}fdr.nominal.tsv` (in addition to the previous)\r +\r +* fdr: false discovery rate (computed across all nominal tests)\r +* fdr.svQTL: svQTL FDR\r +\r +`all-tests.permuted.tsv`\r +\r +* geneId: gene name\r +* variants.cis: number of variants tested in *cis*\r +* LD: median linkage disequilibrium in the region (r2)\r +* best.snp: ID of the top variant\r +* best.nominal.pv: P-value of the top variant\r +* shape1: first parameter value of the fitted beta distribution\r +* shape2: second parameter value of the fitted beta distribution (effective number of independent tests in the region)\r +* nb.perm: number of permutations\r +* pv.emp.perm: empirical P-value, computed based on permutations\r +* pv.emp.beta: empirical P-value, computed based on the fitted beta distribution\r +* runtime: run time in minutes\r +\r +`sqtls-${level}fdr.nominal.tsv` (in addition to the previous)\r +\r +* fdr: false discovery rate (computed across empirical P-values)\r +* p_tn: gene-level threshold for nominal P-values\r +\r +## Cite sqtlseeker2-nf\r +\r +If you find `sqtlseeker2-nf` useful in your research please cite the related publication:\r +\r +Garrido-Martín, D., Borsari, B., Calvo, M., Reverter, F., Guigó, R. Identification and analysis of splicing quantitative trait loci across multiple tissues in the human genome. *Nat Commun* 12, 727 (2021). 
[https://doi.org/10.1038/s41467-020-20578-2](https://doi.org/10.1038/s41467-020-20578-2)\r +\r +""" ; + schema1:keywords "QTL mapping, rna-seq, SNPs, Nextflow, Alternative splicing" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "sqtlseeker2-nf" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/435?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Genes and transcripts annotation with Isoseq using uLTRA and TAMA" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/993?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/isoseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/isoseq" ; + schema1:sdDatePublished "2024-08-05 10:23:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/993/ro_crate?version=7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9995 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:58Z" ; + schema1:dateModified "2024-06-11T12:54:58Z" ; + schema1:description "Genes and transcripts annotation with Isoseq using uLTRA and TAMA" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/993?version=6" ; + schema1:keywords "isoseq, isoseq-3, rna, tama, ultra" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/isoseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/993?version=7" ; + schema1:version 7 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=12" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-08-05 10:23:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=12" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5836 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:03Z" ; + schema1:dateModified "2024-06-11T12:55:03Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=12" ; + schema1:version 12 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """SINGLE-END workflow. \r +Align reads on fasta reference/assembly using bwa mem, get a consensus, variants, mutation explanations. \r +\r +IMPORTANT: \r +* For "bcftools call" consensus step, the --ploidy file is in "Données partagées" (Shared Data) and must be imported in your history to use the worflow by providing this file (tells bcftools to consider haploid variant calling). 
\r +* SELECT the mot ADAPTED VADR MODEL for annotation (see vadr parameters).""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/517?version=1" ; + schema1:isBasedOn "https://github.com/ANSES-Ploufragan/vvv2_display" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for VVV2_align_SE" ; + schema1:sdDatePublished "2024-08-05 10:27:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/517/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 30291 ; + schema1:creator ; + schema1:dateCreated "2023-06-27T14:41:53Z" ; + schema1:dateModified "2023-10-16T12:03:17Z" ; + schema1:description """SINGLE-END workflow. \r +Align reads on fasta reference/assembly using bwa mem, get a consensus, variants, mutation explanations. \r +\r +IMPORTANT: \r +* For "bcftools call" consensus step, the --ploidy file is in "Données partagées" (Shared Data) and must be imported in your history to use the worflow by providing this file (tells bcftools to consider haploid variant calling). \r +* SELECT the mot ADAPTED VADR MODEL for annotation (see vadr parameters).""" ; + schema1:image ; + schema1:keywords "single-end, Annotation, variant, Virus, variant_calling, high-throughput_sequencing_analysis, Galaxy, Bioinformatics, SNPs, variant calling" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "VVV2_align_SE" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/517?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 222967 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Common Workflow Language example that illustrate the process of setting up a\r +simulation system containing a protein, step by step, using the BioExcel\r +Building Blocks library (biobb). The particular example used is the Lysozyme\r +protein (PDB code 1AKI).\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.29.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb-wf-md-setup-protein-cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Example of setting up a simulation system" ; + schema1:sdDatePublished "2024-08-05 10:32:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/29/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11233 ; + schema1:creator ; + schema1:dateCreated "2021-05-07T15:27:34Z" ; + schema1:dateModified "2021-05-07T15:28:30Z" ; + schema1:description """Common Workflow Language example that illustrate the process of setting up a\r +simulation system containing a protein, step by step, using the BioExcel\r +Building Blocks library (biobb). 
The particular example used is the Lysozyme\r +protein (PDB code 1AKI).\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/29?version=2" ; + schema1:isPartOf , + ; + schema1:keywords "molecular dynamics, trajectories, protein" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Example of setting up a simulation system" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/29?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 53141 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "16S rRNA amplicon sequencing analysis workflow using QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-08-05 10:22:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4808 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:38Z" ; + schema1:dateModified "2024-06-11T12:54:38Z" ; + schema1:description "16S rRNA amplicon sequencing analysis workflow using QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in 
Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.131.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/131/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 77840 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T08:35:44Z" ; + schema1:dateModified "2023-04-14T08:37:57Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 
[Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/131?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_amber_md_setup/master/biobb_wf_amber_md_setup/notebooks/mdsetup_lig/biobb_amber_complex_setup_notebook.ipynb" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """Galaxy version of pre-processing of reads from COVID-19 samples. \r +QC + human read cleaning\r +Based on https://github.com/Finn-Lab/Metagen-FastQC/blob/master/metagen-fastqc.sh""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/99?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for COVID-19: read pre-processing" ; + schema1:sdDatePublished "2024-08-05 10:33:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/99/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13217 ; + schema1:dateCreated "2021-02-02T19:06:59Z" ; + schema1:dateModified "2023-02-13T14:06:45Z" ; + schema1:description """Galaxy version of pre-processing of reads from COVID-19 samples. 
\r +QC + human read cleaning\r +Based on https://github.com/Finn-Lab/Metagen-FastQC/blob/master/metagen-fastqc.sh""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "COVID-19: read pre-processing" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/99?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """Assembly polishing subworkflow: Racon polishing with long reads\r +\r +Inputs: long reads and assembly contigs\r +\r +Workflow steps:\r +* minimap2 : long reads are mapped to assembly => overlaps.paf. \r +* overaps, long reads, assembly => Racon => polished assembly 1\r +* using polished assembly 1 as input; repeat minimap2 + racon => polished assembly 2\r +* using polished assembly 2 as input, repeat minimap2 + racon => polished assembly 3\r +* using polished assembly 3 as input, repeat minimap2 + racon => polished assembly 4\r +* Racon long-read polished assembly => Fasta statistics\r +* Note: The Racon tool panel can be a bit confusing and is under review for improvement. Presently it requires sequences (= long reads), overlaps (= the paf file created by minimap2), and target sequences (= the contigs to be polished) as per "usage" described here https://github.com/isovic/racon/blob/master/README.md\r +* Note: Racon: the default setting for "output unpolished target sequences?" is No. This has been changed to Yes for all Racon steps in these polishing workflows. This means that even if no polishes are made in some contigs, they will be part of the output fasta file. \r +* Note: the contigs output by Racon have new tags in their headers. 
For more on this see https://github.com/isovic/racon/issues/85.\r +\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.227.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Racon polish with long reads, x4" ; + schema1:sdDatePublished "2024-08-05 10:32:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/227/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 26968 ; + schema1:creator ; + schema1:dateCreated "2021-11-08T05:45:09Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """Assembly polishing subworkflow: Racon polishing with long reads\r +\r +Inputs: long reads and assembly contigs\r +\r +Workflow steps:\r +* minimap2 : long reads are mapped to assembly => overlaps.paf. \r +* overaps, long reads, assembly => Racon => polished assembly 1\r +* using polished assembly 1 as input; repeat minimap2 + racon => polished assembly 2\r +* using polished assembly 2 as input, repeat minimap2 + racon => polished assembly 3\r +* using polished assembly 3 as input, repeat minimap2 + racon => polished assembly 4\r +* Racon long-read polished assembly => Fasta statistics\r +* Note: The Racon tool panel can be a bit confusing and is under review for improvement. Presently it requires sequences (= long reads), overlaps (= the paf file created by minimap2), and target sequences (= the contigs to be polished) as per "usage" described here https://github.com/isovic/racon/blob/master/README.md\r +* Note: Racon: the default setting for "output unpolished target sequences?" is No. This has been changed to Yes for all Racon steps in these polishing workflows. 
This means that even if no polishes are made in some contigs, they will be part of the output fasta file. \r +* Note: the contigs output by Racon have new tags in their headers. For more on this see https://github.com/isovic/racon/issues/85.\r +\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "Large-genome-assembly" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Racon polish with long reads, x4" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/227?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 344676 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-23T13:33:07.965408" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/kmer-profiling-hifi-trio-VGP2" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "kmer-profiling-hifi-trio-VGP2/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Rbbt implementation of the Covid-19 pilot workflow from the Personalized Medicine Center of Excellence.\r +\r +This workflow processes single cell data to personalize boolean models that are then used in a multi-scale cellular simulation using PhysiBoSS.""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/469?version=1" ; + schema1:isBasedOn "https://github.com/Rbbt-Workflows/Covid19.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PerMedCoE Covid19 Pilot workflow (Rbbt)" ; + schema1:sdDatePublished "2024-08-05 10:30:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/469/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5368 ; + schema1:dateCreated "2023-05-09T09:43:39Z" ; + schema1:dateModified "2023-05-23T12:33:53Z" ; + schema1:description """Rbbt implementation of the Covid-19 pilot workflow from the Personalized Medicine Center of Excellence.\r +\r +This workflow processes single cell data to personalize boolean models that are then used in a multi-scale cellular simulation using PhysiBoSS.""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "PerMedCoE Covid19 Pilot workflow (Rbbt)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/469?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.54.5" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_ligand_parameterization" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter GMX Notebook Automatic Ligand Parameterization tutorial" ; + 
schema1:sdDatePublished "2024-08-05 10:25:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/54/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15271 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-07-26T09:20:04Z" ; + schema1:dateModified "2023-07-26T09:21:07Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/54?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter GMX Notebook Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_ligand_parameterization/blob/master/biobb_wf_ligand_parameterization/notebooks/biobb_ligand_parameterization_tutorial.ipynb" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Simple bacterial assembly and annotation" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/966?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/bacass" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bacass" ; + schema1:sdDatePublished "2024-08-05 10:24:17 +0100" ; + schema1:url "https://workflowhub.eu/workflows/966/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7151 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:43Z" ; + schema1:dateModified "2024-06-11T12:54:43Z" ; + schema1:description "Simple bacterial assembly and annotation" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/966?version=7" ; + schema1:keywords "Assembly, bacterial-genomes, denovo, denovo-assembly, genome-assembly, hybrid-assembly, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bacass" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/966?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/383?version=3" ; + schema1:isBasedOn "https://gitlab.bsc.es/lrodrig1/structuralvariants_poc/-/tree/1.1.3/structuralvariants/nextflow" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNV_pipeline" ; + schema1:sdDatePublished "2024-08-05 10:31:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/383/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4731 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-10-11T10:58:35Z" ; + schema1:dateModified "2023-01-16T14:02:25Z" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/383?version=2" ; + schema1:keywords "CODEX2, TransBioNet, ExomeDepth, variant calling, cancer, manta, GRIDS, structural variants" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CNV_pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/383?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Integration of viral sequences in genomic data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1026?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/viralintegration" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/viralintegration" ; + schema1:sdDatePublished "2024-08-05 10:23:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1026/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8264 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:20Z" ; + schema1:dateModified "2024-06-11T12:55:20Z" ; + schema1:description "Integration of viral sequences in genomic data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1026?version=1" ; + schema1:keywords "chimeric-alignment, ctat, viral-integration, Virus, virusintegrationfinder" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/viralintegration" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1026?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# BatchConvert\r +\r +A command line tool for converting image data into either of the standard file formats OME-TIFF or OME-Zarr. \r +\r +The tool wraps the dedicated file converters bfconvert and bioformats2raw to convert into OME-TIFF or OME-Zarr,\r +respectively. The workflow management system NextFlow is used to perform conversion in parallel for batches of images. \r +\r +The tool also wraps s3 and Aspera clients (go-mc and aspera-cli, respectively). Therefore, input and output locations can \r +be specified as local or remote storage and file transfer will be performed automatically. The conversion can be run on \r +HPC with Slurm. 
\r +\r +![](figures/diagram.png)\r +\r +## Installation & Dependencies\r +\r +**Important** note: The package has been so far only tested on Ubuntu 20.04.\r +\r +The minimal dependency to run the tool is NextFlow, which should be installed and made accessible from the command line.\r +\r +If conda exists on your system, you can install BatchConvert together with NextFlow using the following script:\r +```\r +git clone https://github.com/Euro-BioImaging/BatchConvert.git && \\ \r +source BatchConvert/installation/install_with_nextflow.sh\r +```\r +\r +\r +If you already have NextFlow installed and accessible from the command line (or if you prefer to install it manually \r +e.g., as shown [here](https://www.nextflow.io/docs/latest/getstarted.html)), you can also install BatchConvert alone, using the following script:\r +```\r +git clone https://github.com/Euro-BioImaging/BatchConvert.git && \\ \r +source BatchConvert/installation/install.sh\r +```\r +\r +\r +Other dependencies (which will be **automatically** installed):\r +- bioformats2raw (entrypoint bioformats2raw)\r +- bftools (entrypoint bfconvert)\r +- go-mc (entrypoint mc)\r +- aspera-cli (entrypoint ascp)\r +\r +These dependencies will be pulled and cached automatically at the first execution of the conversion command. \r +The mode of dependency management can be specified by using the command line option ``--profile`` or `-pf`. Depending \r +on how this option is specified, the dependencies will be acquired / run either via conda or via docker/singularity containers. \r +\r +Specifying ``--profile conda`` (default) will install the dependencies to an \r +environment at ``./.condaCache`` and use this environment to run the workflow. This option \r +requires that miniconda/anaconda is installed on your system. 
\r +\r +Alternatively, specifying ``--profile docker`` or ``--profile singularity`` will pull a docker or \r +singularity image with the dependencies, respectively, and use this image to run the workflow.\r +These options assume that the respective container runtime (docker or singularity) is available on \r +your system. If singularity is being used, a cache directory will be created at the path \r +``./.singularityCache`` where the singularity image is stored. \r +\r +Finally, you can still choose to install the dependencies manually and use your own installations to run\r +the workflow. In this case, you should specify ``--profile standard`` and make sure the entrypoints\r +specified above are recognised by your shell. \r +\r +\r +## Configuration\r +\r +BatchConvert can be configured to have default options for file conversion and transfer. Probably, the most important sets of parameters\r +to be configured include credentials for the remote ends. The easiest way to configure remote stores is by running the interactive \r +configuration command as indicated below.\r +\r +### Configuration of the s3 object store\r +\r +Run the interactive configuration command: \r +\r +`batchconvert configure_s3_remote`\r +\r +This will start a sequence of requests for s3 credentials such as name, url, access, etc. Provide each requested credential and click\r +enter. Continue this cycle until the process is finished. 
Upon completing the configuration, the sequence of commands should roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_s3_remote\r +enter remote name (for example s3)\r +s3\r +enter url:\r +https://s3.embl.de\r +enter access key:\r +\r +enter secret key:\r +\r +enter bucket name:\r +\r +Configuration of the default s3 credentials is complete\r +```\r +\r +\r +### Configuration of the BioStudies user space\r +\r +Run the interactive configuration command: \r +\r +`batchconvert configure_bia_remote`\r +\r +This will prompt a request for the secret directory to connect to. Enter the secret directory for your user space and click enter. \r +Upon completing the configuration, the sequence of commands should roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_bia_remote\r +enter the secret directory for BioImage Archive user space:\r +\r +configuration of the default bia credentials is complete\r +```\r +\r +### Configuration of the slurm options\r +\r +BatchConvert can also run on slurm clusters. In order to configure the slurm parameters, run the interactive configuration command: \r +\r +`batchconvert configure_slurm`\r +\r +This will start a sequence of requests for slurm options. Provide each requested option and click enter. \r +Continue this cycle until the process is finished. 
Upon completing the configuration, the sequence of commands should \r +roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_slurm\r +Please enter value for queue_size\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´50´\r +s\r +Please enter value for submit_rate_limit\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´10/2min´\r +s\r +Please enter value for cluster_options\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´--mem-per-cpu=3140 --cpus-per-task=16´\r +s\r +Please enter value for time\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´6h´\r +s\r +configuration of the default slurm parameters is complete\r +```\r +\r +### Configuration of the default conversion parameters\r +\r +While all conversion parameters can be specified as command line arguments, it can\r +be useful for the users to set their own default parameters to avoid re-entering those\r +parameters for subsequent executions. BatchConvert allows for interactive configuration of \r +conversion in the same way as configuration of the remote stores described above.\r +\r +To configure the conversion into OME-TIFF, run the following command:\r +\r +`batchconvert configure_ometiff`\r +\r +This will prompt the user to enter a series of parameters, which will then be saved as the \r +default parameters to be passed to the `batchconvert ometiff` command. 
Upon completing the \r +configuration, the sequence of commands should look similar to:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_ometiff\r +Please enter value for noflat\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for series\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for timepoint\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +...\r +...\r +...\r +...\r +...\r +...\r +Configuration of the default parameters for 'bfconvert' is complete\r +```\r +\r +\r +To configure the conversion into OME-Zarr, run the following command:\r +\r +`batchconvert configure_omezarr`\r +\r +Similarly, this will prompt the user to enter a series of parameters, which will then be saved as the \r +default parameters to be passed to the `batchconvert omezarr` command. 
Upon completing the configuration, \r +the sequence of commands should look similar to:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_omezarr\r +Please enter value for resolutions_zarr\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for chunk_h\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for chunk_w\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +...\r +...\r +...\r +...\r +...\r +...\r +Configuration of the default parameters for 'bioformats2raw' is complete\r +```\r +\r +It is important to note that the initial defaults for the conversion parameters are the same as the defaults\r +of the backend tools bfconvert and bioformats2raw, as noted in the prompt excerpt above. Through interactive configuration, \r +the user is overriding these initial defaults and setting their own defaults. It is possible to reset the initial\r +defaults by running the following command.\r +\r +`batchconvert reset_defaults`\r +\r +Another important point is that any of these configured parameters can be overridden by passing a value to that\r +parameter in the commandline. For instance, in the following command, the value of 20 will be assigned to `chunk_h` parameter \r +even if the value for the same parameter might be different in the configuration file. \r +\r +`batchconvert omezarr --chunk_h 20 `\r +\r +\r +## Examples\r +\r +### Local conversion\r +\r +#### Parallel conversion of files to separate OME-TIFFs / OME-Zarrs:\r +Convert a batch of images on your local storage into OME-TIFF format. 
\r +Note that the `input_path` in the command given below is typically a \r +directory with multiple image files but a single image file can also be passed:\\\r +`batchconvert ometiff -pf conda ` \r +\r +Note that if this is your first conversion with the profile `conda`, \r +it will take a while for a conda environment with the dependencies to be\r +created. All the subsequent conversion commands with the profile `conda`,\r +however, will use this environment, and thus show no such delay.\r +\r +Since conda is the default profile, it does not have to be \r +explicitly included in the command line. Thus, the command can be shortened to:\\\r +`batchconvert ometiff `\r +\r +Convert only the first channel of the images:\\\r +`batchconvert ometiff -chn 0 `\r +\r +Crop the images being converted along x and y axis by 150 pixels:\\\r +`batchconvert ometiff -cr 0,0,150,150 `\r +\r +Convert into OME-Zarr instead:\\\r +`batchconvert omezarr `\r +\r +Convert into OME-Zarr with 3 resolution levels:\\\r +`batchconvert omezarr -rz 3 `\r +\r +Select a subset of images with a matching string such as "mutation":\\\r +`batchconvert omezarr -p mutation `\r +\r +Select a subset of images using wildcards. Note that the use of "" around \r +the input path is necessary when using wildcards:\\\r +`batchconvert omezarr "/*D3*.oir" `\r +\r +Convert by using a singularity container instead of conda environment (requires\r +singularity to be installed on your system):\\\r +`batchconvert omezarr -pf singularity "/*D3*.oir" `\r +\r +Convert by using a docker container instead of conda environment (requires docker\r +to be installed on your system):\\\r +`batchconvert omezarr -pf docker "/*D3*.oir" `\r +\r +Note that similarly to the case with the profile `conda`, the first execution of\r +a conversion with the profile `singularity` or `docker` will take a while for the\r +container image to be pulled. 
All the subsequent conversion commands using a \r +container option will use this image, and thus show no such delay. \r +\r +Convert local data and upload the output to an s3 bucket. Note that the output \r +path is created relative to the bucket specified in your s3 configuration:\\\r +`batchconvert omezarr -dt s3 `\r +\r +Receive input files from an s3 bucket, convert locally and upload the output to \r +the same bucket. Note that wildcards cannot be used when the input is from s3. \r +Use pattern matching option `-p` for selecting a subset of input files:\\\r +`batchconvert omezarr -p mutation -st s3 -dt s3 `\r +\r +Receive input files from your private BioStudies user space and convert them locally.\r +Use pattern matching option `-p` for selecting a subset of input files:\\\r +`batchconvert omezarr -p mutation -st bia `\r +\r +Receive an input from an s3 bucket, convert locally and upload the output to your \r +private BioStudies user space. Use pattern matching option `-p` for selecting a subset \r +of input files:\\\r +`batchconvert omezarr -p mutation -st s3 -dt bia `\r +\r +Note that in all the examples shown above, BatchConvert treats each input file as separate,\r +standalone data point, disregarding the possibility that some of the input files might belong to \r +the same multidimensional array. Thus, each input file is converted to an independent \r +OME-TIFF / OME-Zarr and the number of outputs will thus equal the number of selected input files.\r +An alternative scenario is discussed below.\r +\r +#### Parallel conversion of file groups by stacking multiple files into single OME-TIFFs / OME-Zarrs:\r +\r +When the flag `--merge_files` is specified, BatchConvert tries to detect which input files might \r +belong to the same multidimensional array based on the patterns in the filenames. 
Then a "grouped conversion" \r +is performed, meaning that the files belonging to the same dataset will be incorporated into \r +a single OME-TIFF / OME-Zarr series, in that files will be concatenated along specific dimension(s) \r +during the conversion. Multiple file groups in the input directory can be detected and converted \r +in parallel. \r +\r +This feature uses Bio-Formats's pattern files as described [here](https://docs.openmicroscopy.org/bio-formats/6.6.0/formats/pattern-file.html).\r +However, BatchConvert generates pattern files automatically, allowing the user to directly use the \r +input directory in the conversion command. BatchConvert also has the option of specifying the \r +concatenation axes in the command line, which is especially useful in cases where the filenames \r +may not contain dimension information. \r +\r +To be able to use the `--merge files` flag, the input file names must obey certain rules:\r +1. File names in the same group must be uniform, except for one or more **numeric field(s)**, which\r +should show incremental change across the files. These so-called **variable fields** \r +will be detected and used as the dimension(s) of concatenation.\r +2. The length of variable fields must be uniform within the group. For instance, if the\r +variable field has values reaching multi-digit numbers, leading "0"s should be included where needed \r +in the file names to make the variable field length uniform within the group.\r +3. Typically, each variable field should follow a dimension specifier. What patterns can be used as \r +dimension specifiers are explained [here](https://docs.openmicroscopy.org/bio-formats/6.6.0/formats/pattern-file.html).\r +However, BatchConvert also has the option `--concatenation_order`, which allows the user to\r +specify from the command line, the dimension(s), along which the files must be concatenated.\r +4. 
File names that are unique and cannot be associated with any group will be assumed as\r +standalone images and converted accordingly. \r +\r +Below are some examples of grouped conversion commands in the context of different possible use-case scenarios:\r +\r +**Example 1:**\r +\r +This is an example of a folder with non-uniform filename lengths:\r +```\r +time-series/test_img_T2\r +time-series/test_img_T4\r +time-series/test_img_T6\r +time-series/test_img_T8\r +time-series/test_img_T10\r +time-series/test_img_T12\r +```\r +In this example, leading zeroes are missing in the variable fields of some filenames. \r +A typical command to convert this folder to a single OME-TIFF would look like: \\\r +`batchconvert --ometiff --merge_files /time-series `\r +\r +However, this command would fail to create a single OME-Zarr folder due to the non-uniform \r +lengths of the filenames. Instead, the files would be split into two groups based on the\r +filename length, leading to two separate OME-Zarrs with names:\r +\r +`test_img_TRange{2-8-2}.ome.zarr` and `test_img_TRange{10-12-2}.ome.zarr`\r +\r +Here is the corrected version of the folder for the above example-\r +```\r +time-series/test_img_T02\r +time-series/test_img_T04\r +time-series/test_img_T06\r +time-series/test_img_T08\r +time-series/test_img_T10\r +time-series/test_img_T12\r +```\r +\r +Executing the same command on this folder would result in a single OME-Zarr with the name:\r +`test_img_TRange{02-12-2}.ome.zarr`\r +\r +**Example 2**- \r +\r +In this example, the filename lengths are uniform but the incrementation within the variable field is not.\r +```\r +time-series/test_img_T2\r +time-series/test_img_T4\r +time-series/test_img_T5\r +time-series/test_img_T7\r +```\r +\r +A typical command to convert this folder to a single OME-Zarr would look like: \\\r +`batchconvert --omezarr --merge_files /time-series `\r +\r +However, the command would fail to assume these files as a single group due to the\r +non-uniform 
incrementation in the variable field of the filenames. Instead, the dataset \r +would be split into two groups, leading to two separate OME-Zarrs with the following names:\r +`test_img_TRange{2-4-2}.ome.zarr` and `test_img_TRange{5-7-2}.ome.zarr` \r +\r +\r +**Example 3**\r +\r +This is an example of a case where the conversion attempts to concatenate files along two\r +dimensions, channel and time.\r +```\r +multichannel_time-series/test_img_C1-T1\r +multichannel_time-series/test_img_C1-T2\r +multichannel_time-series/test_img_C1-T3\r +multichannel_time-series/test_img_C2-T1\r +multichannel_time-series/test_img_C2-T2\r +```\r +To convert this folder to a single OME-Zarr, one could try the following command: \\\r +`batchconvert --omezarr --merge_files /multichannel_time-series `\r +\r +However, since the channel-2 does not have the same number of timeframes as the channel-1, \r +BatchConvert will fail to assume these two channels as part of the same series and\r +will instead split the two channels into two separate OME-Zarrs. 
\r +\r +The output would look like: \\\r +`test_img_C1-TRange{1-3-1}.ome.zarr` \\\r +`test_img_C2-TRange{1-2-1}.ome.zarr`\r +\r +To be able to really incorporate all files into a single OME-Zarr, the folder should have equal\r +number of images corresponding to both channels, as shown below:\r +```\r +multichannel_time-series/test_img_C1-T1\r +multichannel_time-series/test_img_C1-T2\r +multichannel_time-series/test_img_C1-T3\r +multichannel_time-series/test_img_C2-T1\r +multichannel_time-series/test_img_C2-T2\r +multichannel_time-series/test_img_C2-T3\r +```\r +The same conversion command on this version of the input folder would result in a single \r +OME-Zarr with the name: \\\r +`test_img_CRange{1-2-1}-TRange{1-3-1}.ome.zarr`\r +\r +\r +**Example 4**\r +\r +This is another example of a case, where there are multiple filename patterns in the input folder.\r +\r +```\r +folder_with_multiple_groups/test_img_C1-T1\r +folder_with_multiple_groups/test_img_C1-T2\r +folder_with_multiple_groups/test_img_C2-T1\r +folder_with_multiple_groups/test_img_C2-T2\r +folder_with_multiple_groups/test_img_T1-Z1\r +folder_with_multiple_groups/test_img_T1-Z2\r +folder_with_multiple_groups/test_img_T1-Z3\r +folder_with_multiple_groups/test_img_T2-Z1\r +folder_with_multiple_groups/test_img_T2-Z2\r +folder_with_multiple_groups/test_img_T2-Z3\r +```\r +\r +One can convert this folder with- \\\r +`batchconvert --omezarr --merge_files /folder_with_multiple_groups `\r + \r +BatchConvert will detect the two patterns in this folder and perform two grouped conversions. \r +The output folders will be named as `test_img_CRange{1-2-1}-TRange{1-2-1}.ome.zarr` and \r +`test_img_TRange{1-2-1}-ZRange{1-3-1}.ome.zarr`. 
\r +\r +\r +**Example 5**\r +\r +Now imagine that we have the same files as in the example 4 but the filenames of the\r +first group lack any dimension specifier, so we have the following folder:\r +\r +```\r +folder_with_multiple_groups/test_img_1-1\r +folder_with_multiple_groups/test_img_1-2\r +folder_with_multiple_groups/test_img_2-1\r +folder_with_multiple_groups/test_img_2-2\r +folder_with_multiple_groups/test_img_T1-Z1\r +folder_with_multiple_groups/test_img_T1-Z2\r +folder_with_multiple_groups/test_img_T1-Z3\r +folder_with_multiple_groups/test_img_T2-Z1\r +folder_with_multiple_groups/test_img_T2-Z2\r +folder_with_multiple_groups/test_img_T2-Z3\r +```\r +\r +In such a scenario, BatchConvert allows the user to specify the concatenation axes \r +via `--concatenation_order` option. This option expects comma-separated strings of dimensions \r +for each group. In this example, the user must provide a string of 2 characters, such as `ct` for \r +channel and time, for group 1, since there are two variable fields for this group. Since group 2 \r +already has dimension specifiers (T and Z as specified in the filenames preceding the variable fields),\r +the user does not need to specify anything for this group, and can enter `auto` or `aa` for automatic\r +detection of the specifiers. \r +\r +So the following line can be used to convert this folder: \\\r +`batchconvert --omezarr --merge_files --concatenation_order ct,aa /folder_with_multiple_groups `\r +\r +The resulting OME-Zarrs will have the names:\r +`test_img_CRange{1-2-1}-TRange{1-2-1}.ome.zarr` and\r +`test_img_TRange{1-2-1}-ZRange{1-3-1}.ome.zarr`\r +\r +Note that `--concatenation_order` will override any dimension specifiers already\r +existing in the filenames.\r +\r +\r +**Example 6**\r +\r +There can be scenarios where the user may want to have further control over the axes along \r +which to concatenate the images. 
For example, the filenames might contain the data acquisition\r +date, which can be recognised by BatchConvert as a concatenation axis in the automatic \r +detection mode. An example of such a fileset might look like:\r +\r +```\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T1\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T2\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T3\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T1\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T2\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T3\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T1\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T2\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T3\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T1\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T2\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T3\r +```\r +\r +One may try the following command to convert this folder:\r +\r +`batchconvert --omezarr --merge_files /filenames_with_dates `\r +\r +Since the concatenation axes are not specified, this command would try to create\r +a single OME-Zarr with name: `test_data_dateRange{03-04-1}.03.2023_imageZRange{1-2-1}-TRange{1-3-1}`.\r +\r +In order to force BatchConvert to ignore the date field, the user can restrict the concatenation \r +axes to the last two numeric fields. This can be done by using a command such as: \\\r +`batchconvert --omezarr --merge_files --concatenation_order aa /filenames_with_dates ` \\\r +This command will avoid concatenation along the date field, and therefore, there will be two\r +OME-Zarrs corresponding to the two dates. The number of characters being passed to the \r +`--concatenation_order` option specifies the number of numeric fields (starting from the right \r +end of the filename) that are recognised by the BatchConvert as valid concatenation axes. 
\r +Passing `aa`, therefore, means that the last two numeric fields must be recognised as \r +concatenation axes and the dimension type should be automatically detected (`a` for automatic). \r +In the same logic, one could, for example, convert each Z section into a separate OME-Zarr by \r +specifying `--concatenation_order a`.\r +\r +\r +\r +### Conversion on slurm\r +\r +All the examples given above can also be run on slurm by specifying `-pf cluster` option. \r +Note that this option automatically uses the singularity profile:\\\r +`batchconvert omezarr -pf cluster -p .oir `\r +\r +\r +\r +\r +\r +\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/453?version=1" ; + schema1:isBasedOn "https://github.com/Euro-BioImaging/BatchConvert.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for BatchConvert" ; + schema1:sdDatePublished "2024-08-05 10:29:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/453/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 89 ; + schema1:creator ; + schema1:dateCreated "2023-04-10T10:40:10Z" ; + schema1:dateModified "2023-04-27T10:51:07Z" ; + schema1:description """# BatchConvert\r +\r +A command line tool for converting image data into either of the standard file formats OME-TIFF or OME-Zarr. \r +\r +The tool wraps the dedicated file converters bfconvert and bioformats2raw to convert into OME-TIFF or OME-Zarr,\r +respectively. The workflow management system NextFlow is used to perform conversion in parallel for batches of images. \r +\r +The tool also wraps s3 and Aspera clients (go-mc and aspera-cli, respectively). Therefore, input and output locations can \r +be specified as local or remote storage and file transfer will be performed automatically. The conversion can be run on \r +HPC with Slurm. 
\r +\r +![](figures/diagram.png)\r +\r +## Installation & Dependencies\r +\r +**Important** note: The package has been so far only tested on Ubuntu 20.04.\r +\r +The minimal dependency to run the tool is NextFlow, which should be installed and made accessible from the command line.\r +\r +If conda exists on your system, you can install BatchConvert together with NextFlow using the following script:\r +```\r +git clone https://github.com/Euro-BioImaging/BatchConvert.git && \\ \r +source BatchConvert/installation/install_with_nextflow.sh\r +```\r +\r +\r +If you already have NextFlow installed and accessible from the command line (or if you prefer to install it manually \r +e.g., as shown [here](https://www.nextflow.io/docs/latest/getstarted.html)), you can also install BatchConvert alone, using the following script:\r +```\r +git clone https://github.com/Euro-BioImaging/BatchConvert.git && \\ \r +source BatchConvert/installation/install.sh\r +```\r +\r +\r +Other dependencies (which will be **automatically** installed):\r +- bioformats2raw (entrypoint bioformats2raw)\r +- bftools (entrypoint bfconvert)\r +- go-mc (entrypoint mc)\r +- aspera-cli (entrypoint ascp)\r +\r +These dependencies will be pulled and cached automatically at the first execution of the conversion command. \r +The mode of dependency management can be specified by using the command line option ``--profile`` or `-pf`. Depending \r +on how this option is specified, the dependencies will be acquired / run either via conda or via docker/singularity containers. \r +\r +Specifying ``--profile conda`` (default) will install the dependencies to an \r +environment at ``./.condaCache`` and use this environment to run the workflow. This option \r +requires that miniconda/anaconda is installed on your system. 
\r +\r +Alternatively, specifying ``--profile docker`` or ``--profile singularity`` will pull a docker or \r +singularity image with the dependencies, respectively, and use this image to run the workflow.\r +These options assume that the respective container runtime (docker or singularity) is available on \r +your system. If singularity is being used, a cache directory will be created at the path \r +``./.singularityCache`` where the singularity image is stored. \r +\r +Finally, you can still choose to install the dependencies manually and use your own installations to run\r +the workflow. In this case, you should specify ``--profile standard`` and make sure the entrypoints\r +specified above are recognised by your shell. \r +\r +\r +## Configuration\r +\r +BatchConvert can be configured to have default options for file conversion and transfer. Probably, the most important sets of parameters\r +to be configured include credentials for the remote ends. The easiest way to configure remote stores is by running the interactive \r +configuration command as indicated below.\r +\r +### Configuration of the s3 object store\r +\r +Run the interactive configuration command: \r +\r +`batchconvert configure_s3_remote`\r +\r +This will start a sequence of requests for s3 credentials such as name, url, access, etc. Provide each requested credential and click\r +enter. Continue this cycle until the process is finished. 
Upon completing the configuration, the sequence of commands should roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_s3_remote\r +enter remote name (for example s3)\r +s3\r +enter url:\r +https://s3.embl.de\r +enter access key:\r +\r +enter secret key:\r +\r +enter bucket name:\r +\r +Configuration of the default s3 credentials is complete\r +```\r +\r +\r +### Configuration of the BioStudies user space\r +\r +Run the interactive configuration command: \r +\r +`batchconvert configure_bia_remote`\r +\r +This will prompt a request for the secret directory to connect to. Enter the secret directory for your user space and click enter. \r +Upon completing the configuration, the sequence of commands should roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_bia_remote\r +enter the secret directory for BioImage Archive user space:\r +\r +configuration of the default bia credentials is complete\r +```\r +\r +### Configuration of the slurm options\r +\r +BatchConvert can also run on slurm clusters. In order to configure the slurm parameters, run the interactive configuration command: \r +\r +`batchconvert configure_slurm`\r +\r +This will start a sequence of requests for slurm options. Provide each requested option and click enter. \r +Continue this cycle until the process is finished. 
Upon completing the configuration, the sequence of commands should \r +roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_slurm\r +Please enter value for queue_size\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´50´\r +s\r +Please enter value for submit_rate_limit\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´10/2min´\r +s\r +Please enter value for cluster_options\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´--mem-per-cpu=3140 --cpus-per-task=16´\r +s\r +Please enter value for time\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´6h´\r +s\r +configuration of the default slurm parameters is complete\r +```\r +\r +### Configuration of the default conversion parameters\r +\r +While all conversion parameters can be specified as command line arguments, it can\r +be useful for the users to set their own default parameters to avoid re-entering those\r +parameters for subsequent executions. BatchConvert allows for interactive configuration of \r +conversion in the same way as configuration of the remote stores described above.\r +\r +To configure the conversion into OME-TIFF, run the following command:\r +\r +`batchconvert configure_ometiff`\r +\r +This will prompt the user to enter a series of parameters, which will then be saved as the \r +default parameters to be passed to the `batchconvert ometiff` command. 
Upon completing the \r +configuration, the sequence of commands should look similar to:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_ometiff\r +Please enter value for noflat\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for series\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for timepoint\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +...\r +...\r +...\r +...\r +...\r +...\r +Configuration of the default parameters for 'bfconvert' is complete\r +```\r +\r +\r +To configure the conversion into OME-Zarr, run the following command:\r +\r +`batchconvert configure_omezarr`\r +\r +Similarly, this will prompt the user to enter a series of parameters, which will then be saved as the \r +default parameters to be passed to the `batchconvert omezarr` command. 
Upon completing the configuration, \r +the sequence of commands should look similar to:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_omezarr\r +Please enter value for resolutions_zarr\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for chunk_h\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for chunk_w\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +...\r +...\r +...\r +...\r +...\r +...\r +Configuration of the default parameters for 'bioformats2raw' is complete\r +```\r +\r +It is important to note that the initial defaults for the conversion parameters are the same as the defaults\r +of the backend tools bfconvert and bioformats2raw, as noted in the prompt excerpt above. Through interactive configuration, \r +the user is overriding these initial defaults and setting their own defaults. It is possible to reset the initial\r +defaults by running the following command.\r +\r +`batchconvert reset_defaults`\r +\r +Another important point is that any of these configured parameters can be overridden by passing a value to that\r +parameter in the commandline. For instance, in the following command, the value of 20 will be assigned to `chunk_h` parameter \r +even if the value for the same parameter might be different in the configuration file. \r +\r +`batchconvert omezarr --chunk_h 20 `\r +\r +\r +## Examples\r +\r +### Local conversion\r +\r +#### Parallel conversion of files to separate OME-TIFFs / OME-Zarrs:\r +Convert a batch of images on your local storage into OME-TIFF format. 
\r +Note that the `input_path` in the command given below is typically a \r +directory with multiple image files but a single image file can also be passed:\\\r +`batchconvert ometiff -pf conda ` \r +\r +Note that if this is your first conversion with the profile `conda`, \r +it will take a while for a conda environment with the dependencies to be\r +created. All the subsequent conversion commands with the profile `conda`,\r +however, will use this environment, and thus show no such delay.\r +\r +Since conda is the default profile, it does not have to be \r +explicitly included in the command line. Thus, the command can be shortened to:\\\r +`batchconvert ometiff `\r +\r +Convert only the first channel of the images:\\\r +`batchconvert ometiff -chn 0 `\r +\r +Crop the images being converted along x and y axis by 150 pixels:\\\r +`batchconvert ometiff -cr 0,0,150,150 `\r +\r +Convert into OME-Zarr instead:\\\r +`batchconvert omezarr `\r +\r +Convert into OME-Zarr with 3 resolution levels:\\\r +`batchconvert omezarr -rz 3 `\r +\r +Select a subset of images with a matching string such as "mutation":\\\r +`batchconvert omezarr -p mutation `\r +\r +Select a subset of images using wildcards. Note that the use of "" around \r +the input path is necessary when using wildcards:\\\r +`batchconvert omezarr "/*D3*.oir" `\r +\r +Convert by using a singularity container instead of conda environment (requires\r +singularity to be installed on your system):\\\r +`batchconvert omezarr -pf singularity "/*D3*.oir" `\r +\r +Convert by using a docker container instead of conda environment (requires docker\r +to be installed on your system):\\\r +`batchconvert omezarr -pf docker "/*D3*.oir" `\r +\r +Note that similarly to the case with the profile `conda`, the first execution of\r +a conversion with the profile `singularity` or `docker` will take a while for the\r +container image to be pulled. 
All the subsequent conversion commands using a \r +container option will use this image, and thus show no such delay. \r +\r +Convert local data and upload the output to an s3 bucket. Note that the output \r +path is created relative to the bucket specified in your s3 configuration:\\\r +`batchconvert omezarr -dt s3 `\r +\r +Receive input files from an s3 bucket, convert locally and upload the output to \r +the same bucket. Note that wildcards cannot be used when the input is from s3. \r +Use pattern matching option `-p` for selecting a subset of input files:\\\r +`batchconvert omezarr -p mutation -st s3 -dt s3 `\r +\r +Receive input files from your private BioStudies user space and convert them locally.\r +Use pattern matching option `-p` for selecting a subset of input files:\\\r +`batchconvert omezarr -p mutation -st bia `\r +\r +Receive an input from an s3 bucket, convert locally and upload the output to your \r +private BioStudies user space. Use pattern matching option `-p` for selecting a subset \r +of input files:\\\r +`batchconvert omezarr -p mutation -st s3 -dt bia `\r +\r +Note that in all the examples shown above, BatchConvert treats each input file as separate,\r +standalone data point, disregarding the possibility that some of the input files might belong to \r +the same multidimensional array. Thus, each input file is converted to an independent \r +OME-TIFF / OME-Zarr and the number of outputs will thus equal the number of selected input files.\r +An alternative scenario is discussed below.\r +\r +#### Parallel conversion of file groups by stacking multiple files into single OME-TIFFs / OME-Zarrs:\r +\r +When the flag `--merge_files` is specified, BatchConvert tries to detect which input files might \r +belong to the same multidimensional array based on the patterns in the filenames. 
Then a "grouped conversion" \r +is performed, meaning that the files belonging to the same dataset will be incorporated into \r +a single OME-TIFF / OME-Zarr series, in that files will be concatenated along specific dimension(s) \r +during the conversion. Multiple file groups in the input directory can be detected and converted \r +in parallel. \r +\r +This feature uses Bio-Formats's pattern files as described [here](https://docs.openmicroscopy.org/bio-formats/6.6.0/formats/pattern-file.html).\r +However, BatchConvert generates pattern files automatically, allowing the user to directly use the \r +input directory in the conversion command. BatchConvert also has the option of specifying the \r +concatenation axes in the command line, which is especially useful in cases where the filenames \r +may not contain dimension information. \r +\r +To be able to use the `--merge files` flag, the input file names must obey certain rules:\r +1. File names in the same group must be uniform, except for one or more **numeric field(s)**, which\r +should show incremental change across the files. These so-called **variable fields** \r +will be detected and used as the dimension(s) of concatenation.\r +2. The length of variable fields must be uniform within the group. For instance, if the\r +variable field has values reaching multi-digit numbers, leading "0"s should be included where needed \r +in the file names to make the variable field length uniform within the group.\r +3. Typically, each variable field should follow a dimension specifier. What patterns can be used as \r +dimension specifiers are explained [here](https://docs.openmicroscopy.org/bio-formats/6.6.0/formats/pattern-file.html).\r +However, BatchConvert also has the option `--concatenation_order`, which allows the user to\r +specify from the command line, the dimension(s), along which the files must be concatenated.\r +4. 
File names that are unique and cannot be associated with any group will be assumed as\r +standalone images and converted accordingly. \r +\r +Below are some examples of grouped conversion commands in the context of different possible use-case scenarios:\r +\r +**Example 1:**\r +\r +This is an example of a folder with non-uniform filename lengths:\r +```\r +time-series/test_img_T2\r +time-series/test_img_T4\r +time-series/test_img_T6\r +time-series/test_img_T8\r +time-series/test_img_T10\r +time-series/test_img_T12\r +```\r +In this example, leading zeroes are missing in the variable fields of some filenames. \r +A typical command to convert this folder to a single OME-TIFF would look like: \\\r +`batchconvert --ometiff --merge_files /time-series `\r +\r +However, this command would fail to create a single OME-Zarr folder due to the non-uniform \r +lengths of the filenames. Instead, the files would be split into two groups based on the\r +filename length, leading to two separate OME-Zarrs with names:\r +\r +`test_img_TRange{2-8-2}.ome.zarr` and `test_img_TRange{10-12-2}.ome.zarr`\r +\r +Here is the corrected version of the folder for the above example-\r +```\r +time-series/test_img_T02\r +time-series/test_img_T04\r +time-series/test_img_T06\r +time-series/test_img_T08\r +time-series/test_img_T10\r +time-series/test_img_T12\r +```\r +\r +Executing the same command on this folder would result in a single OME-Zarr with the name:\r +`test_img_TRange{02-12-2}.ome.zarr`\r +\r +**Example 2**- \r +\r +In this example, the filename lengths are uniform but the incrementation within the variable field is not.\r +```\r +time-series/test_img_T2\r +time-series/test_img_T4\r +time-series/test_img_T5\r +time-series/test_img_T7\r +```\r +\r +A typical command to convert this folder to a single OME-Zarr would look like: \\\r +`batchconvert --omezarr --merge_files /time-series `\r +\r +However, the command would fail to assume these files as a single group due to the\r +non-uniform 
incrementation in the variable field of the filenames. Instead, the dataset \r +would be split into two groups, leading to two separate OME-Zarrs with the following names:\r +`test_img_TRange{2-4-2}.ome.zarr` and `test_img_TRange{5-7-2}.ome.zarr` \r +\r +\r +**Example 3**\r +\r +This is an example of a case where the conversion attempts to concatenate files along two\r +dimensions, channel and time.\r +```\r +multichannel_time-series/test_img_C1-T1\r +multichannel_time-series/test_img_C1-T2\r +multichannel_time-series/test_img_C1-T3\r +multichannel_time-series/test_img_C2-T1\r +multichannel_time-series/test_img_C2-T2\r +```\r +To convert this folder to a single OME-Zarr, one could try the following command: \\\r +`batchconvert --omezarr --merge_files /multichannel_time-series `\r +\r +However, since the channel-2 does not have the same number of timeframes as the channel-1, \r +BatchConvert will fail to assume these two channels as part of the same series and\r +will instead split the two channels into two separate OME-Zarrs. 
\r +\r +The output would look like: \\\r +`test_img_C1-TRange{1-3-1}.ome.zarr` \\\r +`test_img_C2-TRange{1-2-1}.ome.zarr`\r +\r +To be able to really incorporate all files into a single OME-Zarr, the folder should have equal\r +number of images corresponding to both channels, as shown below:\r +```\r +multichannel_time-series/test_img_C1-T1\r +multichannel_time-series/test_img_C1-T2\r +multichannel_time-series/test_img_C1-T3\r +multichannel_time-series/test_img_C2-T1\r +multichannel_time-series/test_img_C2-T2\r +multichannel_time-series/test_img_C2-T3\r +```\r +The same conversion command on this version of the input folder would result in a single \r +OME-Zarr with the name: \\\r +`test_img_CRange{1-2-1}-TRange{1-3-1}.ome.zarr`\r +\r +\r +**Example 4**\r +\r +This is another example of a case, where there are multiple filename patterns in the input folder.\r +\r +```\r +folder_with_multiple_groups/test_img_C1-T1\r +folder_with_multiple_groups/test_img_C1-T2\r +folder_with_multiple_groups/test_img_C2-T1\r +folder_with_multiple_groups/test_img_C2-T2\r +folder_with_multiple_groups/test_img_T1-Z1\r +folder_with_multiple_groups/test_img_T1-Z2\r +folder_with_multiple_groups/test_img_T1-Z3\r +folder_with_multiple_groups/test_img_T2-Z1\r +folder_with_multiple_groups/test_img_T2-Z2\r +folder_with_multiple_groups/test_img_T2-Z3\r +```\r +\r +One can convert this folder with- \\\r +`batchconvert --omezarr --merge_files /folder_with_multiple_groups `\r + \r +BatchConvert will detect the two patterns in this folder and perform two grouped conversions. \r +The output folders will be named as `test_img_CRange{1-2-1}-TRange{1-2-1}.ome.zarr` and \r +`test_img_TRange{1-2-1}-ZRange{1-3-1}.ome.zarr`. 
\r +\r +\r +**Example 5**\r +\r +Now imagine that we have the same files as in the example 4 but the filenames of the\r +first group lack any dimension specifier, so we have the following folder:\r +\r +```\r +folder_with_multiple_groups/test_img_1-1\r +folder_with_multiple_groups/test_img_1-2\r +folder_with_multiple_groups/test_img_2-1\r +folder_with_multiple_groups/test_img_2-2\r +folder_with_multiple_groups/test_img_T1-Z1\r +folder_with_multiple_groups/test_img_T1-Z2\r +folder_with_multiple_groups/test_img_T1-Z3\r +folder_with_multiple_groups/test_img_T2-Z1\r +folder_with_multiple_groups/test_img_T2-Z2\r +folder_with_multiple_groups/test_img_T2-Z3\r +```\r +\r +In such a scenario, BatchConvert allows the user to specify the concatenation axes \r +via `--concatenation_order` option. This option expects comma-separated strings of dimensions \r +for each group. In this example, the user must provide a string of 2 characters, such as `ct` for \r +channel and time, for group 1, since there are two variable fields for this group. Since group 2 \r +already has dimension specifiers (T and Z as specified in the filenames preceding the variable fields),\r +the user does not need to specify anything for this group, and can enter `auto` or `aa` for automatic\r +detection of the specifiers. \r +\r +So the following line can be used to convert this folder: \\\r +`batchconvert --omezarr --merge_files --concatenation_order ct,aa /folder_with_multiple_groups `\r +\r +The resulting OME-Zarrs will have the names:\r +`test_img_CRange{1-2-1}-TRange{1-2-1}.ome.zarr` and\r +`test_img_TRange{1-2-1}-ZRange{1-3-1}.ome.zarr`\r +\r +Note that `--concatenation_order` will override any dimension specifiers already\r +existing in the filenames.\r +\r +\r +**Example 6**\r +\r +There can be scenarios where the user may want to have further control over the axes along \r +which to concatenate the images. 
For example, the filenames might contain the data acquisition\r +date, which can be recognised by BatchConvert as a concatenation axis in the automatic \r +detection mode. An example of such a fileset might look like:\r +\r +```\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T1\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T2\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T3\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T1\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T2\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T3\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T1\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T2\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T3\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T1\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T2\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T3\r +```\r +\r +One may try the following command to convert this folder:\r +\r +`batchconvert --omezarr --merge_files /filenames_with_dates `\r +\r +Since the concatenation axes are not specified, this command would try to create\r +a single OME-Zarr with name: `test_data_dateRange{03-04-1}.03.2023_imageZRange{1-2-1}-TRange{1-3-1}`.\r +\r +In order to force BatchConvert to ignore the date field, the user can restrict the concatenation \r +axes to the last two numeric fields. This can be done by using a command such as: \\\r +`batchconvert --omezarr --merge_files --concatenation_order aa /filenames_with_dates ` \\\r +This command will avoid concatenation along the date field, and therefore, there will be two\r +OME-Zarrs corresponding to the two dates. The number of characters being passed to the \r +`--concatenation_order` option specifies the number of numeric fields (starting from the right \r +end of the filename) that are recognised by the BatchConvert as valid concatenation axes. 
\r +Passing `aa`, therefore, means that the last two numeric fields must be recognised as \r +concatenation axes and the dimension type should be automatically detected (`a` for automatic). \r +In the same logic, one could, for example, convert each Z section into a separate OME-Zarr by \r +specifying `--concatenation_order a`.\r +\r +\r +\r +### Conversion on slurm\r +\r +All the examples given above can also be run on slurm by specifying `-pf cluster` option. \r +Note that this option automatically uses the singularity profile:\\\r +`batchconvert omezarr -pf cluster -p .oir `\r +\r +\r +\r +\r +\r +\r +\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/453?version=2" ; + schema1:keywords "Nextflow, bash, Python, NGFF, OME-Zarr, Conversion, imaging, bioimaging, image file format, file conversion, OME-TIFF, S3, BioStudies, bioformats, bioformats2raw" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "BatchConvert" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/453?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 151518 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Workflow converts one or multiple bam/cram files to fastq format" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/968?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/bamtofastq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bamtofastq" ; + schema1:sdDatePublished "2024-08-05 10:24:15 +0100" ; + schema1:url "https://workflowhub.eu/workflows/968/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9637 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:44Z" ; + schema1:dateModified "2024-06-11T12:54:44Z" ; + schema1:description "Workflow converts one or multiple bam/cram files to fastq format" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/968?version=5" ; + schema1:keywords "bamtofastq, Conversion, cramtofastq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bamtofastq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/968?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + schema1:datePublished "2024-07-09T15:31:05.900075" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-Trio-phasing-VGP5/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "The pangenome graph construction pipeline renders a collection of sequences into a pangenome graph. Its goal is to build a graph that is locally directed and acyclic while preserving large-scale variation. 
Maintaining local linearity is important for interpretation, visualization, mapping, comparative genomics, and reuse of pangenome graphs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1007?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/pangenome" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/pangenome" ; + schema1:sdDatePublished "2024-08-05 10:23:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1007/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11517 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:07Z" ; + schema1:dateModified "2024-06-11T12:55:07Z" ; + schema1:description "The pangenome graph construction pipeline renders a collection of sequences into a pangenome graph. Its goal is to build a graph that is locally directed and acyclic while preserving large-scale variation. Maintaining local linearity is important for interpretation, visualization, mapping, comparative genomics, and reuse of pangenome graphs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1007?version=3" ; + schema1:keywords "pangenome" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/pangenome" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1007?version=3" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "call and score variants from WGS/WES of rare disease patients" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1014?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/raredisease" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/raredisease" ; + schema1:sdDatePublished "2024-08-05 10:23:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1014/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13482 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:08Z" ; + schema1:dateModified "2024-06-11T12:55:08Z" ; + schema1:description "call and score variants from WGS/WES of rare disease patients" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1014?version=5" ; + schema1:keywords "diagnostics, rare-disease, snv, structural-variants, variant-annotation, variant-calling, wes, WGS" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/raredisease" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1014?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.282.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_virtual_screening/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein-ligand Docking tutorial (Fpocket)" ; + 
schema1:sdDatePublished "2024-08-05 10:30:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/282/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2666 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-22T10:10:56Z" ; + schema1:dateModified "2023-01-16T13:58:31Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for 
Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/282?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein-ligand Docking tutorial (Fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_virtual_screening/python/workflow.py" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1021?version=12" ; + schema1:isBasedOn "https://github.com/nf-core/scrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/scrnaseq" ; + schema1:sdDatePublished "2024-08-05 10:23:15 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1021/ro_crate?version=12" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10731 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:18Z" ; + schema1:dateModified "2024-06-11T12:55:18Z" ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:keywords "10x-genomics, 10xgenomics, alevin, bustools, Cellranger, kallisto, rna-seq, single-cell, star-solo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/scrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1021?version=12" ; + schema1:version 12 . + + a schema1:Dataset ; + schema1:datePublished "2023-10-30T19:54:20.930054" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Scaffolding-HiC-VGP8/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.15" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. 
Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/abcix-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.299.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_abc_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy ABC MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/299/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 107223 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-29T10:43:15Z" ; + schema1:dateModified "2022-11-23T09:03:34Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/abcix-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/299?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy ABC MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + 
schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_abc_md_setup/galaxy/biobb_wf_amber_abc_md_setup.ga" ; + schema1:version 1 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """ASPICov was developed to provide a rapid, reliable and complete analysis of NGS SARS-Cov2 samples to the biologist. This broad application tool allows to process samples from either capture or amplicon strategy and Illumina or Ion Torrent technology. To ensure FAIR data analysis, this Nextflow pipeline follows nf-core guidelines and use Singularity containers. \r +\r +Availability and Implementation: https://gitlab.com/vtilloy/aspicov\r +\r +Citation: Valentin Tilloy, Pierre Cuzin, Laura Leroi, Emilie Guérin, Patrick Durand, Sophie Alain\r + ASPICov: An automated pipeline for identification of SARS-Cov2 nucleotidic variants\r + PLoS One 2022 Jan 26;17(1):e0262953: https://pubmed.ncbi.nlm.nih.gov/35081137/""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/192?version=1" ; + schema1:isBasedOn "https://gitlab.com/vtilloy/aspicov/-/blob/master/main.nf" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ASPICov" ; + schema1:sdDatePublished "2024-08-05 10:32:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/192/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 140575 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 91310 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2021-09-24T14:22:51Z" ; + schema1:dateModified "2023-01-16T13:52:59Z" ; + schema1:description """ASPICov was developed to provide a rapid, reliable and complete analysis of NGS SARS-Cov2 samples to the biologist. This broad application tool allows to process samples from either capture or amplicon strategy and Illumina or Ion Torrent technology. To ensure FAIR data analysis, this Nextflow pipeline follows nf-core guidelines and use Singularity containers. \r +\r +Availability and Implementation: https://gitlab.com/vtilloy/aspicov\r +\r +Citation: Valentin Tilloy, Pierre Cuzin, Laura Leroi, Emilie Guérin, Patrick Durand, Sophie Alain\r + ASPICov: An automated pipeline for identification of SARS-Cov2 nucleotidic variants\r + PLoS One 2022 Jan 26;17(1):e0262953: https://pubmed.ncbi.nlm.nih.gov/35081137/""" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "ASPICov" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/192?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-06-25T09:57:05.227636" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/nanopore-pre-processing" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "nanopore-pre-processing/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "The tutorial for this workflow can be found on [Galaxy Training Network](https://training.galaxyproject.org/training-material/topics/climate/tutorials/climate-101/tutorial.html)" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/42?version=1" ; + schema1:isBasedOn "https://climate.usegalaxy.eu/u/annefou/w/workflow-constructed-from-history-climate-101" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Climate - Climate 101" ; + schema1:sdDatePublished "2024-08-05 10:33:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/42/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 5136 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 24703 ; + schema1:creator ; + schema1:dateCreated "2020-06-29T14:00:25Z" ; + schema1:dateModified "2023-01-16T13:43:36Z" ; + schema1:description "The tutorial for this workflow can be found on [Galaxy Training Network](https://training.galaxyproject.org/training-material/topics/climate/tutorials/climate-101/tutorial.html)" ; + schema1:image ; + schema1:keywords "GTN, Climate" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Climate - Climate 101" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/42?version=1" ; + schema1:version 1 ; + ns1:input , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 12807 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-08-05 10:23:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10326 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:58Z" ; + schema1:dateModified "2024-06-11T12:54:58Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + schema1:datePublished "2024-03-13T21:38:33.354139" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-decontamination-VGP9" ; + schema1:license "BSD-3-Clause" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-decontamination-VGP9/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "The workflow takes a HiFi reads collection, runs FastQC and SeqKit, filters with Cutadapt, and creates a MultiQC report. The main outputs are a collection of filtred reads, a report with raw and filtered reads stats, and a table with raw reads stats." ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.602.1" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/galaxy" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ERGA DataQC HiFi v2309 (WF0)" ; + schema1:sdDatePublished "2024-08-05 10:27:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/602/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15463 ; + schema1:creator , + ; + schema1:dateCreated "2023-10-06T13:17:36Z" ; + schema1:dateModified "2024-03-13T09:04:37Z" ; + schema1:description "The workflow takes a HiFi reads collection, runs FastQC and SeqKit, filters with Cutadapt, and creates a MultiQC report. The main outputs are a collection of filtred reads, a report with raw and filtered reads stats, and a table with raw reads stats." ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "ERGA, DataQC, HiFi" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ERGA DataQC HiFi v2309 (WF0)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/0.Data_QC/Galaxy-Workflow-ERGA_DataQC_HiFi_v2309_(WF0).ga" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 164392 ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/0.Data_QC/pics/QC_hifi_2309.png" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 
2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.259.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_virtual_screening/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Protein-ligand Docking tutorial (Fpocket)" ; + schema1:sdDatePublished "2024-08-05 10:32:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/259/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 28572 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6352 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-06T14:57:00Z" ; + schema1:dateModified "2023-06-06T15:02:00Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/259?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Protein-ligand Docking tutorial (Fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_virtual_screening/cwl/workflow.cwl" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Differential abundance analysis" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/981?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/differentialabundance" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/differentialabundance" ; + schema1:sdDatePublished "2024-08-05 10:24:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/981/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13413 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "Differential abundance analysis" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/981?version=8" ; + schema1:keywords "ATAC-seq, ChIP-seq, deseq2, differential-abundance, differential-expression, gsea, limma, microarray, rna-seq, shiny" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/differentialabundance" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/981?version=5" ; + schema1:version 5 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """CWL workflow for NMR spectra Peak Picking\r +The workflow takes as input a series of 2D 1H 15N HSQC NMR spectra and uses nmrpipe tools to convert the spectra in nmrpipe format and performs an automatic peak picking.\r +This test uses a protein MDM2 with different ligands and peptide and generates a peak list with 1H and 15N chemical shift values for each spectrum. The difference among these peak lists can be used to characterize the ligand binding site on the protein.""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/43?version=1" ; + schema1:isBasedOn "https://github.com/andreagia/CWL_dem1_NMR_Peak_Picking" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for NMR pipe" ; + schema1:sdDatePublished "2024-08-05 10:33:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/43/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1268 ; + schema1:dateCreated "2020-07-22T10:49:00Z" ; + schema1:dateModified "2023-01-16T13:43:41Z" ; + schema1:description """CWL workflow for NMR spectra Peak Picking\r +The workflow takes as input a series of 2D 1H 15N HSQC NMR spectra and uses nmrpipe tools to convert the spectra in nmrpipe format and performs an automatic peak picking.\r +This test uses a protein MDM2 with different ligands and peptide and generates a peak list with 1H and 15N chemical shift values for each spectrum. 
The difference among these peak lists can be used to characterize the ligand binding site on the protein.""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "NMR pipe" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/43?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 6650 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """### Workflow for Illumina Quality Control and Filtering\r +_Multiple paired datasets will be merged into single paired dataset._\r +\r +**Summary:**\r +- FastQC on raw data files
\r +- fastp for read quality trimming
\r +- BBduk for phiX and (optional) rRNA filtering
\r +- Kraken2 for taxonomic classification of reads (optional)
\r +- BBmap for (contamination) filtering using given references (optional)
\r +- FastQC on filtered (merged) data
\r +\r +Other UNLOCK workflows on WorkflowHub: https://workflowhub.eu/projects/16/workflows?view=default

\r +\r +**All tool CWL files and other workflows can be found here:**
\r +https://gitlab.com/m-unlock/cwl\r +\r +**How to setup and use an UNLOCK workflow:**
\r +https://m-unlock.gitlab.io/docs/setup/setup.html
\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/336?version=1" ; + schema1:isBasedOn "https://gitlab.com/m-unlock/cwl/-/blob/master/cwl/workflows/workflow_illumina_quality.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Workflow for Illumina Quality Control and Filtering" ; + schema1:sdDatePublished "2024-08-05 10:31:08 +0100" ; + schema1:url "https://workflowhub.eu/workflows/336/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 85490 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16671 ; + schema1:creator , + ; + schema1:dateCreated "2022-04-21T13:00:34Z" ; + schema1:dateModified "2023-04-07T14:04:28Z" ; + schema1:description """### Workflow for Illumina Quality Control and Filtering\r +_Multiple paired datasets will be merged into single paired dataset._\r +\r +**Summary:**\r +- FastQC on raw data files
\r +- fastp for read quality trimming
\r +- BBduk for phiX and (optional) rRNA filtering
\r +- Kraken2 for taxonomic classification of reads (optional)
\r +- BBmap for (contamination) filtering using given references (optional)
\r +- FastQC on filtered (merged) data
\r +\r +Other UNLOCK workflows on WorkflowHub: https://workflowhub.eu/projects/16/workflows?view=default

\r +\r +**All tool CWL files and other workflows can be found here:**
\r +https://gitlab.com/m-unlock/cwl\r +\r +**How to setup and use an UNLOCK workflow:**
\r +https://m-unlock.gitlab.io/docs/setup/setup.html
\r +""" ; + schema1:image ; + schema1:keywords "illumina, Genomics, Transcriptomics, quality, filtering, Classification" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Workflow for Illumina Quality Control and Filtering" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/336?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for the analysis of crispr data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/975?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/crisprseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/crisprseq" ; + schema1:sdDatePublished "2024-08-05 10:22:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/975/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7872 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:45Z" ; + schema1:dateModified "2024-06-11T12:54:46Z" ; + schema1:description "Pipeline for the analysis of crispr data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/975?version=5" ; + schema1:keywords "crispr, crispr-analysis, crispr-cas, NGS" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/crisprseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/975?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-protein-ligand-complex-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.295.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_protein_complex_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name 
"Research Object Crate for Galaxy Protein Ligand Complex MD Setup" ; + schema1:sdDatePublished "2024-08-05 10:32:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/295/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 97199 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-24T14:47:53Z" ; + schema1:dateModified "2022-11-22T09:57:03Z" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-protein-ligand-complex-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). 
\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/295?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Protein Ligand Complex MD Setup" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_protein_complex_md_setup/galaxy/biobb_wf_protein_complex_md_setup.ga" ; + schema1:version 1 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Protein 3D structure prediction pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1011?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/proteinfold" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/proteinfold" ; + schema1:sdDatePublished "2024-08-05 10:23:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1011/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14542 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-07-31T03:03:11Z" ; + schema1:dateModified "2024-07-31T03:03:11Z" ; + schema1:description "Protein 3D structure prediction pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1011?version=2" ; + schema1:keywords "alphafold2, protein-fold-prediction, protein-folding, protein-sequences, protein-structure" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/proteinfold" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1011?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for processing of 10xGenomics single cell rnaseq data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1021?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/scrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/scrnaseq" ; + schema1:sdDatePublished "2024-08-05 10:23:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1021/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7959 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:17Z" ; + schema1:dateModified "2024-06-11T12:55:17Z" ; + schema1:description "Pipeline for processing of 10xGenomics single cell rnaseq data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:keywords "10x-genomics, 10xgenomics, alevin, bustools, Cellranger, kallisto, rna-seq, single-cell, star-solo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/scrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1021?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "The workflow takes trimmed HiC forward and reverse reads, and one assembly (e.g.: Hap1 or Pri or Collapsed) to produce a scaffolded assembly using YaHS. It also runs all the QC analyses (gfastats, BUSCO, and Merqury). " ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/702?version=1" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/galaxy" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ERGA HiC Collapsed Scaffolding+QC YaHS v2311 (WF4)" ; + schema1:sdDatePublished "2024-08-05 10:26:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/702/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 46334 ; + schema1:creator , + ; + schema1:dateCreated "2024-01-09T11:00:47Z" ; + schema1:dateModified "2024-01-09T11:00:47Z" ; + schema1:description "The workflow takes trimmed HiC forward and reverse reads, and one assembly (e.g.: Hap1 or Pri or Collapsed) to produce a scaffolded assembly using YaHS. It also runs all the QC analyses (gfastats, BUSCO, and Merqury). " ; + schema1:image ; + schema1:isPartOf , + ; + schema1:keywords "name:ASSEMBLY+QC, ERGA, HiC" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ERGA HiC Collapsed Scaffolding+QC YaHS v2311 (WF4)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/4.Scaffolding/Galaxy-Workflow-ERGA_HiC_Collapsed_Scaffolding_QC_YaHS_v2311_(WF4).ga" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + ; + ns1:output , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 172042 ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/4.Scaffolding/pics/Scaf_yahs_pri_2311.png" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """Assess genome quality; can run alone or as part of a combined workflow for large genome assembly. \r +\r +* What it does: Assesses the quality of the genome assembly: generate some statistics and determine if expected genes are present; align contigs to a reference genome.\r +* Inputs: polished assembly; reference_genome.fasta (e.g. of a closely-related species, if available). 
\r +* Outputs: Busco table of genes found; Quast HTML report, and link to Icarus contigs browser, showing contigs aligned to a reference genome\r +* Tools used: Busco, Quast\r +* Input parameters: None required\r +\r +Workflow steps: \r +\r +Polished assembly => Busco\r +* First: predict genes in the assembly: using Metaeuk\r +* Second: compare the set of predicted genes to the set of expected genes in a particular lineage. Default setting for lineage: Eukaryota\r +\r +Polished assembly and a reference genome => Quast\r +* Contigs/scaffolds file: polished assembly\r +* Type of assembly: Genome\r +* Use a reference genome: Yes\r +* Reference genome: Arabidopsis genome\r +* Is the genome large (> 100Mbp)? Yes. \r +* All other settings as defaults, except second last setting: Distinguish contigs with more than 50% unaligned bases as a separate group of contigs?: change to No\r +\r +Options\r +\r +Gene prediction: \r +* Change tool used by Busco to predict genes in the assembly: instead of Metaeuk, use Augustus. \r +* To do this: select: Use Augustus; Use another predefined species model; then choose from the drop down list.\r +* Select from a database of trained species models. list here: https://github.com/Gaius-Augustus/Augustus/tree/master/config/species\r +* Note: if using Augustus: it may fail if the input assembly is too small (e.g. a test-size data assembly). It can't do the training part properly. \r +\r +Compare genes found to other lineage: \r +* Busco has databases of lineages and their expected genes. Option to change lineage. \r +* Not all lineages are available - there is a mix of broader and narrower lineages. - list of lineages here: https://busco.ezlab.org/list_of_lineages.html. \r +* To see the groups in taxonomic hierarchies: Eukaryotes: https://busco.ezlab.org/frames/euka.htm\r +* For example, if you have a plant species from Fabales, you could set that as the lineage. \r +* The narrower the taxonomic group, the more total genes are expected. 
\r +\r +\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.229.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Assess genome quality" ; + schema1:sdDatePublished "2024-08-05 10:32:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/229/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 159462 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10556 ; + schema1:creator ; + schema1:dateCreated "2021-11-08T06:03:05Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """Assess genome quality; can run alone or as part of a combined workflow for large genome assembly. \r +\r +* What it does: Assesses the quality of the genome assembly: generate some statistics and determine if expected genes are present; align contigs to a reference genome.\r +* Inputs: polished assembly; reference_genome.fasta (e.g. of a closely-related species, if available). \r +* Outputs: Busco table of genes found; Quast HTML report, and link to Icarus contigs browser, showing contigs aligned to a reference genome\r +* Tools used: Busco, Quast\r +* Input parameters: None required\r +\r +Workflow steps: \r +\r +Polished assembly => Busco\r +* First: predict genes in the assembly: using Metaeuk\r +* Second: compare the set of predicted genes to the set of expected genes in a particular lineage. Default setting for lineage: Eukaryota\r +\r +Polished assembly and a reference genome => Quast\r +* Contigs/scaffolds file: polished assembly\r +* Type of assembly: Genome\r +* Use a reference genome: Yes\r +* Reference genome: Arabidopsis genome\r +* Is the genome large (> 100Mbp)? Yes. 
\r +* All other settings as defaults, except second last setting: Distinguish contigs with more than 50% unaligned bases as a separate group of contigs?: change to No\r +\r +Options\r +\r +Gene prediction: \r +* Change tool used by Busco to predict genes in the assembly: instead of Metaeuk, use Augustus. \r +* To do this: select: Use Augustus; Use another predefined species model; then choose from the drop down list.\r +* Select from a database of trained species models. list here: https://github.com/Gaius-Augustus/Augustus/tree/master/config/species\r +* Note: if using Augustus: it may fail if the input assembly is too small (e.g. a test-size data assembly). It can't do the training part properly. \r +\r +Compare genes found to other lineage: \r +* Busco has databases of lineages and their expected genes. Option to change lineage. \r +* Not all lineages are available - there is a mix of broader and narrower lineages. - list of lineages here: https://busco.ezlab.org/list_of_lineages.html. \r +* To see the groups in taxonomic hierarchies: Eukaryotes: https://busco.ezlab.org/frames/euka.htm\r +* For example, if you have a plant species from Fabales, you could set that as the lineage. \r +* The narrower the taxonomic group, the more total genes are expected. \r +\r +\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)\r +""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "Large-genome-assembly" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Assess genome quality" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/229?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """Workflow for tracking objects in Cell Profiler:\r +https://training.galaxyproject.org/training-material/topics/imaging/tutorials/object-tracking-using-cell-profiler/tutorial.html""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/115?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Object tracking using CellProfiler" ; + schema1:sdDatePublished "2024-08-05 10:33:18 +0100" ; + schema1:url "https://workflowhub.eu/workflows/115/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 162671 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 30831 ; + schema1:dateCreated "2021-03-21T18:12:10Z" ; + schema1:dateModified "2023-07-03T10:16:04Z" ; + schema1:description """Workflow for tracking objects in Cell Profiler:\r +https://training.galaxyproject.org/training-material/topics/imaging/tutorials/object-tracking-using-cell-profiler/tutorial.html""" ; + schema1:image ; + schema1:keywords "CellProfiler, imaging, Galaxy, image processing" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Object tracking using CellProfiler" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/115?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Molecular Structure Checking using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **checking** a **molecular structure** before using it as an input for a **Molecular Dynamics** simulation. 
The workflow uses the **BioExcel Building Blocks library (biobb)**. The particular structure used is the crystal structure of **human Adenylate Kinase 1A (AK1A)**, in complex with the **AP5A inhibitor** (PDB code [1Z83](https://www.rcsb.org/structure/1z83)). \r +\r +**Structure checking** is a key step before setting up a protein system for **simulations**. A number of **common issues** found in structures at **Protein Data Bank** may compromise the success of the **simulation**, or may suggest that longer **equilibration** procedures are necessary.\r +\r +The **workflow** shows how to:\r +\r +- Run **basic manipulations on structures** (selection of models, chains, alternative locations\r +- Detect and fix **amide assignments** and **wrong chiralities**\r +- Detect and fix **protein backbone** issues (missing fragments, and atoms, capping)\r +- Detect and fix **missing side-chain atoms**\r +- **Add hydrogen atoms** according to several criteria\r +- Detect and classify **atomic clashes**\r +- Detect possible **disulfide bonds (SS)**\r +\r +An implementation of this workflow in a **web-based Graphical User Interface (GUI)** can be found in the [https://mmb.irbbarcelona.org/biobb-wfs/](https://mmb.irbbarcelona.org/biobb-wfs/) server (see [https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check](https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check)).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r 
+[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.777.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_structure_checking/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Molecular Structure Checking" ; + schema1:sdDatePublished "2024-08-05 10:25:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/777/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5246 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-05T08:52:36Z" ; + schema1:dateModified "2024-03-05T08:55:07Z" ; + schema1:description """# Molecular Structure Checking using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **checking** a **molecular structure** before using it as an input for a **Molecular Dynamics** simulation. The workflow uses the **BioExcel Building Blocks library (biobb)**. The particular structure used is the crystal structure of **human Adenylate Kinase 1A (AK1A)**, in complex with the **AP5A inhibitor** (PDB code [1Z83](https://www.rcsb.org/structure/1z83)). \r +\r +**Structure checking** is a key step before setting up a protein system for **simulations**. 
A number of **common issues** found in structures at **Protein Data Bank** may compromise the success of the **simulation**, or may suggest that longer **equilibration** procedures are necessary.\r +\r +The **workflow** shows how to:\r +\r +- Run **basic manipulations on structures** (selection of models, chains, alternative locations\r +- Detect and fix **amide assignments** and **wrong chiralities**\r +- Detect and fix **protein backbone** issues (missing fragments, and atoms, capping)\r +- Detect and fix **missing side-chain atoms**\r +- **Add hydrogen atoms** according to several criteria\r +- Detect and classify **atomic clashes**\r +- Detect possible **disulfide bonds (SS)**\r +\r +An implementation of this workflow in a **web-based Graphical User Interface (GUI)** can be found in the [https://mmb.irbbarcelona.org/biobb-wfs/](https://mmb.irbbarcelona.org/biobb-wfs/) server (see [https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check](https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check)).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Molecular Structure Checking" ; + 
schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_structure_checking/python/workflow.py" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """The Flashlite-Supernova pipeline runs Supernova to generate phased whole-genome de novo assemblies from a Chromium prepared library on [University of Queensland's HPC, Flashlite](https://rcc.uq.edu.au/flashlite). \r +\r +Infrastructure\\_deployment\\_metadata: FlashLite (QRISCloud)""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.151.1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/Flashlite-Supernova" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Flashlite-Supernova" ; + schema1:sdDatePublished "2024-08-05 10:33:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/151/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2606 ; + schema1:dateCreated "2021-08-18T23:21:08Z" ; + schema1:dateModified "2023-01-16T13:51:45Z" ; + schema1:description """The Flashlite-Supernova pipeline runs Supernova to generate phased whole-genome de novo assemblies from a Chromium prepared library on [University of Queensland's HPC, Flashlite](https://rcc.uq.edu.au/flashlite). \r +\r +Infrastructure\\_deployment\\_metadata: FlashLite (QRISCloud)""" ; + schema1:isPartOf ; + schema1:keywords "Flashlite, Supernova, 10X, TELLSeq" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Flashlite-Supernova" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/151?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +****\r +\r +About this workflow:\r +\r +* Run this workflow per tissue. \r +* Inputs: masked_genome.fasta and the trimmed RNAseq reads (R1 and R2) from one type of tissue. \r +* Index genome and align reads to genome with HISAT2, with default settings except for: Advanced options: spliced alignment options: specify options: Transcriptome assembly reporting: selected option: Report alignments tailored for transcript assemblers including StringTie (equivalent to -dta flag). \r +* Runs samtools sort to sort bam by coordinate. \r +* Runs StringTie to generate gtf from sorted bam. \r +* Output: transcripts.gtf from a single tissue.""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.877.1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Find transcripts - TSI" ; + schema1:sdDatePublished "2024-08-05 10:24:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/877/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11408 ; + schema1:creator , + ; + schema1:dateCreated "2024-05-08T06:51:41Z" ; + schema1:dateModified "2024-05-09T04:05:20Z" ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. 
\r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +****\r +\r +About this workflow:\r +\r +* Run this workflow per tissue. \r +* Inputs: masked_genome.fasta and the trimmed RNAseq reads (R1 and R2) from one type of tissue. \r +* Index genome and align reads to genome with HISAT2, with default settings except for: Advanced options: spliced alignment options: specify options: Transcriptome assembly reporting: selected option: Report alignments tailored for transcript assemblers including StringTie (equivalent to -dta flag). \r +* Runs samtools sort to sort bam by coordinate. \r +* Runs StringTie to generate gtf from sorted bam. \r +* Output: transcripts.gtf from a single tissue.""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "TSI-annotation" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Find transcripts - TSI" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/877?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 299849 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for analysis of Molecular Pixelation assays" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1010?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/pixelator" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/pixelator" ; + schema1:sdDatePublished "2024-08-05 10:23:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1010/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11496 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:07Z" ; + schema1:dateModified "2024-06-11T12:55:07Z" ; + schema1:description "Pipeline for analysis of Molecular Pixelation assays" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1010?version=7" ; + schema1:keywords "molecular-pixelation, pixelator, pixelgen-technologies, proteins, single-cell, single-cell-omics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/pixelator" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1010?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5800 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:11Z" ; + schema1:dateModified "2024-06-11T12:55:11Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + schema1:datePublished "2023-10-30T19:49:48.921572" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/kmer-profiling-hifi-trio-VGP2" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "kmer-profiling-hifi-trio-VGP2/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.15" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Genome-wide alternative splicing analysis v.2" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/482?version=7" ; + schema1:license "CC-BY-NC-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genome-wide alternative splicing analysis v.2" ; + schema1:sdDatePublished "2024-08-05 10:30:12 +0100" ; + schema1:url "https://workflowhub.eu/workflows/482/ro_crate?version=7" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 214419 . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 30115 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 109284 ; + schema1:creator ; + schema1:dateCreated "2023-06-11T19:17:35Z" ; + schema1:dateModified "2023-06-11T19:17:56Z" ; + schema1:description "Genome-wide alternative splicing analysis v.2" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/482?version=6" ; + schema1:keywords "Transcriptomics, Alternative splicing, isoform switching" ; + schema1:license "https://spdx.org/licenses/CC-BY-NC-4.0" ; + schema1:name "Genome-wide alternative splicing analysis v.2" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/482?version=7" ; + schema1:version 7 ; + ns1:input , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/807?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for A workflow demonstrating the 'Run interpolation based on IDW' tool" ; + schema1:sdDatePublished "2024-08-05 10:25:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/807/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10504 ; + schema1:dateCreated "2024-03-28T13:42:44Z" ; + schema1:dateModified "2024-03-28T13:46:06Z" ; + schema1:description "" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "A workflow demonstrating the 'Run interpolation based on IDW' tool" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/AquaINFRA/galaxy-workflow-idw/main/galaxy_workflow.ga" ; + schema1:version 1 ; + ns1:input , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Fastq-to-BAM @ NCI-Gadi is a genome alignment workflow that takes raw FASTQ files, aligns them to a reference genome and outputs analysis ready BAM files. This workflow is designed for the National Computational Infrastructure's (NCI) Gadi supercompter, leveraging multiple nodes on NCI Gadi to run all stages of the workflow in parallel, either massively parallel using the scatter-gather approach or parallel by sample. It consists of a number of stages and follows the BROAD Institute's best practice recommendations. \r +\r +Infrastructure\\_deployment\\_metadata: Gadi (NCI)""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.146.1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/Fastq-to-BAM/blob/fastq-to-bam-v2/README.md" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Fastq-to-bam @ NCI-Gadi" ; + schema1:sdDatePublished "2024-08-05 10:31:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/146/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 1189980 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 30768 ; + schema1:creator , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2021-08-17T04:45:57Z" ; + schema1:dateModified "2023-01-16T13:51:45Z" ; + schema1:description """Fastq-to-BAM @ NCI-Gadi is a genome alignment workflow that takes raw FASTQ files, aligns them to a reference genome and outputs analysis ready BAM files. This workflow is designed for the National Computational Infrastructure's (NCI) Gadi supercompter, leveraging multiple nodes on NCI Gadi to run all stages of the workflow in parallel, either massively parallel using the scatter-gather approach or parallel by sample. It consists of a number of stages and follows the BROAD Institute's best practice recommendations. \r +\r +Infrastructure\\_deployment\\_metadata: Gadi (NCI)""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "Genomics, Alignment, BROAD, WGS, BWA-mem, scalable, NCI, Gadi, PBS, genome, DNA, mapping" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Fastq-to-bam @ NCI-Gadi" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/146?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-07-17T13:10:20.412916" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-only-VGP3/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1025?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/taxprofiler" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/taxprofiler" ; + schema1:sdDatePublished "2024-08-05 10:23:09 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1025/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15607 ; + schema1:creator , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:19Z" ; + schema1:dateModified "2024-06-11T12:55:19Z" ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1025?version=10" ; + schema1:keywords "Classification, illumina, long-reads, Metagenomics, microbiome, nanopore, pathogen, Profiling, shotgun, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/taxprofiler" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1025?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + schema1:datePublished "2024-01-26T14:52:42.979858" ; + schema1:hasPart , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/velocyto" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + ; + schema1:name "velocyto/Velocyto-on10X-from-bundled" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . 
+ + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.19" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "velocyto/Velocyto-on10X-filtered-barcodes" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/velocyto" ; + schema1:version "0.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.19" . + + a schema1:Dataset ; + schema1:datePublished "2023-11-23T13:33:07.987369" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "cutandrun/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """### Workflow for Metagenomics binning from assembly\r +\r +**Minimal inputs are: Identifier, assembly (fasta) and a associated sorted BAM file**\r +\r +**Summary**\r + - MetaBAT2 (binning)\r + - MaxBin2 (binning)\r + - SemiBin (binning)\r + - DAS Tool (bin merging)\r + - EukRep (eukaryotic classification)\r + - CheckM (bin completeness and contamination)\r + - BUSCO (bin completeness)\r + - GTDB-Tk (bin taxonomic classification)\r +\r +Other UNLOCK workflows on WorkflowHub: https://workflowhub.eu/projects/16/workflows?view=default

\r +\r +**All tool CWL files and other workflows can be found here:**
\r + Tools: https://gitlab.com/m-unlock/cwl
\r + Workflows: https://gitlab.com/m-unlock/cwl/workflows
\r +\r +**How to setup and use an UNLOCK workflow:**
\r +https://m-unlock.gitlab.io/docs/setup/setup.html
\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/64?version=11" ; + schema1:isBasedOn "https://gitlab.com/m-unlock/cwl/-/blob/master/cwl/workflows/workflow_metagenomics_binning.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Metagenomic Binning from Assembly" ; + schema1:sdDatePublished "2024-08-05 10:31:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/64/ro_crate?version=11" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 82211 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 18745 ; + schema1:creator , + ; + schema1:dateCreated "2021-10-18T09:49:33Z" ; + schema1:dateModified "2023-02-02T15:15:38Z" ; + schema1:description """### Workflow for Metagenomics binning from assembly\r +\r +**Minimal inputs are: Identifier, assembly (fasta) and a associated sorted BAM file**\r +\r +**Summary**\r + - MetaBAT2 (binning)\r + - MaxBin2 (binning)\r + - SemiBin (binning)\r + - DAS Tool (bin merging)\r + - EukRep (eukaryotic classification)\r + - CheckM (bin completeness and contamination)\r + - BUSCO (bin completeness)\r + - GTDB-Tk (bin taxonomic classification)\r +\r +Other UNLOCK workflows on WorkflowHub: https://workflowhub.eu/projects/16/workflows?view=default

\r +\r +**All tool CWL files and other workflows can be found here:**
\r + Tools: https://gitlab.com/m-unlock/cwl
\r + Workflows: https://gitlab.com/m-unlock/cwl/workflows
\r +\r +**How to setup and use an UNLOCK workflow:**
\r +https://m-unlock.gitlab.io/docs/setup/setup.html
\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/64?version=10" ; + schema1:keywords "Metagenomics, microbial, metagenome, binning" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Metagenomic Binning from Assembly" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/64?version=11" ; + schema1:version 11 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=21" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-08-05 10:24:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=21" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13448 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:50Z" ; + schema1:dateModified "2024-06-11T12:54:50Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=21" ; + schema1:version 21 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "A workflow querying on an endpoint of a graph database by a file containing a SPARQL query." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/122?version=1" ; + schema1:isBasedOn "https://github.com/longmanplus/EOSC-Life_demos" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for SPARQL query on graph database" ; + schema1:sdDatePublished "2024-08-05 10:33:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/122/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 283 ; + schema1:dateCreated "2021-05-23T16:14:17Z" ; + schema1:dateModified "2021-05-26T10:43:23Z" ; + schema1:description "A workflow querying on an endpoint of a graph database by a file containing a SPARQL query." ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/122?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "SPARQL query on graph database" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/122?version=1" ; + schema1:version 1 ; + ns1:input , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 1556 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A pipeline to demultiplex, QC and map Nanopore data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1002?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/nanoseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nanoseq" ; + schema1:sdDatePublished "2024-08-05 10:23:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1002/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9753 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:05Z" ; + schema1:dateModified "2024-06-11T12:55:05Z" ; + schema1:description "A pipeline to demultiplex, QC and map Nanopore data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1002?version=5" ; + schema1:keywords "Alignment, demultiplexing, nanopore, QC" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nanoseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1002?version=6" ; + schema1:version 6 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 320859 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 769 ; + schema1:dateModified "2024-03-21T11:54:38+00:00" ; + schema1:name "matmul_case1.csv" ; + schema1:sdDatePublished "2024-03-22T11:39:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 765 ; + schema1:dateModified "2023-11-28T00:25:59+00:00" ; + schema1:name "matmul_case2.csv" ; + schema1:sdDatePublished "2024-03-22T11:39:26+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 717 ; + schema1:dateModified "2023-11-28T19:49:11+00:00" ; + schema1:name "matmul_case3.csv" ; + schema1:sdDatePublished "2024-03-22T11:39:26+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/984?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/epitopeprediction" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/epitopeprediction" ; + schema1:sdDatePublished "2024-08-05 10:23:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/984/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9453 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:52Z" ; + schema1:dateModified "2024-06-11T12:54:52Z" ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/984?version=7" ; + schema1:keywords "epitope, epitope-prediction, mhc-binding-prediction" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/epitopeprediction" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/984?version=6" ; + schema1:version 6 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, related to the Galaxy training tutorial "[Biodiversity data exploration](https://training.galaxyproject.org/training-material/topics/ecology/tutorials/biodiversity-data-exploration/tutorial.html)"\r +\r +This workflow allows to explore biodiversity data looking at homoscedasticity, normality or collinearity of presences-absence or abundance data and at comparing beta diversity taking into account space, time and species components""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/656?version=1" ; + schema1:isBasedOn "https://ecology.usegalaxy.eu/u/ylebras/w/copy-of-workflow-biodiversity-data-exploration-tuto" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Biodiversity data exploration tutorial" ; + schema1:sdDatePublished "2024-08-05 10:27:18 +0100" ; + schema1:url "https://workflowhub.eu/workflows/656/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14766 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-11-09T12:47:49Z" ; + schema1:dateModified "2023-11-09T21:02:04Z" ; + schema1:description """Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, related to the Galaxy training tutorial "[Biodiversity data exploration](https://training.galaxyproject.org/training-material/topics/ecology/tutorials/biodiversity-data-exploration/tutorial.html)"\r +\r +This workflow allows to explore biodiversity data looking at homoscedasticity, normality or collinearity of presences-absence or abundance data and at comparing beta diversity taking into account space, time and species components""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Biodiversity data exploration tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/656?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2023-11-23T13:33:07.954489" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-only-VGP3/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.130.4" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Amber Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:25:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/130/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 66118 ; + schema1:creator , + ; + schema1:dateCreated "2023-07-26T09:36:49Z" ; + schema1:dateModified "2023-07-26T09:37:20Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/130?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Amber Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://raw.githubusercontent.com/bioexcel/biobb_wf_amber_md_setup/master/biobb_wf_amber_md_setup/notebooks/mdsetup/biobb_amber_setup_notebook.ipynb" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=14" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=14" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9304 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:12Z" ; + schema1:dateModified "2024-06-11T12:55:12Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=14" ; + schema1:version 14 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/278?version=7" ; + schema1:isBasedOn "https://gitlab.bsc.es/lrodrig1/structuralvariants_poc/-/tree/1.1.3/structuralvariants/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNV_pipeline" ; + schema1:sdDatePublished "2024-08-05 10:31:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/278/ro_crate?version=7" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9858 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9607 ; + schema1:creator , + ; + schema1:dateCreated "2022-06-01T17:26:39Z" ; + schema1:dateModified "2022-06-01T17:26:39Z" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/278?version=10" ; + schema1:keywords "cancer, CODEX2, ExomeDepth, manta, TransBioNet, variant calling, GRIDSS, structural variants" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CNV_pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/278?version=7" ; + schema1:version 7 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 74039 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Differential abundance analysis" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/981?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/differentialabundance" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/differentialabundance" ; + schema1:sdDatePublished "2024-08-05 10:24:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/981/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15641 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "Differential abundance analysis" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/981?version=8" ; + schema1:keywords "ATAC-seq, ChIP-seq, deseq2, differential-abundance, differential-expression, gsea, limma, microarray, rna-seq, shiny" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/differentialabundance" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/981?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """The second-level complexity workflow is one among a collection of workflows designed to address tasks up to CTF estimation. In addition to the functionalities provided by the layer 0 workflow, this workflow aims to enhance the quality of acquisition images using quality protocols.\r +\r +**Quality control protocols**\r +\r +* **Movie max shift**: automatic reject those movies whose frames move more than a given threshold. 
\r +\r +* **Tilt analysis**: quality score based in the Power Spectrum Density (astigmatism and tilt) \r +\r +* **CTF consensus**: acts as a filter discarding micrographs based on their CTF (limit resolution, defocus, astigmatism, etc.).\r +\r +**Advantages:** \r +\r +* More control of the acquisition quality\r +\r +* Reduce unnecessary processing time and storage""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/599?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CEITEC layer 1 workflow" ; + schema1:sdDatePublished "2024-08-05 10:22:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/599/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 53166 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7675 ; + schema1:dateCreated "2023-10-04T13:01:57Z" ; + schema1:dateModified "2024-07-10T14:16:39Z" ; + schema1:description """The second-level complexity workflow is one among a collection of workflows designed to address tasks up to CTF estimation. In addition to the functionalities provided by the layer 0 workflow, this workflow aims to enhance the quality of acquisition images using quality protocols.\r +\r +**Quality control protocols**\r +\r +* **Movie max shift**: automatic reject those movies whose frames move more than a given threshold. 
\r +\r +* **Tilt analysis**: quality score based in the Power Spectrum Density (astigmatism and tilt) \r +\r +* **CTF consensus**: acts as a filter discarding micrographs based on their CTF (limit resolution, defocus, astigmatism, etc.).\r +\r +**Advantages:** \r +\r +* More control of the acquisition quality\r +\r +* Reduce unnecessary processing time and storage""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/599?version=1" ; + schema1:isPartOf ; + schema1:keywords "image processing, cryoem, spa, scipion" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "CEITEC layer 1 workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/599?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=16" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-08-05 10:23:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=16" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8751 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:04Z" ; + schema1:dateModified "2024-06-11T12:55:04Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=16" ; + schema1:version 16 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "call and score variants from WGS/WES of rare disease patients" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1014?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/raredisease" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/raredisease" ; + schema1:sdDatePublished "2024-08-05 10:23:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1014/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13482 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:08Z" ; + schema1:dateModified "2024-06-11T12:55:08Z" ; + schema1:description "call and score variants from WGS/WES of rare disease patients" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1014?version=5" ; + schema1:keywords "diagnostics, rare-disease, snv, structural-variants, variant-annotation, variant-calling, wes, WGS" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/raredisease" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1014?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Abstract CWL Automatically generated from the Galaxy workflow file: CLM-FATES_ ALP1 simulation (5 years)" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/65?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CLM-FATES_ALP1_simulation_5years" ; + schema1:sdDatePublished "2024-08-05 10:33:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/65/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 3407 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 20149 ; + schema1:dateCreated "2020-10-27T12:14:22Z" ; + schema1:dateModified "2023-01-16T13:45:44Z" ; + schema1:description "Abstract CWL Automatically generated from the Galaxy workflow file: CLM-FATES_ ALP1 simulation (5 years)" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CLM-FATES_ALP1_simulation_5years" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/65?version=1" ; + schema1:version 1 ; + ns1:input , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 8510 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/963?version=12" ; + schema1:isBasedOn "https://github.com/nf-core/airrflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/airrflow" ; + schema1:sdDatePublished "2024-08-05 10:24:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/963/ro_crate?version=12" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14129 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:37Z" ; + schema1:dateModified "2024-06-11T12:54:37Z" ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/963?version=13" ; + schema1:keywords "airr, b-cell, immcantation, immunorepertoire, repseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/airrflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/963?version=12" ; + schema1:version 12 . + + a schema1:Dataset ; + schema1:datePublished "2024-06-24T13:49:03.518903" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/bacterial-genome-assembly" ; + schema1:license "GPL-3.0-or-later" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "bacterial-genome-assembly/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An automated processing pipeline for mammalian bulk calling cards experiments" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/970?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/callingcards" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/callingcards" ; + schema1:sdDatePublished "2024-08-05 10:24:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/970/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10916 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:44Z" ; + schema1:dateModified "2024-06-11T12:54:44Z" ; + schema1:description "An automated processing pipeline for mammalian bulk calling cards experiments" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/callingcards" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/970?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Virus genome assembly with Unicycler and Spades,\r +The 2 assemblers works in parallel. The graph visualization is made with Bandage.\r +workflow git repository : https://github.com/fjrmoreews/cwl-workflow-SARS-CoV-2/blob/master/Assembly/workflow/assembly-wf-virus.cwl\r +Based on https://github.com/galaxyproject/SARS-CoV-2/blob/master/genomics/2-Assembly/as_wf.png\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/3?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Virus genome assembly with Unicycler and Spades." ; + schema1:sdDatePublished "2024-08-05 10:33:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/3/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8602 ; + schema1:dateCreated "2020-04-10T10:45:00Z" ; + schema1:dateModified "2023-01-16T13:39:45Z" ; + schema1:description """Virus genome assembly with Unicycler and Spades,\r +The 2 assemblers works in parallel. 
The graph visualization is made with Bandage.\r +workflow git repository : https://github.com/fjrmoreews/cwl-workflow-SARS-CoV-2/blob/master/Assembly/workflow/assembly-wf-virus.cwl\r +Based on https://github.com/galaxyproject/SARS-CoV-2/blob/master/genomics/2-Assembly/as_wf.png\r +""" ; + schema1:image ; + schema1:keywords "covid-19, Assembly" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Virus genome assembly with Unicycler and Spades." ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/3?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 34311 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Genome-wide alternative splicing analysis v.2" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/482?version=4" ; + schema1:license "CC-BY-NC-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genome-wide alternative splicing analysis v.2" ; + schema1:sdDatePublished "2024-08-05 10:30:12 +0100" ; + schema1:url "https://workflowhub.eu/workflows/482/ro_crate?version=4" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 214419 . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 30115 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 119329 ; + schema1:creator ; + schema1:dateCreated "2023-06-11T11:57:01Z" ; + schema1:dateModified "2023-06-11T11:57:19Z" ; + schema1:description "Genome-wide alternative splicing analysis v.2" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/482?version=6" ; + schema1:keywords "Transcriptomics, Alternative splicing, isoform switching" ; + schema1:license "https://spdx.org/licenses/CC-BY-NC-4.0" ; + schema1:name "Genome-wide alternative splicing analysis v.2" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/482?version=4" ; + schema1:version 4 ; + ns1:input , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/278?version=9" ; + schema1:isBasedOn "https://gitlab.bsc.es/lrodrig1/structuralvariants_poc/-/tree/1.1.3/structuralvariants/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNV_pipeline" ; + schema1:sdDatePublished "2024-08-05 10:31:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/278/ro_crate?version=9" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9694 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9435 ; + schema1:creator , + ; + schema1:dateCreated "2022-06-30T13:07:39Z" ; + schema1:dateModified "2022-06-30T13:07:39Z" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/278?version=10" ; + schema1:keywords "cancer, CODEX2, ExomeDepth, manta, TransBioNet, variant calling, GRIDSS, structural variants" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CNV_pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/278?version=9" ; + schema1:version 9 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 72914 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# The Polygenic Score Catalog Calculator (`pgsc_calc`)\r +\r +[![Documentation Status](https://readthedocs.org/projects/pgsc-calc/badge/?version=latest)](https://pgsc-calc.readthedocs.io/en/latest/?badge=latest)\r +[![pgscatalog/pgsc_calc CI](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml/badge.svg)](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.5970794.svg)](https://doi.org/10.5281/zenodo.5970794)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-≥22.10.0-23aa62.svg?labelColor=000000)](https://www.nextflow.io/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +\r +## Introduction\r +\r +`pgsc_calc` is a bioinformatics best-practice analysis pipeline for calculating\r +polygenic [risk] scores on samples with imputed genotypes using existing scoring\r +files from the [Polygenic Score (PGS) Catalog](https://www.pgscatalog.org/)\r +and/or user-defined PGS/PRS.\r +\r +## Pipeline summary\r +\r +

\r + \r +

\r +\r +The workflow performs the following steps:\r +\r +* Downloading scoring files using the PGS Catalog API in a specified genome build (GRCh37 and GRCh38).\r +* Reading custom scoring files (and performing a liftover if genotyping data is in a different build).\r +* Automatically combines and creates scoring files for efficient parallel computation of multiple PGS\r + - Matching variants in the scoring files against variants in the target dataset (in plink bfile/pfile or VCF format)\r +* Calculates PGS for all samples (linear sum of weights and dosages)\r +* Creates a summary report to visualize score distributions and pipeline metadata (variant matching QC)\r +\r +And optionally:\r +\r +- Genetic Ancestry: calculate similarity of target samples to populations in a\r + reference dataset ([1000 Genomes (1000G)](http://www.nature.com/nature/journal/v526/n7571/full/nature15393.html)), using principal components analysis (PCA)\r +- PGS Normalization: Using reference population data and/or PCA projections to report\r + individual-level PGS predictions (e.g. percentiles, z-scores) that account for genetic ancestry\r +\r +See documentation for a list of planned [features under development](https://pgsc-calc.readthedocs.io/en/latest/index.html#Features-under-development).\r +\r +## Quick start\r +\r +1. Install\r +[`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation)\r +(`>=22.10.0`)\r +\r +2. Install [`Docker`](https://docs.docker.com/engine/installation/) or\r +[`Singularity (v3.8.3 minimum)`](https://www.sylabs.io/guides/3.0/user-guide/)\r +(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort)\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile test,\r + ```\r +\r +4. 
Start running your own analysis!\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile --input samplesheet.csv --pgs_id PGS001229\r + ```\r +\r +See [getting\r +started](https://pgsc-calc.readthedocs.io/en/latest/getting-started.html) for more\r +details.\r +\r +## Documentation\r +\r +[Full documentation is available on Read the Docs](https://pgsc-calc.readthedocs.io/)\r +\r +## Credits\r +\r +pgscatalog/pgsc_calc is developed as part of the PGS Catalog project, a\r +collaboration between the University of Cambridge’s Department of Public Health\r +and Primary Care (Michael Inouye, Samuel Lambert) and the European\r +Bioinformatics Institute (Helen Parkinson, Laura Harris).\r +\r +The pipeline seeks to provide a standardized workflow for PGS calculation and\r +ancestry inference implemented in nextflow derived from an existing set of\r +tools/scripts developed by Inouye lab (Rodrigo Canovas, Scott Ritchie, Jingqin\r +Wu) and PGS Catalog teams (Samuel Lambert, Laurent Gil).\r +\r +The adaptation of the codebase, nextflow implementation, and PGS Catalog features\r +are written by Benjamin Wingfield, Samuel Lambert, Laurent Gil with additional input\r +from Aoife McMahon (EBI). Development of new features, testing, and code review\r +is ongoing including Inouye lab members (Rodrigo Canovas, Scott Ritchie) and others. A\r +manuscript describing the tool is *in preparation*. In the meantime if you use the\r +tool we ask you to cite the repo and the paper describing the PGS Catalog\r +resource:\r +\r +- >PGS Catalog Calculator _(in preparation)_. PGS Catalog\r + Team. [https://github.com/PGScatalog/pgsc_calc](https://github.com/PGScatalog/pgsc_calc)\r +- >Lambert _et al._ (2021) The Polygenic Score Catalog as an open database for\r +reproducibility and systematic evaluation. Nature Genetics. 
53:420–425\r +doi:[10.1038/s41588-021-00783-5](https://doi.org/10.1038/s41588-021-00783-5).\r +\r +This pipeline is distrubuted under an [Apache License](LICENSE) amd uses code and \r +infrastructure developed and maintained by the [nf-core](https://nf-co.re) community \r +(Ewels *et al. Nature Biotech* (2020) doi:[10.1038/s41587-020-0439-x](https://doi.org/10.1038/s41587-020-0439-x)), \r +reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +Additional references of open-source tools and data used in this pipeline are described in\r +[`CITATIONS.md`](CITATIONS.md).\r +\r +This work has received funding from EMBL-EBI core funds, the Baker Institute,\r +the University of Cambridge, Health Data Research UK (HDRUK), and the European\r +Union’s Horizon 2020 research and innovation programme under grant agreement No\r +101016775 INTERVENE.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/556?version=1" ; + schema1:isBasedOn "https://github.com/PGScatalog/pgsc_calc" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for The Polygenic Score Catalog Calculator" ; + schema1:sdDatePublished "2024-08-05 10:28:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/556/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1673 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-08-10T09:01:48Z" ; + schema1:dateModified "2023-08-10T09:01:48Z" ; + schema1:description """# The Polygenic Score Catalog Calculator (`pgsc_calc`)\r +\r +[![Documentation Status](https://readthedocs.org/projects/pgsc-calc/badge/?version=latest)](https://pgsc-calc.readthedocs.io/en/latest/?badge=latest)\r +[![pgscatalog/pgsc_calc CI](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml/badge.svg)](https://github.com/PGScatalog/pgsc_calc/actions/workflows/ci.yml)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.5970794.svg)](https://doi.org/10.5281/zenodo.5970794)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-≥22.10.0-23aa62.svg?labelColor=000000)](https://www.nextflow.io/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +\r +## Introduction\r +\r +`pgsc_calc` is a bioinformatics best-practice analysis pipeline for calculating\r +polygenic [risk] scores on samples with imputed genotypes using existing scoring\r +files from the [Polygenic Score (PGS) Catalog](https://www.pgscatalog.org/)\r +and/or user-defined PGS/PRS.\r +\r +## Pipeline summary\r +\r +

\r + \r +

\r +\r +The workflow performs the following steps:\r +\r +* Downloading scoring files using the PGS Catalog API in a specified genome build (GRCh37 and GRCh38).\r +* Reading custom scoring files (and performing a liftover if genotyping data is in a different build).\r +* Automatically combines and creates scoring files for efficient parallel computation of multiple PGS\r + - Matching variants in the scoring files against variants in the target dataset (in plink bfile/pfile or VCF format)\r +* Calculates PGS for all samples (linear sum of weights and dosages)\r +* Creates a summary report to visualize score distributions and pipeline metadata (variant matching QC)\r +\r +And optionally:\r +\r +- Genetic Ancestry: calculate similarity of target samples to populations in a\r + reference dataset ([1000 Genomes (1000G)](http://www.nature.com/nature/journal/v526/n7571/full/nature15393.html)), using principal components analysis (PCA)\r +- PGS Normalization: Using reference population data and/or PCA projections to report\r + individual-level PGS predictions (e.g. percentiles, z-scores) that account for genetic ancestry\r +\r +See documentation for a list of planned [features under development](https://pgsc-calc.readthedocs.io/en/latest/index.html#Features-under-development).\r +\r +## Quick start\r +\r +1. Install\r +[`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation)\r +(`>=22.10.0`)\r +\r +2. Install [`Docker`](https://docs.docker.com/engine/installation/) or\r +[`Singularity (v3.8.3 minimum)`](https://www.sylabs.io/guides/3.0/user-guide/)\r +(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort)\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile test,\r + ```\r +\r +4. 
Start running your own analysis!\r +\r + ```console\r + nextflow run pgscatalog/pgsc_calc -profile --input samplesheet.csv --pgs_id PGS001229\r + ```\r +\r +See [getting\r +started](https://pgsc-calc.readthedocs.io/en/latest/getting-started.html) for more\r +details.\r +\r +## Documentation\r +\r +[Full documentation is available on Read the Docs](https://pgsc-calc.readthedocs.io/)\r +\r +## Credits\r +\r +pgscatalog/pgsc_calc is developed as part of the PGS Catalog project, a\r +collaboration between the University of Cambridge’s Department of Public Health\r +and Primary Care (Michael Inouye, Samuel Lambert) and the European\r +Bioinformatics Institute (Helen Parkinson, Laura Harris).\r +\r +The pipeline seeks to provide a standardized workflow for PGS calculation and\r +ancestry inference implemented in nextflow derived from an existing set of\r +tools/scripts developed by Inouye lab (Rodrigo Canovas, Scott Ritchie, Jingqin\r +Wu) and PGS Catalog teams (Samuel Lambert, Laurent Gil).\r +\r +The adaptation of the codebase, nextflow implementation, and PGS Catalog features\r +are written by Benjamin Wingfield, Samuel Lambert, Laurent Gil with additional input\r +from Aoife McMahon (EBI). Development of new features, testing, and code review\r +is ongoing including Inouye lab members (Rodrigo Canovas, Scott Ritchie) and others. A\r +manuscript describing the tool is *in preparation*. In the meantime if you use the\r +tool we ask you to cite the repo and the paper describing the PGS Catalog\r +resource:\r +\r +- >PGS Catalog Calculator _(in preparation)_. PGS Catalog\r + Team. [https://github.com/PGScatalog/pgsc_calc](https://github.com/PGScatalog/pgsc_calc)\r +- >Lambert _et al._ (2021) The Polygenic Score Catalog as an open database for\r +reproducibility and systematic evaluation. Nature Genetics. 
53:420–425\r +doi:[10.1038/s41588-021-00783-5](https://doi.org/10.1038/s41588-021-00783-5).\r +\r +This pipeline is distrubuted under an [Apache License](LICENSE) amd uses code and \r +infrastructure developed and maintained by the [nf-core](https://nf-co.re) community \r +(Ewels *et al. Nature Biotech* (2020) doi:[10.1038/s41587-020-0439-x](https://doi.org/10.1038/s41587-020-0439-x)), \r +reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +Additional references of open-source tools and data used in this pipeline are described in\r +[`CITATIONS.md`](CITATIONS.md).\r +\r +This work has received funding from EMBL-EBI core funds, the Baker Institute,\r +the University of Cambridge, Health Data Research UK (HDRUK), and the European\r +Union’s Horizon 2020 research and innovation programme under grant agreement No\r +101016775 INTERVENE.\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/556?version=3" ; + schema1:keywords "Nextflow, Workflows, polygenic risk score, polygenic score, prediction, GWAS, genomic ancestry" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "The Polygenic Score Catalog Calculator" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/556?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/963?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/airrflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/airrflow" ; + schema1:sdDatePublished "2024-08-05 10:24:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/963/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8464 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:36Z" ; + schema1:dateModified "2024-06-11T12:54:36Z" ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/963?version=13" ; + schema1:keywords "airr, b-cell, immcantation, immunorepertoire, repseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/airrflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/963?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling" ; + schema1:mainEntity ; + schema1:name "COVID-19-PE-ARTIC-ILLUMINA (v0.1)" ; + schema1:sdDatePublished "2021-03-12 13:41:23 +0000" ; + schema1:softwareVersion "v0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 56967 ; + schema1:name "COVID-19-PE-ARTIC-ILLUMINA" ; + schema1:programmingLanguage . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "The workflow takes ONT reads collection, runs SeqKit and Nanoplot. The main outputs are a table and plots of raw reads stats." 
; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/697?version=1" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/galaxy" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ERGA DataQC ONT v2311 (WF0)" ; + schema1:sdDatePublished "2024-08-05 10:26:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/697/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8849 ; + schema1:creator , + ; + schema1:dateCreated "2024-01-08T15:25:44Z" ; + schema1:dateModified "2024-01-08T15:57:26Z" ; + schema1:description "The workflow takes ONT reads collection, runs SeqKit and Nanoplot. The main outputs are a table and plots of raw reads stats." ; + schema1:image ; + schema1:isPartOf , + ; + schema1:keywords "ONT, ERGA, DataQC" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ERGA DataQC ONT v2311 (WF0)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/0.Data_QC/Galaxy-Workflow-ERGA_DataQC_ONT_v2311_(WF0).ga" ; + schema1:version 1 ; + ns1:input . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 147798 ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/0.Data_QC/pics/QC_ont_2311.png" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 167040 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3277 ; + schema1:dateModified "2024-01-17T10:54:05+00:00" ; + schema1:name "dnn_cnn_5epochs.csv" ; + schema1:sdDatePublished "2024-03-25T11:02:59+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.280.2" ; + schema1:isBasedOn 
"https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_ligand_parameterization/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python GMX Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-08-05 10:31:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/280/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1728 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-24T14:17:18Z" ; + schema1:dateModified "2022-04-11T09:29:42Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/280?version=3" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python GMX Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_ligand_parameterization/python/workflow.py" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=11" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-08-05 10:23:18 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=11" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14236 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:15Z" ; + schema1:dateModified "2024-06-11T12:55:15Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=11" ; + schema1:version 11 . + + a schema1:Dataset ; + schema1:datePublished "2024-07-03T12:31:48.614143" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/allele-based-pathogen-identification" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "allele-based-pathogen-identification/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1023?version=11" ; + schema1:isBasedOn "https://github.com/nf-core/smrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/smrnaseq" ; + schema1:sdDatePublished "2024-08-05 10:23:12 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1023/ro_crate?version=11" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12489 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:19Z" ; + schema1:dateModified "2024-06-11T12:55:19Z" ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1023?version=10" ; + schema1:keywords "small-rna, smrna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/smrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1023?version=11" ; + schema1:version 11 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# PacBio HiFi genome assembly using hifiasm v2.1\r +\r +## General usage recommendations\r +Please see the [Genome assembly with hifiasm on Galaxy Australia](https://australianbiocommons.github.io/how-to-guides/genome_assembly/hifi_assembly) guide.\r +\r +## See [change log](./change_log.md)\r +\r +## Acknowledgements\r +\r +The workflow & the [doc_guidelines template used](https://github.com/AustralianBioCommons/doc_guidelines) are \r +supported by the Australian BioCommons via Bioplatforms Australia funding, the Australian Research Data Commons \r +(https://doi.org/10.47486/PL105) and the Queensland Government RICF programme. 
Bioplatforms Australia and the \r +Australian Research Data Commons are enabled by the National Collaborative Research Infrastructure Strategy (NCRIS).\r +\r +\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.221.3" ; + schema1:isBasedOn "https://github.com/AustralianBioCommons/PacBio-HiFi-genome-assembly-using-hifiasm" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PacBio HiFi genome assembly using hifiasm v2.1" ; + schema1:sdDatePublished "2024-08-05 10:31:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/221/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 27208 ; + schema1:creator , + ; + schema1:dateCreated "2022-10-21T05:13:12Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """# PacBio HiFi genome assembly using hifiasm v2.1\r +\r +## General usage recommendations\r +Please see the [Genome assembly with hifiasm on Galaxy Australia](https://australianbiocommons.github.io/how-to-guides/genome_assembly/hifi_assembly) guide.\r +\r +## See [change log](./change_log.md)\r +\r +## Acknowledgements\r +\r +The workflow & the [doc_guidelines template used](https://github.com/AustralianBioCommons/doc_guidelines) are \r +supported by the Australian BioCommons via Bioplatforms Australia funding, the Australian Research Data Commons \r +(https://doi.org/10.47486/PL105) and the Queensland Government RICF programme. 
Bioplatforms Australia and the \r +Australian Research Data Commons are enabled by the National Collaborative Research Infrastructure Strategy (NCRIS).\r +\r +\r +\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/221?version=2" ; + schema1:isPartOf , + ; + schema1:keywords "FASTQ, hifiasm, HiFi, genome_assembly" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "PacBio HiFi genome assembly using hifiasm v2.1" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/221?version=3" ; + schema1:version 3 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Takes fastqs and reference data, to produce a single cell counts matrix into and save in annData format - adding a column called sample with the sample name. " ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/513?version=3" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq: Count and Load with starSOLO" ; + schema1:sdDatePublished "2024-08-05 10:24:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/513/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 28457 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-05-30T05:47:35Z" ; + schema1:dateModified "2024-05-30T05:47:35Z" ; + schema1:description "Takes fastqs and reference data, to produce a single cell counts matrix into and save in annData format - adding a column called sample with the sample name. 
" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/513?version=2" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "scRNAseq: Count and Load with starSOLO" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/513?version=3" ; + schema1:version 3 ; + ns1:input , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-08-05 10:22:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4606 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:09Z" ; + schema1:dateModified "2024-06-11T12:55:09Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=3" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Differential abundance analysis" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/981?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/differentialabundance" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/differentialabundance" ; + schema1:sdDatePublished "2024-08-05 10:24:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/981/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12719 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "Differential abundance analysis" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/981?version=8" ; + schema1:keywords "ATAC-seq, ChIP-seq, deseq2, differential-abundance, differential-expression, gsea, limma, microarray, rna-seq, shiny" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/differentialabundance" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/981?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# workflow-denovo-stacks\r +\r +These workflows are part of a set designed to work for RAD-seq data on the Galaxy platform, using the tools from the Stacks program. \r +\r +Galaxy Australia: https://usegalaxy.org.au/\r +\r +Stacks: http://catchenlab.life.illinois.edu/stacks/\r +\r +## Inputs\r +* demultiplexed reads in fastq format, may be output from the QC workflow. Files are in a collection. \r +* population map in text format\r +\r +\r +## Steps and outputs\r +\r +ustacks:\r +* input reads go to ustacks. 
\r +* ustacks assembles the reads into matching stacks (hypothetical alleles). \r +* The outputs are in a collection called something like: Stacks2: ustacks on data 21, data 20, and others Loci and polymorphism. Click on this to see the files:\r +* for each sample, assembled loci (tsv format), named e.g. sample_CAAC.tags\r +* for each sample, model calls from each locus (tsv format), named e.g. sample_CAAC.snps\r +* for each sample, haplotypes/alleles recorded from each locus (tsv format), named e.g. sample_CAAC.alleles\r +* Please see sections 6.1 to 6.4 in https://catchenlab.life.illinois.edu/stacks/manual/#ufiles for a full description. \r +\r +cstacks:\r +* cstacks will merge stacks into a catalog of consensus loci. \r +* The outputs are in a collection called something like Stacks2: cstacks on data 3, data 71, and others Catalog of loci. Click on this to see the three files, each in tsv format:\r +catalog.tags\r +catalog.snps\r +catalog.alleles\r +\r +\r +sstacks:\r +* sstacks will compare each sample to the loci in the catalog. \r +* The outputs are in a collection called something like Stacks2: sstacks on data 3, data 76, and others Matches to the catalog.Click on this to see the files:\r +There is one file for each sample, named e.g. sample_CAAC.matches, in tsv format. \r +\r +tsv2bam:\r +* Conversion to BAM format\r +* Reads from each sample are now aligned to each locus, and the tsv2bam tool will convert this into a bam file for each sample. \r +* The outputs are in a collection called something like Stacks2: tsv2bam on data 3, data 94, and others Matches to the catalog.Click on this to see the files:\r +There is one file for each sample, named e.g sample_CAAC.matches, in BAM format. 
\r +\r +gstacks:\r +* Catalog of loci in fasta format\r +* Variant calls in VCF format\r +\r +populations:\r +* Locus consensus sequences in fasta format\r +* Snp calls, in VCF format\r +* Haplotypes, in VCF format\r +* Summary statistics\r +\r +![denovo](wf-denovo.png)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/348?version=1" ; + schema1:isBasedOn "https://github.com/AnnaSyme/workflow-denovo-stacks.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Stacks RAD-seq de novo workflow" ; + schema1:sdDatePublished "2024-08-05 10:32:08 +0100" ; + schema1:url "https://workflowhub.eu/workflows/348/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 25118 ; + schema1:creator ; + schema1:dateCreated "2022-05-31T07:39:10Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """# workflow-denovo-stacks\r +\r +These workflows are part of a set designed to work for RAD-seq data on the Galaxy platform, using the tools from the Stacks program. \r +\r +Galaxy Australia: https://usegalaxy.org.au/\r +\r +Stacks: http://catchenlab.life.illinois.edu/stacks/\r +\r +## Inputs\r +* demultiplexed reads in fastq format, may be output from the QC workflow. Files are in a collection. \r +* population map in text format\r +\r +\r +## Steps and outputs\r +\r +ustacks:\r +* input reads go to ustacks. \r +* ustacks assembles the reads into matching stacks (hypothetical alleles). \r +* The outputs are in a collection called something like: Stacks2: ustacks on data 21, data 20, and others Loci and polymorphism. Click on this to see the files:\r +* for each sample, assembled loci (tsv format), named e.g. sample_CAAC.tags\r +* for each sample, model calls from each locus (tsv format), named e.g. 
sample_CAAC.snps\r +* for each sample, haplotypes/alleles recorded from each locus (tsv format), named e.g. sample_CAAC.alleles\r +* Please see sections 6.1 to 6.4 in https://catchenlab.life.illinois.edu/stacks/manual/#ufiles for a full description. \r +\r +cstacks:\r +* cstacks will merge stacks into a catalog of consensus loci. \r +* The outputs are in a collection called something like Stacks2: cstacks on data 3, data 71, and others Catalog of loci. Click on this to see the three files, each in tsv format:\r +catalog.tags\r +catalog.snps\r +catalog.alleles\r +\r +\r +sstacks:\r +* sstacks will compare each sample to the loci in the catalog. \r +* The outputs are in a collection called something like Stacks2: sstacks on data 3, data 76, and others Matches to the catalog.Click on this to see the files:\r +There is one file for each sample, named e.g. sample_CAAC.matches, in tsv format. \r +\r +tsv2bam:\r +* Conversion to BAM format\r +* Reads from each sample are now aligned to each locus, and the tsv2bam tool will convert this into a bam file for each sample. \r +* The outputs are in a collection called something like Stacks2: tsv2bam on data 3, data 94, and others Matches to the catalog.Click on this to see the files:\r +There is one file for each sample, named e.g sample_CAAC.matches, in BAM format. \r +\r +gstacks:\r +* Catalog of loci in fasta format\r +* Variant calls in VCF format\r +\r +populations:\r +* Locus consensus sequences in fasta format\r +* Snp calls, in VCF format\r +* Haplotypes, in VCF format\r +* Summary statistics\r +\r +![denovo](wf-denovo.png)\r +""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Stacks RAD-seq de novo workflow" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/348?version=1" ; + schema1:version 1 . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 350220 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "call and score variants from WGS/WES of rare disease patients" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1014?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/raredisease" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/raredisease" ; + schema1:sdDatePublished "2024-08-05 10:23:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1014/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14007 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:08Z" ; + schema1:dateModified "2024-06-11T12:55:08Z" ; + schema1:description "call and score variants from WGS/WES of rare disease patients" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1014?version=5" ; + schema1:keywords "diagnostics, rare-disease, snv, structural-variants, variant-annotation, variant-calling, wes, WGS" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/raredisease" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1014?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + schema1:datePublished "2023-09-12T13:46:41.022242" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "parallel-accession-download/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.10" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=13" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=13" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9077 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:12Z" ; + schema1:dateModified "2024-06-11T12:55:12Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=13" ; + schema1:version 13 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/963?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/airrflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/airrflow" ; + schema1:sdDatePublished "2024-08-05 10:24:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/963/ro_crate?version=7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9444 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:37Z" ; + schema1:dateModified "2024-06-11T12:54:37Z" ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/963?version=13" ; + schema1:keywords "airr, b-cell, immcantation, immunorepertoire, repseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/airrflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/963?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + schema1:datePublished "2024-06-19T07:34:48.016693" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/brew3r" ; + schema1:license "GPL-3.0-or-later" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "brew3r/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Alignment, assembly and annotation of generated transcripts from RNASEQ reads." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/40?version=1" ; + schema1:isBasedOn "https://usegalaxy.eu/u/ambarishk/w/covid-19-assembly-using-tophat-and-annotation" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Assembly using Tophat2 and annotation" ; + schema1:sdDatePublished "2024-08-05 10:33:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/40/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 46216 ; + schema1:dateCreated "2020-06-18T23:17:39Z" ; + schema1:dateModified "2023-02-13T14:06:46Z" ; + schema1:description "Alignment, assembly and annotation of generated transcripts from RNASEQ reads." ; + schema1:image ; + schema1:keywords "Alignment, Assembly, Annotation, Tophat2, RNASEQ, covid-19" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Assembly using Tophat2 and annotation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/40?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 280925 . + + a schema1:Dataset ; + schema1:datePublished "2024-03-06T19:15:00.868427" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Scaffolding-Bionano-VGP7" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Scaffolding-Bionano-VGP7/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# HiC scaffolding pipeline\r +\r +Snakemake pipeline for scaffolding of a genome using HiC reads using yahs.\r +\r +## Prerequisites\r +\r +This pipeine has been tested using `Snakemake v7.32.4` and requires conda for installation of required tools. 
To run the pipline use the command:\r +\r +`snakemake --use-conda --cores N`\r +\r +where N is number of cores to use. There are provided a set of configuration and running scripts for exectution on a slurm queueing system. After configuring the `cluster.json` file run:\r +\r +`./run_cluster`\r +\r +## Before starting\r +\r +You need to create a temporary folder and specify the path in the `config.yaml` file. This should be able to hold the temporary files created when sorting the `.pairsam` file (100s of GB or even many TBs)\r +\r +The path to the genome assemly must be given in the `config.yaml`.\r +\r +The HiC reads should be paired and named as follows: `Library_1.fastq.gz Library_2.fastq.gz`. The pipeline can accept any number of paired HiC read files, but the naming must be consistent. The folder containing these files must be provided in the `config.yaml`.""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.796.1" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/snakemake/4.Scaffolding/yahs" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for HiC scaffolding pipeline" ; + schema1:sdDatePublished "2024-08-05 10:25:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/796/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4471 ; + schema1:creator ; + schema1:dateCreated "2024-03-16T09:01:33Z" ; + schema1:dateModified "2024-06-21T09:41:21Z" ; + schema1:description """# HiC scaffolding pipeline\r +\r +Snakemake pipeline for scaffolding of a genome using HiC reads using yahs.\r +\r +## Prerequisites\r +\r +This pipeine has been tested using `Snakemake v7.32.4` and requires conda for installation of required tools. 
To run the pipline use the command:\r +\r +`snakemake --use-conda --cores N`\r +\r +where N is number of cores to use. There are provided a set of configuration and running scripts for exectution on a slurm queueing system. After configuring the `cluster.json` file run:\r +\r +`./run_cluster`\r +\r +## Before starting\r +\r +You need to create a temporary folder and specify the path in the `config.yaml` file. This should be able to hold the temporary files created when sorting the `.pairsam` file (100s of GB or even many TBs)\r +\r +The path to the genome assemly must be given in the `config.yaml`.\r +\r +The HiC reads should be paired and named as follows: `Library_1.fastq.gz Library_2.fastq.gz`. The pipeline can accept any number of paired HiC read files, but the naming must be consistent. The folder containing these files must be provided in the `config.yaml`.""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/796?version=1" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "HiC scaffolding pipeline" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/796?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.284.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_complex_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/284/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7128 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-17T10:37:49Z" ; + schema1:dateModified "2022-04-11T09:29:43Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/284?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_complex_md_setup/python/workflow.py" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/amber-protein-ligand-complex-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/298?version=2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_complex_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:35 +0100" ; + schema1:url 
"https://workflowhub.eu/workflows/298/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 102255 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-23T08:43:48Z" ; + schema1:dateModified "2023-01-16T13:58:55Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/amber-protein-ligand-complex-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/298?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Amber Protein Ligand Complex 
MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_complex_md_setup/galaxy/biobb_wf_amber_complex_md_setup.ga" ; + schema1:version 2 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2023-10-23T13:04:32.570375" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.14" . + + a schema1:Dataset ; + schema1:datePublished "2024-06-17T16:32:31.401725" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/bacterial-genome-assembly" ; + schema1:license "GPL-3.0-or-later" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "bacterial-genome-assembly/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A workflow to simulate reads" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1015?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/readsimulator" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/readsimulator" ; + schema1:sdDatePublished "2024-08-05 10:23:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1015/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11759 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:09Z" ; + schema1:dateModified "2024-06-11T12:55:09Z" ; + schema1:description "A workflow to simulate reads" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1015?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/readsimulator" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1015?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """## Summary\r +\r +This pipeline has as major goal provide a tool for protein interactions (PPI) prediction data formalization and standardization using the [OntoPPI](https://link.springer.com/chapter/10.1007/978-3-030-36599-8_23) ontology. This pipeline is splitted in two parts: (i) a part to prepare data from three main sources of PPI data ([HINT](http://hint.yulab.org/), [STRING](https://string-db.org/) and [PredPrin](https://github.com/YasCoMa/PredPrin.git)) and create the standard files to be processed by the next part; (ii) the second part uses the data prepared before to semantically describe using ontologies related to the concepts of this domain. It describes the provenance information of PPI prediction experiments, datasets characteristics, functional annotations of proteins involved in the PPIs, description of the PPI detection methods (also named as evidence) used in the experiment, and the prediction score obtained by each PPI detection method for the PPIs. 
This pipeline also execute data fusion to map the same protein pairs from different data sources and, finally, it creates a database of all these information in the [alegro](https://allegrograph.com/) graph triplestore.\r +\r +## Requirements:\r +* Python packages needed:\r + - pip3 install numpy\r + - pip3 install rdflib\r + - pip3 install uuid\r + - pip3 install SPARQLWrapper\r + - alegro graph tools (pip3 install agraph-python)
\r + Go to this [site](https://franz.com/agraph/support/documentation/current/python/install.html) for the installation tutorial\r +\r +## Usage Instructions\r +### Preparation:\r +1. ````git clone https://github.com/YasCoMa/ppintegrator.git````\r +2. ````cd ppintegrator````\r +3. `pip3 install -r requirements.txt`\r +**Allegrograph is a triple store, which is a database to maintain semantic descriptions. This database's server provides a web application with a user interface to run, edit and manage queries, visualize results and manipulate the data without writing codes other than SPARQL query language. The use of the Allegregraph option is not mandatory, but if you want to export and use it, you have to install the server and the client.**\r +4. if you want to use the Allegrograph server option (this triple store has free license up to 5,000,000 triples), install allegrograph server in your machine (configure a user and password): Server - https://franz.com/agraph/support/documentation/current/server-installation.html; Client - https://franz.com/agraph/support/documentation/current/python/install.html\r +5. Export the following environment variables to configure Allegrograph server\r +\r +````\r +export AGRAPH_HOST=127.0.0.1\r +export AGRAPH_PORT=10035\r +export AGRAPH_USER=chosen_user\r +export AGRAPH_PASSWORD=chosen_password\r +````\r +5. Start allegrograph: ````path/to/allegrograph/bin/agraph-control --config path/to/allegrograph/lib/agraph.cfg start````\r +6. Read the file data_requirements.txt to understand which files are needed for the process\r +\r +### Data preparation (first part) - File ````prepare_data_triplification.py```` :\r +* Pipeline parameters:\r + - __-rt__ or __--running_type__
\r + Use to indicate from which source you want to prepare PPI data, as follows:
\r + 1 - Prepare data for PredPrin
\r + 2 - Prepare data for String
\r + 3 - Prepare data for HINT\r + - __-fec__ or __--file_experiment_config__
\r + File with the experiment configuration in json format
\r + \r + Examples are in these files (all the metadata are required): params_hint.json, params_predrep_5k.json e params_string.json\r +\r + - __-org__ or __--organism__
\r + Prepare data only for one organism of interest (example: homo_sapiens)
\r +\r + This parameter is optional. If you do not specify, it will automatically use the organisms described in the experiment configuration file above\r +\r +\r +* Running modes examples:\r + 1. Running for PPI data generated by PredPrin:
\r + ````python3 prepare_data_triplification.py -rt 1 -fec params_predrep_5k.json````\r +\r + 2. Running for HINT database:
\r + ````python3 prepare_data_triplification.py -rt 3 -fec params_hint.json````\r +\r + 3. Running for STRING database:
\r + ````python3 prepare_data_triplification.py -rt 2 -fec params_string.json````\r +\r + In the file ````auxiliar_data_preparation.py```` you can run it for all the examples provided automatically, as follows:
\r + ````python3 auxiliar_data_preparation.py````\r +\r +\r +### PPI data triplification (second part) - File ````triplification_ppi_data.py````:\r +\r +* Pipeline parameters:\r + - __-rt__ or __--running_type__
\r + Use to indicate which execution step you want to run (it is desirable following the order showed):
\r + 0 - Generate the descriptions for all the protein interaction steps of an experiment (run steps 1, 2 and 3)
\r + 1 - Generate triples just about data provenance
\r + 2 - Generate triples just for protein functional annotations
\r + 3 - Generate triples just for the score results of each evidence
\r + 4 - Execute data fusion
\r + 5 - Generate descriptions and execute data fusion (run steps 1, 2, 3 and 4)
\r + 6 - Export to allegrograph server\r +\r + - __-fec__ or __--file_experiment_config__
\r + File with the experiment configuration in json format
\r + \r + Examples are in these files (all the metadata are required): params_hint.json, params_predrep_5k.json e params_string.json\r +\r + - __-fev__ or __--file_evidence_info__
\r + File with the PPI detection methods information in json format
\r + \r + Examples are in these files (all the metadata are required): evidences_information.json, evidences_information_hint.json e evidences_information_string.json\r +\r + - __-fcv__ or __--file_config_evidence__
\r + File with the experiment and evidence methods files addresses in tsv format
\r + \r + Example of this file: config_evidence_file.tsv\r +\r +* Running modes examples:\r + 1. Running to generate all semantic descriptions for PredPrin:
\r + ````python3 triplification_ppi_data.py -rt 0 -fec params_predrep_5k.json -fev evidences_information.json````\r +\r + 2. Running to generate only triples of data provenance:
\r + ````python3 triplification_ppi_data.py -rt 1 -fec params_hint.json -fev evidences_information_hint.json````\r +\r + 3. Running to generate only triples of PPI scores for each evidence:
\r + ````python3 triplification_ppi_data.py -rt 3 -fec params_hint.json -fev evidences_information_hint.json````\r +\r + 4. Running to generate only triples of protein functional annotations (only PredPrin exports these annotations):
\r + ````python3 triplification_ppi_data.py -rt 2 -fec params_predrep_5k.json -fev evidences_information.json````\r +\r + 5. Running to generate all semantic descrptions for STRING:
\r + ````python3 triplification_ppi_data.py -rt 0 -fec params_string.json -fev evidences_information_string.json````\r + \r + **For the next options (4, 5 and 6), it is mandatory running at least mode 1 and 3 for HINT, STRING and PredPrin**\r + \r + 6. Running to execute data fusion of different sources:
\r + ````python3 triplification_ppi_data.py -rt 4 -fcv config_evidence_file.tsv````\r +\r + 7. Running to generate all semantic descriptions and execute data fusion of different sources (combines mode 0 and 4):
\r + ````python3 triplification_ppi_data.py -rt 5 -fcv config_evidence_file.tsv````\r +\r + 8. Export semantic data to allegrograph server:
\r + ````python3 triplification_ppi_data.py -rt 6 -fcv config_evidence_file.tsv````\r +\r +## Query Scenarios for analysis\r +Supposing you ran all the steps showed in the section above, you can run the following options to analyse the data stored alegro graph triple store.
\r +File to use for this section: ````query_analysis_ppitriplificator.py````
\r +\r +* Parameter:\r + - __-q__ or __--query_option__
\r + Use to indicate which query you want to perform:
\r + 1 - Get all the different organisms whose interactions are stored in the database
\r + 2 - Get the interactions that have scientific papers associated and the list of these papers
\r + 3 - Get a list of the most frequent biological processes annotated for the interactions of Escherichia coli bacteria
\r + 4 - Get only the interactions belonging to a specific biological process (regulation of transcription, DNA-templated) in Escherichia coli bacteria
\r + 5 - Get the scores of interactions belonging to a specific biological process (regulation of transcription, DNA-templated) in Escherichia coli bacteria
\r + 6 - Get a list of the most frequent biological processes annotated for the interactions of human organism
\r + 7 - Get only the interactions belonging to a specific biological process (positive regulation of transcription by RNA polymerase II) in human organism
\r + 8 - Get the scores of interactions belonging to a specific biological process (positive regulation of transcription by RNA polymerase II) in human organism\r +\r +* Running modes examples:\r + 1. Running queries:
\r + ````python3 query_analysis_ppitriplificator.py -q 1 ````
\r + Change number 1 to the respective number of the query you want to perform\r +\r +## Reference\r +Martins, Y. C., Ziviani, A., Cerqueira e Costa, M. D. O., Cavalcanti, M. C. R., Nicolás, M. F., & de Vasconcelos, A. T. R. (2023). PPIntegrator: semantic integrative system for protein–protein interaction and application for host–pathogen datasets. Bioinformatics Advances, 3(1), vbad067.\r +\r +## Bug Report\r +Please, use the [Issues](https://github.com/YasCoMa/ppintegrator/issues) tab to report any bug.""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/618?version=1" ; + schema1:isBasedOn "https://github.com/YasCoMa/ppintegrator" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PPIntegrator - PPI Triplification Process" ; + schema1:sdDatePublished "2024-08-05 10:27:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/618/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 81252 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 28124 ; + schema1:creator ; + schema1:dateCreated "2023-10-21T23:56:39Z" ; + schema1:dateModified "2023-10-21T23:56:39Z" ; + schema1:description """## Summary\r +\r +This pipeline has as major goal provide a tool for protein interactions (PPI) prediction data formalization and standardization using the [OntoPPI](https://link.springer.com/chapter/10.1007/978-3-030-36599-8_23) ontology. 
This pipeline is splitted in two parts: (i) a part to prepare data from three main sources of PPI data ([HINT](http://hint.yulab.org/), [STRING](https://string-db.org/) and [PredPrin](https://github.com/YasCoMa/PredPrin.git)) and create the standard files to be processed by the next part; (ii) the second part uses the data prepared before to semantically describe using ontologies related to the concepts of this domain. It describes the provenance information of PPI prediction experiments, datasets characteristics, functional annotations of proteins involved in the PPIs, description of the PPI detection methods (also named as evidence) used in the experiment, and the prediction score obtained by each PPI detection method for the PPIs. This pipeline also execute data fusion to map the same protein pairs from different data sources and, finally, it creates a database of all these information in the [alegro](https://allegrograph.com/) graph triplestore.\r +\r +## Requirements:\r +* Python packages needed:\r + - pip3 install numpy\r + - pip3 install rdflib\r + - pip3 install uuid\r + - pip3 install SPARQLWrapper\r + - alegro graph tools (pip3 install agraph-python)
\r + Go to this [site](https://franz.com/agraph/support/documentation/current/python/install.html) for the installation tutorial\r +\r +## Usage Instructions\r +### Preparation:\r +1. ````git clone https://github.com/YasCoMa/ppintegrator.git````\r +2. ````cd ppintegrator````\r +3. `pip3 install -r requirements.txt`\r +**Allegrograph is a triple store, which is a database to maintain semantic descriptions. This database's server provides a web application with a user interface to run, edit and manage queries, visualize results and manipulate the data without writing codes other than SPARQL query language. The use of the Allegregraph option is not mandatory, but if you want to export and use it, you have to install the server and the client.**\r +4. if you want to use the Allegrograph server option (this triple store has free license up to 5,000,000 triples), install allegrograph server in your machine (configure a user and password): Server - https://franz.com/agraph/support/documentation/current/server-installation.html; Client - https://franz.com/agraph/support/documentation/current/python/install.html\r +5. Export the following environment variables to configure Allegrograph server\r +\r +````\r +export AGRAPH_HOST=127.0.0.1\r +export AGRAPH_PORT=10035\r +export AGRAPH_USER=chosen_user\r +export AGRAPH_PASSWORD=chosen_password\r +````\r +5. Start allegrograph: ````path/to/allegrograph/bin/agraph-control --config path/to/allegrograph/lib/agraph.cfg start````\r +6. Read the file data_requirements.txt to understand which files are needed for the process\r +\r +### Data preparation (first part) - File ````prepare_data_triplification.py```` :\r +* Pipeline parameters:\r + - __-rt__ or __--running_type__
\r + Use to indicate from which source you want to prepare PPI data, as follows:
\r + 1 - Prepare data for PredPrin
\r + 2 - Prepare data for String
\r + 3 - Prepare data for HINT\r + - __-fec__ or __--file_experiment_config__
\r + File with the experiment configuration in json format
\r + \r + Examples are in these files (all the metadata are required): params_hint.json, params_predrep_5k.json e params_string.json\r +\r + - __-org__ or __--organism__
\r + Prepare data only for one organism of interest (example: homo_sapiens)
\r +\r + This parameter is optional. If you do not specify, it will automatically use the organisms described in the experiment configuration file above\r +\r +\r +* Running modes examples:\r + 1. Running for PPI data generated by PredPrin:
\r + ````python3 prepare_data_triplification.py -rt 1 -fec params_predrep_5k.json````\r +\r + 2. Running for HINT database:
\r + ````python3 prepare_data_triplification.py -rt 3 -fec params_hint.json````\r +\r + 3. Running for STRING database:
\r + ````python3 prepare_data_triplification.py -rt 2 -fec params_string.json````\r +\r + In the file ````auxiliar_data_preparation.py```` you can run it for all the examples provided automatically, as follows:
\r + ````python3 auxiliar_data_preparation.py````\r +\r +\r +### PPI data triplification (second part) - File ````triplification_ppi_data.py````:\r +\r +* Pipeline parameters:\r + - __-rt__ or __--running_type__
\r + Use to indicate which execution step you want to run (it is desirable following the order showed):
\r + 0 - Generate the descriptions for all the protein interaction steps of an experiment (run steps 1, 2 and 3)
\r + 1 - Generate triples just about data provenance
\r + 2 - Generate triples just for protein functional annotations
\r + 3 - Generate triples just for the score results of each evidence
\r + 4 - Execute data fusion
\r + 5 - Generate descriptions and execute data fusion (run steps 1, 2, 3 and 4)
\r + 6 - Export to allegrograph server\r +\r + - __-fec__ or __--file_experiment_config__
\r + File with the experiment configuration in json format
\r + \r + Examples are in these files (all the metadata are required): params_hint.json, params_predrep_5k.json e params_string.json\r +\r + - __-fev__ or __--file_evidence_info__
\r + File with the PPI detection methods information in json format
\r + \r + Examples are in these files (all the metadata are required): evidences_information.json, evidences_information_hint.json e evidences_information_string.json\r +\r + - __-fcv__ or __--file_config_evidence__
\r + File with the experiment and evidence methods files addresses in tsv format
\r + \r + Example of this file: config_evidence_file.tsv\r +\r +* Running modes examples:\r + 1. Running to generate all semantic descriptions for PredPrin:
\r + ````python3 triplification_ppi_data.py -rt 0 -fec params_predrep_5k.json -fev evidences_information.json````\r +\r + 2. Running to generate only triples of data provenance:
\r + ````python3 triplification_ppi_data.py -rt 1 -fec params_hint.json -fev evidences_information_hint.json````\r +\r + 3. Running to generate only triples of PPI scores for each evidence:
\r + ````python3 triplification_ppi_data.py -rt 3 -fec params_hint.json -fev evidences_information_hint.json````\r +\r + 4. Running to generate only triples of protein functional annotations (only PredPrin exports these annotations):
\r + ````python3 triplification_ppi_data.py -rt 2 -fec params_predrep_5k.json -fev evidences_information.json````\r +\r + 5. Running to generate all semantic descrptions for STRING:
\r + ````python3 triplification_ppi_data.py -rt 0 -fec params_string.json -fev evidences_information_string.json````\r + \r + **For the next options (4, 5 and 6), it is mandatory running at least mode 1 and 3 for HINT, STRING and PredPrin**\r + \r + 6. Running to execute data fusion of different sources:
\r + ````python3 triplification_ppi_data.py -rt 4 -fcv config_evidence_file.tsv````\r +\r + 7. Running to generate all semantic descriptions and execute data fusion of different sources (combines mode 0 and 4):
\r + ````python3 triplification_ppi_data.py -rt 5 -fcv config_evidence_file.tsv````\r +\r + 8. Export semantic data to allegrograph server:
\r + ````python3 triplification_ppi_data.py -rt 6 -fcv config_evidence_file.tsv````\r +\r +## Query Scenarios for analysis\r +Supposing you ran all the steps showed in the section above, you can run the following options to analyse the data stored alegro graph triple store.
\r +File to use for this section: ````query_analysis_ppitriplificator.py````
\r +\r +* Parameter:\r + - __-q__ or __--query_option__
\r + Use to indicate which query you want to perform:
\r + 1 - Get all the different organisms whose interactions are stored in the database
\r + 2 - Get the interactions that have scientific papers associated and the list of these papers
\r + 3 - Get a list of the most frequent biological processes annotated for the interactions of Escherichia coli bacteria
\r + 4 - Get only the interactions belonging to a specific biological process (regulation of transcription, DNA-templated) in Escherichia coli bacteria
\r + 5 - Get the scores of interactions belonging to a specific biological process (regulation of transcription, DNA-templated) in Escherichia coli bacteria
\r + 6 - Get a list of the most frequent biological processes annotated for the interactions of human organism
\r + 7 - Get only the interactions belonging to a specific biological process (positive regulation of transcription by RNA polymerase II) in human organism
\r + 8 - Get the scores of interactions belonging to a specific biological process (positive regulation of transcription by RNA polymerase II) in human organism\r +\r +* Running modes examples:\r + 1. Running queries:
\r + ````python3 query_analysis_ppitriplificator.py -q 1 ````
\r + Change number 1 to the respective number of the query you want to perform\r +\r +## Reference\r +Martins, Y. C., Ziviani, A., Cerqueira e Costa, M. D. O., Cavalcanti, M. C. R., Nicolás, M. F., & de Vasconcelos, A. T. R. (2023). PPIntegrator: semantic integrative system for protein–protein interaction and application for host–pathogen datasets. Bioinformatics Advances, 3(1), vbad067.\r +\r +## Bug Report\r +Please, use the [Issues](https://github.com/YasCoMa/ppintegrator/issues) tab to report any bug.""" ; + schema1:image ; + schema1:keywords "protein interactin data triplification, protein interactions database integration, data fusion, data annotation" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "PPIntegrator - PPI Triplification Process" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/618?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2023-10-17T19:52:22.592351" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "atacseq/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.12" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. 
Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-protein-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.277.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/277/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 55159 ; + schema1:creator , + ; + schema1:dateCreated "2023-05-03T13:41:05Z" ; + schema1:dateModified "2023-05-03T13:43:16Z" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-protein-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/277?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage 
; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_md_setup/galaxy/biobb_wf_md_setup.ga" ; + schema1:version 3 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/972?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/circdna" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/circdna" ; + schema1:sdDatePublished "2024-08-05 10:24:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/972/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10166 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:45Z" ; + schema1:dateModified "2024-06-11T12:54:45Z" ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/972?version=7" ; + schema1:keywords "ampliconarchitect, ampliconsuite, circle-seq, circular, DNA, eccdna, ecdna, extrachromosomal-circular-dna, Genomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/circdna" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/972?version=6" ; + schema1:version 6 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.54.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_ligand_parameterization" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter GMX Notebook Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished 
"2024-08-05 10:25:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/54/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15271 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-09-15T10:59:36Z" ; + schema1:dateModified "2023-01-16T13:44:50Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/54?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter GMX Notebook Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_ligand_parameterization/master/biobb_wf_ligand_parameterization/notebooks/biobb_ligand_parameterization_tutorial.ipynb" ; + schema1:version 3 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-27T09:48:18.733544" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "The pangenome graph construction pipeline renders a collection of sequences into a pangenome graph. Its goal is to build a graph that is locally directed and acyclic while preserving large-scale variation. Maintaining local linearity is important for interpretation, visualization, mapping, comparative genomics, and reuse of pangenome graphs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1007?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/pangenome" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/pangenome" ; + schema1:sdDatePublished "2024-08-05 10:23:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1007/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11520 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:07Z" ; + schema1:dateModified "2024-06-11T12:55:07Z" ; + schema1:description "The pangenome graph construction pipeline renders a collection of sequences into a pangenome graph. Its goal is to build a graph that is locally directed and acyclic while preserving large-scale variation. Maintaining local linearity is important for interpretation, visualization, mapping, comparative genomics, and reuse of pangenome graphs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1007?version=3" ; + schema1:keywords "pangenome" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/pangenome" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1007?version=4" ; + schema1:version 4 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-08-05 10:22:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6562 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:02Z" ; + schema1:dateModified "2024-06-11T12:55:02Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# BatchConvert\r +\r +A command line tool for converting image data into either of the standard file formats OME-TIFF or OME-Zarr. \r +\r +The tool wraps the dedicated file converters bfconvert and bioformats2raw to convert into OME-TIFF or OME-Zarr,\r +respectively. The workflow management system NextFlow is used to perform conversion in parallel for batches of images. 
\r +\r +The tool also wraps s3 and Aspera clients (go-mc and aspera-cli, respectively). Therefore, input and output locations can \r +be specified as local or remote storage and file transfer will be performed automatically. The conversion can be run on \r +HPC with Slurm. \r +\r +![](figures/diagram.png)\r +\r +## Installation & Dependencies\r +\r +**Important** note: The package has been so far only tested on Ubuntu 20.04.\r +\r +The minimal dependency to run the tool is NextFlow, which should be installed and made accessible from the command line.\r +\r +If conda exists on your system, you can install BatchConvert together with NextFlow using the following script:\r +```\r +git clone https://github.com/Euro-BioImaging/BatchConvert.git && \\ \r +source BatchConvert/installation/install_with_nextflow.sh\r +```\r +\r +\r +If you already have NextFlow installed and accessible from the command line (or if you prefer to install it manually \r +e.g., as shown [here](https://www.nextflow.io/docs/latest/getstarted.html)), you can also install BatchConvert alone, using the following script:\r +```\r +git clone https://github.com/Euro-BioImaging/BatchConvert.git && \\ \r +source BatchConvert/installation/install.sh\r +```\r +\r +\r +Other dependencies (which will be **automatically** installed):\r +- bioformats2raw (entrypoint bioformats2raw)\r +- bftools (entrypoint bfconvert)\r +- go-mc (entrypoint mc)\r +- aspera-cli (entrypoint ascp)\r +\r +These dependencies will be pulled and cached automatically at the first execution of the conversion command. \r +The mode of dependency management can be specified by using the command line option ``--profile`` or `-pf`. Depending \r +on how this option is specified, the dependencies will be acquired / run either via conda or via docker/singularity containers. \r +\r +Specifying ``--profile conda`` (default) will install the dependencies to an \r +environment at ``./.condaCache`` and use this environment to run the workflow. 
This option \r +requires that miniconda/anaconda is installed on your system. \r +\r +Alternatively, specifying ``--profile docker`` or ``--profile singularity`` will pull a docker or \r +singularity image with the dependencies, respectively, and use this image to run the workflow.\r +These options assume that the respective container runtime (docker or singularity) is available on \r +your system. If singularity is being used, a cache directory will be created at the path \r +``./.singularityCache`` where the singularity image is stored. \r +\r +Finally, you can still choose to install the dependencies manually and use your own installations to run\r +the workflow. In this case, you should specify ``--profile standard`` and make sure the entrypoints\r +specified above are recognised by your shell. \r +\r +\r +## Configuration\r +\r +BatchConvert can be configured to have default options for file conversion and transfer. Probably, the most important sets of parameters\r +to be configured include credentials for the remote ends. The easiest way to configure remote stores is by running the interactive \r +configuration command as indicated below.\r +\r +### Configuration of the s3 object store\r +\r +Run the interactive configuration command: \r +\r +`batchconvert configure_s3_remote`\r +\r +This will start a sequence of requests for s3 credentials such as name, url, access, etc. Provide each requested credential and click\r +enter. Continue this cycle until the process is finished. 
Upon completing the configuration, the sequence of commands should roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_s3_remote\r +enter remote name (for example s3)\r +s3\r +enter url:\r +https://s3.embl.de\r +enter access key:\r +\r +enter secret key:\r +\r +enter bucket name:\r +\r +Configuration of the default s3 credentials is complete\r +```\r +\r +\r +### Configuration of the BioStudies user space\r +\r +Run the interactive configuration command: \r +\r +`batchconvert configure_bia_remote`\r +\r +This will prompt a request for the secret directory to connect to. Enter the secret directory for your user space and click enter. \r +Upon completing the configuration, the sequence of commands should roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_bia_remote\r +enter the secret directory for BioImage Archive user space:\r +\r +configuration of the default bia credentials is complete\r +```\r +\r +### Configuration of the slurm options\r +\r +BatchConvert can also run on slurm clusters. In order to configure the slurm parameters, run the interactive configuration command: \r +\r +`batchconvert configure_slurm`\r +\r +This will start a sequence of requests for slurm options. Provide each requested option and click enter. \r +Continue this cycle until the process is finished. 
Upon completing the configuration, the sequence of commands should \r +roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_slurm\r +Please enter value for queue_size\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´50´\r +s\r +Please enter value for submit_rate_limit\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´10/2min´\r +s\r +Please enter value for cluster_options\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´--mem-per-cpu=3140 --cpus-per-task=16´\r +s\r +Please enter value for time\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´6h´\r +s\r +configuration of the default slurm parameters is complete\r +```\r +\r +### Configuration of the default conversion parameters\r +\r +While all conversion parameters can be specified as command line arguments, it can\r +be useful for the users to set their own default parameters to avoid re-entering those\r +parameters for subsequent executions. BatchConvert allows for interactive configuration of \r +conversion in the same way as configuration of the remote stores described above.\r +\r +To configure the conversion into OME-TIFF, run the following command:\r +\r +`batchconvert configure_ometiff`\r +\r +This will prompt the user to enter a series of parameters, which will then be saved as the \r +default parameters to be passed to the `batchconvert ometiff` command. 
Upon completing the \r +configuration, the sequence of commands should look similar to:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_ometiff\r +Please enter value for noflat\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for series\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for timepoint\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +...\r +...\r +...\r +...\r +...\r +...\r +Configuration of the default parameters for 'bfconvert' is complete\r +```\r +\r +\r +To configure the conversion into OME-Zarr, run the following command:\r +\r +`batchconvert configure_omezarr`\r +\r +Similarly, this will prompt the user to enter a series of parameters, which will then be saved as the \r +default parameters to be passed to the `batchconvert omezarr` command. 
Upon completing the configuration, \r +the sequence of commands should look similar to:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_omezarr\r +Please enter value for resolutions_zarr\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for chunk_h\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for chunk_w\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +...\r +...\r +...\r +...\r +...\r +...\r +Configuration of the default parameters for 'bioformats2raw' is complete\r +```\r +\r +It is important to note that the initial defaults for the conversion parameters are the same as the defaults\r +of the backend tools bfconvert and bioformats2raw, as noted in the prompt excerpt above. Through interactive configuration, \r +the user is overriding these initial defaults and setting their own defaults. It is possible to reset the initial\r +defaults by running the following command.\r +\r +`batchconvert reset_defaults`\r +\r +Another important point is that any of these configured parameters can be overridden by passing a value to that\r +parameter in the commandline. For instance, in the following command, the value of 20 will be assigned to `chunk_h` parameter \r +even if the value for the same parameter might be different in the configuration file. \r +\r +`batchconvert omezarr --chunk_h 20 `\r +\r +\r +## Examples\r +\r +### Local conversion\r +\r +#### Parallel conversion of files to separate OME-TIFFs / OME-Zarrs:\r +Convert a batch of images on your local storage into OME-TIFF format. 
\r +Note that the `input_path` in the command given below is typically a \r +directory with multiple image files but a single image file can also be passed:\\\r +`batchconvert ometiff -pf conda ` \r +\r +Note that if this is your first conversion with the profile `conda`, \r +it will take a while for a conda environment with the dependencies to be\r +created. All the subsequent conversion commands with the profile `conda`,\r +however, will use this environment, and thus show no such delay.\r +\r +Since conda is the default profile, it does not have to be \r +explicitly included in the command line. Thus, the command can be shortened to:\\\r +`batchconvert ometiff `\r +\r +Convert only the first channel of the images:\\\r +`batchconvert ometiff -chn 0 `\r +\r +Crop the images being converted along x and y axis by 150 pixels:\\\r +`batchconvert ometiff -cr 0,0,150,150 `\r +\r +Convert into OME-Zarr instead:\\\r +`batchconvert omezarr `\r +\r +Convert into OME-Zarr with 3 resolution levels:\\\r +`batchconvert omezarr -rz 3 `\r +\r +Select a subset of images with a matching string such as "mutation":\\\r +`batchconvert omezarr -p mutation `\r +\r +Select a subset of images using wildcards. Note that the use of "" around \r +the input path is necessary when using wildcards:\\\r +`batchconvert omezarr "/*D3*.oir" `\r +\r +Convert by using a singularity container instead of conda environment (requires\r +singularity to be installed on your system):\\\r +`batchconvert omezarr -pf singularity "/*D3*.oir" `\r +\r +Convert by using a docker container instead of conda environment (requires docker\r +to be installed on your system):\\\r +`batchconvert omezarr -pf docker "/*D3*.oir" `\r +\r +Note that similarly to the case with the profile `conda`, the first execution of\r +a conversion with the profile `singularity` or `docker` will take a while for the\r +container image to be pulled. 
All the subsequent conversion commands using a \r +container option will use this image, and thus show no such delay. \r +\r +Convert local data and upload the output to an s3 bucket. Note that the output \r +path is created relative to the bucket specified in your s3 configuration:\\\r +`batchconvert omezarr -dt s3 `\r +\r +Receive input files from an s3 bucket, convert locally and upload the output to \r +the same bucket. Note that wildcards cannot be used when the input is from s3. \r +Use pattern matching option `-p` for selecting a subset of input files:\\\r +`batchconvert omezarr -p mutation -st s3 -dt s3 `\r +\r +Receive input files from your private BioStudies user space and convert them locally.\r +Use pattern matching option `-p` for selecting a subset of input files:\\\r +`batchconvert omezarr -p mutation -st bia `\r +\r +Receive an input from an s3 bucket, convert locally and upload the output to your \r +private BioStudies user space. Use pattern matching option `-p` for selecting a subset \r +of input files:\\\r +`batchconvert omezarr -p mutation -st s3 -dt bia `\r +\r +Note that in all the examples shown above, BatchConvert treats each input file as separate,\r +standalone data point, disregarding the possibility that some of the input files might belong to \r +the same multidimensional array. Thus, each input file is converted to an independent \r +OME-TIFF / OME-Zarr and the number of outputs will thus equal the number of selected input files.\r +An alternative scenario is discussed below.\r +\r +#### Parallel conversion of file groups by stacking multiple files into single OME-TIFFs / OME-Zarrs:\r +\r +When the flag `--merge_files` is specified, BatchConvert tries to detect which input files might \r +belong to the same multidimensional array based on the patterns in the filenames. 
Then a "grouped conversion" \r +is performed, meaning that the files belonging to the same dataset will be incorporated into \r +a single OME-TIFF / OME-Zarr series, in that files will be concatenated along specific dimension(s) \r +during the conversion. Multiple file groups in the input directory can be detected and converted \r +in parallel. \r +\r +This feature uses Bio-Formats's pattern files as described [here](https://docs.openmicroscopy.org/bio-formats/6.6.0/formats/pattern-file.html).\r +However, BatchConvert generates pattern files automatically, allowing the user to directly use the \r +input directory in the conversion command. BatchConvert also has the option of specifying the \r +concatenation axes in the command line, which is especially useful in cases where the filenames \r +may not contain dimension information. \r +\r +To be able to use the `--merge files` flag, the input file names must obey certain rules:\r +1. File names in the same group must be uniform, except for one or more **numeric field(s)**, which\r +should show incremental change across the files. These so-called **variable fields** \r +will be detected and used as the dimension(s) of concatenation.\r +2. The length of variable fields must be uniform within the group. For instance, if the\r +variable field has values reaching multi-digit numbers, leading "0"s should be included where needed \r +in the file names to make the variable field length uniform within the group.\r +3. Typically, each variable field should follow a dimension specifier. What patterns can be used as \r +dimension specifiers are explained [here](https://docs.openmicroscopy.org/bio-formats/6.6.0/formats/pattern-file.html).\r +However, BatchConvert also has the option `--concatenation_order`, which allows the user to\r +specify from the command line, the dimension(s), along which the files must be concatenated.\r +4. 
File names that are unique and cannot be associated with any group will be assumed as\r +standalone images and converted accordingly. \r +\r +Below are some examples of grouped conversion commands in the context of different possible use-case scenarios:\r +\r +**Example 1:**\r +\r +This is an example of a folder with non-uniform filename lengths:\r +```\r +time-series/test_img_T2\r +time-series/test_img_T4\r +time-series/test_img_T6\r +time-series/test_img_T8\r +time-series/test_img_T10\r +time-series/test_img_T12\r +```\r +In this example, leading zeroes are missing in the variable fields of some filenames. \r +A typical command to convert this folder to a single OME-TIFF would look like: \\\r +`batchconvert --ometiff --merge_files /time-series `\r +\r +However, this command would fail to create a single OME-Zarr folder due to the non-uniform \r +lengths of the filenames. Instead, the files would be split into two groups based on the\r +filename length, leading to two separate OME-Zarrs with names:\r +\r +`test_img_TRange{2-8-2}.ome.zarr` and `test_img_TRange{10-12-2}.ome.zarr`\r +\r +Here is the corrected version of the folder for the above example-\r +```\r +time-series/test_img_T02\r +time-series/test_img_T04\r +time-series/test_img_T06\r +time-series/test_img_T08\r +time-series/test_img_T10\r +time-series/test_img_T12\r +```\r +\r +Executing the same command on this folder would result in a single OME-Zarr with the name:\r +`test_img_TRange{02-12-2}.ome.zarr`\r +\r +**Example 2**- \r +\r +In this example, the filename lengths are uniform but the incrementation within the variable field is not.\r +```\r +time-series/test_img_T2\r +time-series/test_img_T4\r +time-series/test_img_T5\r +time-series/test_img_T7\r +```\r +\r +A typical command to convert this folder to a single OME-Zarr would look like: \\\r +`batchconvert --omezarr --merge_files /time-series `\r +\r +However, the command would fail to assume these files as a single group due to the\r +non-uniform 
incrementation in the variable field of the filenames. Instead, the dataset \r +would be split into two groups, leading to two separate OME-Zarrs with the following names:\r +`test_img_TRange{2-4-2}.ome.zarr` and `test_img_TRange{5-7-2}.ome.zarr` \r +\r +\r +**Example 3**\r +\r +This is an example of a case where the conversion attempts to concatenate files along two\r +dimensions, channel and time.\r +```\r +multichannel_time-series/test_img_C1-T1\r +multichannel_time-series/test_img_C1-T2\r +multichannel_time-series/test_img_C1-T3\r +multichannel_time-series/test_img_C2-T1\r +multichannel_time-series/test_img_C2-T2\r +```\r +To convert this folder to a single OME-Zarr, one could try the following command: \\\r +`batchconvert --omezarr --merge_files /multichannel_time-series `\r +\r +However, since the channel-2 does not have the same number of timeframes as the channel-1, \r +BatchConvert will fail to assume these two channels as part of the same series and\r +will instead split the two channels into two separate OME-Zarrs. 
\r +\r +The output would look like: \\\r +`test_img_C1-TRange{1-3-1}.ome.zarr` \\\r +`test_img_C2-TRange{1-2-1}.ome.zarr`\r +\r +To be able to really incorporate all files into a single OME-Zarr, the folder should have equal\r +number of images corresponding to both channels, as shown below:\r +```\r +multichannel_time-series/test_img_C1-T1\r +multichannel_time-series/test_img_C1-T2\r +multichannel_time-series/test_img_C1-T3\r +multichannel_time-series/test_img_C2-T1\r +multichannel_time-series/test_img_C2-T2\r +multichannel_time-series/test_img_C2-T3\r +```\r +The same conversion command on this version of the input folder would result in a single \r +OME-Zarr with the name: \\\r +`test_img_CRange{1-2-1}-TRange{1-3-1}.ome.zarr`\r +\r +\r +**Example 4**\r +\r +This is another example of a case, where there are multiple filename patterns in the input folder.\r +\r +```\r +folder_with_multiple_groups/test_img_C1-T1\r +folder_with_multiple_groups/test_img_C1-T2\r +folder_with_multiple_groups/test_img_C2-T1\r +folder_with_multiple_groups/test_img_C2-T2\r +folder_with_multiple_groups/test_img_T1-Z1\r +folder_with_multiple_groups/test_img_T1-Z2\r +folder_with_multiple_groups/test_img_T1-Z3\r +folder_with_multiple_groups/test_img_T2-Z1\r +folder_with_multiple_groups/test_img_T2-Z2\r +folder_with_multiple_groups/test_img_T2-Z3\r +```\r +\r +One can convert this folder with- \\\r +`batchconvert --omezarr --merge_files /folder_with_multiple_groups `\r + \r +BatchConvert will detect the two patterns in this folder and perform two grouped conversions. \r +The output folders will be named as `test_img_CRange{1-2-1}-TRange{1-2-1}.ome.zarr` and \r +`test_img_TRange{1-2-1}-ZRange{1-3-1}.ome.zarr`. 
\r +\r +\r +**Example 5**\r +\r +Now imagine that we have the same files as in the example 4 but the filenames of the\r +first group lack any dimension specifier, so we have the following folder:\r +\r +```\r +folder_with_multiple_groups/test_img_1-1\r +folder_with_multiple_groups/test_img_1-2\r +folder_with_multiple_groups/test_img_2-1\r +folder_with_multiple_groups/test_img_2-2\r +folder_with_multiple_groups/test_img_T1-Z1\r +folder_with_multiple_groups/test_img_T1-Z2\r +folder_with_multiple_groups/test_img_T1-Z3\r +folder_with_multiple_groups/test_img_T2-Z1\r +folder_with_multiple_groups/test_img_T2-Z2\r +folder_with_multiple_groups/test_img_T2-Z3\r +```\r +\r +In such a scenario, BatchConvert allows the user to specify the concatenation axes \r +via `--concatenation_order` option. This option expects comma-separated strings of dimensions \r +for each group. In this example, the user must provide a string of 2 characters, such as `ct` for \r +channel and time, for group 1, since there are two variable fields for this group. Since group 2 \r +already has dimension specifiers (T and Z as specified in the filenames preceding the variable fields),\r +the user does not need to specify anything for this group, and can enter `auto` or `aa` for automatic\r +detection of the specifiers. \r +\r +So the following line can be used to convert this folder: \\\r +`batchconvert --omezarr --merge_files --concatenation_order ct,aa /folder_with_multiple_groups `\r +\r +The resulting OME-Zarrs will have the names:\r +`test_img_CRange{1-2-1}-TRange{1-2-1}.ome.zarr` and\r +`test_img_TRange{1-2-1}-ZRange{1-3-1}.ome.zarr`\r +\r +Note that `--concatenation_order` will override any dimension specifiers already\r +existing in the filenames.\r +\r +\r +**Example 6**\r +\r +There can be scenarios where the user may want to have further control over the axes along \r +which to concatenate the images. 
For example, the filenames might contain the data acquisition\r +date, which can be recognised by BatchConvert as a concatenation axis in the automatic \r +detection mode. An example of such a fileset might look like:\r +\r +```\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T1\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T2\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T3\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T1\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T2\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T3\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T1\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T2\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T3\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T1\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T2\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T3\r +```\r +\r +One may try the following command to convert this folder:\r +\r +`batchconvert --omezarr --merge_files /filenames_with_dates `\r +\r +Since the concatenation axes are not specified, this command would try to create\r +a single OME-Zarr with name: `test_data_dateRange{03-04-1}.03.2023_imageZRange{1-2-1}-TRange{1-3-1}`.\r +\r +In order to force BatchConvert to ignore the date field, the user can restrict the concatenation \r +axes to the last two numeric fields. This can be done by using a command such as: \\\r +`batchconvert --omezarr --merge_files --concatenation_order aa /filenames_with_dates ` \\\r +This command will avoid concatenation along the date field, and therefore, there will be two\r +OME-Zarrs corresponding to the two dates. The number of characters being passed to the \r +`--concatenation_order` option specifies the number of numeric fields (starting from the right \r +end of the filename) that are recognised by the BatchConvert as valid concatenation axes. 
\r +Passing `aa`, therefore, means that the last two numeric fields must be recognised as \r +concatenation axes and the dimension type should be automatically detected (`a` for automatic). \r +In the same logic, one could, for example, convert each Z section into a separate OME-Zarr by \r +specifying `--concatenation_order a`.\r +\r +\r +\r +### Conversion on slurm\r +\r +All the examples given above can also be run on slurm by specifying `-pf cluster` option. \r +Note that this option automatically uses the singularity profile:\\\r +`batchconvert omezarr -pf cluster -p .oir `\r +\r +\r +\r +\r +\r +\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.453.2" ; + schema1:isBasedOn "https://github.com/Euro-BioImaging/BatchConvert.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for BatchConvert" ; + schema1:sdDatePublished "2024-08-05 10:29:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/453/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 89 ; + schema1:creator ; + schema1:dateCreated "2023-04-27T11:21:54Z" ; + schema1:dateModified "2023-07-03T11:04:19Z" ; + schema1:description """# BatchConvert\r +\r +A command line tool for converting image data into either of the standard file formats OME-TIFF or OME-Zarr. \r +\r +The tool wraps the dedicated file converters bfconvert and bioformats2raw to convert into OME-TIFF or OME-Zarr,\r +respectively. The workflow management system NextFlow is used to perform conversion in parallel for batches of images. \r +\r +The tool also wraps s3 and Aspera clients (go-mc and aspera-cli, respectively). Therefore, input and output locations can \r +be specified as local or remote storage and file transfer will be performed automatically. The conversion can be run on \r +HPC with Slurm. 
\r +\r +![](figures/diagram.png)\r +\r +## Installation & Dependencies\r +\r +**Important** note: The package has been so far only tested on Ubuntu 20.04.\r +\r +The minimal dependency to run the tool is NextFlow, which should be installed and made accessible from the command line.\r +\r +If conda exists on your system, you can install BatchConvert together with NextFlow using the following script:\r +```\r +git clone https://github.com/Euro-BioImaging/BatchConvert.git && \\ \r +source BatchConvert/installation/install_with_nextflow.sh\r +```\r +\r +\r +If you already have NextFlow installed and accessible from the command line (or if you prefer to install it manually \r +e.g., as shown [here](https://www.nextflow.io/docs/latest/getstarted.html)), you can also install BatchConvert alone, using the following script:\r +```\r +git clone https://github.com/Euro-BioImaging/BatchConvert.git && \\ \r +source BatchConvert/installation/install.sh\r +```\r +\r +\r +Other dependencies (which will be **automatically** installed):\r +- bioformats2raw (entrypoint bioformats2raw)\r +- bftools (entrypoint bfconvert)\r +- go-mc (entrypoint mc)\r +- aspera-cli (entrypoint ascp)\r +\r +These dependencies will be pulled and cached automatically at the first execution of the conversion command. \r +The mode of dependency management can be specified by using the command line option ``--profile`` or `-pf`. Depending \r +on how this option is specified, the dependencies will be acquired / run either via conda or via docker/singularity containers. \r +\r +Specifying ``--profile conda`` (default) will install the dependencies to an \r +environment at ``./.condaCache`` and use this environment to run the workflow. This option \r +requires that miniconda/anaconda is installed on your system. 
\r +\r +Alternatively, specifying ``--profile docker`` or ``--profile singularity`` will pull a docker or \r +singularity image with the dependencies, respectively, and use this image to run the workflow.\r +These options assume that the respective container runtime (docker or singularity) is available on \r +your system. If singularity is being used, a cache directory will be created at the path \r +``./.singularityCache`` where the singularity image is stored. \r +\r +Finally, you can still choose to install the dependencies manually and use your own installations to run\r +the workflow. In this case, you should specify ``--profile standard`` and make sure the entrypoints\r +specified above are recognised by your shell. \r +\r +\r +## Configuration\r +\r +BatchConvert can be configured to have default options for file conversion and transfer. Probably, the most important sets of parameters\r +to be configured include credentials for the remote ends. The easiest way to configure remote stores is by running the interactive \r +configuration command as indicated below.\r +\r +### Configuration of the s3 object store\r +\r +Run the interactive configuration command: \r +\r +`batchconvert configure_s3_remote`\r +\r +This will start a sequence of requests for s3 credentials such as name, url, access, etc. Provide each requested credential and click\r +enter. Continue this cycle until the process is finished. 
Upon completing the configuration, the sequence of commands should roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_s3_remote\r +enter remote name (for example s3)\r +s3\r +enter url:\r +https://s3.embl.de\r +enter access key:\r +\r +enter secret key:\r +\r +enter bucket name:\r +\r +Configuration of the default s3 credentials is complete\r +```\r +\r +\r +### Configuration of the BioStudies user space\r +\r +Run the interactive configuration command: \r +\r +`batchconvert configure_bia_remote`\r +\r +This will prompt a request for the secret directory to connect to. Enter the secret directory for your user space and click enter. \r +Upon completing the configuration, the sequence of commands should roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_bia_remote\r +enter the secret directory for BioImage Archive user space:\r +\r +configuration of the default bia credentials is complete\r +```\r +\r +### Configuration of the slurm options\r +\r +BatchConvert can also run on slurm clusters. In order to configure the slurm parameters, run the interactive configuration command: \r +\r +`batchconvert configure_slurm`\r +\r +This will start a sequence of requests for slurm options. Provide each requested option and click enter. \r +Continue this cycle until the process is finished. 
Upon completing the configuration, the sequence of commands should \r +roughly look like this:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_slurm\r +Please enter value for queue_size\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´50´\r +s\r +Please enter value for submit_rate_limit\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´10/2min´\r +s\r +Please enter value for cluster_options\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´--mem-per-cpu=3140 --cpus-per-task=16´\r +s\r +Please enter value for time\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the current value ´6h´\r +s\r +configuration of the default slurm parameters is complete\r +```\r +\r +### Configuration of the default conversion parameters\r +\r +While all conversion parameters can be specified as command line arguments, it can\r +be useful for the users to set their own default parameters to avoid re-entering those\r +parameters for subsequent executions. BatchConvert allows for interactive configuration of \r +conversion in the same way as configuration of the remote stores described above.\r +\r +To configure the conversion into OME-TIFF, run the following command:\r +\r +`batchconvert configure_ometiff`\r +\r +This will prompt the user to enter a series of parameters, which will then be saved as the \r +default parameters to be passed to the `batchconvert ometiff` command. 
Upon completing the \r +configuration, the sequence of commands should look similar to:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_ometiff\r +Please enter value for noflat\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for series\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for timepoint\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +...\r +...\r +...\r +...\r +...\r +...\r +Configuration of the default parameters for 'bfconvert' is complete\r +```\r +\r +\r +To configure the conversion into OME-Zarr, run the following command:\r +\r +`batchconvert configure_omezarr`\r +\r +Similarly, this will prompt the user to enter a series of parameters, which will then be saved as the \r +default parameters to be passed to the `batchconvert omezarr` command. 
Upon completing the configuration, \r +the sequence of commands should look similar to:\r +\r +```\r +oezdemir@pc-ellenberg108:~$ batchconvert configure_omezarr\r +Please enter value for resolutions_zarr\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for chunk_h\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +s\r +Please enter value for chunk_w\r +Click enter if this parameter is not applicable\r +Enter "skip" or "s" if you would like to keep the parameter´s current value, which is \r +...\r +...\r +...\r +...\r +...\r +...\r +Configuration of the default parameters for 'bioformats2raw' is complete\r +```\r +\r +It is important to note that the initial defaults for the conversion parameters are the same as the defaults\r +of the backend tools bfconvert and bioformats2raw, as noted in the prompt excerpt above. Through interactive configuration, \r +the user is overriding these initial defaults and setting their own defaults. It is possible to reset the initial\r +defaults by running the following command.\r +\r +`batchconvert reset_defaults`\r +\r +Another important point is that any of these configured parameters can be overridden by passing a value to that\r +parameter in the commandline. For instance, in the following command, the value of 20 will be assigned to `chunk_h` parameter \r +even if the value for the same parameter might be different in the configuration file. \r +\r +`batchconvert omezarr --chunk_h 20 `\r +\r +\r +## Examples\r +\r +### Local conversion\r +\r +#### Parallel conversion of files to separate OME-TIFFs / OME-Zarrs:\r +Convert a batch of images on your local storage into OME-TIFF format. 
\r +Note that the `input_path` in the command given below is typically a \r +directory with multiple image files but a single image file can also be passed:\\\r +`batchconvert ometiff -pf conda ` \r +\r +Note that if this is your first conversion with the profile `conda`, \r +it will take a while for a conda environment with the dependencies to be\r +created. All the subsequent conversion commands with the profile `conda`,\r +however, will use this environment, and thus show no such delay.\r +\r +Since conda is the default profile, it does not have to be \r +explicitly included in the command line. Thus, the command can be shortened to:\\\r +`batchconvert ometiff `\r +\r +Convert only the first channel of the images:\\\r +`batchconvert ometiff -chn 0 `\r +\r +Crop the images being converted along x and y axis by 150 pixels:\\\r +`batchconvert ometiff -cr 0,0,150,150 `\r +\r +Convert into OME-Zarr instead:\\\r +`batchconvert omezarr `\r +\r +Convert into OME-Zarr with 3 resolution levels:\\\r +`batchconvert omezarr -rz 3 `\r +\r +Select a subset of images with a matching string such as "mutation":\\\r +`batchconvert omezarr -p mutation `\r +\r +Select a subset of images using wildcards. Note that the use of "" around \r +the input path is necessary when using wildcards:\\\r +`batchconvert omezarr "/*D3*.oir" `\r +\r +Convert by using a singularity container instead of conda environment (requires\r +singularity to be installed on your system):\\\r +`batchconvert omezarr -pf singularity "/*D3*.oir" `\r +\r +Convert by using a docker container instead of conda environment (requires docker\r +to be installed on your system):\\\r +`batchconvert omezarr -pf docker "/*D3*.oir" `\r +\r +Note that similarly to the case with the profile `conda`, the first execution of\r +a conversion with the profile `singularity` or `docker` will take a while for the\r +container image to be pulled. 
All the subsequent conversion commands using a \r +container option will use this image, and thus show no such delay. \r +\r +Convert local data and upload the output to an s3 bucket. Note that the output \r +path is created relative to the bucket specified in your s3 configuration:\\\r +`batchconvert omezarr -dt s3 `\r +\r +Receive input files from an s3 bucket, convert locally and upload the output to \r +the same bucket. Note that wildcards cannot be used when the input is from s3. \r +Use pattern matching option `-p` for selecting a subset of input files:\\\r +`batchconvert omezarr -p mutation -st s3 -dt s3 `\r +\r +Receive input files from your private BioStudies user space and convert them locally.\r +Use pattern matching option `-p` for selecting a subset of input files:\\\r +`batchconvert omezarr -p mutation -st bia `\r +\r +Receive an input from an s3 bucket, convert locally and upload the output to your \r +private BioStudies user space. Use pattern matching option `-p` for selecting a subset \r +of input files:\\\r +`batchconvert omezarr -p mutation -st s3 -dt bia `\r +\r +Note that in all the examples shown above, BatchConvert treats each input file as separate,\r +standalone data point, disregarding the possibility that some of the input files might belong to \r +the same multidimensional array. Thus, each input file is converted to an independent \r +OME-TIFF / OME-Zarr and the number of outputs will thus equal the number of selected input files.\r +An alternative scenario is discussed below.\r +\r +#### Parallel conversion of file groups by stacking multiple files into single OME-TIFFs / OME-Zarrs:\r +\r +When the flag `--merge_files` is specified, BatchConvert tries to detect which input files might \r +belong to the same multidimensional array based on the patterns in the filenames. 
Then a "grouped conversion" \r +is performed, meaning that the files belonging to the same dataset will be incorporated into \r +a single OME-TIFF / OME-Zarr series, in that files will be concatenated along specific dimension(s) \r +during the conversion. Multiple file groups in the input directory can be detected and converted \r +in parallel. \r +\r +This feature uses Bio-Formats's pattern files as described [here](https://docs.openmicroscopy.org/bio-formats/6.6.0/formats/pattern-file.html).\r +However, BatchConvert generates pattern files automatically, allowing the user to directly use the \r +input directory in the conversion command. BatchConvert also has the option of specifying the \r +concatenation axes in the command line, which is especially useful in cases where the filenames \r +may not contain dimension information. \r +\r +To be able to use the `--merge files` flag, the input file names must obey certain rules:\r +1. File names in the same group must be uniform, except for one or more **numeric field(s)**, which\r +should show incremental change across the files. These so-called **variable fields** \r +will be detected and used as the dimension(s) of concatenation.\r +2. The length of variable fields must be uniform within the group. For instance, if the\r +variable field has values reaching multi-digit numbers, leading "0"s should be included where needed \r +in the file names to make the variable field length uniform within the group.\r +3. Typically, each variable field should follow a dimension specifier. What patterns can be used as \r +dimension specifiers are explained [here](https://docs.openmicroscopy.org/bio-formats/6.6.0/formats/pattern-file.html).\r +However, BatchConvert also has the option `--concatenation_order`, which allows the user to\r +specify from the command line, the dimension(s), along which the files must be concatenated.\r +4. 
File names that are unique and cannot be associated with any group will be assumed as\r +standalone images and converted accordingly. \r +\r +Below are some examples of grouped conversion commands in the context of different possible use-case scenarios:\r +\r +**Example 1:**\r +\r +This is an example of a folder with non-uniform filename lengths:\r +```\r +time-series/test_img_T2\r +time-series/test_img_T4\r +time-series/test_img_T6\r +time-series/test_img_T8\r +time-series/test_img_T10\r +time-series/test_img_T12\r +```\r +In this example, leading zeroes are missing in the variable fields of some filenames. \r +A typical command to convert this folder to a single OME-TIFF would look like: \\\r +`batchconvert --ometiff --merge_files /time-series `\r +\r +However, this command would fail to create a single OME-Zarr folder due to the non-uniform \r +lengths of the filenames. Instead, the files would be split into two groups based on the\r +filename length, leading to two separate OME-Zarrs with names:\r +\r +`test_img_TRange{2-8-2}.ome.zarr` and `test_img_TRange{10-12-2}.ome.zarr`\r +\r +Here is the corrected version of the folder for the above example-\r +```\r +time-series/test_img_T02\r +time-series/test_img_T04\r +time-series/test_img_T06\r +time-series/test_img_T08\r +time-series/test_img_T10\r +time-series/test_img_T12\r +```\r +\r +Executing the same command on this folder would result in a single OME-Zarr with the name:\r +`test_img_TRange{02-12-2}.ome.zarr`\r +\r +**Example 2**- \r +\r +In this example, the filename lengths are uniform but the incrementation within the variable field is not.\r +```\r +time-series/test_img_T2\r +time-series/test_img_T4\r +time-series/test_img_T5\r +time-series/test_img_T7\r +```\r +\r +A typical command to convert this folder to a single OME-Zarr would look like: \\\r +`batchconvert --omezarr --merge_files /time-series `\r +\r +However, the command would fail to assume these files as a single group due to the\r +non-uniform 
incrementation in the variable field of the filenames. Instead, the dataset \r +would be split into two groups, leading to two separate OME-Zarrs with the following names:\r +`test_img_TRange{2-4-2}.ome.zarr` and `test_img_TRange{5-7-2}.ome.zarr` \r +\r +\r +**Example 3**\r +\r +This is an example of a case where the conversion attempts to concatenate files along two\r +dimensions, channel and time.\r +```\r +multichannel_time-series/test_img_C1-T1\r +multichannel_time-series/test_img_C1-T2\r +multichannel_time-series/test_img_C1-T3\r +multichannel_time-series/test_img_C2-T1\r +multichannel_time-series/test_img_C2-T2\r +```\r +To convert this folder to a single OME-Zarr, one could try the following command: \\\r +`batchconvert --omezarr --merge_files /multichannel_time-series `\r +\r +However, since the channel-2 does not have the same number of timeframes as the channel-1, \r +BatchConvert will fail to assume these two channels as part of the same series and\r +will instead split the two channels into two separate OME-Zarrs. 
\r +\r +The output would look like: \\\r +`test_img_C1-TRange{1-3-1}.ome.zarr` \\\r +`test_img_C2-TRange{1-2-1}.ome.zarr`\r +\r +To be able to really incorporate all files into a single OME-Zarr, the folder should have equal\r +number of images corresponding to both channels, as shown below:\r +```\r +multichannel_time-series/test_img_C1-T1\r +multichannel_time-series/test_img_C1-T2\r +multichannel_time-series/test_img_C1-T3\r +multichannel_time-series/test_img_C2-T1\r +multichannel_time-series/test_img_C2-T2\r +multichannel_time-series/test_img_C2-T3\r +```\r +The same conversion command on this version of the input folder would result in a single \r +OME-Zarr with the name: \\\r +`test_img_CRange{1-2-1}-TRange{1-3-1}.ome.zarr`\r +\r +\r +**Example 4**\r +\r +This is another example of a case, where there are multiple filename patterns in the input folder.\r +\r +```\r +folder_with_multiple_groups/test_img_C1-T1\r +folder_with_multiple_groups/test_img_C1-T2\r +folder_with_multiple_groups/test_img_C2-T1\r +folder_with_multiple_groups/test_img_C2-T2\r +folder_with_multiple_groups/test_img_T1-Z1\r +folder_with_multiple_groups/test_img_T1-Z2\r +folder_with_multiple_groups/test_img_T1-Z3\r +folder_with_multiple_groups/test_img_T2-Z1\r +folder_with_multiple_groups/test_img_T2-Z2\r +folder_with_multiple_groups/test_img_T2-Z3\r +```\r +\r +One can convert this folder with- \\\r +`batchconvert --omezarr --merge_files /folder_with_multiple_groups `\r + \r +BatchConvert will detect the two patterns in this folder and perform two grouped conversions. \r +The output folders will be named as `test_img_CRange{1-2-1}-TRange{1-2-1}.ome.zarr` and \r +`test_img_TRange{1-2-1}-ZRange{1-3-1}.ome.zarr`. 
\r +\r +\r +**Example 5**\r +\r +Now imagine that we have the same files as in the example 4 but the filenames of the\r +first group lack any dimension specifier, so we have the following folder:\r +\r +```\r +folder_with_multiple_groups/test_img_1-1\r +folder_with_multiple_groups/test_img_1-2\r +folder_with_multiple_groups/test_img_2-1\r +folder_with_multiple_groups/test_img_2-2\r +folder_with_multiple_groups/test_img_T1-Z1\r +folder_with_multiple_groups/test_img_T1-Z2\r +folder_with_multiple_groups/test_img_T1-Z3\r +folder_with_multiple_groups/test_img_T2-Z1\r +folder_with_multiple_groups/test_img_T2-Z2\r +folder_with_multiple_groups/test_img_T2-Z3\r +```\r +\r +In such a scenario, BatchConvert allows the user to specify the concatenation axes \r +via `--concatenation_order` option. This option expects comma-separated strings of dimensions \r +for each group. In this example, the user must provide a string of 2 characters, such as `ct` for \r +channel and time, for group 1, since there are two variable fields for this group. Since group 2 \r +already has dimension specifiers (T and Z as specified in the filenames preceding the variable fields),\r +the user does not need to specify anything for this group, and can enter `auto` or `aa` for automatic\r +detection of the specifiers. \r +\r +So the following line can be used to convert this folder: \\\r +`batchconvert --omezarr --merge_files --concatenation_order ct,aa /folder_with_multiple_groups `\r +\r +The resulting OME-Zarrs will have the names:\r +`test_img_CRange{1-2-1}-TRange{1-2-1}.ome.zarr` and\r +`test_img_TRange{1-2-1}-ZRange{1-3-1}.ome.zarr`\r +\r +Note that `--concatenation_order` will override any dimension specifiers already\r +existing in the filenames.\r +\r +\r +**Example 6**\r +\r +There can be scenarios where the user may want to have further control over the axes along \r +which to concatenate the images. 
For example, the filenames might contain the data acquisition\r +date, which can be recognised by BatchConvert as a concatenation axis in the automatic \r +detection mode. An example of such a fileset might look like:\r +\r +```\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T1\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T2\r +filenames_with_dates/test_data_date03.03.2023_imageZ1-T3\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T1\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T2\r +filenames_with_dates/test_data_date03.03.2023_imageZ2-T3\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T1\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T2\r +filenames_with_dates/test_data_date04.03.2023_imageZ1-T3\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T1\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T2\r +filenames_with_dates/test_data_date04.03.2023_imageZ2-T3\r +```\r +\r +One may try the following command to convert this folder:\r +\r +`batchconvert --omezarr --merge_files /filenames_with_dates `\r +\r +Since the concatenation axes are not specified, this command would try to create\r +a single OME-Zarr with name: `test_data_dateRange{03-04-1}.03.2023_imageZRange{1-2-1}-TRange{1-3-1}`.\r +\r +In order to force BatchConvert to ignore the date field, the user can restrict the concatenation \r +axes to the last two numeric fields. This can be done by using a command such as: \\\r +`batchconvert --omezarr --merge_files --concatenation_order aa /filenames_with_dates ` \\\r +This command will avoid concatenation along the date field, and therefore, there will be two\r +OME-Zarrs corresponding to the two dates. The number of characters being passed to the \r +`--concatenation_order` option specifies the number of numeric fields (starting from the right \r +end of the filename) that are recognised by the BatchConvert as valid concatenation axes. 
\r +Passing `aa`, therefore, means that the last two numeric fields must be recognised as \r +concatenation axes and the dimension type should be automatically detected (`a` for automatic). \r +In the same logic, one could, for example, convert each Z section into a separate OME-Zarr by \r +specifying `--concatenation_order a`.\r +\r +\r +\r +### Conversion on slurm\r +\r +All the examples given above can also be run on slurm by specifying `-pf cluster` option. \r +Note that this option automatically uses the singularity profile:\\\r +`batchconvert omezarr -pf cluster -p .oir `\r +\r +\r +\r +\r +\r +\r +\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/453?version=2" ; + schema1:keywords "Nextflow, bash, Python, NGFF, OME-Zarr, Conversion, imaging, bioimaging, image file format, file conversion, OME-TIFF, S3, BioStudies, bioformats, bioformats2raw" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "BatchConvert" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/453?version=2" ; + schema1:version 2 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 151518 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """A CWL-based pipeline for processing ChIP-Seq data (FASTQ format) and performing: \r +\r +- Peak calling\r +- Consensus peak count table generation\r +- Detection of super-enhancer regions\r +- Differential binding analysis\r +\r +On the respective GitHub folder are available:\r +\r +- The CWL wrappers for the workflow\r +- A pre-configured YAML template, based on validation analysis of publicly available HTS data\r +- Tables of metadata (``EZH2_metadata_CLL.csv`` and ``H3K27me3_metadata_CLL.csv``), based on the same validation analysis, to serve as input examples for the design of comparisons during differential binding analysis\r +- A list of ChIP-Seq blacklisted regions (human genome version 38; hg38) from the ENCODE project, which is can be used as input for the workflow, is provided in BED format (``hg38-blacklist.v2.bed``)\r +\r +Briefly, the workflow performs the following steps:\r +\r +1. Quality control of short reads (FastQC)\r +2. Trimming of the reads (e.g., removal of adapter and/or low quality sequences) (Trimmomatic)\r +3. Mapping to reference genome (HISAT2)\r +5. Convertion of mapped reads from SAM (Sequence Alignment Map) to BAM (Binary Alignment Map) format (samtools)\r +6. Sorting mapped reads based on chromosomal coordinates (samtools)\r +7. Adding information regarding paired end reads (e.g., CIGAR field information) (samtools)\r +8. Re-sorting based on chromosomal coordinates (samtools)\r +9. Removal of duplicate reads (samtools)\r +10. Index creation for coordinate-sorted BAM files to enable fast random access (samtools)\r +11. Production of quality metrics and files for the inspection of the mapped ChIP-Seq reads, taking into consideration the experimental design (deeptools2):\r + - Read coverages for genomic regions of two or more BAM files are computed (multiBamSummary). 
The results are produced in compressed numpy array (NPZ) format and are used to calculate and visualize pairwise correlation values between the read coverages (plotCorrelation). \r + - Estimation of sequencing depth, through genomic position (base pair) sampling, and visualization is performed for multiple BAM files (plotCoverage).\r + - Cumulative read coverages for each indexed BAM file are plotted by counting and sorting all reads overlapping a “window” of specified length (plotFingerprint).\r + - Production of coverage track files (bigWig), with the coverage calculated as the number of reads per consecutive windows of predefined size (bamCoverage), and normalized through various available methods (e.g., Reads Per Kilobase per Million mapped reads; RPKM). The coverage track files are used to calculate scores per selected genomic regions (computeMatrix), typically genes, and a heatmap, based on the scores associated with these genomic regions, is produced (plotHeatmap).\r +12. Calling potential binding positions (peaks) to the genome (peak calling) (MACS2)\r +13. Generation of consensus peak count table for the application of custom analyses on MACS2 peak calling results (bedtools)\r +14. Detection of super-enhancer regions (Rank Ordering of Super-Enhancers; ROSE)\r +15. Differential binding analyses (DiffBind) for:\r + - MACS2 peak calling results\r + - ROSE-detected super-enhancer regions \r + """ ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.525.1" ; + schema1:isBasedOn "https://github.com/BiodataAnalysisGroup/CWL_HTS_pipelines/tree/main/ChIP_Seq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL-based ChIP-Seq workflow" ; + schema1:sdDatePublished "2024-08-05 10:30:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/525/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 42324 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-07-05T09:39:05Z" ; + schema1:dateModified "2023-07-05T09:39:32Z" ; + schema1:description """A CWL-based pipeline for processing ChIP-Seq data (FASTQ format) and performing: \r +\r +- Peak calling\r +- Consensus peak count table generation\r +- Detection of super-enhancer regions\r +- Differential binding analysis\r +\r +On the respective GitHub folder are available:\r +\r +- The CWL wrappers for the workflow\r +- A pre-configured YAML template, based on validation analysis of publicly available HTS data\r +- Tables of metadata (``EZH2_metadata_CLL.csv`` and ``H3K27me3_metadata_CLL.csv``), based on the same validation analysis, to serve as input examples for the design of comparisons during differential binding analysis\r +- A list of ChIP-Seq blacklisted regions (human genome version 38; hg38) from the ENCODE project, which is can be used as input for the workflow, is provided in BED format (``hg38-blacklist.v2.bed``)\r +\r +Briefly, the workflow performs the following steps:\r +\r +1. Quality control of short reads (FastQC)\r +2. Trimming of the reads (e.g., removal of adapter and/or low quality sequences) (Trimmomatic)\r +3. Mapping to reference genome (HISAT2)\r +5. Convertion of mapped reads from SAM (Sequence Alignment Map) to BAM (Binary Alignment Map) format (samtools)\r +6. Sorting mapped reads based on chromosomal coordinates (samtools)\r +7. Adding information regarding paired end reads (e.g., CIGAR field information) (samtools)\r +8. Re-sorting based on chromosomal coordinates (samtools)\r +9. Removal of duplicate reads (samtools)\r +10. Index creation for coordinate-sorted BAM files to enable fast random access (samtools)\r +11. 
Production of quality metrics and files for the inspection of the mapped ChIP-Seq reads, taking into consideration the experimental design (deeptools2):\r + - Read coverages for genomic regions of two or more BAM files are computed (multiBamSummary). The results are produced in compressed numpy array (NPZ) format and are used to calculate and visualize pairwise correlation values between the read coverages (plotCorrelation). \r + - Estimation of sequencing depth, through genomic position (base pair) sampling, and visualization is performed for multiple BAM files (plotCoverage).\r + - Cumulative read coverages for each indexed BAM file are plotted by counting and sorting all reads overlapping a “window” of specified length (plotFingerprint).\r + - Production of coverage track files (bigWig), with the coverage calculated as the number of reads per consecutive windows of predefined size (bamCoverage), and normalized through various available methods (e.g., Reads Per Kilobase per Million mapped reads; RPKM). The coverage track files are used to calculate scores per selected genomic regions (computeMatrix), typically genes, and a heatmap, based on the scores associated with these genomic regions, is produced (plotHeatmap).\r +12. Calling potential binding positions (peaks) to the genome (peak calling) (MACS2)\r +13. Generation of consensus peak count table for the application of custom analyses on MACS2 peak calling results (bedtools)\r +14. Detection of super-enhancer regions (Rank Ordering of Super-Enhancers; ROSE)\r +15. 
Differential binding analyses (DiffBind) for:\r + - MACS2 peak calling results\r + - ROSE-detected super-enhancer regions \r + """ ; + schema1:image ; + schema1:keywords "CWL, workflow, ChIP-seq, Epigenomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "CWL-based ChIP-Seq workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/525?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 271851 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 167040 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 3277 ; + schema1:dateModified "2024-01-17T10:54:05+00:00" ; + schema1:name "dnn_cnn_5epochs.csv" ; + schema1:sdDatePublished "2024-03-25T10:49:09+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-08-05 10:22:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3714 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:09Z" ; + schema1:dateModified "2024-06-11T12:55:09Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). 
\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.258.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_protein_complex_md_setup/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:32:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/258/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 203702 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 31213 ; + schema1:creator , + ; + schema1:dateCreated "2022-01-10T11:52:43Z" ; + schema1:dateModified "2023-06-07T10:54:32Z" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/258?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + 
schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_protein_complex_md_setup/cwl/workflow.cwl" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """16S Microbial Analysis with mothur (short)\r +\r +The workflows in this collection are from the '16S Microbial Analysis with mothur' tutorial for analysis of 16S data (Saskia Hiltemann, Bérénice Batut, Dave Clements), adapted for piepline use on galaxy australia (Ahmed Mehdi). The workflows developed in galaxy use mothur software package developed by Schloss et al https://pubmed.ncbi.nlm.nih.gov/19801464/. 
\r +\r +Please also refer to the 16S tutorials available at Galaxy https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html and [https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop/tutorial.html\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/648?version=1" ; + schema1:isBasedOn "https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Workflow 1: Further Quality Control [16S Microbial Analysis With Mothur]" ; + schema1:sdDatePublished "2024-08-05 10:27:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/648/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17182 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2023-11-09T05:04:17Z" ; + schema1:dateModified "2023-11-09T05:13:16Z" ; + schema1:description """16S Microbial Analysis with mothur (short)\r +\r +The workflows in this collection are from the '16S Microbial Analysis with mothur' tutorial for analysis of 16S data (Saskia Hiltemann, Bérénice Batut, Dave Clements), adapted for piepline use on galaxy australia (Ahmed Mehdi). The workflows developed in galaxy use mothur software package developed by Schloss et al https://pubmed.ncbi.nlm.nih.gov/19801464/. 
\r +\r +Please also refer to the 16S tutorials available at Galaxy https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html and [https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop/tutorial.html\r +""" ; + schema1:isPartOf ; + schema1:keywords "Metagenomics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Workflow 1: Further Quality Control [16S Microbial Analysis With Mothur]" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/648?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2023-11-08T00:19:30.565058" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-decontamination-VGP9" ; + schema1:license "BSD-3-Clause" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-decontamination-VGP9/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.17" . + + a schema1:Dataset ; + schema1:datePublished "2021-09-15T17:26:39.020087" ; + schema1:description "Cryo-EM processing workflow" ; + schema1:hasPart , + , + ; + schema1:image "workflow.svg" ; + schema1:keywords "cryoem" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "entryTitleTest" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:image ; + schema1:name "workflow" ; + schema1:programmingLanguage . + + a schema1:MediaObject . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-06-25T09:57:05.231229" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/taxonomy-profiling-and-visualization-with-krona" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "taxonomy-profiling-and-visualization-with-krona/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + schema1:datePublished "2024-06-21T15:04:44.017897" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/pox-virus-amplicon" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "pox-virus-amplicon/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "pox-virus-amplicon/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/pox-virus-amplicon" ; + schema1:version "0.2" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "# StructuralVariants Workflow" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/278?version=1" ; + schema1:isBasedOn "https://gitlab.bsc.es/lrodrig1/structuralvariants_poc/-/tree/1.1.3/structuralvariants/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNV_pipeline" ; + schema1:sdDatePublished "2024-08-05 10:31:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/278/ro_crate?version=1" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 8862 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8665 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-14T12:07:59Z" ; + schema1:dateModified "2022-03-14T12:10:41Z" ; + schema1:description "# StructuralVariants Workflow" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/278?version=10" ; + schema1:keywords "cancer, CODEX2, ExomeDepth, manta, TransBioNet, variant calling, GRIDSS, structural variants" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CNV_pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/278?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 66395 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Macromolecular Coarse-Grained Flexibility (FlexServ) tutorial using BioExcel Building Blocks (biobb)\r +\r +This tutorial aims to illustrate the process of generating protein conformational ensembles from 3D structures and analysing its molecular flexibility, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.551.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_flexserv" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Macromolecular Coarse-Grained Flexibility tutorial" ; + schema1:sdDatePublished "2024-08-05 10:29:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/551/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 109577 ; + schema1:creator , + ; + schema1:dateCreated "2023-08-02T10:39:18Z" ; + schema1:dateModified "2023-08-02T11:20:30Z" ; + schema1:description """# Macromolecular Coarse-Grained Flexibility (FlexServ) tutorial using BioExcel Building Blocks (biobb)\r +\r +This tutorial aims to illustrate the process of generating protein conformational ensembles from 3D structures and analysing its molecular flexibility, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/551?version=1" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Macromolecular Coarse-Grained Flexibility tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_flexserv/blob/main/biobb_wf_flexserv/notebooks/biobb_wf_flexserv.ipynb" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.120.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:31:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/120/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 57824 ; + schema1:creator , + ; + schema1:dateCreated "2022-09-15T09:24:04Z" ; + schema1:dateModified "2023-01-16T13:49:50Z" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/120?version=5" ; + schema1:isPartOf , + , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://raw.githubusercontent.com/bioexcel/biobb_wf_md_setup/77a78a8345c385a0bc8588b21153fc1151a2ede2/biobb_wf_md_setup/notebooks/biobb_MDsetup_tutorial.ipynb" ; + schema1:version 3 . + + a schema1:Dataset ; + schema1:datePublished "2023-10-19T15:11:18.085303" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "cutandrun/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.14" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """This workflow provides a calculaiton of the power spectrum of Stochastic Gravitational Wave Backgorund (SGWB) from a first-order cosmological phase transition based on the parameterisations of Roper Pol et al. (2023). The power spectrum includes two components: from the sound waves excited by collisions of bubbles of the new phase and from the turbulence that is induced by these collisions.\r +\r +The cosmological epoch of the phase transition is described by the temperature, T_star and by the number(s) of relativistic degrees of freedom, g_star that should be specified as parameters.\r +\r +The phase transition itself is characterised by phenomenological parameters, alpha, beta_H and epsilon_turb, the latent heat, the ratio of the Hubble radius to the bubble size at percolation and the fraction of the energy otuput of the phase transition that goes into turbulence.\r +\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.831.1" ; + schema1:isBasedOn "https://gitlab.renkulab.io/astronomy/mmoda/sgwb" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for SGWB model spectrum" ; + schema1:sdDatePublished "2024-08-05 10:24:50 +0100" ; + schema1:url 
"https://workflowhub.eu/workflows/831/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1968 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-23T15:35:10Z" ; + schema1:dateModified "2024-04-23T15:36:41Z" ; + schema1:description """This workflow provides a calculaiton of the power spectrum of Stochastic Gravitational Wave Backgorund (SGWB) from a first-order cosmological phase transition based on the parameterisations of Roper Pol et al. (2023). The power spectrum includes two components: from the sound waves excited by collisions of bubbles of the new phase and from the turbulence that is induced by these collisions.\r +\r +The cosmological epoch of the phase transition is described by the temperature, T_star and by the number(s) of relativistic degrees of freedom, g_star that should be specified as parameters.\r +\r +The phase transition itself is characterised by phenomenological parameters, alpha, beta_H and epsilon_turb, the latent heat, the ratio of the Hubble radius to the bubble size at percolation and the fraction of the energy otuput of the phase transition that goes into turbulence.\r +\r +\r +""" ; + schema1:keywords "astronomy" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "SGWB model spectrum" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/831?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.56.6" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_protein-complex_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:31:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/56/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 156554 ; + schema1:creator , + ; + schema1:dateCreated "2023-07-26T09:21:56Z" ; + schema1:dateModified "2023-07-26T09:22:49Z" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/56?version=6" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + 
schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_protein-complex_md_setup/master/biobb_wf_protein-complex_md_setup/notebooks/biobb_Protein-Complex_MDsetup_tutorial.ipynb" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """Workflow for Creating a large disease network from various datasets and databases for IBM, and applying the active subnetwork identification method MOGAMUN.\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/681?version=5" ; + schema1:isBasedOn "https://github.com/jdwijnbergen/IBM_ASI_workflow.git" ; + schema1:license "notspecified" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Inclusion Body Myositis Active Subnetwork Identification Workflow" ; + schema1:sdDatePublished "2024-08-05 10:25:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/681/ro_crate?version=5" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 27177 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6953 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-27T16:04:32Z" ; + schema1:dateModified "2023-11-27T16:04:32Z" ; + schema1:description """Workflow for Creating a large disease network from various datasets and databases for IBM, and applying the active subnetwork identification method MOGAMUN.\r +\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/681?version=6" ; + schema1:keywords "Bioinformatics, CWL, Genomics, Transcriptomics, Protein-Protein Interaction" ; + schema1:license "https://choosealicense.com/no-permission/" ; + schema1:name "Inclusion Body Myositis Active Subnetwork Identification Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/681?version=5" ; + schema1:version 5 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# Single drug prediction Workflow\r +## Table of Contents\r +\r +- [Single drug prediction Workflow](#single-drug-prediction-workflow)\r + - [Table of Contents](#table-of-contents)\r + - [Description](#description)\r + - [Contents](#contents)\r + - [Building Blocks](#building-blocks)\r + - [Workflows](#workflows)\r + - [Resources](#resources)\r + - [Tests](#tests)\r + - [Instructions](#instructions)\r + - [Local machine](#local-machine)\r + - [Requirements](#requirements)\r + - [Usage steps](#usage-steps)\r + - [MareNostrum 4](#marenostrum-4)\r + - [Requirements in MN4](#requirements-in-mn4)\r + - [Usage steps in MN4](#usage-steps-in-mn4)\r + - [License](#license)\r + - [Contact](#contact)\r +\r +## Description\r +\r +Complementarily, the workflow supports single drug response predictions to provide a baseline prediction in cases where drug response information for a given drug and cell line is not available. As an input, the workflow needs basal gene expression data for a cell, the drug targets (they need to be known for untested drugs) and optionally CARNIVAL features (sub-network activity predicted with CARNIVAL building block) and predicts log(IC50) values. This workflow uses a custom matrix factorization approach built with Google JAX and trained with gradient descent. The workflow can be used both for training a model, and for predicting new drug responses.\r +\r +The workflow uses the following building blocks in order of execution (for training a model):\r +\r +1. Carnival_gex_preprocess\r + - Preprocessed the basal gene expression data from GDSC. The input is a matrix of Gene x Sample expression data.\r +2. Progeny\r + - Using the preprocessed data, it estimates pathway activities for each column in the data (for each sample). It returns a matrix of Pathways x Samples with activity values for 11 pathways.\r +3. Omnipath\r + - It downloads latest Prior Knowledge Network of signalling. 
This building block can be ommited if there exists already a csv file with the network.\r +4. TF Enrichment\r + - For each sample, transcription factor activities are estimated using Dorothea.\r +5. CarnivalPy\r + - Using the TF activities estimated before, it runs Carnival to obtain a sub-network consistent with the TF activities (for each sample).\r +6. Carnival_feature_merger\r + - Preselect a set of genes by the user (if specified) and merge the features with the basal gene expression data.\r +7. ML Jax Drug Prediction\r + - Trains a model using the combined features to predict IC50 values from GDSC.\r +\r +For details on individual workflow steps, please check the scripts that use each individual building block in the workflow [`GitHub repository`]()\r +\r +## Contents\r +\r +### Building Blocks\r +\r +The ``BuildingBlocks`` folder contains the script to install the\r +Building Blocks used in the Single Drug Prediction Workflow.\r +\r +### Workflows\r +\r +The ``Workflow`` folder contains the workflows implementations.\r +\r +Currently contains the implementation using PyCOMPSs.\r +\r +### Resources\r +\r +The ``Resources`` folder contains a small dataset for testing purposes.\r +\r +### Tests\r +\r +The ``Tests`` folder contains the scripts that run each Building Block\r +used in the workflow for a small dataset.\r +They can be executed individually *without PyCOMPSs installed* for testing\r +purposes.\r +\r +## Instructions\r +\r +### Local machine\r +\r +This section explains the requirements and usage for the Single Drug Prediction Workflow in a laptop or desktop computer.\r +\r +#### Requirements\r +\r +- [`permedcoe`](https://github.com/PerMedCoE/permedcoe) package\r +- [PyCOMPSs](https://pycompss.readthedocs.io/en/stable/Sections/00_Quickstart.html)\r +- [Singularity](https://sylabs.io/guides/3.0/user-guide/installation.html)\r +\r +#### Usage steps\r +\r +1. 
Clone this repository:\r +\r + ```bash\r + git clone https://github.com/PerMedCoE/single-drug-prediction-workflow.git\r + ```\r +\r +2. Install the Building Blocks required for the COVID19 Workflow:\r +\r + ```bash\r + single-drug-prediction-workflow/BuildingBlocks/./install_BBs.sh\r + ```\r +\r +3. Get the required Building Block images from the project [B2DROP](https://b2drop.bsc.es/index.php/f/444350):\r +\r + - Required images:\r + - toolset.singularity\r + - carnivalpy.singularity\r + - ml-jax.singularity\r +\r + The path where these files are stored **MUST be exported in the `PERMEDCOE_IMAGES`** environment variable.\r +\r + > :warning: **TIP**: These containers can be built manually as follows (be patient since some of them may take some time):\r + 1. Clone the `BuildingBlocks` repository\r + ```bash\r + git clone https://github.com/PerMedCoE/BuildingBlocks.git\r + ```\r + 2. Build the required Building Block images\r + ```bash\r + cd BuildingBlocks/Resources/images\r + ## Download new BB singularity files\r + wget https://github.com/saezlab/permedcoe/archive/refs/heads/master.zip\r + unzip master.zip\r + cd permedcoe-master/containers\r + ## Build containers\r + cd toolset\r + sudo /usr/local/bin/singularity build toolset.sif toolset.singularity\r + mv toolset.sif ../../../\r + cd ..\r + cd carnivalpy\r + sudo /usr/local/bin/singularity build carnivalpy.sif carnivalpy.singularity\r + mv carnivalpy.sif ../../../\r + cd ..\r + cd ml-jax\r + sudo /usr/local/bin/singularity build ml-jax.sif ml-jax.singularity\r + mv ml-jax.sif ../../../tf-jax.sif\r + cd ..\r + cd ../..\r + ## Cleanup\r + rm -rf permedcoe-master\r + rm master.zip\r + cd ../../..\r + ```\r +\r + > :warning: **TIP**: The singularity containers **can to be downloaded** from: https://cloud.sylabs.io/library/pablormier\r +\r +\r +**If using PyCOMPSs in local PC** (make sure that PyCOMPSs in installed):\r +\r +4. 
Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflows/PyCOMPSs\r + ```\r +\r +5. Execute `./run.sh`\r +\r + The execution is prepared to use the singularity images that **MUST** be placed into `BuildingBlocks/Resources/images` folder. If they are located in any other folder, please update the `run.sh` script setting the `PERMEDCOE_IMAGES` to the images folder.\r +\r + > **TIP**: If you want to run the workflow with a different dataset, please update the `run.sh` script setting the `dataset` variable to the new dataset folder and their file names.\r +\r +### MareNostrum 4\r +\r +This section explains the requirements and usage for the Single Drug Prediction Workflow in the MareNostrum 4 supercomputer.\r +\r +#### Requirements in MN4\r +\r +- Access to MN4\r +\r +All Building Blocks are already installed in MN4, and the Single Drug Prediction Workflow available.\r +\r +#### Usage steps in MN4\r +\r +1. Load the `COMPSs`, `Singularity` and `permedcoe` modules\r +\r + ```bash\r + export COMPSS_PYTHON_VERSION=3\r + module load COMPSs/3.1\r + module load singularity/3.5.2\r + module use /apps/modules/modulefiles/tools/COMPSs/libraries\r + module load permedcoe\r + ```\r +\r + > **TIP**: Include the loading into your `${HOME}/.bashrc` file to load it automatically on the session start.\r +\r + This commands will load COMPSs and the permedcoe package which provides all necessary dependencies, as well as the path to the singularity container images (`PERMEDCOE_IMAGES` environment variable) and testing dataset (`SINGLE_DRUG_PREDICTION_WORKFLOW_DATASET` environment variable).\r +\r +2. Get a copy of the pilot workflow into your desired folder\r +\r + ```bash\r + mkdir desired_folder\r + cd desired_folder\r + get_single_drug_prediction_workflow\r + ```\r +\r +3. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflow/PyCOMPSs\r + ```\r +\r +4. 
Execute `./launch.sh`\r +\r + This command will launch a job into the job queuing system (SLURM) requesting 2 nodes (one node acting half master and half worker, and other full worker node) for 20 minutes, and is prepared to use the singularity images that are already deployed in MN4 (located into the `PERMEDCOE_IMAGES` environment variable). It uses the dataset located into `../../Resources/data` folder.\r +\r + > :warning: **TIP**: If you want to run the workflow with a different dataset, please edit the `launch.sh` script and define the appropriate dataset path.\r +\r + After the execution, a `results` folder will be available with with Single Drug Prediction Workflow results.\r +\r +## License\r +\r +[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\r +\r +## Contact\r +\r +\r +\r +This software has been developed for the [PerMedCoE project](https://permedcoe.eu/), funded by the European Commission (EU H2020 [951773](https://cordis.europa.eu/project/id/951773)).\r +\r +![](https://permedcoe.eu/wp-content/uploads/2020/11/logo_1.png "PerMedCoE")\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/478?version=1" ; + schema1:isBasedOn "https://github.com/PerMedCoE/single-drug-prediction-workflow" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PerMedCoE Single Drug Prediction" ; + schema1:sdDatePublished "2024-08-05 10:30:25 +0100" ; + schema1:url "https://workflowhub.eu/workflows/478/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1243 ; + schema1:dateCreated "2023-05-23T12:15:44Z" ; + schema1:dateModified "2023-05-23T12:32:48Z" ; + schema1:description """# Single drug prediction Workflow\r +## Table of Contents\r +\r +- [Single drug prediction Workflow](#single-drug-prediction-workflow)\r + - [Table of Contents](#table-of-contents)\r + - [Description](#description)\r + - [Contents](#contents)\r + - [Building Blocks](#building-blocks)\r + - [Workflows](#workflows)\r + - [Resources](#resources)\r + - [Tests](#tests)\r + - [Instructions](#instructions)\r + - [Local machine](#local-machine)\r + - [Requirements](#requirements)\r + - [Usage steps](#usage-steps)\r + - [MareNostrum 4](#marenostrum-4)\r + - [Requirements in MN4](#requirements-in-mn4)\r + - [Usage steps in MN4](#usage-steps-in-mn4)\r + - [License](#license)\r + - [Contact](#contact)\r +\r +## Description\r +\r +Complementarily, the workflow supports single drug response predictions to provide a baseline prediction in cases where drug response information for a given drug and cell line is not available. As an input, the workflow needs basal gene expression data for a cell, the drug targets (they need to be known for untested drugs) and optionally CARNIVAL features (sub-network activity predicted with CARNIVAL building block) and predicts log(IC50) values. This workflow uses a custom matrix factorization approach built with Google JAX and trained with gradient descent. The workflow can be used both for training a model, and for predicting new drug responses.\r +\r +The workflow uses the following building blocks in order of execution (for training a model):\r +\r +1. Carnival_gex_preprocess\r + - Preprocessed the basal gene expression data from GDSC. The input is a matrix of Gene x Sample expression data.\r +2. 
Progeny\r + - Using the preprocessed data, it estimates pathway activities for each column in the data (for each sample). It returns a matrix of Pathways x Samples with activity values for 11 pathways.\r +3. Omnipath\r + - It downloads latest Prior Knowledge Network of signalling. This building block can be ommited if there exists already a csv file with the network.\r +4. TF Enrichment\r + - For each sample, transcription factor activities are estimated using Dorothea.\r +5. CarnivalPy\r + - Using the TF activities estimated before, it runs Carnival to obtain a sub-network consistent with the TF activities (for each sample).\r +6. Carnival_feature_merger\r + - Preselect a set of genes by the user (if specified) and merge the features with the basal gene expression data.\r +7. ML Jax Drug Prediction\r + - Trains a model using the combined features to predict IC50 values from GDSC.\r +\r +For details on individual workflow steps, please check the scripts that use each individual building block in the workflow [`GitHub repository`]()\r +\r +## Contents\r +\r +### Building Blocks\r +\r +The ``BuildingBlocks`` folder contains the script to install the\r +Building Blocks used in the Single Drug Prediction Workflow.\r +\r +### Workflows\r +\r +The ``Workflow`` folder contains the workflows implementations.\r +\r +Currently contains the implementation using PyCOMPSs.\r +\r +### Resources\r +\r +The ``Resources`` folder contains a small dataset for testing purposes.\r +\r +### Tests\r +\r +The ``Tests`` folder contains the scripts that run each Building Block\r +used in the workflow for a small dataset.\r +They can be executed individually *without PyCOMPSs installed* for testing\r +purposes.\r +\r +## Instructions\r +\r +### Local machine\r +\r +This section explains the requirements and usage for the Single Drug Prediction Workflow in a laptop or desktop computer.\r +\r +#### Requirements\r +\r +- [`permedcoe`](https://github.com/PerMedCoE/permedcoe) package\r +- 
[PyCOMPSs](https://pycompss.readthedocs.io/en/stable/Sections/00_Quickstart.html)\r +- [Singularity](https://sylabs.io/guides/3.0/user-guide/installation.html)\r +\r +#### Usage steps\r +\r +1. Clone this repository:\r +\r + ```bash\r + git clone https://github.com/PerMedCoE/single-drug-prediction-workflow.git\r + ```\r +\r +2. Install the Building Blocks required for the COVID19 Workflow:\r +\r + ```bash\r + single-drug-prediction-workflow/BuildingBlocks/./install_BBs.sh\r + ```\r +\r +3. Get the required Building Block images from the project [B2DROP](https://b2drop.bsc.es/index.php/f/444350):\r +\r + - Required images:\r + - toolset.singularity\r + - carnivalpy.singularity\r + - ml-jax.singularity\r +\r + The path where these files are stored **MUST be exported in the `PERMEDCOE_IMAGES`** environment variable.\r +\r + > :warning: **TIP**: These containers can be built manually as follows (be patient since some of them may take some time):\r + 1. Clone the `BuildingBlocks` repository\r + ```bash\r + git clone https://github.com/PerMedCoE/BuildingBlocks.git\r + ```\r + 2. 
Build the required Building Block images\r + ```bash\r + cd BuildingBlocks/Resources/images\r + ## Download new BB singularity files\r + wget https://github.com/saezlab/permedcoe/archive/refs/heads/master.zip\r + unzip master.zip\r + cd permedcoe-master/containers\r + ## Build containers\r + cd toolset\r + sudo /usr/local/bin/singularity build toolset.sif toolset.singularity\r + mv toolset.sif ../../../\r + cd ..\r + cd carnivalpy\r + sudo /usr/local/bin/singularity build carnivalpy.sif carnivalpy.singularity\r + mv carnivalpy.sif ../../../\r + cd ..\r + cd ml-jax\r + sudo /usr/local/bin/singularity build ml-jax.sif ml-jax.singularity\r + mv ml-jax.sif ../../../tf-jax.sif\r + cd ..\r + cd ../..\r + ## Cleanup\r + rm -rf permedcoe-master\r + rm master.zip\r + cd ../../..\r + ```\r +\r + > :warning: **TIP**: The singularity containers **can to be downloaded** from: https://cloud.sylabs.io/library/pablormier\r +\r +\r +**If using PyCOMPSs in local PC** (make sure that PyCOMPSs in installed):\r +\r +4. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflows/PyCOMPSs\r + ```\r +\r +5. Execute `./run.sh`\r +\r + The execution is prepared to use the singularity images that **MUST** be placed into `BuildingBlocks/Resources/images` folder. If they are located in any other folder, please update the `run.sh` script setting the `PERMEDCOE_IMAGES` to the images folder.\r +\r + > **TIP**: If you want to run the workflow with a different dataset, please update the `run.sh` script setting the `dataset` variable to the new dataset folder and their file names.\r +\r +### MareNostrum 4\r +\r +This section explains the requirements and usage for the Single Drug Prediction Workflow in the MareNostrum 4 supercomputer.\r +\r +#### Requirements in MN4\r +\r +- Access to MN4\r +\r +All Building Blocks are already installed in MN4, and the Single Drug Prediction Workflow available.\r +\r +#### Usage steps in MN4\r +\r +1. 
Load the `COMPSs`, `Singularity` and `permedcoe` modules\r +\r + ```bash\r + export COMPSS_PYTHON_VERSION=3\r + module load COMPSs/3.1\r + module load singularity/3.5.2\r + module use /apps/modules/modulefiles/tools/COMPSs/libraries\r + module load permedcoe\r + ```\r +\r + > **TIP**: Include the loading into your `${HOME}/.bashrc` file to load it automatically on the session start.\r +\r + This commands will load COMPSs and the permedcoe package which provides all necessary dependencies, as well as the path to the singularity container images (`PERMEDCOE_IMAGES` environment variable) and testing dataset (`SINGLE_DRUG_PREDICTION_WORKFLOW_DATASET` environment variable).\r +\r +2. Get a copy of the pilot workflow into your desired folder\r +\r + ```bash\r + mkdir desired_folder\r + cd desired_folder\r + get_single_drug_prediction_workflow\r + ```\r +\r +3. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflow/PyCOMPSs\r + ```\r +\r +4. Execute `./launch.sh`\r +\r + This command will launch a job into the job queuing system (SLURM) requesting 2 nodes (one node acting half master and half worker, and other full worker node) for 20 minutes, and is prepared to use the singularity images that are already deployed in MN4 (located into the `PERMEDCOE_IMAGES` environment variable). 
It uses the dataset located into `../../Resources/data` folder.\r +\r + > :warning: **TIP**: If you want to run the workflow with a different dataset, please edit the `launch.sh` script and define the appropriate dataset path.\r +\r + After the execution, a `results` folder will be available with with Single Drug Prediction Workflow results.\r +\r +## License\r +\r +[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\r +\r +## Contact\r +\r +\r +\r +This software has been developed for the [PerMedCoE project](https://permedcoe.eu/), funded by the European Commission (EU H2020 [951773](https://cordis.europa.eu/project/id/951773)).\r +\r +![](https://permedcoe.eu/wp-content/uploads/2020/11/logo_1.png "PerMedCoE")\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "PerMedCoE Single Drug Prediction" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/478?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-01-11T16:25:21.762853" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-pe-illumina-artic-variant-calling/COVID-19-PE-ARTIC-ILLUMINA" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.19" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "This workflow correspond to the Genome-wide alternative splicing analysis training. It allows to analyze isoform switching by making use of IsoformSwitchAnalyzeR." 
; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/472?version=3" ; + schema1:license "CC-BY-NC-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genome-wide alternative splicing analysis" ; + schema1:sdDatePublished "2024-08-05 10:30:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/472/ro_crate?version=3" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 17028 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 81080 ; + schema1:creator ; + schema1:dateCreated "2023-05-25T21:39:41Z" ; + schema1:dateModified "2023-05-25T21:41:36Z" ; + schema1:description "This workflow correspond to the Genome-wide alternative splicing analysis training. It allows to analyze isoform switching by making use of IsoformSwitchAnalyzeR." ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/472?version=3" ; + schema1:keywords "Transcriptomics, alternative-splicing, GTN" ; + schema1:license "https://spdx.org/licenses/CC-BY-NC-4.0" ; + schema1:name "Genome-wide alternative splicing analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/472?version=3" ; + schema1:version 3 ; + ns1:input , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 46064 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# polya_liftover - sc/snRNAseq Snakemake Workflow\r +\r +A [Snakemake][sm] workflow for using PolyA_DB and UCSC Liftover with Cellranger.\r +\r +Some genes are not accurately annotated in the reference genome.\r +Here,\r +we use information provide by the [PolyA_DB v3.2][polya] to update the coordinates,\r +then the [USCS Liftover][liftover] tool to update to a more recent genome.\r +Next,\r +we use [Cellranger][cr] to create the reference and count matrix.\r +Finally,\r +by taking advantage of the integrated [Conda][conda] and [Singularity][sing] support,\r +we can run the whole thing in an isolated environment.\r +\r +Please see our [README][readme] for the full details!\r +\r +\r +[sm]: https://snakemake.readthedocs.io/en/stable/index.html "Snakemake"\r +[polya]: https://exon.apps.wistar.org/polya_db/v3/index.php "PolyA_DB"\r +[liftover]: https://genome.ucsc.edu/cgi-bin/hgLiftOver "Liftover"\r +[cr]: https://github.com/alexdobin/STAR "Cellranger"\r +[conda]: https://docs.conda.io/en/latest/ "Conda"\r +[sing]: https://sylabs.io/singularity/ "Singularity"\r +[readme]: https://github.com/IMS-Bio2Core-Facility/polya_liftover/blob/main/README.md""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/263?version=1" ; + schema1:isBasedOn "https://github.com/IMS-Bio2Core-Facility/polya_liftover" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for polya_liftover" ; + schema1:sdDatePublished "2024-08-05 10:32:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/263/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 451 ; + schema1:creator ; + schema1:dateCreated "2022-01-17T10:44:28Z" ; + schema1:dateModified "2023-01-16T13:57:28Z" ; + schema1:description """# polya_liftover - sc/snRNAseq Snakemake Workflow\r +\r +A [Snakemake][sm] workflow for using PolyA_DB and UCSC Liftover with Cellranger.\r +\r +Some genes are not accurately annotated in the reference genome.\r +Here,\r +we use information provide by the [PolyA_DB v3.2][polya] to update the coordinates,\r +then the [USCS Liftover][liftover] tool to update to a more recent genome.\r +Next,\r +we use [Cellranger][cr] to create the reference and count matrix.\r +Finally,\r +by taking advantage of the integrated [Conda][conda] and [Singularity][sing] support,\r +we can run the whole thing in an isolated environment.\r +\r +Please see our [README][readme] for the full details!\r +\r +\r +[sm]: https://snakemake.readthedocs.io/en/stable/index.html "Snakemake"\r +[polya]: https://exon.apps.wistar.org/polya_db/v3/index.php "PolyA_DB"\r +[liftover]: https://genome.ucsc.edu/cgi-bin/hgLiftOver "Liftover"\r +[cr]: https://github.com/alexdobin/STAR "Cellranger"\r +[conda]: https://docs.conda.io/en/latest/ "Conda"\r +[sing]: https://sylabs.io/singularity/ "Singularity"\r +[readme]: https://github.com/IMS-Bio2Core-Facility/polya_liftover/blob/main/README.md""" ; + schema1:image ; + schema1:keywords "Transcriptomics, scRNA-seq, Snakemake, FAIR workflows, FastQC, MultiQC, Cellranger, LiftOver" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "polya_liftover" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/263?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 14760 . 
+ + a schema1:Dataset ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-consensus-from-variation" ; + schema1:mainEntity ; + schema1:name "COVID-19-CONSENSUS-CONSTRUCTION (v0.2)" ; + schema1:sdDatePublished "2021-07-23 10:18:30 +0100" ; + schema1:softwareVersion "v0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 61638 ; + schema1:name "COVID-19-CONSENSUS-CONSTRUCTION" ; + schema1:programmingLanguage . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "This is the workflow for the recreation potential component of the cultural ecosystems digital twin" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/811?version=1" ; + schema1:isBasedOn "https://biodt.eu/" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for BioDT Cultural Ecosystem Services prototype Digital Twin - Recreation Potential Component" ; + schema1:sdDatePublished "2024-08-05 10:24:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/811/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 501 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-04-02T15:46:26Z" ; + schema1:dateModified "2024-05-17T13:14:09Z" ; + schema1:description "This is the workflow for the recreation potential component of the cultural ecosystems digital twin" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "BioDT Cultural Ecosystem Services prototype Digital Twin - Recreation Potential Component" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/811?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + schema1:datePublished "2021-12-20T17:04:47.622964" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "parallel-accession-download/main" ; + schema1:sdDatePublished "2021-12-21 03:01:01 +0000" ; + schema1:softwareVersion "v0.1.2" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=19" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-08-05 10:23:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=19" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17866 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:59Z" ; + schema1:dateModified "2024-06-11T12:54:59Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=19" ; + schema1:version 19 . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-07-15T09:32:38.873738" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/rnaseq-sr" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "rnaseq-sr/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Conformational Transitions calculations tutorial using BioExcel Building Blocks (biobb) and GOdMD\r +\r +This tutorial aims to illustrate the process of computing a conformational transition between two known structural conformations of a protein, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.558.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_godmd/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Protein Conformational 
Transitions calculations tutorial" ; + schema1:sdDatePublished "2024-08-05 10:28:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/558/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16355 ; + schema1:creator , + ; + schema1:dateCreated "2023-08-11T08:36:46Z" ; + schema1:dateModified "2023-08-11T08:38:31Z" ; + schema1:description """# Protein Conformational Transitions calculations tutorial using BioExcel Building Blocks (biobb) and GOdMD\r +\r +This tutorial aims to illustrate the process of computing a conformational transition between two known structural conformations of a protein, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Protein Conformational Transitions calculations tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_godmd/galaxy/biobb_wf_godmd.ga" ; + schema1:version 
1 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "This is the workflow for the biodiversity component of the cultural ecosystems digital twin" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/810?version=1" ; + schema1:isBasedOn "https://biodt.eu/" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for BioDT Cultural Ecosystem Services prototype Digital Twin - Biodiversity Component" ; + schema1:sdDatePublished "2024-08-05 10:24:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/810/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 672 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-04-02T15:45:06Z" ; + schema1:dateModified "2024-05-17T13:15:34Z" ; + schema1:description "This is the workflow for the biodiversity component of the cultural ecosystems digital twin" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "BioDT Cultural Ecosystem Services prototype Digital Twin - Biodiversity Component" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/810?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-06-28T13:49:10.541149" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/rnaseq-sr" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "rnaseq-sr/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-11-15T16:55:13.540629" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Scaffolding-HiC-VGP8/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.17" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "MARS-seq v2 preprocessing pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/996?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/marsseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/marsseq" ; + schema1:sdDatePublished "2024-08-05 10:23:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/996/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9096 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:00Z" ; + schema1:dateModified "2024-06-11T12:55:00Z" ; + schema1:description "MARS-seq v2 preprocessing pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/996?version=4" ; + schema1:keywords "facs-sorting, MARS-seq, single-cell, single-cell-rna-seq, star-solo, transcriptional-dynamics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/marsseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/996?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-07-10T16:11:49.131170" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/quality-and-contamination-control" ; + schema1:license "GPL-3.0-or-later" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "quality-and-contamination-control/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . + + a schema1:Dataset ; + schema1:datePublished "2021-12-20T17:04:47.614081" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/gromacs-dctmd" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "gromacs-dctmd/main" ; + schema1:sdDatePublished "2021-12-21 03:01:01 +0000" ; + schema1:softwareVersion "v0.1.2" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=22" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=22" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10903 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:13Z" ; + schema1:dateModified "2024-06-11T12:55:13Z" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=22" ; + schema1:version 22 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Summary\r +\r +This tutorial aims to illustrate the process of protein-ligand docking, step by step, using the BioExcel Building Blocks library (biobb). The particular example used is the Mitogen-activated protein kinase 14 (p38-α) protein (PDB code 3HEC), a well-known Protein Kinase enzyme, in complex with the FDA-approved Imatinib, (PDB Ligand code STI, DrugBank Ligand Code DB00619), a small molecule kinase inhibitor used to treat certain types of cancer.\r +\r +Workflow engine is a jupyter notebook. It can be run in binder, following the link given, or locally. Auxiliar libraries used are: nb\\_conda_kernels, nglview, ipywidgets, plotly, and simpletraj. Environment can be setup using the included environment.yml file.\r +\r +\r +# Parameters\r +\r +## Inputs\r +\r +Parameters needed to configure the workflow:\r +\r +* **pdb_code**: PDB code of the experimental complex structure (if exists).\r +* **ligand_code**: Ligand PDB code (3-letter code) for the small molecule (e.g. 
STI).\r +\r +## Outputs\r +\r +Output files generated (named according to the input parameters given above):\r +\r +* **output\\_structure**: generated protein-ligand complex""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.127.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Protein-ligand Docking tutorial using BioExcel Building Blocks (biobb) (jupyter notebook)" ; + schema1:sdDatePublished "2024-08-05 10:30:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/127/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 39034 ; + schema1:creator , + , + ; + schema1:dateCreated "2021-06-28T10:30:46Z" ; + schema1:dateModified "2021-06-28T14:21:23Z" ; + schema1:description """# Summary\r +\r +This tutorial aims to illustrate the process of protein-ligand docking, step by step, using the BioExcel Building Blocks library (biobb). The particular example used is the Mitogen-activated protein kinase 14 (p38-α) protein (PDB code 3HEC), a well-known Protein Kinase enzyme, in complex with the FDA-approved Imatinib, (PDB Ligand code STI, DrugBank Ligand Code DB00619), a small molecule kinase inhibitor used to treat certain types of cancer.\r +\r +Workflow engine is a jupyter notebook. It can be run in binder, following the link given, or locally. Auxiliar libraries used are: nb\\_conda_kernels, nglview, ipywidgets, plotly, and simpletraj. Environment can be setup using the included environment.yml file.\r +\r +\r +# Parameters\r +\r +## Inputs\r +\r +Parameters needed to configure the workflow:\r +\r +* **pdb_code**: PDB code of the experimental complex structure (if exists).\r +* **ligand_code**: Ligand PDB code (3-letter code) for the small molecule (e.g. 
STI).\r +\r +## Outputs\r +\r +Output files generated (named according to the input parameters given above):\r +\r +* **output\\_structure**: generated protein-ligand complex""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/127?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Protein-ligand Docking tutorial using BioExcel Building Blocks (biobb) (jupyter notebook)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/127?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """\r +\r +![Logo](https://cbg-ethz.github.io/V-pipe/img/logo.svg)\r +\r +[![bio.tools](https://img.shields.io/badge/bio-tools-blue.svg)](https://bio.tools/V-Pipe)\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥6.8.1-blue.svg)](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe)\r +[![Deploy Docker image](https://github.com/cbg-ethz/V-pipe/actions/workflows/deploy-docker.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe)\r +[![Tests](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml)\r +[![Mega-Linter](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml)\r +[![License: Apache-2.0](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\r +\r +V-pipe is a workflow designed for the analysis of next generation sequencing (NGS) data from viral pathogens. 
It produces a number of results in a curated format (e.g., consensus sequences, SNV calls, local/global haplotypes).\r +V-pipe is written using the Snakemake workflow management system.\r +\r +## Usage\r +\r +Different ways of initializing V-pipe are presented below. We strongly encourage you to deploy it [using the quick install script](#using-quick-install-script), as this is our preferred method.\r +\r +To configure V-pipe refer to the documentation present in [config/README.md](config/README.md).\r +\r +V-pipe expects the input samples to be organized in a [two-level](config/README.md#samples) directory hierarchy,\r +and the sequencing reads must be provided in a sub-folder named `raw_data`. Further details can be found on the [website](https://cbg-ethz.github.io/V-pipe/usage/).\r +Check the utils subdirectory for [mass-importers tools](utils/README.md#samples-mass-importers) that can assist you in generating this hierarchy.\r +\r +We provide [virus-specific base configuration files](config/README.md#virus-base-config) which contain handy defaults for, e.g., HIV and SARS-CoV-2. 
Set the virus in the general section of the configuration file:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +```\r +\r +Also see [snakemake's documentation](https://snakemake.readthedocs.io/en/stable/executing/cli.html) to learn more about the command-line options available when executing the workflow.\r +\r +### Using quick install script\r +\r +To deploy V-pipe, use the [installation script](utils/README.md#quick-installer) with the following parameters:\r +\r +```bash\r +curl -O 'https://raw.githubusercontent.com/cbg-ethz/V-pipe/master/utils/quick_install.sh'\r +./quick_install.sh -w work\r +```\r +\r +This script will download and install miniconda, checkout the V-pipe git repository (use `-b` to specify which branch/tag) and setup a work directory (specified with `-w`) with an executable script that will execute the workflow:\r +\r +```bash\r +cd work\r +# edit config.yaml and provide samples/ directory\r +./vpipe --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Docker\r +\r +Note: the [docker image](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe) is only setup with components to run the workflow for HIV and SARS-CoV-2 virus base configurations.\r +Using V-pipe with other viruses or configurations might require internet connectivity for additional software components.\r +\r +Create `config.yaml` or `vpipe.config` and then populate the `samples/` directory.\r +For example, the following config file could be used:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +\r +output:\r + snv: true\r + local: true\r + global: false\r + visualization: true\r + QA: true\r +```\r +\r +Then execute:\r +\r +```bash\r +docker run --rm -it -v $PWD:/work ghcr.io/cbg-ethz/v-pipe:master --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Snakedeploy\r +\r +First install [mamba](https://github.com/conda-forge/miniforge#mambaforge), then create and activate an environment with Snakemake and Snakedeploy:\r +\r +```bash\r +mamba create -c 
bioconda -c conda-forge --name snakemake snakemake snakedeploy\r +conda activate snakemake\r +```\r +\r +Snakemake's [official workflow installer Snakedeploy](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe) can now be used:\r +\r +```bash\r +snakedeploy deploy-workflow https://github.com/cbg-ethz/V-pipe --tag master .\r +# edit config/config.yaml and provide samples/ directory\r +snakemake --use-conda --jobs 4 --printshellcmds --dry-run\r +```\r +\r +## Dependencies\r +\r +- **[Conda](https://conda.io/docs/index.html)**\r +\r + Conda is a cross-platform package management system and an environment manager application. Snakemake uses mamba as a package manager.\r +\r +- **[Snakemake](https://snakemake.readthedocs.io/)**\r +\r + Snakemake is the central workflow and dependency manager of V-pipe. It determines the order in which individual tools are invoked and checks that programs do not exit unexpectedly.\r +\r +- **[VICUNA](https://www.broadinstitute.org/viral-genomics/vicuna)**\r +\r + VICUNA is a *de novo* assembly software designed for populations with high mutation rates. It is used to build an initial reference for mapping reads with ngshmmalign aligner when a `references/cohort_consensus.fasta` file is not provided. Further details can be found in the [wiki](https://github.com/cbg-ethz/V-pipe/wiki/getting-started#input-files) pages.\r +\r +### Computational tools\r +\r +Other dependencies are managed by using isolated conda environments per rule, and below we list some of the computational tools integrated in V-pipe:\r +\r +- **[FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)**\r +\r + FastQC gives an overview of the raw sequencing data. Flowcells that have been overloaded or otherwise fail during sequencing can easily be determined with FastQC.\r +\r +- **[PRINSEQ](http://prinseq.sourceforge.net/)**\r +\r + Trimming and clipping of reads is performed by PRINSEQ. 
It is currently the most versatile raw read processor with many customization options.\r +\r +- **[ngshmmalign](https://github.com/cbg-ethz/ngshmmalign)**\r +\r + We perform the alignment of the curated NGS data using our custom ngshmmalign that takes structural variants into account. It produces multiple consensus sequences that include either majority bases or ambiguous bases.\r +\r +- **[bwa](https://github.com/lh3/bwa)**\r +\r + In order to detect specific cross-contaminations with other probes, the Burrows-Wheeler aligner is used. It quickly yields estimates for foreign genomic material in an experiment.\r + Additionally, It can be used as an alternative aligner to ngshmmalign.\r +\r +- **[MAFFT](http://mafft.cbrc.jp/alignment/software/)**\r +\r + To standardise multiple samples to the same reference genome (say HXB2 for HIV-1), the multiple sequence aligner MAFFT is employed. The multiple sequence alignment helps in determining regions of low conservation and thus makes standardisation of alignments more robust.\r +\r +- **[Samtools and bcftools](https://www.htslib.org/)**\r +\r + The Swiss Army knife of alignment postprocessing and diagnostics. 
bcftools is also used to generate consensus sequence with indels.\r +\r +- **[SmallGenomeUtilities](https://github.com/cbg-ethz/smallgenomeutilities)**\r +\r + We perform genomic liftovers to standardised reference genomes using our in-house developed python library of utilities for rewriting alignments.\r +\r +- **[ShoRAH](https://github.com/cbg-ethz/shorah)**\r +\r + ShoRAh performs SNV calling and local haplotype reconstruction by using bayesian clustering.\r +\r +- **[LoFreq](https://csb5.github.io/lofreq/)**\r +\r + LoFreq (version 2) is SNVs and indels caller from next-generation sequencing data, and can be used as an alternative engine for SNV calling.\r +\r +- **[SAVAGE](https://bitbucket.org/jbaaijens/savage) and [Haploclique](https://github.com/cbg-ethz/haploclique)**\r +\r + We use HaploClique or SAVAGE to perform global haplotype reconstruction for heterogeneous viral populations by using an overlap graph.\r +\r +## Citation\r +\r +If you use this software in your research, please cite:\r +\r +Posada-Céspedes S., Seifert D., Topolsky I., Jablonski K.P., Metzner K.J., and Beerenwinkel N. 2021.\r +"V-pipe: a computational pipeline for assessing viral genetic diversity from high-throughput sequencing data."\r +_Bioinformatics_, January. 
doi:[10.1093/bioinformatics/btab015](https://doi.org/10.1093/bioinformatics/btab015).\r +\r +## Contributions\r +\r +- [Ivan Topolsky\\* ![orcid]](https://orcid.org/0000-0002-7561-0810), [![github]](https://github.com/dryak)\r +- [Kim Philipp Jablonski ![orcid]](https://orcid.org/0000-0002-4166-4343), [![github]](https://github.com/kpj)\r +- [Lara Fuhrmann ![orcid]](https://orcid.org/0000-0001-6405-0654), [![github]](https://github.com/LaraFuhrmann)\r +- [Uwe Schmitt ![orcid]](https://orcid.org/0000-0002-4658-0616), [![github]](https://github.com/uweschmitt)\r +- [Monica Dragan ![orcid]](https://orcid.org/0000-0002-7719-5892), [![github]](https://github.com/monicadragan)\r +- [Susana Posada Céspedes ![orcid]](https://orcid.org/0000-0002-7459-8186), [![github]](https://github.com/sposadac)\r +- [David Seifert ![orcid]](https://orcid.org/0000-0003-4739-5110), [![github]](https://github.com/SoapZA)\r +- Tobias Marschall\r +- [Niko Beerenwinkel\\*\\* ![orcid]](https://orcid.org/0000-0002-0573-6119)\r +\r +\\* software maintainer ;\r +\\** group leader\r +\r +[github]: https://cbg-ethz.github.io/V-pipe/img/mark-github.svg\r +[orcid]: https://cbg-ethz.github.io/V-pipe/img/ORCIDiD_iconvector.svg\r +\r +## Contact\r +\r +We encourage users to use the [issue tracker](https://github.com/cbg-ethz/V-pipe/issues). For further enquiries, you can also contact the V-pipe Dev Team .\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/301?version=2" ; + schema1:isBasedOn "https://github.com/cbg-ethz/V-pipe.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for V-pipe (main multi-virus version)" ; + schema1:sdDatePublished "2024-08-05 10:23:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/301/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1308 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-30T16:51:13Z" ; + schema1:dateModified "2022-04-11T09:29:47Z" ; + schema1:description """\r +\r +![Logo](https://cbg-ethz.github.io/V-pipe/img/logo.svg)\r +\r +[![bio.tools](https://img.shields.io/badge/bio-tools-blue.svg)](https://bio.tools/V-Pipe)\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥6.8.1-blue.svg)](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe)\r +[![Deploy Docker image](https://github.com/cbg-ethz/V-pipe/actions/workflows/deploy-docker.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe)\r +[![Tests](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml)\r +[![Mega-Linter](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml)\r +[![License: Apache-2.0](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\r +\r +V-pipe is a workflow designed for the analysis of next generation sequencing (NGS) data from viral pathogens. It produces a number of results in a curated format (e.g., consensus sequences, SNV calls, local/global haplotypes).\r +V-pipe is written using the Snakemake workflow management system.\r +\r +## Usage\r +\r +Different ways of initializing V-pipe are presented below. 
We strongly encourage you to deploy it [using the quick install script](#using-quick-install-script), as this is our preferred method.\r +\r +To configure V-pipe refer to the documentation present in [config/README.md](config/README.md).\r +\r +V-pipe expects the input samples to be organized in a [two-level](config/README.md#samples) directory hierarchy,\r +and the sequencing reads must be provided in a sub-folder named `raw_data`. Further details can be found on the [website](https://cbg-ethz.github.io/V-pipe/usage/).\r +Check the utils subdirectory for [mass-importers tools](utils/README.md#samples-mass-importers) that can assist you in generating this hierarchy.\r +\r +We provide [virus-specific base configuration files](config/README.md#virus-base-config) which contain handy defaults for, e.g., HIV and SARS-CoV-2. Set the virus in the general section of the configuration file:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +```\r +\r +Also see [snakemake's documentation](https://snakemake.readthedocs.io/en/stable/executing/cli.html) to learn more about the command-line options available when executing the workflow.\r +\r +### Using quick install script\r +\r +To deploy V-pipe, use the [installation script](utils/README.md#quick-installer) with the following parameters:\r +\r +```bash\r +curl -O 'https://raw.githubusercontent.com/cbg-ethz/V-pipe/master/utils/quick_install.sh'\r +./quick_install.sh -w work\r +```\r +\r +This script will download and install miniconda, checkout the V-pipe git repository (use `-b` to specify which branch/tag) and setup a work directory (specified with `-w`) with an executable script that will execute the workflow:\r +\r +```bash\r +cd work\r +# edit config.yaml and provide samples/ directory\r +./vpipe --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Docker\r +\r +Note: the [docker image](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe) is only setup with components to run the workflow for HIV and 
SARS-CoV-2 virus base configurations.\r +Using V-pipe with other viruses or configurations might require internet connectivity for additional software components.\r +\r +Create `config.yaml` or `vpipe.config` and then populate the `samples/` directory.\r +For example, the following config file could be used:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +\r +output:\r + snv: true\r + local: true\r + global: false\r + visualization: true\r + QA: true\r +```\r +\r +Then execute:\r +\r +```bash\r +docker run --rm -it -v $PWD:/work ghcr.io/cbg-ethz/v-pipe:master --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Snakedeploy\r +\r +First install [mamba](https://github.com/conda-forge/miniforge#mambaforge), then create and activate an environment with Snakemake and Snakedeploy:\r +\r +```bash\r +mamba create -c bioconda -c conda-forge --name snakemake snakemake snakedeploy\r +conda activate snakemake\r +```\r +\r +Snakemake's [official workflow installer Snakedeploy](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe) can now be used:\r +\r +```bash\r +snakedeploy deploy-workflow https://github.com/cbg-ethz/V-pipe --tag master .\r +# edit config/config.yaml and provide samples/ directory\r +snakemake --use-conda --jobs 4 --printshellcmds --dry-run\r +```\r +\r +## Dependencies\r +\r +- **[Conda](https://conda.io/docs/index.html)**\r +\r + Conda is a cross-platform package management system and an environment manager application. Snakemake uses mamba as a package manager.\r +\r +- **[Snakemake](https://snakemake.readthedocs.io/)**\r +\r + Snakemake is the central workflow and dependency manager of V-pipe. It determines the order in which individual tools are invoked and checks that programs do not exit unexpectedly.\r +\r +- **[VICUNA](https://www.broadinstitute.org/viral-genomics/vicuna)**\r +\r + VICUNA is a *de novo* assembly software designed for populations with high mutation rates. 
It is used to build an initial reference for mapping reads with ngshmmalign aligner when a `references/cohort_consensus.fasta` file is not provided. Further details can be found in the [wiki](https://github.com/cbg-ethz/V-pipe/wiki/getting-started#input-files) pages.\r +\r +### Computational tools\r +\r +Other dependencies are managed by using isolated conda environments per rule, and below we list some of the computational tools integrated in V-pipe:\r +\r +- **[FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)**\r +\r + FastQC gives an overview of the raw sequencing data. Flowcells that have been overloaded or otherwise fail during sequencing can easily be determined with FastQC.\r +\r +- **[PRINSEQ](http://prinseq.sourceforge.net/)**\r +\r + Trimming and clipping of reads is performed by PRINSEQ. It is currently the most versatile raw read processor with many customization options.\r +\r +- **[ngshmmalign](https://github.com/cbg-ethz/ngshmmalign)**\r +\r + We perform the alignment of the curated NGS data using our custom ngshmmalign that takes structural variants into account. It produces multiple consensus sequences that include either majority bases or ambiguous bases.\r +\r +- **[bwa](https://github.com/lh3/bwa)**\r +\r + In order to detect specific cross-contaminations with other probes, the Burrows-Wheeler aligner is used. It quickly yields estimates for foreign genomic material in an experiment.\r + Additionally, It can be used as an alternative aligner to ngshmmalign.\r +\r +- **[MAFFT](http://mafft.cbrc.jp/alignment/software/)**\r +\r + To standardise multiple samples to the same reference genome (say HXB2 for HIV-1), the multiple sequence aligner MAFFT is employed. The multiple sequence alignment helps in determining regions of low conservation and thus makes standardisation of alignments more robust.\r +\r +- **[Samtools and bcftools](https://www.htslib.org/)**\r +\r + The Swiss Army knife of alignment postprocessing and diagnostics. 
bcftools is also used to generate consensus sequence with indels.\r +\r +- **[SmallGenomeUtilities](https://github.com/cbg-ethz/smallgenomeutilities)**\r +\r + We perform genomic liftovers to standardised reference genomes using our in-house developed python library of utilities for rewriting alignments.\r +\r +- **[ShoRAH](https://github.com/cbg-ethz/shorah)**\r +\r + ShoRAh performs SNV calling and local haplotype reconstruction by using bayesian clustering.\r +\r +- **[LoFreq](https://csb5.github.io/lofreq/)**\r +\r + LoFreq (version 2) is SNVs and indels caller from next-generation sequencing data, and can be used as an alternative engine for SNV calling.\r +\r +- **[SAVAGE](https://bitbucket.org/jbaaijens/savage) and [Haploclique](https://github.com/cbg-ethz/haploclique)**\r +\r + We use HaploClique or SAVAGE to perform global haplotype reconstruction for heterogeneous viral populations by using an overlap graph.\r +\r +## Citation\r +\r +If you use this software in your research, please cite:\r +\r +Posada-Céspedes S., Seifert D., Topolsky I., Jablonski K.P., Metzner K.J., and Beerenwinkel N. 2021.\r +"V-pipe: a computational pipeline for assessing viral genetic diversity from high-throughput sequencing data."\r +_Bioinformatics_, January. 
doi:[10.1093/bioinformatics/btab015](https://doi.org/10.1093/bioinformatics/btab015).\r +\r +## Contributions\r +\r +- [Ivan Topolsky\\* ![orcid]](https://orcid.org/0000-0002-7561-0810), [![github]](https://github.com/dryak)\r +- [Kim Philipp Jablonski ![orcid]](https://orcid.org/0000-0002-4166-4343), [![github]](https://github.com/kpj)\r +- [Lara Fuhrmann ![orcid]](https://orcid.org/0000-0001-6405-0654), [![github]](https://github.com/LaraFuhrmann)\r +- [Uwe Schmitt ![orcid]](https://orcid.org/0000-0002-4658-0616), [![github]](https://github.com/uweschmitt)\r +- [Monica Dragan ![orcid]](https://orcid.org/0000-0002-7719-5892), [![github]](https://github.com/monicadragan)\r +- [Susana Posada Céspedes ![orcid]](https://orcid.org/0000-0002-7459-8186), [![github]](https://github.com/sposadac)\r +- [David Seifert ![orcid]](https://orcid.org/0000-0003-4739-5110), [![github]](https://github.com/SoapZA)\r +- Tobias Marschall\r +- [Niko Beerenwinkel\\*\\* ![orcid]](https://orcid.org/0000-0002-0573-6119)\r +\r +\\* software maintainer ;\r +\\** group leader\r +\r +[github]: https://cbg-ethz.github.io/V-pipe/img/mark-github.svg\r +[orcid]: https://cbg-ethz.github.io/V-pipe/img/ORCIDiD_iconvector.svg\r +\r +## Contact\r +\r +We encourage users to use the [issue tracker](https://github.com/cbg-ethz/V-pipe/issues). For further enquiries, you can also contact the V-pipe Dev Team .\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/301?version=4" ; + schema1:keywords "Alignment, Assembly, covid-19, Genomics, INDELs, rna, SNPs, variant_calling, workflow" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "V-pipe (main multi-virus version)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/301?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-11-23T13:33:07.949422" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Differential abundance analysis" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/981?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/differentialabundance" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/differentialabundance" ; + schema1:sdDatePublished "2024-08-05 10:24:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/981/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12719 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "Differential abundance analysis" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/981?version=8" ; + schema1:keywords "ATAC-seq, ChIP-seq, deseq2, differential-abundance, differential-expression, gsea, limma, microarray, rna-seq, shiny" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/differentialabundance" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/981?version=4" ; + schema1:version 4 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """This workflow takes a cell-type-annotated AnnData object (processed with SnapATAC2) and performs peak calling with MACS3 on the cell types. Next, a cell-by-peak matrix is constructed and differential accessibility tests are performed for comparison of either two cell types or one cell type with a background of all other cells. \r +Lastly, differentially accessible marker regions for each cell type are identified. """ ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1089?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Differential peak analysis with SnapATAC2" ; + schema1:sdDatePublished "2024-08-05 10:22:10 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1089/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 24746 ; + schema1:creator ; + schema1:dateCreated "2024-08-02T18:23:43Z" ; + schema1:dateModified "2024-08-02T18:23:43Z" ; + schema1:description """This workflow takes a cell-type-annotated AnnData object (processed with SnapATAC2) and performs peak calling with MACS3 on the cell types. Next, a cell-by-peak matrix is constructed and differential accessibility tests are performed for comparison of either two cell types or one cell type with a background of all other cells. \r +Lastly, differentially accessible marker regions for each cell type are identified. """ ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Differential peak analysis with SnapATAC2" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1089?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + ; + ns1:output , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/383?version=1" ; + schema1:isBasedOn "https://gitlab.bsc.es/lrodrig1/structuralvariants_poc/-/tree/1.1.3/structuralvariants/nextflow" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNV_pipeline" ; + schema1:sdDatePublished "2024-08-05 10:31:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/383/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4361 ; + schema1:creator , + ; + schema1:dateCreated "2022-09-09T10:38:14Z" ; + schema1:dateModified "2022-09-09T10:39:53Z" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/383?version=2" ; + schema1:keywords "CODEX2, TransBioNet, ExomeDepth, variant calling, cancer, manta, GRIDS, structural variants" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CNV_pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/383?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "CLIP analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/973?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/clipseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/clipseq" ; + schema1:sdDatePublished "2024-08-05 10:24:10 +0100" ; + schema1:url "https://workflowhub.eu/workflows/973/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6003 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:45Z" ; + schema1:dateModified "2024-06-11T12:54:45Z" ; + schema1:description "CLIP analysis pipeline" ; + schema1:keywords "clip, clip-seq, peak-calling, rna-rbp-interactions" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/clipseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/973?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-08-05 10:23:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7157 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:58Z" ; + schema1:dateModified "2024-06-11T12:54:58Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=28" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=28" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12883 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:13Z" ; + schema1:dateModified "2024-06-11T12:55:13Z" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=28" ; + schema1:version 28 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=16" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-08-05 10:22:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=16" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10659 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:39Z" ; + schema1:dateModified "2024-06-11T12:54:39Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=16" ; + schema1:version 16 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# MGnify genomes catalogue pipeline\r +\r +[MGnify](https://www.ebi.ac.uk/metagenomics/) A pipeline to perform taxonomic and functional annotation and to generate a catalogue from a set of isolate and/or metagenome-assembled genomes (MAGs) using the workflow described in the following publication:\r +\r +Gurbich TA, Almeida A, Beracochea M, Burdett T, Burgin J, Cochrane G, Raj S, Richardson L, Rogers AB, Sakharova E, Salazar GA and Finn RD. (2023) [MGnify Genomes: A Resource for Biome-specific Microbial Genome Catalogues.](https://www.sciencedirect.com/science/article/pii/S0022283623000724) J Mol Biol. doi: https://doi.org/10.1016/j.jmb.2023.168016\r +\r +Detailed information about existing MGnify catalogues: https://docs.mgnify.org/src/docs/genome-viewer.html\r +\r +### Tools used in the pipeline\r +| Tool/Database | Version | Purpose |\r +| ----------- | ----------- |----------- |\r +| CheckM | 1.1.3 | Determining genome quality |\r +| dRep | 3.2.2 | Genome clustering |\r +| Mash | 2.3 | Sketch for the catalogue; placement of genomes into clusters (update only); strain tree |\r +| GUNC | 1.0.3 | Quality control |\r +| GTDB-Tk | 2.1.0 | Assigning taxonomy; generating alignments |\r +| GTDB | r207_v2 | Database for GTDB-Tk |\r +| Prokka | 1.14.6 | Protein annotation |\r +| IQ-TREE 2 | 2.2.0.3 | Generating a phylogenetic tree |\r +| Kraken 2 | 2.1.2 | Generating a kraken database |\r +| Bracken | 2.6.2 | Generating a bracken database |\r +| MMseqs2 | 13.45111 | Generating a protein catalogue |\r +| eggNOG-mapper | 2.1.3 | Protein annotation (eggNOG, KEGG, COG, CAZy) |\r +| InterProScan | 5.57-90.0 | Protein annotation (InterPro, Pfam) |\r +| CRISPRCasFinder | 4.3.2 | Annotation of CRISPR arrays |\r +| AMRFinderPlus | 3.11.4 | Antimicrobial resistance gene annotation; virulence factors, biocide, heat, acid, and metal resistance 
gene annotation |\r +| AMRFinderPlus DB | 3.11 2023-02-23.1 | Database for AMRFinderPlus |\r +| SanntiS | 0.9.3.2 | Biosynthetic gene cluster annotation |\r +| Infernal | 1.1.4 | RNA predictions |\r +| tRNAscan-SE | 2.0.9 | tRNA predictions |\r +| Rfam | 14.6 | Identification of SSU/LSU rRNA and other ncRNAs |\r +| Panaroo | 1.3.2 | Pan-genome computation |\r +| Seqtk | 1.3 | Generating a gene catalogue |\r +| VIRify | - | Viral sequence annotation |\r +| MoMofy | 1.0.0 | Mobilome annotation |\r +| samtools | 1.15 | FASTA indexing |\r +\r +## Setup\r +\r +### Environment\r +\r +The pipeline is implemented in [Nextflow](https://www.nextflow.io/).\r +\r +Requirements:\r +- [singulairty](https://sylabs.io/docs/) or [docker](https://www.docker.com/)\r +\r +#### Reference databases\r +\r +The pipeline needs the following reference databases and configuration files (roughtly ~150G):\r +\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/gunc_db_2.0.4.dmnd.gz\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/eggnog_db.tgz\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/rfams_cms/\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/kegg_classes.tsv\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/ncrna/\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/continent_countries.csv\r +- https://data.gtdb.ecogenomic.org/releases/release207/207.0/auxillary_files/gtdbtk_r207_v2_data.tar.gz\r +\r +### Containers\r +\r +This pipeline requires [singularity](https://sylabs.io/docs/) or [docker](https://www.docker.com/) as the container engine to run pipeline.\r +\r +The containers are hosted in [biocontainers](https://biocontainers.pro/) and [quay.io/microbiome-informatics](https://quay.io/organization/microbiome-informatics) repository.\r +\r +It's possible to build the containers from scratch using the following script:\r +\r +```bash\r +cd containers && bash build.sh\r 
+```\r +\r +## Running the pipeline\r +\r +## Data preparation\r +\r +1. You need to pre-download your data to directories and make sure that genomes are uncompressed. Scripts to fetch genomes from ENA ([fetch_ena.py](https://github.com/EBI-Metagenomics/genomes-pipeline/blob/master/containers/genomes-catalog-update/scripts/fetch_ena.py)) and NCBI ([fetch_ncbi.py](https://github.com/EBI-Metagenomics/genomes-pipeline/blob/master/containers/genomes-catalog-update/scripts/fetch_ncbi.py)) are provided and need to be executed separately from the pipeline. If you have downloaded genomes from both ENA and NCBI, put them into separate folders.\r +\r +2. When genomes are fetched from ENA using the `fetch_ena.py` script, a CSV file with contamination and completeness statistics is also created in the same directory where genomes are saved to. If you are downloading genomes using a different approach, a CSV file needs to be created manually (each line should be genome accession, % completeness, % contamination). The ENA fetching script also pre-filters genomes to satisfy the QS50 cut-off (QS = % completeness - 5 * % contamination). \r +\r +3. You will need the following information to run the pipeline:\r + - catalogue name (for example, zebrafish-faecal)\r + - catalogue version (for example, 1.0)\r + - catalogue biome (for example, root:Host-associated:Human:Digestive system:Large intestine:Fecal)\r + - min and max accession number to be assigned to the genomes (only MGnify specific). 
Max - Min = #total number of genomes (NCBI+ENA)\r +\r +### Execution\r +\r +The pipeline is built in [Nextflow](https://www.nextflow.io), and utilized containers to run the software (we don't support conda ATM).\r +In order to run the pipeline it's required that the user creates a profile that suits their needs, there is an `ebi` profile in `nexflow.config` that can be used as template.\r +\r +After downloading the databases and adjusting the config file:\r +\r +```bash\r +nextflow run EBI-Metagenomics/genomes-pipeline -c -profile \\\r +--genome-prefix=MGYG \\\r +--biome="root:Host-associated:Fish:Digestive system" \\\r +--ena_genomes= \\\r +--ena_genomes_checkm= \\\r +--mgyg_start=0 \\\r +--mgyg_end=10 \\\r +--catalogue_name=zebrafish-faecal \\\r +--catalogue_version="1.0" \\\r +--ftp_name="zebrafish-faecal" \\\r +--ftp_version="v1.0" \\\r +--outdir=""\r +```\r +\r +### Development\r +\r +Install development tools (including pre-commit hooks to run Black code formatting).\r +\r +```bash\r +pip install -r requirements-dev.txt\r +pre-commit install\r +```\r +\r +#### Code style\r +\r +Use Black, this tool is configured if you install the pre-commit tools as above.\r +\r +To manually run them: black .\r +\r +### Testing\r +\r +This repo has 2 set of tests, python unit tests for some of the most critical python scripts and [nf-test](https://github.com/askimed/nf-test) scripts for the nextflow code.\r +\r +To run the python tests\r +\r +```bash\r +pip install -r requirements-test.txt\r +pytest\r +```\r +\r +To run the nextflow ones the databases have to downloaded manually, we are working to improve this.\r +\r +```bash\r +nf-test test tests/*\r +```\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/462?version=1" ; + schema1:isBasedOn "https://github.com/EBI-Metagenomics/genomes-pipeline.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for MGnify genomes catalogue pipeline" ; + 
schema1:sdDatePublished "2024-08-05 10:24:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/462/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 129 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-04-28T09:36:34Z" ; + schema1:dateModified "2024-05-23T11:20:39Z" ; + schema1:description """# MGnify genomes catalogue pipeline\r +\r +[MGnify](https://www.ebi.ac.uk/metagenomics/) A pipeline to perform taxonomic and functional annotation and to generate a catalogue from a set of isolate and/or metagenome-assembled genomes (MAGs) using the workflow described in the following publication:\r +\r +Gurbich TA, Almeida A, Beracochea M, Burdett T, Burgin J, Cochrane G, Raj S, Richardson L, Rogers AB, Sakharova E, Salazar GA and Finn RD. (2023) [MGnify Genomes: A Resource for Biome-specific Microbial Genome Catalogues.](https://www.sciencedirect.com/science/article/pii/S0022283623000724) J Mol Biol. 
doi: https://doi.org/10.1016/j.jmb.2023.168016\r +\r +Detailed information about existing MGnify catalogues: https://docs.mgnify.org/src/docs/genome-viewer.html\r +\r +### Tools used in the pipeline\r +| Tool/Database | Version | Purpose |\r +| ----------- | ----------- |----------- |\r +| CheckM | 1.1.3 | Determining genome quality |\r +| dRep | 3.2.2 | Genome clustering |\r +| Mash | 2.3 | Sketch for the catalogue; placement of genomes into clusters (update only); strain tree |\r +| GUNC | 1.0.3 | Quality control |\r +| GTDB-Tk | 2.1.0 | Assigning taxonomy; generating alignments |\r +| GTDB | r207_v2 | Database for GTDB-Tk |\r +| Prokka | 1.14.6 | Protein annotation |\r +| IQ-TREE 2 | 2.2.0.3 | Generating a phylogenetic tree |\r +| Kraken 2 | 2.1.2 | Generating a kraken database |\r +| Bracken | 2.6.2 | Generating a bracken database |\r +| MMseqs2 | 13.45111 | Generating a protein catalogue |\r +| eggNOG-mapper | 2.1.3 | Protein annotation (eggNOG, KEGG, COG, CAZy) |\r +| InterProScan | 5.57-90.0 | Protein annotation (InterPro, Pfam) |\r +| CRISPRCasFinder | 4.3.2 | Annotation of CRISPR arrays |\r +| AMRFinderPlus | 3.11.4 | Antimicrobial resistance gene annotation; virulence factors, biocide, heat, acid, and metal resistance gene annotation |\r +| AMRFinderPlus DB | 3.11 2023-02-23.1 | Database for AMRFinderPlus |\r +| SanntiS | 0.9.3.2 | Biosynthetic gene cluster annotation |\r +| Infernal | 1.1.4 | RNA predictions |\r +| tRNAscan-SE | 2.0.9 | tRNA predictions |\r +| Rfam | 14.6 | Identification of SSU/LSU rRNA and other ncRNAs |\r +| Panaroo | 1.3.2 | Pan-genome computation |\r +| Seqtk | 1.3 | Generating a gene catalogue |\r +| VIRify | - | Viral sequence annotation |\r +| MoMofy | 1.0.0 | Mobilome annotation |\r +| samtools | 1.15 | FASTA indexing |\r +\r +## Setup\r +\r +### Environment\r +\r +The pipeline is implemented in [Nextflow](https://www.nextflow.io/).\r +\r +Requirements:\r +- [singulairty](https://sylabs.io/docs/) or 
[docker](https://www.docker.com/)\r +\r +#### Reference databases\r +\r +The pipeline needs the following reference databases and configuration files (roughtly ~150G):\r +\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/gunc_db_2.0.4.dmnd.gz\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/eggnog_db.tgz\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/rfams_cms/\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/kegg_classes.tsv\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/ncrna/\r +- ftp://ftp.ebi.ac.uk/pub/databases/metagenomics/genomes-pipeline/continent_countries.csv\r +- https://data.gtdb.ecogenomic.org/releases/release207/207.0/auxillary_files/gtdbtk_r207_v2_data.tar.gz\r +\r +### Containers\r +\r +This pipeline requires [singularity](https://sylabs.io/docs/) or [docker](https://www.docker.com/) as the container engine to run pipeline.\r +\r +The containers are hosted in [biocontainers](https://biocontainers.pro/) and [quay.io/microbiome-informatics](https://quay.io/organization/microbiome-informatics) repository.\r +\r +It's possible to build the containers from scratch using the following script:\r +\r +```bash\r +cd containers && bash build.sh\r +```\r +\r +## Running the pipeline\r +\r +## Data preparation\r +\r +1. You need to pre-download your data to directories and make sure that genomes are uncompressed. Scripts to fetch genomes from ENA ([fetch_ena.py](https://github.com/EBI-Metagenomics/genomes-pipeline/blob/master/containers/genomes-catalog-update/scripts/fetch_ena.py)) and NCBI ([fetch_ncbi.py](https://github.com/EBI-Metagenomics/genomes-pipeline/blob/master/containers/genomes-catalog-update/scripts/fetch_ncbi.py)) are provided and need to be executed separately from the pipeline. If you have downloaded genomes from both ENA and NCBI, put them into separate folders.\r +\r +2. 
When genomes are fetched from ENA using the `fetch_ena.py` script, a CSV file with contamination and completeness statistics is also created in the same directory where genomes are saved to. If you are downloading genomes using a different approach, a CSV file needs to be created manually (each line should be genome accession, % completeness, % contamination). The ENA fetching script also pre-filters genomes to satisfy the QS50 cut-off (QS = % completeness - 5 * % contamination). \r +\r +3. You will need the following information to run the pipeline:\r + - catalogue name (for example, zebrafish-faecal)\r + - catalogue version (for example, 1.0)\r + - catalogue biome (for example, root:Host-associated:Human:Digestive system:Large intestine:Fecal)\r + - min and max accession number to be assigned to the genomes (only MGnify specific). Max - Min = #total number of genomes (NCBI+ENA)\r +\r +### Execution\r +\r +The pipeline is built in [Nextflow](https://www.nextflow.io), and utilized containers to run the software (we don't support conda ATM).\r +In order to run the pipeline it's required that the user creates a profile that suits their needs, there is an `ebi` profile in `nexflow.config` that can be used as template.\r +\r +After downloading the databases and adjusting the config file:\r +\r +```bash\r +nextflow run EBI-Metagenomics/genomes-pipeline -c -profile \\\r +--genome-prefix=MGYG \\\r +--biome="root:Host-associated:Fish:Digestive system" \\\r +--ena_genomes= \\\r +--ena_genomes_checkm= \\\r +--mgyg_start=0 \\\r +--mgyg_end=10 \\\r +--catalogue_name=zebrafish-faecal \\\r +--catalogue_version="1.0" \\\r +--ftp_name="zebrafish-faecal" \\\r +--ftp_version="v1.0" \\\r +--outdir=""\r +```\r +\r +### Development\r +\r +Install development tools (including pre-commit hooks to run Black code formatting).\r +\r +```bash\r +pip install -r requirements-dev.txt\r +pre-commit install\r +```\r +\r +#### Code style\r +\r +Use Black, this tool is configured if you install the 
pre-commit tools as above.\r +\r +To manually run them: black .\r +\r +### Testing\r +\r +This repo has 2 set of tests, python unit tests for some of the most critical python scripts and [nf-test](https://github.com/askimed/nf-test) scripts for the nextflow code.\r +\r +To run the python tests\r +\r +```bash\r +pip install -r requirements-test.txt\r +pytest\r +```\r +\r +To run the nextflow ones the databases have to downloaded manually, we are working to improve this.\r +\r +```bash\r +nf-test test tests/*\r +```\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/462?version=1" ; + schema1:keywords "Metagenomics, Nextflow, Bioinformatics" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "MGnify genomes catalogue pipeline" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/462?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=13" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-08-05 10:23:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=13" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14588 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:59Z" ; + schema1:dateModified "2024-06-11T12:54:59Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=13" ; + schema1:version 13 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=17" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-08-05 10:23:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=17" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9238 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:04Z" ; + schema1:dateModified "2024-06-11T12:55:04Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=17" ; + schema1:version 17 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.881.1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Fgenesh annotation -TSI" ; + schema1:sdDatePublished "2024-08-05 10:22:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/881/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17572 ; + schema1:creator ; + schema1:dateCreated "2024-05-08T07:28:08Z" ; + schema1:dateModified "2024-05-09T04:10:32Z" ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/881?version=3" ; + schema1:isPartOf ; + schema1:keywords "TSI-annotation" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Fgenesh annotation -TSI" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/881?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 1040040 . + + a schema1:Dataset ; + schema1:datePublished "2024-05-30T07:28:39.831046" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-sr/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# Snakemake workflow: Reconstructing raw tomography data\r +\r +A Snakemake worfklow for tomographically reconstructing raw data using [tomopy](https://tomopy.readthedocs.io/en/stable/).\r +\r +## Installation\r +\r +First download this repo and navigate to it\r +```bash\r +git clone https://codebase.helmholtz.cloud/gernha62/reconstructing-raw-tomography-data.git\r +```\r +```bash\r +cd /path/to/repo\r +```\r +(Optional) Download the example folder with:\r +```bash\r +wget -m -np https://doi2.psi.ch/datasets/das/work/p15/p15869/compression/MI04_02/tif\r +```\r +Create a virtual environment and install all necessary packages (requires conda): \r +```bash\r +conda env create --name reconstr_env --file workflow/envs/reconstr.yml\r +```\r +Activate the new virtual environment: \r +```bash\r +conda activate reconstr_env\r +```\r +\r +## Configuration\r +\r +To configure the workflow, adapt the config file found at `config/config.yaml` . The config looks as follows:\r +```yaml\r +number_of_darks: 50\r +number_of_flats: 100\r +number_of_projections: 501\r +rotation_center: 508.77\r +raw_data:\r + MI04_02: doi2.psi.ch/datasets/das/work/p15/p15869/compression/MI04_02/tif\r +```\r + In the config, adjust `number_of_darks`, `number_of_flats`, `number_of_projections` and `rotation_center` to the number of darks, flats, projections and the rotation center of your dataset. The necessary information can usually be found in the .log file of the folder that contains the raw data. \r +\r +`MI04_02: doi2.psi.ch/datasets/das/work/p15/p15869/compression/MI04_02/tif` denotes the path to the example folder used for reconstruction and the keyword `MI04_02` will be used to name the output (e.g. in this case the output folder will be named `recon_dir_MI04_02`). Replace the examle path with the path to the dataset you want to reconstruct. 
Additionally, if you want the name of the output folder to have a different suffix, replace the keyword `MI04_02` with a name you prefer.\r +\r +## Run the workflow\r +\r +If the .tif files contain a numerical prefix that is not separated from the actual image index, it is best to first rename the files. The files will be renamed to `00001.tif`, `00002.tif` and so on. If the renaming is needed, run:\r +\r +```bash\r +snakemake --cores 1 'logs/renamefile_MI04_02.log'\r +```\r +If you replaced the keyword `MI04_02` in the config file then adjust the command accordingly (e.g. if you replaced the keyword with `Tomo_dataset` then the command should be `snakemake --cores 1 'logs/renamefile_Tomo_dataset.log'`).\r +\r +Before trying to compute the reconstructions, make sure you have enough memory available (ideally more than 60 GB).\r +To compute the reconstructions using one core, use the command:\r +```bash\r +snakemake --cores 1\r +```\r +If you want to use all available cores instead, use:\r +```bash\r +snakemake --cores all\r +```\r +This creates a folder in `results` with the reconstructed data.\r +\r +## Credit\r +The example dataset used in this project (MI04_02 evolving magma, Mattia Pistone, University of Georgia) was taken from: https://doi.psi.ch/detail/10.16907/05a50450-767f-421d-9832-342b57c201af\r +\r +The script used for reconstruction (`scripts/reconstructs_tomo_datasets.py`) was provided by Alain Studer, PSI.""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.432.1" ; + schema1:isBasedOn "https://codebase.helmholtz.cloud/gernha62/reconstructing-raw-tomography-data.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Reconstructing raw tomography data" ; + schema1:sdDatePublished "2024-08-05 10:31:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/432/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1061 ; + schema1:creator ; + schema1:dateCreated "2023-02-02T13:46:59Z" ; + schema1:dateModified "2023-02-02T13:50:16Z" ; + schema1:description """# Snakemake workflow: Reconstructing raw tomography data\r +\r +A Snakemake worfklow for tomographically reconstructing raw data using [tomopy](https://tomopy.readthedocs.io/en/stable/).\r +\r +## Installation\r +\r +First download this repo and navigate to it\r +```bash\r +git clone https://codebase.helmholtz.cloud/gernha62/reconstructing-raw-tomography-data.git\r +```\r +```bash\r +cd /path/to/repo\r +```\r +(Optional) Download the example folder with:\r +```bash\r +wget -m -np https://doi2.psi.ch/datasets/das/work/p15/p15869/compression/MI04_02/tif\r +```\r +Create a virtual environment and install all necessary packages (requires conda): \r +```bash\r +conda env create --name reconstr_env --file workflow/envs/reconstr.yml\r +```\r +Activate the new virtual environment: \r +```bash\r +conda activate reconstr_env\r +```\r +\r +## Configuration\r +\r +To configure the workflow, adapt the config file found at `config/config.yaml` . The config looks as follows:\r +```yaml\r +number_of_darks: 50\r +number_of_flats: 100\r +number_of_projections: 501\r +rotation_center: 508.77\r +raw_data:\r + MI04_02: doi2.psi.ch/datasets/das/work/p15/p15869/compression/MI04_02/tif\r +```\r + In the config, adjust `number_of_darks`, `number_of_flats`, `number_of_projections` and `rotation_center` to the number of darks, flats, projections and the rotation center of your dataset. The necessary information can usually be found in the .log file of the folder that contains the raw data. 
\r +\r +`MI04_02: doi2.psi.ch/datasets/das/work/p15/p15869/compression/MI04_02/tif` denotes the path to the example folder used for reconstruction and the keyword `MI04_02` will be used to name the output (e.g. in this case the output folder will be named `recon_dir_MI04_02`). Replace the examle path with the path to the dataset you want to reconstruct. Additionally, if you want the name of the output folder to have a different suffix, replace the keyword `MI04_02` with a name you prefer.\r +\r +## Run the workflow\r +\r +If the .tif files contain a numerical prefix that is not separated from the actual image index, it is best to first rename the files. The files will be renamed to `00001.tif`, `00002.tif` and so on. If the renaming is needed, run:\r +\r +```bash\r +snakemake --cores 1 'logs/renamefile_MI04_02.log'\r +```\r +If you replaced the keyword `MI04_02` in the config file then adjust the command accordingly (e.g. if you replaced the keyword with `Tomo_dataset` then the command should be `snakemake --cores 1 'logs/renamefile_Tomo_dataset.log'`).\r +\r +Before trying to compute the reconstructions, make sure you have enough memory available (ideally more than 60 GB).\r +To compute the reconstructions using one core, use the command:\r +```bash\r +snakemake --cores 1\r +```\r +If you want to use all available cores instead, use:\r +```bash\r +snakemake --cores all\r +```\r +This creates a folder in `results` with the reconstructed data.\r +\r +## Credit\r +The example dataset used in this project (MI04_02 evolving magma, Mattia Pistone, University of Georgia) was taken from: https://doi.psi.ch/detail/10.16907/05a50450-767f-421d-9832-342b57c201af\r +\r +The script used for reconstruction (`scripts/reconstructs_tomo_datasets.py`) was provided by Alain Studer, PSI.""" ; + schema1:keywords "Reconstruction, Tomography" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Reconstructing raw tomography data" ; + schema1:producer ; + 
schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/432?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-07-15T07:55:09.233674" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "atacseq/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . + + a schema1:Dataset ; + schema1:datePublished "2023-11-15T08:48:07.467425" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Scaffolding-Bionano-VGP7" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Scaffolding-Bionano-VGP7/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.17" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# gene2phylo\r +\r +**gene2phylo** is a snakemake pipeline for batch phylogenetic analysis of a given set of input genes. \r +\r +## Contents\r + - [Setup](#setup)\r + - [Example data](#example-data)\r + - [Input](#input)\r + - [Output](#output)\r + - [Running your own data](#running-your-own-data)\r + - [Getting help](#getting-help)\r + - [Citations](#citations)\r +\r +## Setup\r +\r +The pipeline is written in Snakemake and uses conda to install the necessary tools.\r +\r +It is *strongly recommended* to install conda using Mambaforge. 
See details here https://snakemake.readthedocs.io/en/stable/getting_started/installation.html\r +\r +Once conda is installed, you can pull the github repo and set up the base conda environment.\r +\r +```\r +# get github repo\r +git clone https://github.com/o-william-white/gene2phylo\r +\r +# change dir\r +cd gene2phylo\r +\r +# setup conda env\r +conda env create -n snakemake -f workflow/envs/conda_env.yaml\r +```\r +\r +
\r +\r +
\r +\r +## Example data\r +\r +Before you run your own data, it is recommended to run the example datasets provided . This will confirm there are no user-specific issues with the setup and it also installs all the dependencies. The example data includes mitochondrial and ribosomal genes from 25 different butterfly species. \r +\r +To run the example data, use the code below. The first time you run the pipeline, it will take some time to install each of the conda environments, so it is a good time to take a tea break :).\r +```\r +conda activate snakemake\r +\r +snakemake \\\r + --cores 4 \\\r + --use-conda\r +```\r +\r +
\r +\r +
\r +\r +## Input\r +\r +Snakemake requires a `config.yaml` to define input parameters. \r +\r +For the example data provided, the config file is located here `config/config.yaml` and it looks like this:\r +```\r +# name of input directory containg genes\r +input_dir: .test\r +\r +# realign (True or False)\r +realign: True\r +\r +# alignment missing data threshold for alignment (0.0 - 1.0), only required if realign == True\r +missing_threshold: 0.5\r +\r +# alignment trimming method to use (gblocks or clipkit), only required if realign == True\r +alignment_trim: gblocks\r +\r +# name of outgroup sample (optional)\r +# use "NA" if there is no obvious outgroup\r +# if more than one outgroup use a comma separated list i.e. "sampleA,sampleB"\r +outgroup: Eurema_blanda\r +\r +# plot dimensions (cm)\r +plot_height: 20\r +plot_width: 20\r +```\r +\r +
\r +\r +
\r +\r +## Output\r +\r +All output files are saved to the `results` direcotry. Below is a table summarising all of the output files generated by the pipeline.\r +\r +| Directory | Description |\r +|---------------------------|---------------------------|\r +| mafft | Optional: Mafft aligned fasta files of all genes |\r +| mafft_filtered | Optional: Mafft aligned fasta files after the removal of sequences based on a missing data threshold |\r +| alignment_trim | Optional: Ambiguous parts of alignment removed using either gblocks or clipkit |\r +| iqtree | Iqtree phylogenetic analysis for each gene |\r +| iqtree_plots | Plots of Iqtree phylogenetic tree for each gene |\r +| concatenate_alignments | Partitioned alignment of all genes |\r +| iqtree_partitioned | Iqtree partitioned phylogenetic analysis |\r +| iqtree_partitioned_plot | Plot of Iqtree partitioned tree |\r +| astral | Astral phylogenetic analysis of all gene trees |\r +| astral_plot | Plot of Astral tree |\r +\r +
\r +\r +
\r +\r +## Running your own data\r +\r +For the pipeline to function properly, the input gene alignments must be: \r +- in a single directory \r +- end with ".fasta"\r +- named after the aligned gene (e.g. "cox1.fasta" or "28S.fasta")\r +- share identical sample names across alignments (e.g. all genes from sample A share the same name)\r +\r +Please see the example data in the `.test/` directory as an example. \r +\r +Then you need to generate your own config.yaml file, using the example template provided.\r +\r +## Getting help\r +\r +If you have any questions, please do get in touch in the issues or by email o.william.white@gmail.com\r +\r +
\r +\r +
\r +\r +## Citations\r +\r +If you use the pipeline, please cite our bioarxiv preprint: https://doi.org/10.1101/2023.08.11.552985\r +\r +Since the pipeline is a wrapper for several other bioinformatic tools we also ask that you cite the tools used by the pipeline:\r + - Gblocks (default) https://doi.org/10.1093/oxfordjournals.molbev.a026334\r + - Clipkit (optional) https://doi.org/10.1371/journal.pbio.3001007\r + - Mafft (optional) https://doi.org/10.1093/molbev/mst010\r + - Iqtree https://doi.org/10.1093/molbev/msu300\r + - Ete3 https://doi.org/10.1093/molbev/msw046\r + - Ggtree https://doi.org/10.1111/2041-210X.12628\r + - Astral https://doi.org/10.1186/s12859-018-2129-y\r +\r +
\r +\r +
\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/793?version=1" ; + schema1:isBasedOn "https://github.com/o-william-white/gene2phylo.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for gene2phylo" ; + schema1:sdDatePublished "2024-08-05 10:25:09 +0100" ; + schema1:url "https://workflowhub.eu/workflows/793/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 929 ; + schema1:dateCreated "2024-03-13T13:36:20Z" ; + schema1:dateModified "2024-03-21T13:56:02Z" ; + schema1:description """# gene2phylo\r +\r +**gene2phylo** is a snakemake pipeline for batch phylogenetic analysis of a given set of input genes. \r +\r +## Contents\r + - [Setup](#setup)\r + - [Example data](#example-data)\r + - [Input](#input)\r + - [Output](#output)\r + - [Running your own data](#running-your-own-data)\r + - [Getting help](#getting-help)\r + - [Citations](#citations)\r +\r +## Setup\r +\r +The pipeline is written in Snakemake and uses conda to install the necessary tools.\r +\r +It is *strongly recommended* to install conda using Mambaforge. See details here https://snakemake.readthedocs.io/en/stable/getting_started/installation.html\r +\r +Once conda is installed, you can pull the github repo and set up the base conda environment.\r +\r +```\r +# get github repo\r +git clone https://github.com/o-william-white/gene2phylo\r +\r +# change dir\r +cd gene2phylo\r +\r +# setup conda env\r +conda env create -n snakemake -f workflow/envs/conda_env.yaml\r +```\r +\r +
\r +\r +
\r +\r +## Example data\r +\r +Before you run your own data, it is recommended to run the example datasets provided . This will confirm there are no user-specific issues with the setup and it also installs all the dependencies. The example data includes mitochondrial and ribosomal genes from 25 different butterfly species. \r +\r +To run the example data, use the code below. The first time you run the pipeline, it will take some time to install each of the conda environments, so it is a good time to take a tea break :).\r +```\r +conda activate snakemake\r +\r +snakemake \\\r + --cores 4 \\\r + --use-conda\r +```\r +\r +
\r +\r +
\r +\r +## Input\r +\r +Snakemake requires a `config.yaml` to define input parameters. \r +\r +For the example data provided, the config file is located here `config/config.yaml` and it looks like this:\r +```\r +# name of input directory containg genes\r +input_dir: .test\r +\r +# realign (True or False)\r +realign: True\r +\r +# alignment missing data threshold for alignment (0.0 - 1.0), only required if realign == True\r +missing_threshold: 0.5\r +\r +# alignment trimming method to use (gblocks or clipkit), only required if realign == True\r +alignment_trim: gblocks\r +\r +# name of outgroup sample (optional)\r +# use "NA" if there is no obvious outgroup\r +# if more than one outgroup use a comma separated list i.e. "sampleA,sampleB"\r +outgroup: Eurema_blanda\r +\r +# plot dimensions (cm)\r +plot_height: 20\r +plot_width: 20\r +```\r +\r +
\r +\r +
\r +\r +## Output\r +\r +All output files are saved to the `results` direcotry. Below is a table summarising all of the output files generated by the pipeline.\r +\r +| Directory | Description |\r +|---------------------------|---------------------------|\r +| mafft | Optional: Mafft aligned fasta files of all genes |\r +| mafft_filtered | Optional: Mafft aligned fasta files after the removal of sequences based on a missing data threshold |\r +| alignment_trim | Optional: Ambiguous parts of alignment removed using either gblocks or clipkit |\r +| iqtree | Iqtree phylogenetic analysis for each gene |\r +| iqtree_plots | Plots of Iqtree phylogenetic tree for each gene |\r +| concatenate_alignments | Partitioned alignment of all genes |\r +| iqtree_partitioned | Iqtree partitioned phylogenetic analysis |\r +| iqtree_partitioned_plot | Plot of Iqtree partitioned tree |\r +| astral | Astral phylogenetic analysis of all gene trees |\r +| astral_plot | Plot of Astral tree |\r +\r +
\r +\r +
\r +\r +## Running your own data\r +\r +For the pipeline to function properly, the input gene alignments must be: \r +- in a single directory \r +- end with ".fasta"\r +- named after the aligned gene (e.g. "cox1.fasta" or "28S.fasta")\r +- share identical sample names across alignments (e.g. all genes from sample A share the same name)\r +\r +Please see the example data in the `.test/` directory as an example. \r +\r +Then you need to generate your own config.yaml file, using the example template provided.\r +\r +## Getting help\r +\r +If you have any questions, please do get in touch in the issues or by email o.william.white@gmail.com\r +\r +
\r +\r +
\r +\r +## Citations\r +\r +If you use the pipeline, please cite our bioarxiv preprint: https://doi.org/10.1101/2023.08.11.552985\r +\r +Since the pipeline is a wrapper for several other bioinformatic tools we also ask that you cite the tools used by the pipeline:\r + - Gblocks (default) https://doi.org/10.1093/oxfordjournals.molbev.a026334\r + - Clipkit (optional) https://doi.org/10.1371/journal.pbio.3001007\r + - Mafft (optional) https://doi.org/10.1093/molbev/mst010\r + - Iqtree https://doi.org/10.1093/molbev/msu300\r + - Ete3 https://doi.org/10.1093/molbev/msw046\r + - Ggtree https://doi.org/10.1111/2041-210X.12628\r + - Astral https://doi.org/10.1186/s12859-018-2129-y\r +\r +
\r +\r +
\r +\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "gene2phylo" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/793?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 10324 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + schema1:datePublished "2024-06-28T13:49:10.539699" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "rnaseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). 
Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. 
Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. \r +- At the **biological level**, it is important to link observed **conformational ensembles**, to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDKe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.487.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_flexdyn/python" ; + schema1:license "other-open" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein conformational ensembles generation" ; + schema1:sdDatePublished "2024-08-05 10:30:17 +0100" ; + schema1:url "https://workflowhub.eu/workflows/487/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10859 ; + schema1:creator , + ; + schema1:dateCreated "2023-05-31T13:27:53Z" ; + schema1:dateModified "2023-06-01T09:53:40Z" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. 
Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. 
\r +- At the **biological level**, it is important to link observed **conformational ensembles**, to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDKe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "" ; + schema1:name "Python Protein conformational ensembles generation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_flexdyn/python/workflow.py" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=12" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-08-05 10:22:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=12" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9397 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:39Z" ; + schema1:dateModified "2024-06-11T12:54:39Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=12" ; + schema1:version 12 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """[![ci](https://github.com/zavolanlab/zarp/workflows/CI/badge.svg?branch=dev)](https://github.com/zavolanlab/zarp/actions?query=workflow%3Aci)\r +[![GitHub license](https://img.shields.io/github/license/zavolanlab/zarp?color=orange)](https://github.com/zavolanlab/zarp/blob/dev/LICENSE)\r +[![DOI:10.1101/2021.11.18.469017](http://img.shields.io/badge/DOI-10.1101/2021.11.18.469017-B31B1B.svg)](https://doi.org/10.1101/2021.11.18.469017)\r +\r +\r +
\r + \r +
\r +\r +\r +# **ZARP** ([Zavolan-Lab](https://www.biozentrum.unibas.ch/research/researchgroups/overview/unit/zavolan/research-group-mihaela-zavolan/) Automated RNA-Seq Pipeline) \r +...is a generic RNA-Seq analysis workflow that allows \r +users to process and analyze Illumina short-read sequencing libraries with minimum effort. The workflow relies on \r +publicly available bioinformatics tools and currently handles single or paired-end stranded bulk RNA-seq data.\r +The workflow is developed in [Snakemake](https://snakemake.readthedocs.io/en/stable/), a widely used workflow management system in the bioinformatics\r +community.\r +\r +According to the current ZARP implementation, reads are analyzed (pre-processed, aligned, quantified) with state-of-the-art\r +tools to give meaningful initial insights into the quality and composition of an RNA-Seq library, reducing hands-on time for bioinformaticians and giving experimentalists the possibility to rapidly assess their data. Additional reports summarise the results of the individual steps and provide useful visualisations.\r +\r +\r +> **Note:** For a more detailed description of each step, please refer to the [workflow\r +> documentation](https://github.com/zavolanlab/zarp/blob/main/pipeline_documentation.md).\r +\r +\r +## Requirements\r +\r +The workflow has been tested on:\r +- CentOS 7.5\r +- Debian 10\r +- Ubuntu 16.04, 18.04\r +\r +> **NOTE:**\r +> Currently, we only support **Linux** execution. \r +\r +\r +# Installation\r +\r +## 1. Clone the repository\r +\r +Go to the desired directory/folder on your file system, then clone/get the \r +repository and move into the respective directory with:\r +\r +```bash\r +git clone https://github.com/zavolanlab/zarp.git\r +cd zarp\r +```\r +\r +## 2. Conda and Mamba installation\r +\r +Workflow dependencies can be conveniently installed with the [Conda](http://docs.conda.io/projects/conda/en/latest/index.html)\r +package manager. 
We recommend that you install [Miniconda](https://docs.conda.io/en/latest/miniconda.html) \r +for your system (Linux). Be sure to select Python 3 option. \r +The workflow was built and tested with `miniconda 4.7.12`.\r +Other versions are not guaranteed to work as expected.\r +\r +Given that Miniconda has been installed and is available in the current shell the first\r +dependency for ZARP is the [Mamba](https://github.com/mamba-org/mamba) package manager, which needs to be installed in\r +the `base` conda environment with:\r +\r +```bash\r +conda install mamba -n base -c conda-forge\r +```\r +\r +## 3. Dependencies installation\r +\r +For improved reproducibility and reusability of the workflow,\r +each individual step of the workflow runs either in its own [Singularity](https://sylabs.io/singularity/)\r +container or in its own [Conda](http://docs.conda.io/projects/conda/en/latest/index.html) virtual environemnt. \r +As a consequence, running this workflow has very few individual dependencies. \r +The **container execution** requires Singularity to be installed on the system where the workflow is executed. \r +As the functional installation of Singularity requires root privileges, and Conda currently only provides Singularity\r +for Linux architectures, the installation instructions are slightly different depending on your system/setup:\r +\r +### For most users\r +\r +If you do *not* have root privileges on the machine you want\r +to run the workflow on *or* if you do not have a Linux machine, please [install\r +Singularity](https://sylabs.io/guides/3.5/admin-guide/installation.html) separately and in privileged mode, depending\r +on your system. You may have to ask an authorized person (e.g., a systems\r +administrator) to do that. This will almost certainly be required if you want\r +to run the workflow on a high-performance computing (HPC) cluster. 
\r +\r +> **NOTE:**\r +> The workflow has been tested with the following Singularity versions: \r +> * `v2.6.2`\r +> * `v3.5.2`\r +\r +After installing Singularity, install the remaining dependencies with:\r +```bash\r +mamba env create -f install/environment.yml\r +```\r +\r +\r +### As root user on Linux\r +\r +If you have a Linux machine, as well as root privileges, (e.g., if you plan to\r +run the workflow on your own computer), you can execute the following command\r +to include Singularity in the Conda environment:\r +\r +```bash\r +mamba env update -f install/environment.root.yml\r +```\r +\r +## 4. Activate environment\r +\r +Activate the Conda environment with:\r +\r +```bash\r +conda activate zarp\r +```\r +\r +# Extra installation steps (optional)\r +\r +## 5. Non-essential dependencies installation\r +\r +Most tests have additional dependencies. If you are planning to run tests, you\r +will need to install these by executing the following command _in your active\r +Conda environment_:\r +\r +```bash\r +mamba env update -f install/environment.dev.yml\r +```\r +\r +## 6. Successful installation tests\r +\r +We have prepared several tests to check the integrity of the workflow and its\r +components. These can be found in subdirectories of the `tests/` directory. \r +The most critical of these tests enable you to execute the entire workflow on a \r +set of small example input files. Note that for this and other tests to complete\r +successfully, [additional dependencies](#installing-non-essential-dependencies) \r +need to be installed. 
\r +Execute one of the following commands to run the test workflow \r +on your local machine:\r +* Test workflow on local machine with **Singularity**:\r +```bash\r +bash tests/test_integration_workflow/test.local.sh\r +```\r +* Test workflow on local machine with **Conda**:\r +```bash\r +bash tests/test_integration_workflow_with_conda/test.local.sh\r +```\r +Execute one of the following commands to run the test workflow \r +on a [Slurm](https://slurm.schedmd.com/documentation.html)-managed high-performance computing (HPC) cluster:\r +\r +* Test workflow with **Singularity**:\r +\r +```bash\r +bash tests/test_integration_workflow/test.slurm.sh\r +```\r +* Test workflow with **Conda**:\r +\r +```bash\r +bash tests/test_integration_workflow_with_conda/test.slurm.sh\r +```\r +\r +> **NOTE:** Depending on the configuration of your Slurm installation you may\r +> need to adapt file `slurm-config.json` (located directly under `profiles`\r +> directory) and the arguments to options `--cores` and `--jobs`\r +> in the file `config.yaml` of a respective profile.\r +> Consult the manual of your workload manager as well as the section of the\r +> Snakemake manual dealing with [profiles].\r +\r +# Running the workflow on your own samples\r +\r +1. Assuming that your current directory is the repository's root directory,\r +create a directory for your workflow run and move into it with:\r +\r + ```bash\r + mkdir config/my_run\r + cd config/my_run\r + ```\r +\r +2. Create an empty sample table and a workflow configuration file:\r +\r + ```bash\r + touch samples.tsv\r + touch config.yaml\r + ```\r +\r +3. Use your editor of choice to populate these files with appropriate\r +values. 
Have a look at the examples in the `tests/` directory to see what the\r +files should look like, specifically:\r +\r + - [samples.tsv](https://github.com/zavolanlab/zarp/blob/main/tests/input_files/samples.tsv)\r + - [config.yaml](https://github.com/zavolanlab/zarp/blob/main/tests/input_files/config.yaml)\r +\r + - For more details and explanations, refer to the [pipeline-documentation](https://github.com/zavolanlab/zarp/blob/main/pipeline_documentation.md)\r +\r +\r +4. Create a runner script. Pick one of the following choices for either local\r +or cluster execution. Before execution of the respective command, you need to\r +remember to update the argument of the `--singularity-args` option of a\r +respective profile (file: `profiles/{profile}/config.yaml`) so that\r +it contains a comma-separated list of _all_ directories\r +containing input data files (samples and any annotation files etc) required for\r +your run.\r +\r + Runner script for _local execution_:\r +\r + ```bash\r + cat << "EOF" > run.sh\r + #!/bin/bash\r +\r + snakemake \\\r + --profile="../../profiles/local-singularity" \\\r + --configfile="config.yaml"\r +\r + EOF\r + ```\r +\r + **OR**\r +\r + Runner script for _Slurm cluster exection_ (note that you may need\r + to modify the arguments to `--jobs` and `--cores` in the file:\r + `profiles/slurm-singularity/config.yaml` depending on your HPC\r + and workload manager configuration):\r +\r + ```bash\r + cat << "EOF" > run.sh\r + #!/bin/bash\r + mkdir -p logs/cluster_log\r + snakemake \\\r + --profile="../profiles/slurm-singularity" \\\r + --configfile="config.yaml"\r + EOF\r + ```\r +\r + When running the pipeline with *conda* you should use `local-conda` and\r + `slurm-conda` profiles instead.\r +\r +5. 
Start your workflow run:\r +\r + ```bash\r + bash run.sh\r + ```\r +\r +# Sample downloads from SRA\r +\r +An independent Snakemake workflow `workflow/rules/sra_download.smk` is included\r +for the download of SRA samples with [sra-tools].\r +\r +> Note: as of Snakemake 7.3.1, only profile conda is supported. \r +> Singularity fails because the *sra-tools* Docker container only has `sh` \r +but `bash` is required.\r +\r +> Note: The workflow uses the implicit temporary directory \r +from snakemake, which is called with [resources.tmpdir].\r +\r +The workflow expects the following config:\r +* `samples`, a sample table (tsv) with column *sample* containing *SRR* identifiers,\r +see example [here](https://github.com/zavolanlab/zarp/blob/main/tests/input_files/sra_samples.tsv).\r +* `outdir`, an output directory\r +* `samples_out`, a pointer to a modified sample table with location of fastq files\r +* `cluster_log_dir`, the cluster log directory.\r +\r +For executing the example one can use the following\r +(with activated *zarp* environment):\r +\r +```bash\r +snakemake --snakefile="workflow/rules/sra_download.smk" \\\r + --profile="profiles/local-conda" \\\r + --config samples="tests/input_files/sra_samples.tsv" \\\r + outdir="results/sra_downloads" \\\r + samples_out="results/sra_downloads/sra_samples.out.tsv" \\\r + log_dir="logs" \\\r + cluster_log_dir="logs/cluster_log"\r +```\r +After successful execution, `results/sra_downloads/sra_samples.out.tsv` should contain:\r +```tsv\r +sample fq1 fq2\r +SRR18552868 results/sra_downloads/SRR18552868/SRR18552868.fastq.gz \r +SRR18549672 results/sra_downloads/SRR18549672/SRR18549672_1.fastq.gz results/sra_downloads/SRR18549672/SRR18549672_2.fastq.gz\r +```\r +\r +\r +# Metadata completion with HTSinfer\r +An independent Snakemake workflow `workflow/rules/htsinfer.smk` that populates the `samples.tsv` required by ZARP with the sample specific parameters `seqmode`, `f1_3p`, `f2_3p`, `organism`, `libtype` and `index_size`. 
Those parameters are inferred from the provided `fastq.gz` files by [HTSinfer](https://github.com/zavolanlab/htsinfer).\r +\r +> Note: The workflow uses the implicit temporary directory \r +from snakemake, which is called with [resources.tmpdir].\r +\r +\r +The workflow expects the following config:\r +* `samples`, a sample table (tsv) with column *sample* containing sample identifiers, as well as columns *fq1* and *fq2* containing the paths to the input fastq files\r +see example [here](https://github.com/zavolanlab/zarp/blob/main/tests/input_files/sra_samples.tsv). If the table contains further ZARP compatible columns (see [pipeline documentation](https://github.com/zavolanlab/zarp/blob/main/pipeline_documentation.md#read-sample-table)), the values specified there by the user are given priority over htsinfer's results. \r +* `outdir`, an output directory\r +* `samples_out`, path to a modified sample table with inferred parameters\r +* `records`, set to 100000 per default\r + \r +For executing the example one can use the following\r +(with activated *zarp* environment):\r +```bash\r +cd tests/test_htsinfer_workflow\r +snakemake \\\r + --snakefile="../../workflow/rules/htsinfer.smk" \\\r + --restart-times=0 \\\r + --profile="../../profiles/local-singularity" \\\r + --config outdir="results" \\\r + samples="../input_files/htsinfer_samples.tsv" \\\r + samples_out="samples_htsinfer.tsv" \\\r + --notemp \\\r + --keep-incomplete\r +```\r +\r +However, this call will exit with an error, as not all parameters can be inferred from the example files. The argument `--keep-incomplete` makes sure the `samples_htsinfer.tsv` file can nevertheless be inspected. 
\r +\r +After successful execution - if all parameters could be either inferred or were specified by the user - `[OUTDIR]/[SAMPLES_OUT]` should contain a populated table with parameters `seqmode`, `f1_3p`, `f2_3p`, `organism`, `libtype` and `index_size` for all input samples as described in the [pipeline documentation](https://github.com/zavolanlab/zarp/blob/main/pipeline_documentation.md#read-sample-table).\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.447.1" ; + schema1:isBasedOn "https://github.com/zavolanlab/zarp" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ZARP: An automated workflow for processing of RNA-seq data" ; + schema1:sdDatePublished "2024-08-05 10:30:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/447/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 312593 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 59182 ; + schema1:creator ; + schema1:dateCreated "2023-03-21T13:07:16Z" ; + schema1:dateModified "2023-05-12T15:33:47Z" ; + schema1:description """[![ci](https://github.com/zavolanlab/zarp/workflows/CI/badge.svg?branch=dev)](https://github.com/zavolanlab/zarp/actions?query=workflow%3Aci)\r +[![GitHub license](https://img.shields.io/github/license/zavolanlab/zarp?color=orange)](https://github.com/zavolanlab/zarp/blob/dev/LICENSE)\r +[![DOI:10.1101/2021.11.18.469017](http://img.shields.io/badge/DOI-10.1101/2021.11.18.469017-B31B1B.svg)](https://doi.org/10.1101/2021.11.18.469017)\r +\r +\r +
\r + \r +
\r +\r +\r +# **ZARP** ([Zavolan-Lab](https://www.biozentrum.unibas.ch/research/researchgroups/overview/unit/zavolan/research-group-mihaela-zavolan/) Automated RNA-Seq Pipeline) \r +...is a generic RNA-Seq analysis workflow that allows \r +users to process and analyze Illumina short-read sequencing libraries with minimum effort. The workflow relies on \r +publicly available bioinformatics tools and currently handles single or paired-end stranded bulk RNA-seq data.\r +The workflow is developed in [Snakemake](https://snakemake.readthedocs.io/en/stable/), a widely used workflow management system in the bioinformatics\r +community.\r +\r +According to the current ZARP implementation, reads are analyzed (pre-processed, aligned, quantified) with state-of-the-art\r +tools to give meaningful initial insights into the quality and composition of an RNA-Seq library, reducing hands-on time for bioinformaticians and giving experimentalists the possibility to rapidly assess their data. Additional reports summarise the results of the individual steps and provide useful visualisations.\r +\r +\r +> **Note:** For a more detailed description of each step, please refer to the [workflow\r +> documentation](https://github.com/zavolanlab/zarp/blob/main/pipeline_documentation.md).\r +\r +\r +## Requirements\r +\r +The workflow has been tested on:\r +- CentOS 7.5\r +- Debian 10\r +- Ubuntu 16.04, 18.04\r +\r +> **NOTE:**\r +> Currently, we only support **Linux** execution. \r +\r +\r +# Installation\r +\r +## 1. Clone the repository\r +\r +Go to the desired directory/folder on your file system, then clone/get the \r +repository and move into the respective directory with:\r +\r +```bash\r +git clone https://github.com/zavolanlab/zarp.git\r +cd zarp\r +```\r +\r +## 2. Conda and Mamba installation\r +\r +Workflow dependencies can be conveniently installed with the [Conda](http://docs.conda.io/projects/conda/en/latest/index.html)\r +package manager. 
We recommend that you install [Miniconda](https://docs.conda.io/en/latest/miniconda.html) \r +for your system (Linux). Be sure to select Python 3 option. \r +The workflow was built and tested with `miniconda 4.7.12`.\r +Other versions are not guaranteed to work as expected.\r +\r +Given that Miniconda has been installed and is available in the current shell the first\r +dependency for ZARP is the [Mamba](https://github.com/mamba-org/mamba) package manager, which needs to be installed in\r +the `base` conda environment with:\r +\r +```bash\r +conda install mamba -n base -c conda-forge\r +```\r +\r +## 3. Dependencies installation\r +\r +For improved reproducibility and reusability of the workflow,\r +each individual step of the workflow runs either in its own [Singularity](https://sylabs.io/singularity/)\r +container or in its own [Conda](http://docs.conda.io/projects/conda/en/latest/index.html) virtual environemnt. \r +As a consequence, running this workflow has very few individual dependencies. \r +The **container execution** requires Singularity to be installed on the system where the workflow is executed. \r +As the functional installation of Singularity requires root privileges, and Conda currently only provides Singularity\r +for Linux architectures, the installation instructions are slightly different depending on your system/setup:\r +\r +### For most users\r +\r +If you do *not* have root privileges on the machine you want\r +to run the workflow on *or* if you do not have a Linux machine, please [install\r +Singularity](https://sylabs.io/guides/3.5/admin-guide/installation.html) separately and in privileged mode, depending\r +on your system. You may have to ask an authorized person (e.g., a systems\r +administrator) to do that. This will almost certainly be required if you want\r +to run the workflow on a high-performance computing (HPC) cluster. 
\r +\r +> **NOTE:**\r +> The workflow has been tested with the following Singularity versions: \r +> * `v2.6.2`\r +> * `v3.5.2`\r +\r +After installing Singularity, install the remaining dependencies with:\r +```bash\r +mamba env create -f install/environment.yml\r +```\r +\r +\r +### As root user on Linux\r +\r +If you have a Linux machine, as well as root privileges, (e.g., if you plan to\r +run the workflow on your own computer), you can execute the following command\r +to include Singularity in the Conda environment:\r +\r +```bash\r +mamba env update -f install/environment.root.yml\r +```\r +\r +## 4. Activate environment\r +\r +Activate the Conda environment with:\r +\r +```bash\r +conda activate zarp\r +```\r +\r +# Extra installation steps (optional)\r +\r +## 5. Non-essential dependencies installation\r +\r +Most tests have additional dependencies. If you are planning to run tests, you\r +will need to install these by executing the following command _in your active\r +Conda environment_:\r +\r +```bash\r +mamba env update -f install/environment.dev.yml\r +```\r +\r +## 6. Successful installation tests\r +\r +We have prepared several tests to check the integrity of the workflow and its\r +components. These can be found in subdirectories of the `tests/` directory. \r +The most critical of these tests enable you to execute the entire workflow on a \r +set of small example input files. Note that for this and other tests to complete\r +successfully, [additional dependencies](#installing-non-essential-dependencies) \r +need to be installed. 
\r +Execute one of the following commands to run the test workflow \r +on your local machine:\r +* Test workflow on local machine with **Singularity**:\r +```bash\r +bash tests/test_integration_workflow/test.local.sh\r +```\r +* Test workflow on local machine with **Conda**:\r +```bash\r +bash tests/test_integration_workflow_with_conda/test.local.sh\r +```\r +Execute one of the following commands to run the test workflow \r +on a [Slurm](https://slurm.schedmd.com/documentation.html)-managed high-performance computing (HPC) cluster:\r +\r +* Test workflow with **Singularity**:\r +\r +```bash\r +bash tests/test_integration_workflow/test.slurm.sh\r +```\r +* Test workflow with **Conda**:\r +\r +```bash\r +bash tests/test_integration_workflow_with_conda/test.slurm.sh\r +```\r +\r +> **NOTE:** Depending on the configuration of your Slurm installation you may\r +> need to adapt file `slurm-config.json` (located directly under `profiles`\r +> directory) and the arguments to options `--cores` and `--jobs`\r +> in the file `config.yaml` of a respective profile.\r +> Consult the manual of your workload manager as well as the section of the\r +> Snakemake manual dealing with [profiles].\r +\r +# Running the workflow on your own samples\r +\r +1. Assuming that your current directory is the repository's root directory,\r +create a directory for your workflow run and move into it with:\r +\r + ```bash\r + mkdir config/my_run\r + cd config/my_run\r + ```\r +\r +2. Create an empty sample table and a workflow configuration file:\r +\r + ```bash\r + touch samples.tsv\r + touch config.yaml\r + ```\r +\r +3. Use your editor of choice to populate these files with appropriate\r +values. 
Have a look at the examples in the `tests/` directory to see what the\r +files should look like, specifically:\r +\r + - [samples.tsv](https://github.com/zavolanlab/zarp/blob/main/tests/input_files/samples.tsv)\r + - [config.yaml](https://github.com/zavolanlab/zarp/blob/main/tests/input_files/config.yaml)\r +\r + - For more details and explanations, refer to the [pipeline-documentation](https://github.com/zavolanlab/zarp/blob/main/pipeline_documentation.md)\r +\r +\r +4. Create a runner script. Pick one of the following choices for either local\r +or cluster execution. Before execution of the respective command, you need to\r +remember to update the argument of the `--singularity-args` option of a\r +respective profile (file: `profiles/{profile}/config.yaml`) so that\r +it contains a comma-separated list of _all_ directories\r +containing input data files (samples and any annotation files etc) required for\r +your run.\r +\r + Runner script for _local execution_:\r +\r + ```bash\r + cat << "EOF" > run.sh\r + #!/bin/bash\r +\r + snakemake \\\r + --profile="../../profiles/local-singularity" \\\r + --configfile="config.yaml"\r +\r + EOF\r + ```\r +\r + **OR**\r +\r + Runner script for _Slurm cluster execution_ (note that you may need\r + to modify the arguments to `--jobs` and `--cores` in the file:\r + `profiles/slurm-singularity/config.yaml` depending on your HPC\r + and workload manager configuration):\r +\r + ```bash\r + cat << "EOF" > run.sh\r + #!/bin/bash\r + mkdir -p logs/cluster_log\r + snakemake \\\r + --profile="../profiles/slurm-singularity" \\\r + --configfile="config.yaml"\r + EOF\r + ```\r +\r + When running the pipeline with *conda* you should use `local-conda` and\r + `slurm-conda` profiles instead.\r +\r +5. 
Start your workflow run:\r +\r + ```bash\r + bash run.sh\r + ```\r +\r +# Sample downloads from SRA\r +\r +An independent Snakemake workflow `workflow/rules/sra_download.smk` is included\r +for the download of SRA samples with [sra-tools].\r +\r +> Note: as of Snakemake 7.3.1, only profile conda is supported. \r +> Singularity fails because the *sra-tools* Docker container only has `sh` \r +but `bash` is required.\r +\r +> Note: The workflow uses the implicit temporary directory \r +from snakemake, which is called with [resources.tmpdir].\r +\r +The workflow expects the following config:\r +* `samples`, a sample table (tsv) with column *sample* containing *SRR* identifiers,\r +see example [here](https://github.com/zavolanlab/zarp/blob/main/tests/input_files/sra_samples.tsv).\r +* `outdir`, an output directory\r +* `samples_out`, a pointer to a modified sample table with location of fastq files\r +* `cluster_log_dir`, the cluster log directory.\r +\r +For executing the example one can use the following\r +(with activated *zarp* environment):\r +\r +```bash\r +snakemake --snakefile="workflow/rules/sra_download.smk" \\\r + --profile="profiles/local-conda" \\\r + --config samples="tests/input_files/sra_samples.tsv" \\\r + outdir="results/sra_downloads" \\\r + samples_out="results/sra_downloads/sra_samples.out.tsv" \\\r + log_dir="logs" \\\r + cluster_log_dir="logs/cluster_log"\r +```\r +After successful execution, `results/sra_downloads/sra_samples.out.tsv` should contain:\r +```tsv\r +sample fq1 fq2\r +SRR18552868 results/sra_downloads/SRR18552868/SRR18552868.fastq.gz \r +SRR18549672 results/sra_downloads/SRR18549672/SRR18549672_1.fastq.gz results/sra_downloads/SRR18549672/SRR18549672_2.fastq.gz\r +```\r +\r +\r +# Metadata completion with HTSinfer\r +An independent Snakemake workflow `workflow/rules/htsinfer.smk` that populates the `samples.tsv` required by ZARP with the sample specific parameters `seqmode`, `f1_3p`, `f2_3p`, `organism`, `libtype` and `index_size`. 
Those parameters are inferred from the provided `fastq.gz` files by [HTSinfer](https://github.com/zavolanlab/htsinfer).\r +\r +> Note: The workflow uses the implicit temporary directory \r +from snakemake, which is called with [resources.tmpdir].\r +\r +\r +The workflow expects the following config:\r +* `samples`, a sample table (tsv) with column *sample* containing sample identifiers, as well as columns *fq1* and *fq2* containing the paths to the input fastq files\r +see example [here](https://github.com/zavolanlab/zarp/blob/main/tests/input_files/sra_samples.tsv). If the table contains further ZARP compatible columns (see [pipeline documentation](https://github.com/zavolanlab/zarp/blob/main/pipeline_documentation.md#read-sample-table)), the values specified there by the user are given priority over htsinfer's results. \r +* `outdir`, an output directory\r +* `samples_out`, path to a modified sample table with inferred parameters\r +* `records`, set to 100000 per default\r + \r +For executing the example one can use the following\r +(with activated *zarp* environment):\r +```bash\r +cd tests/test_htsinfer_workflow\r +snakemake \\\r + --snakefile="../../workflow/rules/htsinfer.smk" \\\r + --restart-times=0 \\\r + --profile="../../profiles/local-singularity" \\\r + --config outdir="results" \\\r + samples="../input_files/htsinfer_samples.tsv" \\\r + samples_out="samples_htsinfer.tsv" \\\r + --notemp \\\r + --keep-incomplete\r +```\r +\r +However, this call will exit with an error, as not all parameters can be inferred from the example files. The argument `--keep-incomplete` makes sure the `samples_htsinfer.tsv` file can nevertheless be inspected. 
\r +\r +After successful execution - if all parameters could be either inferred or were specified by the user - `[OUTDIR]/[SAMPLES_OUT]` should contain a populated table with parameters `seqmode`, `f1_3p`, `f2_3p`, `organism`, `libtype` and `index_size` for all input samples as described in the [pipeline documentation](https://github.com/zavolanlab/zarp/blob/main/pipeline_documentation.md#read-sample-table).\r +\r +""" ; + schema1:image ; + schema1:keywords "Bioinformatics, rna, rna-seq, RNASEQ, NGS, high-throughput" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "ZARP: An automated workflow for processing of RNA-seq data" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/447?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-08-05 10:23:17 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7421 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:14Z" ; + schema1:dateModified "2024-06-11T12:55:14Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/965?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/atacseq" ; + schema1:sdDatePublished "2024-08-05 10:24:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/965/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5926 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:41Z" ; + schema1:dateModified "2024-06-11T12:54:41Z" ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/965?version=8" ; + schema1:keywords "ATAC-seq, chromatin-accessibiity" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/atacseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/965?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + schema1:datePublished "2024-07-15T07:30:13.661043" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "rnaseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Genome-wide alternative splicing analysis v.2" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/482?version=2" ; + schema1:license "CC-BY-NC-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genome-wide alternative splicing analysis v.2" ; + schema1:sdDatePublished "2024-08-05 10:30:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/482/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 214419 . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 30115 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 119623 ; + schema1:creator ; + schema1:dateCreated "2023-06-07T16:10:45Z" ; + schema1:dateModified "2023-06-11T11:47:11Z" ; + schema1:description "Genome-wide alternative splicing analysis v.2" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/482?version=6" ; + schema1:keywords "Transcriptomics, Alternative splicing, isoform switching" ; + schema1:license "https://spdx.org/licenses/CC-BY-NC-4.0" ; + schema1:name "Genome-wide alternative splicing analysis v.2" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/482?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Description: Trinity @ NCI-Gadi contains a staged [Trinity](https://github.com/trinityrnaseq/trinityrnaseq/wiki) workflow that can be run on the National Computational Infrastructure’s (NCI) Gadi supercomputer. Trinity performs de novo transcriptome assembly of RNA-seq data by combining three independent software modules Inchworm, Chrysalis and Butterfly to process RNA-seq reads. The algorithm can detect isoforms, handle paired-end reads, multiple insert sizes and strandedness. 
\r +\r +Infrastructure\\_deployment\\_metadata: Gadi (NCI)""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.145.1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/Gadi-Trinity" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Trinity @ NCI-Gadi" ; + schema1:sdDatePublished "2024-08-05 10:33:08 +0100" ; + schema1:url "https://workflowhub.eu/workflows/145/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11522 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2021-08-17T04:44:57Z" ; + schema1:dateModified "2023-01-16T13:51:44Z" ; + schema1:description """Description: Trinity @ NCI-Gadi contains a staged [Trinity](https://github.com/trinityrnaseq/trinityrnaseq/wiki) workflow that can be run on the National Computational Infrastructure’s (NCI) Gadi supercomputer. Trinity performs de novo transcriptome assembly of RNA-seq data by combining three independent software modules Inchworm, Chrysalis and Butterfly to process RNA-seq reads. The algorithm can detect isoforms, handle paired-end reads, multiple insert sizes and strandedness. \r +\r +Infrastructure\\_deployment\\_metadata: Gadi (NCI)""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "Assembly, Transcriptomics, trinity, NCI, RNASEQ, rna, rna-seq, Gadi, scalable, PBS" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Trinity @ NCI-Gadi" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/145?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 51464 . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-11-07T21:34:48.804317" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.17" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Macromolecular Coarse-Grained Flexibility (FlexServ) tutorial using BioExcel Building Blocks (biobb)\r +\r +This tutorial aims to illustrate the process of generating protein conformational ensembles from 3D structures and analysing its molecular flexibility, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.552.1" ; + schema1:isBasedOn 
"https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_flexserv/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Macromolecular Coarse-Grained Flexibility tutorial" ; + schema1:sdDatePublished "2024-08-05 10:29:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/552/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 125571 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 27401 ; + schema1:creator , + ; + schema1:dateCreated "2023-08-02T10:50:45Z" ; + schema1:dateModified "2023-08-02T11:21:11Z" ; + schema1:description """# Macromolecular Coarse-Grained Flexibility (FlexServ) tutorial using BioExcel Building Blocks (biobb)\r +\r +This tutorial aims to illustrate the process of generating protein conformational ensembles from 3D structures and analysing its molecular flexibility, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license 
"https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Macromolecular Coarse-Grained Flexibility tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_flexserv/cwl/workflow.cwl" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "CAGE-seq pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/969?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/cageseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/cageseq" ; + schema1:sdDatePublished "2024-08-05 10:24:15 +0100" ; + schema1:url "https://workflowhub.eu/workflows/969/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5375 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:44Z" ; + schema1:dateModified "2024-06-11T12:54:44Z" ; + schema1:description "CAGE-seq pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/969?version=2" ; + schema1:keywords "cage, cage-seq, cageseq-data, gene-expression, rna" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/cageseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/969?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# workflow-qc-of-radseq-reads\r +\r +These workflows are part of a set designed to work for RAD-seq data on the Galaxy platform, using the tools from the Stacks program. \r +\r +Galaxy Australia: https://usegalaxy.org.au/\r +\r +Stacks: http://catchenlab.life.illinois.edu/stacks/\r +\r +## Inputs\r +* demultiplexed reads in fastq format, in a collection\r +* two adapter sequences in fasta format, for input into cutadapt\r +\r +## Steps and outputs\r +\r +The workflow can be modified to suit your own parameters. \r +\r +The workflow steps are:\r +* Run FastQC to get statistics on the raw reads, send to MultiQC to create a nice output. This is tagged as "Report 1" in the Galaxy history. \r +* Run Cutadapt on the reads to cut adapters - enter two files with adapter sequence at the workflow option for "Choose file containing 3' adapters". The default settings are on except that the "Maximum error rate" for the adapters is set to 0.2 instead of 0.1. Send output statistics to MulitQC, this is "Report 2" in the Galaxy history. Note that you may have different requirements here in terms of how many adapter sequences you want to enter. We recommend copying the workflow and modifying as needed. \r +* Send these reads to fastp for additional filtering or trimming. Default settings are on but can be modified as needed. Send output statistics to MultiQC, this is "Report 3" in the Galaxy history. \r +* The filtered and trimmed reads are then ready for the stacks workflows. 
\r +\r +![qc-wf](wf-image-qc.png)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/346?version=1" ; + schema1:isBasedOn "https://github.com/AnnaSyme/workflow-qc-of-radseq-reads.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for QC of RADseq reads" ; + schema1:sdDatePublished "2024-08-05 10:32:09 +0100" ; + schema1:url "https://workflowhub.eu/workflows/346/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 20279 ; + schema1:creator ; + schema1:dateCreated "2022-05-31T06:55:21Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """# workflow-qc-of-radseq-reads\r +\r +These workflows are part of a set designed to work for RAD-seq data on the Galaxy platform, using the tools from the Stacks program. \r +\r +Galaxy Australia: https://usegalaxy.org.au/\r +\r +Stacks: http://catchenlab.life.illinois.edu/stacks/\r +\r +## Inputs\r +* demultiplexed reads in fastq format, in a collection\r +* two adapter sequences in fasta format, for input into cutadapt\r +\r +## Steps and outputs\r +\r +The workflow can be modified to suit your own parameters. \r +\r +The workflow steps are:\r +* Run FastQC to get statistics on the raw reads, send to MultiQC to create a nice output. This is tagged as "Report 1" in the Galaxy history. \r +* Run Cutadapt on the reads to cut adapters - enter two files with adapter sequence at the workflow option for "Choose file containing 3' adapters". The default settings are on except that the "Maximum error rate" for the adapters is set to 0.2 instead of 0.1. Send output statistics to MulitQC, this is "Report 2" in the Galaxy history. Note that you may have different requirements here in terms of how many adapter sequences you want to enter. We recommend copying the workflow and modifying as needed. 
\r +* Send these reads to fastp for additional filtering or trimming. Default settings are on but can be modified as needed. Send output statistics to MultiQC, this is "Report 3" in the Galaxy history. \r +* The filtered and trimmed reads are then ready for the stacks workflows. \r +\r +![qc-wf](wf-image-qc.png)\r +""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "QC of RADseq reads" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/346?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 267910 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "MARS-seq v2 preprocessing pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/996?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/marsseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/marsseq" ; + schema1:sdDatePublished "2024-08-05 10:23:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/996/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9093 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:00Z" ; + schema1:dateModified "2024-06-11T12:55:00Z" ; + schema1:description "MARS-seq v2 preprocessing pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/996?version=4" ; + schema1:keywords "facs-sorting, MARS-seq, single-cell, single-cell-rna-seq, star-solo, transcriptional-dynamics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/marsseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/996?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# Framework for construction of phylogenetic networks on High Performance Computing (HPC) environment\r +\r +## Introduction\r +\r +Phylogeny refers to the evolutionary history and relationship between biological lineages related by common descent. Reticulate evolution refers to the origination of lineages through the complete or partial merging of ancestor lineages. Networks may be used to represent lineage independence events in non-treelike phylogenetic processes.\r +\r +The methodology for reconstructing networks is still in development. Here we explore two methods for reconstructing rooted explicit phylogenetic networks, PhyloNetworks and Phylonet, which employ computationally expensive and time consuming algorithms. The construction of phylogenetic networks follows a coordinated processing flow of data sets analyzed and processed by the coordinated execution of a set of different programs, packages, libraries or pipelines, called workflow activities. 
\r +\r +In view of the complexity in modeling network experiments, the present work introduces a workflow for phylogenetic network analyses coupled to be executed in High-Performance Computing (HPC) environments. The workflow aims to integrate well-established software, pipelines and scripts, implementing a challenging task since these tools do not consistently profit from the HPC environment, leading to an increase in the expected makespan and idle computing resources.\r +\r +## Requirements\r +\r +1. Python >= 3.8\r + 1. Biopython >= 1.75\r + 2. Pandas >= 1.3.2\r + 3. Parsl >= 1.0\r +3. Raxml >= 8.2.12\r +4. Astral >= 5.7.1\r +5. SnaQ (PhyloNetworks) >= 0.13.0\r +6. MrBayes >= 3.2.7a\r +7. BUCKy >= 1.4.4\r +8. Quartet MaxCut >= 2.10\r +9. PhyloNet >= 3.8.2\r +10. Julia >= 1.4.1\r +11. IQTREE >= 2.0\r +\r +\r +## How to use\r +\r +### Setting up the framework\r +\r +The framework uses a file to get all the needed parameters. For default it loads the file *default.ini* in the config folder, but you can explicitly load other files using the argument ``-s name_of_the_file``, *e.g.* ``-s config/test.ini``.\r +\r +* Edit *parl.env* with the environment variables you may need, such as modules loadeds in SLURM\r +* Edit *work.config* with the directories of your phylogeny studies (the framework receives as input a set of homologous gene alignments of species in the nexus format).\r +* Edit *default.ini* with the path for each of the needed softwares and the parameters of the execution provider.\r +\r +For default, the execution logs are created in the ``runinfo`` folder. To change it you can use the `-r folder_path` parameter.\r +\r +#### Contents of the configuration file\r +\r +* General settings\r +\r +```ini\r +[GENERAL]\r +ExecutionProvider = SLURM\r +ScriptDir = ./scripts\r +Environ = config/parsl.env\r +Workload = config/work.config\r +NetworkMethod = MP\r +TreeMethod = RAXML\r +BootStrap = 1000\r +```\r +\r +1. 
The framework can be executed in a HPC environment using the Slurm resource manager using the parameter ``ExecutionProvider`` equals to ``SLURM`` or locally with ``LOCAL``. \r +2. The path of the scripts folder is assigned in ``ScriptDir``. It's recommended to use the absolute path to avoid errors.\r +3. The ``Environ`` parameter contains the path of the file used to set environment variables. More details can be seen below.\r +4. In ``Workload`` is the path of the experiments that will be performed.\r +5. ``NetworkMethod`` and ``TreeMethod`` are the default network and tree methods that will be used to perform the workloads' studies.\r +6. ``Bootstrap`` is the parameter used in all the software that use bootstrap (RAxML, IQTREE and ASTRAL)\r +\r +* Workflow execution settings\r + \r + When using SLURM, these are the needed parameters:\r + ```ini\r + [WORKFLOW]\r + Monitor = False\r + PartCore = 24\r + PartNode = 1\r + Walltime = 00:20:00\r + ```\r +\r + 1. ``Monitor`` is a parameter to use parsl's monitor module in HPC environment. It can be *true* or *false*. If you want to use it, it's necessary to set it as *true* and manually change the address in ``infra_manager.py``\r + 2. If you are using it in a HPC environment (using SLURM), the framework is going to submit in a job. 
``PartCore`` is the number of cores of the node; ``PartNode`` is the number of nodes of the partition; and the ``Walltime`` parameter is the maximum amount of time the job will be able to run.\r +\r + However, if the the desired execution method is the LocalProvider, _i.e._ the execution is being performed in your own machine, only these parameters are necessary:\r +\r + ```ini\r + [WORKFLOW]\r + Monitor = False\r + MaxCore = 6\r + CoresPerWorker = 1\r +\r + ```\r +\r +* RAxML settings\r +\r + ```ini\r + [RAXML]\r + RaxmlExecutable = raxmlHPC-PTHREADS\r + RaxmlThreads = 6\r + RaxmlEvolutionaryModel = GTRGAMMA --HKY85\r + ```\r +\r +* IQTREE settings\r +\r + ```ini\r + [IQTREE]\r + IqTreeExecutable = iqtree2\r + IqTreeEvolutionaryModel = TIM2+I+G \r + IqTreeThreads = 6\r + ```\r +\r +* ASTRAL settings\r +\r + ```ini\r + [ASTRAL]\r + AstralExecDir = /opt/astral/5.7.1\r + AstralJar = astral.jar\r + ```\r +\r +* PhyloNet settings\r +\r + ```ini\r + [PHYLONET]\r + PhyloNetExecDir = /opt/phylonet/3.8.2/\r + PhyloNetJar = PhyloNet.jar\r + PhyloNetThreads = 6\r + PhyloNetHMax = 3\r + PhyloNetRuns = 5\r + ```\r +\r +* SNAQ settings\r +\r + ```ini\r + [SNAQ]\r + SnaqThreads = 6\r + SnaqHMax = 3\r + SnaqRuns = 3\r + ```\r +\r +* Mr. Bayes settings\r +\r + ```ini\r + [MRBAYES]\r + MBExecutable = mb\r + MBParameters = set usebeagle=no beagledevice=cpu beagleprecision=double; mcmcp ngen=100000 burninfrac=.25 samplefreq=50 printfreq=10000 diagnfreq=10000 nruns=2 nchains=2 temp=0.40 swapfreq=10\r + ```\r +\r +* Bucky settings\r +\r + ```ini\r + [BUCKY]\r + BuckyExecutable = bucky\r + MbSumExecutable = mbsum\r + ```\r +\r +* Quartet MaxCut\r +\r + ```ini\r + QUARTETMAXCUT]\r + QmcExecDir = /opt/quartet/\r + QmcExecutable = find-cut-Linux-64\r + ```\r +\r +#### Workload file\r +\r +For default the workload file is ``work.config`` in the *config* folder. 
The file contains the absolute paths of the experiment's folders.\r +\r +```\r +/home/rafael.terra/Biocomp/data/Denv_1\r +```\r +\r +You can comment folders using the # character in the beginning of the path. *e. g.* ``#/home/rafael.terra/Biocomp/data/Denv_1``. That way the framework won't read this path.\r +\r +You can also run a specific flow for a path using ``@TreeMethod|NetworkMethod`` in the end of a path. Where *TreeMethod* can be RAXML, IQTREE or MRBAYES and *NetworkMethod* can be MPL or MP (case sensitive). The supported flows are: ``RAXML|MPL``, ``RAXML|MP``, ``IQTREE|MPL``, ``IQTREE|MP`` and ``MRBAYES|MPL``. For example:\r +\r +```\r +/home/rafael.terra/Biocomp/data/Denv_1@RAXML|MPL\r +```\r +\r +#### Environment file\r +\r +The environment file contains all the environment variables (like module files used in SLURM) used during the framework execution. Example:\r +\r +```sh\r +module load python/3.8.2\r +module load raxml/8.2_openmpi-2.0_gnu\r +module load java/jdk-12\r +module load iqtree/2.1.1\r +module load bucky/1.4.4\r +module load mrbayes/3.2.7a-OpenMPI-4.0.4\r +source /scratch/app/modulos/julia-1.5.1.sh\r +```\r +\r +#### Experiment folder\r +\r +Each experiment folder needs to have a *input folder* containing a *.tar.gz* compressed file and a *.json* with the following content. **The framework considers that there is only one file of each extension in the input folder**.\r +\r +```json\r +{\r + "Mapping":"",\r + "Outgroup":""\r +}\r +```\r +\r +Where ``Mapping`` is a direct mapping of the taxon, when there are multiple alleles per species, in the format ``species1:taxon1,taxon2;species2:taxon3,taxon4`` *(white spaces are not supported)* and ``Outgroup`` is the taxon used to root the network. The Mapping parameter is optional (although it has to be in the json file without value), but the outgroup is obligatory. It's important to say that the flow *MRBAYES|MPL* doesn't support multiple alleles per species. 
Example:\r +\r +```json\r +{\r + "Mapping": "dengue_virus_type_2:FJ850082,FJ850088,JX669479,JX669482,JX669488,KP188569;dengue_virus_type_3:FJ850079,FJ850094,JN697379,JX669494;dengue_virus_type_1:FJ850073,FJ850084,FJ850093,JX669465,JX669466,JX669475,KP188545,KP188547;dengue_virus_type_4:JN559740,JQ513337,JQ513341,JQ513343,JQ513344,JQ513345,KP188563,KP188564;Zika_virus:MH882543", \r + "Outgroup": "MH882543"\r +}\r +```\r +\r +\r +## Running the framework\r +\r +* In a local machine:\r +\r + After setting up the framework, just run ``python3 parsl_workflow.py``.\r + \r +* In a SLURM environment:\r +\r + Create an submition script that inside contains: ``python3 parsl_workflow.py``.\r +\r + ```sh\r + #!/bin/bash\r + #SBATCH --time=15:00:00\r + #SBATCH -e slurm-%j.err\r + #SBATCH -o slurm-%j.out\r + module load python/3.9.6\r + cd /path/to/biocomp\r + python3 parsl_workflow.py\r + ```\r +\r +The framework is under heavy development. If you notice any bug, please create an issue here on GitHub.\r +\r +### Running in a DOCKER container\r +\r +The framework is also available to be used in Docker. It can be built from source or pushed from DockerHub.\r +\r +#### Building it from the source code\r +\r +Adapt the default settings file ``config/default.ini`` according to your machine, setting the number of threads and bootstrap. After that, run ``docker build -t hp2net .`` in the project's root folder.\r +\r +#### Downloading it from Dockerhub\r +\r +The docker image can also be downloaded from [Docker hub](https://hub.docker.com/repository/docker/rafaelstjf/hp2net/general). To do that, just run the command ``docker pull rafaelstjf/hp2net:main``\r +\r +#### Running\r +\r +The first step to run the framework is to setup your dataset. 
To test if the framework is running without problems in your machine, you can use the [example datasets](example_data).\r +\r +![Alt text](docs/example_data.png)\r +\r +Extracting the ``example_data.zip`` file, a new folder called ``with_outgroup`` is created. This folder contain four datasets of DENV sequences.\r +\r +The next step is the creation of the settings and workload files. For the settings file, download the [default.ini](config/default.ini) from this repository and change it to you liking (the path of all software are already configured to run on docker). The workload file is a text file containing the absolute path of the datasets, followed by the desired pipeline, as shown before in this document. Here for example purposes, the ``input.txt`` file was created.\r +\r +![Alt text](docs/example_files.png)\r +\r +With all the files prepared, the framework can be executed from the ``example_data`` folder as following:\r +\r +``docker run --rm -v $PWD:$PWD rafaelstjf/hp2net:main -s $PWD/default.ini -w $PWD/input.txt``\r +\r +**Important:** the docker doesn't save your logs, for that add the parameter: ``-r $PWD/name_of_your_log_folder``.\r +\r +---\r +If you are running it on **Santos Dumont Supercomputer**, both downloading and execution of the docker container need to be performed from a submission script and executed using ``sg docker -c "sbatch script.sh"``. 
The snippet below shows an example of submission script.\r +\r +```sh\r +#!/bin/bash\r +#SBATCH --nodes=1\r +#SBATCH --ntasks-per-node=24\r +#SBATCH -p cpu_small\r +#SBATCH -J Hp2NET\r +#SBATCH --exclusive\r +#SBATCH --time=02:00:00\r +#SBATCH -e slurm-%j.err\r +#SBATCH -o slurm-%j.out\r +\r +DIR='/scratch/pcmrnbio2/rafael.terra/WF_parsl/example_data'\r +docker pull rafaelstjf/hp2net:main\r +\r +docker run --rm -v $DIR:$DIR rafaelstjf/hp2net:main -s ${DIR}/sdumont.ini -w ${DIR}/entrada.txt -r ${DIR}/logs\r +```\r +\r +## If you use it, please cite\r +\r +Terra, R., Coelho, M., Cruz, L., Garcia-Zapata, M., Gadelha, L., Osthoff, C., ... & Ocana, K. (2021, July). Gerência e Análises de Workflows aplicados a Redes Filogenéticas de Genomas de Dengue no Brasil. In *Anais do XV Brazilian e-Science Workshop* (pp. 49-56). SBC.\r +\r +**Also cite all the coupled software!**\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.703.1" ; + schema1:isBasedOn "https://github.com/rafaelstjf/biocomp.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for HP2NET - Framework for Construction of Phylogenetic Networks on High Performance Computing (HPC) Environment" ; + schema1:sdDatePublished "2024-08-05 10:26:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/703/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14660 ; + schema1:creator , + ; + schema1:dateCreated "2024-01-09T13:04:17Z" ; + schema1:dateModified "2024-01-18T18:24:01Z" ; + schema1:description """# Framework for construction of phylogenetic networks on High Performance Computing (HPC) environment\r +\r +## Introduction\r +\r +Phylogeny refers to the evolutionary history and relationship between biological lineages related by common descent. 
Reticulate evolution refers to the origination of lineages through the complete or partial merging of ancestor lineages. Networks may be used to represent lineage independence events in non-treelike phylogenetic processes.\r +\r +The methodology for reconstructing networks is still in development. Here we explore two methods for reconstructing rooted explicit phylogenetic networks, PhyloNetworks and Phylonet, which employ computationally expensive and time consuming algorithms. The construction of phylogenetic networks follows a coordinated processing flow of data sets analyzed and processed by the coordinated execution of a set of different programs, packages, libraries or pipelines, called workflow activities. \r +\r +In view of the complexity in modeling network experiments, the present work introduces a workflow for phylogenetic network analyses coupled to be executed in High-Performance Computing (HPC) environments. The workflow aims to integrate well-established software, pipelines and scripts, implementing a challenging task since these tools do not consistently profit from the HPC environment, leading to an increase in the expected makespan and idle computing resources.\r +\r +## Requirements\r +\r +1. Python >= 3.8\r + 1. Biopython >= 1.75\r + 2. Pandas >= 1.3.2\r + 3. Parsl >= 1.0\r +3. Raxml >= 8.2.12\r +4. Astral >= 5.7.1\r +5. SnaQ (PhyloNetworks) >= 0.13.0\r +6. MrBayes >= 3.2.7a\r +7. BUCKy >= 1.4.4\r +8. Quartet MaxCut >= 2.10\r +9. PhyloNet >= 3.8.2\r +10. Julia >= 1.4.1\r +11. IQTREE >= 2.0\r +\r +\r +## How to use\r +\r +### Setting up the framework\r +\r +The framework uses a file to get all the needed parameters. 
For default it loads the file *default.ini* in the config folder, but you can explicitly load other files using the argument ``-s name_of_the_file``, *e.g.* ``-s config/test.ini``.\r +\r +* Edit *parl.env* with the environment variables you may need, such as modules loadeds in SLURM\r +* Edit *work.config* with the directories of your phylogeny studies (the framework receives as input a set of homologous gene alignments of species in the nexus format).\r +* Edit *default.ini* with the path for each of the needed softwares and the parameters of the execution provider.\r +\r +For default, the execution logs are created in the ``runinfo`` folder. To change it you can use the `-r folder_path` parameter.\r +\r +#### Contents of the configuration file\r +\r +* General settings\r +\r +```ini\r +[GENERAL]\r +ExecutionProvider = SLURM\r +ScriptDir = ./scripts\r +Environ = config/parsl.env\r +Workload = config/work.config\r +NetworkMethod = MP\r +TreeMethod = RAXML\r +BootStrap = 1000\r +```\r +\r +1. The framework can be executed in a HPC environment using the Slurm resource manager using the parameter ``ExecutionProvider`` equals to ``SLURM`` or locally with ``LOCAL``. \r +2. The path of the scripts folder is assigned in ``ScriptDir``. It's recommended to use the absolute path to avoid errors.\r +3. The ``Environ`` parameter contains the path of the file used to set environment variables. More details can be seen below.\r +4. In ``Workload`` is the path of the experiments that will be performed.\r +5. ``NetworkMethod`` and ``TreeMethod`` are the default network and tree methods that will be used to perform the workloads' studies.\r +6. ``Bootstrap`` is the parameter used in all the software that use bootstrap (RAxML, IQTREE and ASTRAL)\r +\r +* Workflow execution settings\r + \r + When using SLURM, these are the needed parameters:\r + ```ini\r + [WORKFLOW]\r + Monitor = False\r + PartCore = 24\r + PartNode = 1\r + Walltime = 00:20:00\r + ```\r +\r + 1. 
``Monitor`` is a parameter to use parsl's monitor module in HPC environment. It can be *true* or *false*. If you want to use it, it's necessary to set it as *true* and manually change the address in ``infra_manager.py``\r + 2. If you are using it in a HPC environment (using SLURM), the framework is going to submit in a job. ``PartCore`` is the number of cores of the node; ``PartNode`` is the number of nodes of the partition; and the ``Walltime`` parameter is the maximum amount of time the job will be able to run.\r +\r + However, if the the desired execution method is the LocalProvider, _i.e._ the execution is being performed in your own machine, only these parameters are necessary:\r +\r + ```ini\r + [WORKFLOW]\r + Monitor = False\r + MaxCore = 6\r + CoresPerWorker = 1\r +\r + ```\r +\r +* RAxML settings\r +\r + ```ini\r + [RAXML]\r + RaxmlExecutable = raxmlHPC-PTHREADS\r + RaxmlThreads = 6\r + RaxmlEvolutionaryModel = GTRGAMMA --HKY85\r + ```\r +\r +* IQTREE settings\r +\r + ```ini\r + [IQTREE]\r + IqTreeExecutable = iqtree2\r + IqTreeEvolutionaryModel = TIM2+I+G \r + IqTreeThreads = 6\r + ```\r +\r +* ASTRAL settings\r +\r + ```ini\r + [ASTRAL]\r + AstralExecDir = /opt/astral/5.7.1\r + AstralJar = astral.jar\r + ```\r +\r +* PhyloNet settings\r +\r + ```ini\r + [PHYLONET]\r + PhyloNetExecDir = /opt/phylonet/3.8.2/\r + PhyloNetJar = PhyloNet.jar\r + PhyloNetThreads = 6\r + PhyloNetHMax = 3\r + PhyloNetRuns = 5\r + ```\r +\r +* SNAQ settings\r +\r + ```ini\r + [SNAQ]\r + SnaqThreads = 6\r + SnaqHMax = 3\r + SnaqRuns = 3\r + ```\r +\r +* Mr. 
Bayes settings\r +\r + ```ini\r + [MRBAYES]\r + MBExecutable = mb\r + MBParameters = set usebeagle=no beagledevice=cpu beagleprecision=double; mcmcp ngen=100000 burninfrac=.25 samplefreq=50 printfreq=10000 diagnfreq=10000 nruns=2 nchains=2 temp=0.40 swapfreq=10\r + ```\r +\r +* Bucky settings\r +\r + ```ini\r + [BUCKY]\r + BuckyExecutable = bucky\r + MbSumExecutable = mbsum\r + ```\r +\r +* Quartet MaxCut\r +\r + ```ini\r + QUARTETMAXCUT]\r + QmcExecDir = /opt/quartet/\r + QmcExecutable = find-cut-Linux-64\r + ```\r +\r +#### Workload file\r +\r +For default the workload file is ``work.config`` in the *config* folder. The file contains the absolute paths of the experiment's folders.\r +\r +```\r +/home/rafael.terra/Biocomp/data/Denv_1\r +```\r +\r +You can comment folders using the # character in the beginning of the path. *e. g.* ``#/home/rafael.terra/Biocomp/data/Denv_1``. That way the framework won't read this path.\r +\r +You can also run a specific flow for a path using ``@TreeMethod|NetworkMethod`` in the end of a path. Where *TreeMethod* can be RAXML, IQTREE or MRBAYES and *NetworkMethod* can be MPL or MP (case sensitive). The supported flows are: ``RAXML|MPL``, ``RAXML|MP``, ``IQTREE|MPL``, ``IQTREE|MP`` and ``MRBAYES|MPL``. For example:\r +\r +```\r +/home/rafael.terra/Biocomp/data/Denv_1@RAXML|MPL\r +```\r +\r +#### Environment file\r +\r +The environment file contains all the environment variables (like module files used in SLURM) used during the framework execution. Example:\r +\r +```sh\r +module load python/3.8.2\r +module load raxml/8.2_openmpi-2.0_gnu\r +module load java/jdk-12\r +module load iqtree/2.1.1\r +module load bucky/1.4.4\r +module load mrbayes/3.2.7a-OpenMPI-4.0.4\r +source /scratch/app/modulos/julia-1.5.1.sh\r +```\r +\r +#### Experiment folder\r +\r +Each experiment folder needs to have a *input folder* containing a *.tar.gz* compressed file and a *.json* with the following content. 
**The framework considers that there is only one file of each extension in the input folder**.\r +\r +```json\r +{\r + "Mapping":"",\r + "Outgroup":""\r +}\r +```\r +\r +Where ``Mapping`` is a direct mapping of the taxon, when there are multiple alleles per species, in the format ``species1:taxon1,taxon2;species2:taxon3,taxon4`` *(white spaces are not supported)* and ``Outgroup`` is the taxon used to root the network. The Mapping parameter is optional (although it has to be in the json file without value), but the outgroup is obligatory. It's important to say that the flow *MRBAYES|MPL* doesn't support multiple alleles per species. Example:\r +\r +```json\r +{\r + "Mapping": "dengue_virus_type_2:FJ850082,FJ850088,JX669479,JX669482,JX669488,KP188569;dengue_virus_type_3:FJ850079,FJ850094,JN697379,JX669494;dengue_virus_type_1:FJ850073,FJ850084,FJ850093,JX669465,JX669466,JX669475,KP188545,KP188547;dengue_virus_type_4:JN559740,JQ513337,JQ513341,JQ513343,JQ513344,JQ513345,KP188563,KP188564;Zika_virus:MH882543", \r + "Outgroup": "MH882543"\r +}\r +```\r +\r +\r +## Running the framework\r +\r +* In a local machine:\r +\r + After setting up the framework, just run ``python3 parsl_workflow.py``.\r + \r +* In a SLURM environment:\r +\r + Create an submition script that inside contains: ``python3 parsl_workflow.py``.\r +\r + ```sh\r + #!/bin/bash\r + #SBATCH --time=15:00:00\r + #SBATCH -e slurm-%j.err\r + #SBATCH -o slurm-%j.out\r + module load python/3.9.6\r + cd /path/to/biocomp\r + python3 parsl_workflow.py\r + ```\r +\r +The framework is under heavy development. If you notice any bug, please create an issue here on GitHub.\r +\r +### Running in a DOCKER container\r +\r +The framework is also available to be used in Docker. It can be built from source or pushed from DockerHub.\r +\r +#### Building it from the source code\r +\r +Adapt the default settings file ``config/default.ini`` according to your machine, setting the number of threads and bootstrap. 
After that, run ``docker build -t hp2net .`` in the project's root folder.\r +\r +#### Downloading it from Dockerhub\r +\r +The docker image can also be downloaded from [Docker hub](https://hub.docker.com/repository/docker/rafaelstjf/hp2net/general). To do that, just run the command ``docker pull rafaelstjf/hp2net:main``\r +\r +#### Running\r +\r +The first step to run the framework is to setup your dataset. To test if the framework is running without problems in your machine, you can use the [example datasets](example_data).\r +\r +![Alt text](docs/example_data.png)\r +\r +Extracting the ``example_data.zip`` file, a new folder called ``with_outgroup`` is created. This folder contain four datasets of DENV sequences.\r +\r +The next step is the creation of the settings and workload files. For the settings file, download the [default.ini](config/default.ini) from this repository and change it to you liking (the path of all software are already configured to run on docker). The workload file is a text file containing the absolute path of the datasets, followed by the desired pipeline, as shown before in this document. Here for example purposes, the ``input.txt`` file was created.\r +\r +![Alt text](docs/example_files.png)\r +\r +With all the files prepared, the framework can be executed from the ``example_data`` folder as following:\r +\r +``docker run --rm -v $PWD:$PWD rafaelstjf/hp2net:main -s $PWD/default.ini -w $PWD/input.txt``\r +\r +**Important:** the docker doesn't save your logs, for that add the parameter: ``-r $PWD/name_of_your_log_folder``.\r +\r +---\r +If you are running it on **Santos Dumont Supercomputer**, both downloading and execution of the docker container need to be performed from a submission script and executed using ``sg docker -c "sbatch script.sh"``. 
The snippet below shows an example of submission script.\r +\r +```sh\r +#!/bin/bash\r +#SBATCH --nodes=1\r +#SBATCH --ntasks-per-node=24\r +#SBATCH -p cpu_small\r +#SBATCH -J Hp2NET\r +#SBATCH --exclusive\r +#SBATCH --time=02:00:00\r +#SBATCH -e slurm-%j.err\r +#SBATCH -o slurm-%j.out\r +\r +DIR='/scratch/pcmrnbio2/rafael.terra/WF_parsl/example_data'\r +docker pull rafaelstjf/hp2net:main\r +\r +docker run --rm -v $DIR:$DIR rafaelstjf/hp2net:main -s ${DIR}/sdumont.ini -w ${DIR}/entrada.txt -r ${DIR}/logs\r +```\r +\r +## If you use it, please cite\r +\r +Terra, R., Coelho, M., Cruz, L., Garcia-Zapata, M., Gadelha, L., Osthoff, C., ... & Ocana, K. (2021, July). Gerência e Análises de Workflows aplicados a Redes Filogenéticas de Genomas de Dengue no Brasil. In *Anais do XV Brazilian e-Science Workshop* (pp. 49-56). SBC.\r +\r +**Also cite all the coupled software!**\r +\r +""" ; + schema1:keywords "Bioinformatics, Parsl, phylogenetics, HPC" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "HP2NET - Framework for Construction of Phylogenetic Networks on High Performance Computing (HPC) Environment" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/703?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description "This workflow begins from a set of genome assemblies of different samples, strains, species. The genome is first annotated with Funnanotate. Predicted proteins are furtner annotated with Busco. Next, 'ProteinOrtho' finds orthologs across the samples and makes orthogroups. Orthogroups where all samples are represented are extracted. Orthologs in each orthogroup are aligned with ClustalW. 
Test dataset: https://zenodo.org/record/6610704#.Ypn3FzlBw5k" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/358?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for preparing genomic data for phylogeny recostruction (GTN)" ; + schema1:sdDatePublished "2024-08-05 10:32:04 +0100" ; + schema1:url "https://workflowhub.eu/workflows/358/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 34691 ; + schema1:creator ; + schema1:dateCreated "2022-06-06T14:05:14Z" ; + schema1:dateModified "2023-02-13T14:06:44Z" ; + schema1:description "This workflow begins from a set of genome assemblies of different samples, strains, species. The genome is first annotated with Funnanotate. Predicted proteins are furtner annotated with Busco. Next, 'ProteinOrtho' finds orthologs across the samples and makes orthogroups. Orthogroups where all samples are represented are extracted. Orthologs in each orthogroup are aligned with ClustalW. Test dataset: https://zenodo.org/record/6610704#.Ypn3FzlBw5k" ; + schema1:keywords "phylogenetics, phylogenomics, Annotation" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "preparing genomic data for phylogeny recostruction (GTN)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/358?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2023-08-31T11:09:22.704427" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "atacseq/main" . 
+ + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Takes fastqs and reference data, to produce a single cell counts matrix into and save in annData format - adding a column called sample with the sample name. " ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/646?version=1" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "other-closed" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq: Count and Load with Cell Ranger" ; + schema1:sdDatePublished "2024-08-05 10:24:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/646/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16476 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-11-09T01:26:29Z" ; + schema1:dateModified "2023-11-09T03:53:01Z" ; + schema1:description "Takes fastqs and reference data, to produce a single cell counts matrix into and save in annData format - adding a column called sample with the sample name. " ; + schema1:isBasedOn "https://workflowhub.eu/workflows/646?version=1" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "" ; + schema1:name "scRNAseq: Count and Load with Cell Ranger" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/646?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-08-05 10:23:18 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=9" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11482 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:15Z" ; + schema1:dateModified "2024-06-11T12:55:15Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Precision HLA typing from next-generation sequencing data." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/991?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/hlatyping" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hlatyping" ; + schema1:sdDatePublished "2024-08-05 10:22:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/991/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3408 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:55Z" ; + schema1:dateModified "2024-06-11T12:54:55Z" ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/991?version=9" ; + schema1:keywords "DNA, hla, hla-typing, immunology, optitype, personalized-medicine, rna" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hlatyping" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/991?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.54.4" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_ligand_parameterization" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter GMX Notebook Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-08-05 10:25:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/54/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15271 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-04-14T07:51:54Z" ; + schema1:dateModified "2023-04-14T07:54:27Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/54?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter GMX Notebook Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_ligand_parameterization/blob/master/biobb_wf_ligand_parameterization/notebooks/biobb_ligand_parameterization_tutorial.ipynb" ; + schema1:version 4 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/978?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/demultiplex" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/demultiplex" ; + schema1:sdDatePublished "2024-08-05 10:24:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/978/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9518 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:47Z" ; + schema1:dateModified "2024-06-11T12:54:47Z" ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/978?version=7" ; + schema1:keywords "bases2fastq, bcl2fastq, demultiplexing, elementbiosciences, illumina" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/demultiplex" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/978?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + schema1:datePublished "2023-10-23T15:16:21.551531" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "atacseq/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.14" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.130.5" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Amber Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:25:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/130/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 64611 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-04T15:28:57Z" ; + schema1:dateModified "2024-03-04T15:30:41Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/130?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Amber Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_wf_amber_md_setup/blob/main/biobb_wf_amber_md_setup/notebooks/mdsetup/biobb_amber_setup_notebook.ipynb" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + ; + schema1:description "Docking performed by rDock using as 3 different kind of inputs. More info can be found at https://covid19.galaxyproject.org/cheminformatics/" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/14?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Cheminformatics - Docking" ; + schema1:sdDatePublished "2024-08-05 10:33:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/14/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 2177 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9616 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T14:32:01Z" ; + schema1:dateModified "2023-01-16T13:40:50Z" ; + schema1:description "Docking performed by rDock using as 3 different kind of inputs. More info can be found at https://covid19.galaxyproject.org/cheminformatics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Cheminformatics - Docking" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/14?version=1" ; + schema1:version 1 ; + ns1:input , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 5524 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-08-05 10:23:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=9" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4258 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:03Z" ; + schema1:dateModified "2024-06-11T12:55:03Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=15" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-08-05 10:22:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=15" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11863 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:10Z" ; + schema1:dateModified "2024-06-11T12:55:10Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=15" ; + schema1:version 15 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. 
We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:identifier 
"https://doi.org/10.48546/workflowhub.workflow.328.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_pmx_tutorial/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)" ; + schema1:sdDatePublished "2024-08-05 10:32:13 +0100" ; + schema1:url "https://workflowhub.eu/workflows/328/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8358 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-22T10:02:49Z" ; + schema1:dateModified "2022-11-22T10:03:35Z" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. 
Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/328?version=3" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_pmx_tutorial/python/workflow.py" ; + schema1:version 2 . + + a schema1:Dataset ; + schema1:datePublished "2022-11-28T16:35:36.692567" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-sr/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.2" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +For this workflow:\r +\r +Inputs:\r +* assembled-genome.fasta\r +* hard-repeat-masked-genome.fasta\r +* If using the mRNAs option, the additional inputs required are .cdna, .pro and .dat files. \r +\r +What it does:\r +* This workflow splits the input genomes into single sequences (to decrease computation time), annotates using FgenesH++, and merges the output. \r +\r +Outputs:\r +* genome annotation in gff3 format\r +* fasta files of mRNAs, cDNAs and proteins\r +* Busco report\r +\r +\r +\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.881.3" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Fgenesh annotation -TSI" ; + schema1:sdDatePublished "2024-08-05 10:22:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/881/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17572 ; + schema1:creator ; + schema1:dateCreated "2024-06-18T09:08:35Z" ; + schema1:dateModified "2024-06-18T09:11:51Z" ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +For this workflow:\r +\r +Inputs:\r +* assembled-genome.fasta\r +* hard-repeat-masked-genome.fasta\r +* If using the mRNAs option, the additional inputs required are .cdna, .pro and .dat files. \r +\r +What it does:\r +* This workflow splits the input genomes into single sequences (to decrease computation time), annotates using FgenesH++, and merges the output. \r +\r +Outputs:\r +* genome annotation in gff3 format\r +* fasta files of mRNAs, cDNAs and proteins\r +* Busco report\r +\r +\r +\r +\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/881?version=3" ; + schema1:isPartOf ; + schema1:keywords "TSI-annotation" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Fgenesh annotation -TSI" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/881?version=3" ; + schema1:version 3 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 189968 . 
+ + a schema1:Dataset ; + schema1:datePublished "2022-02-17T09:30:15.975078" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-variation-reporting" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-variation-reporting/COVID-19-VARIATION-REPORTING" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-08-05 10:23:17 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7425 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:14Z" ; + schema1:dateModified "2024-06-11T12:55:14Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=5" ; + schema1:version 5 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "B cell repertoire analysis pipeline with immcantation framework." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/963?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/airrflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bcellmagic" ; + schema1:sdDatePublished "2024-08-05 10:24:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/963/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4189 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:36Z" ; + schema1:dateModified "2024-06-11T12:54:36Z" ; + schema1:description "B cell repertoire analysis pipeline with immcantation framework." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/963?version=13" ; + schema1:keywords "airr, b-cell, immcantation, immunorepertoire, repseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bcellmagic" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/963?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# CMIP tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of computing **classical molecular interaction potentials** from **protein structures**, step by step, using the **BioExcel Building Blocks library (biobb)**. Examples shown are **Molecular Interaction Potentials (MIPs) grids, protein-protein/ligand interaction potentials, and protein titration**. 
The particular structures used are the **Lysozyme** protein (PDB code [1AKI](https://www.rcsb.org/structure/1aki)), and a MD simulation of the complex formed by the **SARS-CoV-2 Receptor Binding Domain and the human Angiotensin Converting Enzyme 2** (PDB code [6VW1](https://www.rcsb.org/structure/6vw1)).\r +\r +The code wrapped is the ***Classical Molecular Interaction Potentials (CMIP)*** code:\r +\r +**Classical molecular interaction potentials: Improved setup procedure in molecular dynamics simulations of proteins.**\r +*Gelpí, J.L., Kalko, S.G., Barril, X., Cirera, J., de la Cruz, X., Luque, F.J. and Orozco, M. (2001)*\r +*Proteins, 45: 428-437. [https://doi.org/10.1002/prot.1159](https://doi.org/10.1002/prot.1159)*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.774.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_cmip/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Classical Molecular Interaction Potentials" ; + schema1:sdDatePublished "2024-08-05 10:25:37 +0100" ; + schema1:url 
"https://workflowhub.eu/workflows/774/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7716 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-04T15:04:07Z" ; + schema1:dateModified "2024-03-04T15:15:20Z" ; + schema1:description """# CMIP tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of computing **classical molecular interaction potentials** from **protein structures**, step by step, using the **BioExcel Building Blocks library (biobb)**. Examples shown are **Molecular Interaction Potentials (MIPs) grids, protein-protein/ligand interaction potentials, and protein titration**. The particular structures used are the **Lysozyme** protein (PDB code [1AKI](https://www.rcsb.org/structure/1aki)), and a MD simulation of the complex formed by the **SARS-CoV-2 Receptor Binding Domain and the human Angiotensin Converting Enzyme 2** (PDB code [6VW1](https://www.rcsb.org/structure/6vw1)).\r +\r +The code wrapped is the ***Classical Molecular Interaction Potentials (CMIP)*** code:\r +\r +**Classical molecular interaction potentials: Improved setup procedure in molecular dynamics simulations of proteins.**\r +*Gelpí, J.L., Kalko, S.G., Barril, X., Cirera, J., de la Cruz, X., Luque, F.J. and Orozco, M. (2001)*\r +*Proteins, 45: 428-437. 
[https://doi.org/10.1002/prot.1159](https://doi.org/10.1002/prot.1159)*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Classical Molecular Interaction Potentials" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_cmip/python/workflow.py" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:isBasedOn "https://github.com/nf-core/scrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/scrnaseq" ; + schema1:sdDatePublished "2024-08-05 10:23:15 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1021/ro_crate?version=13" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11170 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:18Z" ; + schema1:dateModified "2024-06-11T12:55:18Z" ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:keywords "10x-genomics, 10xgenomics, alevin, bustools, Cellranger, kallisto, rna-seq, single-cell, star-solo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/scrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:version 13 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """[![Development](https://img.shields.io/badge/development-active-blue.svg)](https://img.shields.io/badge/development-active-blue.svg)\r +[![Reads2Map](https://circleci.com/gh/Cristianetaniguti/Reads2Map.svg?style=svg)](https://app.circleci.com/pipelines/github/Cristianetaniguti/Reads2Map)\r +\r +## Reads2Map \r +\r +Reads2Map presents a collection of [WDL workflows](https://openwdl.org/) to build linkage maps from sequencing reads. Each workflow release is described in the [Read2Map releases page](https://github.com/Cristianetaniguti/Reads2Map/releases). \r +\r +The main workflows are the `EmpiricalReads2Map.wdl` and the `SimulatedReads2Map.wdl`. The `EmpiricalReads2Map.wdl` is composed by the `EmpiricalSNPCalling.wdl` that performs the SNP calling, and the `EmpiricalMaps.wdl` that performs the genotype calling and map building in empirical reads. 
The `SimulatedReads2Map.wdl` simulates Illumina reads for RADseq, exome, or WGS data and performs the SNP and genotype calling and genetic map building.\r +\r +By now, [GATK](https://github.com/broadinstitute/gatk), [Freebayes](https://github.com/ekg/freebayes) are included for SNP calling; [updog](https://github.com/dcgerard/updog), [polyRAD](https://github.com/lvclark/polyRAD), [SuperMASSA](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0030906) for dosage calling; and [OneMap](https://github.com/augusto-garcia/onemap), and [GUSMap](https://github.com/tpbilton/GUSMap) for linkage map build.\r +\r +![math_meth2](https://user-images.githubusercontent.com/7572527/203172239-e4d2d857-84e2-48c5-bb88-01052a287004.png)\r +\r +## How to use\r +\r +Multiple systems are available to run WDL workflows such as Cromwell, miniWDL, and dxWDL. See further information in the [openwdl documentation](https://github.com/openwdl/wdl#execution-engines).\r +\r +To run a pipeline, first navigate to [Reads2Map releases page](https://github.com/Cristianetaniguti/Reads2Map/releases), search for the pipeline tag you which to run, and download the pipeline’s assets (the WDL workflow, the JSON, and the ZIP with accompanying dependencies).\r +\r +## Documentation\r +\r +Check the description of the inputs for the pipelines:\r +\r +* [EmpiricalReads2Map (EmpiricalSNPCalling and EmpiricalMaps)](https://cristianetaniguti.github.io/Tutorials/Reads2Map/EmpiricalReads.html)\r +\r +* [SimulatedReads2Map](https://cristianetaniguti.github.io/Tutorials/Reads2Map/simulatedreads.html)\r +\r +Check how to evaluate the workflows results in Reads2MapApp Shiny:\r +\r +* [Reads2MapApp](https://github.com/Cristianetaniguti/Reads2MapApp)\r +\r +Once you selected the best pipeline using a subset of your data, you can build a complete high-density linkage map:\r +\r +* [A Guide to Build High-Density Linkage 
Maps](https://cristianetaniguti.github.io/Tutorials/onemap/Quick_HighDens/High_density_maps.html)\r +\r +Check more information and examples of usage in:\r +\r +* [Taniguti, C. H., Taniguti, L. M., Amadeu, R. R., Mollinari, M., Da, G., Pereira, S., Riera-Lizarazu, O., Lau, J., Byrne, D., de Siqueira Gesteira, G., De, T., Oliveira, P., Ferreira, G. C., & Franco Garcia, A. A. Developing best practices for genotyping-by-sequencing analysis using linkage maps as benchmarks. BioRxiv. https://doi.org/10.1101/2022.11.24.517847](https://www.biorxiv.org/content/10.1101/2022.11.24.517847v2)\r +\r +## Third-party software and images\r +\r +- [BWA](https://github.com/lh3/bwa) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Used to align simulated reads to reference;\r +- [cutadapt](https://github.com/marcelm/cutadapt) in [cristaniguti/ pirs-ddrad-cutadapt:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/pirs-ddrad-cutadapt): Trim simulated reads;\r +- [ddRADseqTools](https://github.com/GGFHF/ddRADseqTools) in [cristaniguti/ pirs-ddrad-cutadapt:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/pirs-ddrad-cutadapt): Set of applications useful to in silico design and testing of double digest RADseq (ddRADseq) experiments;\r +- [Freebayes](https://github.com/ekg/freebayes) in [Cristaniguti/freebayes:0.0.1](): Variant call step;\r +- [GATK](https://github.com/broadinstitute/gatk) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Variant call step using Haplotype Caller, GenomicsDBImport and GenotypeGVCFs;\r +- [PedigreeSim](https://github.com/PBR/pedigreeSim?files=1) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Simulates progeny genotypes from parents genotypes for different 
types of populations;\r +- [picard](https://github.com/broadinstitute/picard) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Process alignment files;\r +- [pirs](https://github.com/galaxy001/pirs) in [cristaniguti/ pirs-ddrad-cutadapt:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/pirs-ddrad-cutadapt): To generate simulates paired-end reads from a reference genome;\r +- [samtools](https://github.com/samtools/samtools) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Process alignment files;\r +- [SimuSCoP](https://github.com/qasimyu/simuscop) in [cristaniguti/simuscopr:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/simuscopr): Exome and WGS Illumina reads simulations;\r +- [RADinitio](http://catchenlab.life.illinois.edu/radinitio/) in [ cristaniguti/radinitio:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/radinitio): RADseq Illumina reads simulation;\r +- [SuperMASSA](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0030906) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Efficient Exact Maximum a Posteriori Computation for Bayesian SNP Genotyping in Polyploids;\r +- [bcftools](https://github.com/samtools/bcftools) in [lifebitai/bcftools:1.10.2](https://hub.docker.com/r/lifebitai/bcftools): utilities for variant calling and manipulating VCFs and BCFs;\r +- [vcftools](http://vcftools.sourceforge.net/) in [cristaniguti/split_markers:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/split_markers): program package designed for working with VCF files.\r +- [MCHap](https://github.com/PlantandFoodResearch/MCHap) in [cristaniguti/mchap:0.7.0](https://hub.docker.com/repository/docker/cristaniguti/mchap): Polyploid 
micro-haplotype assembly using Markov chain Monte Carlo simulation.\r +\r +### R packages\r +\r +- [OneMap](https://github.com/augusto-garcia/onemap) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Is a software for constructing genetic maps in experimental crosses: full-sib, RILs, F2 and backcrosses;\r +- [Reads2MapTools](https://github.com/Cristianetaniguti/Reads2MapTools) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Support package to perform mapping populations simulations and genotyping for OneMap genetic map building\r +- [GUSMap](https://github.com/tpbilton/GUSMap): Genotyping Uncertainty with Sequencing data and linkage MAPping\r +- [updog](https://github.com/dcgerard/updog) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Flexible Genotyping of Polyploids using Next Generation Sequencing Data\r +- [polyRAD](https://github.com/lvclark/polyRAD) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Genotype Calling with Uncertainty from Sequencing Data in Polyploids\r +- [Reads2MapApp](https://github.com/Cristianetaniguti/Reads2MapApp) in [cristaniguti/reads2mapApp:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Shiny app to evaluate Reads2Map workflows results\r +- [simuscopR](https://github.com/Cristianetaniguti/simuscopR) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Wrap-up R package for SimusCop simulations.""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.409.1" ; + schema1:isBasedOn "https://github.com/Cristianetaniguti/Reads2Map" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for EmpiricalReads2Map" ; + schema1:sdDatePublished "2024-08-05 10:31:36 +0100" ; + schema1:url 
"https://workflowhub.eu/workflows/409/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1773 ; + schema1:creator ; + schema1:dateCreated "2022-11-29T20:09:43Z" ; + schema1:dateModified "2023-01-16T14:04:54Z" ; + schema1:description """[![Development](https://img.shields.io/badge/development-active-blue.svg)](https://img.shields.io/badge/development-active-blue.svg)\r +[![Reads2Map](https://circleci.com/gh/Cristianetaniguti/Reads2Map.svg?style=svg)](https://app.circleci.com/pipelines/github/Cristianetaniguti/Reads2Map)\r +\r +## Reads2Map \r +\r +Reads2Map presents a collection of [WDL workflows](https://openwdl.org/) to build linkage maps from sequencing reads. Each workflow release is described in the [Read2Map releases page](https://github.com/Cristianetaniguti/Reads2Map/releases). \r +\r +The main workflows are the `EmpiricalReads2Map.wdl` and the `SimulatedReads2Map.wdl`. The `EmpiricalReads2Map.wdl` is composed by the `EmpiricalSNPCalling.wdl` that performs the SNP calling, and the `EmpiricalMaps.wdl` that performs the genotype calling and map building in empirical reads. 
The `SimulatedReads2Map.wdl` simulates Illumina reads for RADseq, exome, or WGS data and performs the SNP and genotype calling and genetic map building.\r +\r +By now, [GATK](https://github.com/broadinstitute/gatk), [Freebayes](https://github.com/ekg/freebayes) are included for SNP calling; [updog](https://github.com/dcgerard/updog), [polyRAD](https://github.com/lvclark/polyRAD), [SuperMASSA](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0030906) for dosage calling; and [OneMap](https://github.com/augusto-garcia/onemap), and [GUSMap](https://github.com/tpbilton/GUSMap) for linkage map build.\r +\r +![math_meth2](https://user-images.githubusercontent.com/7572527/203172239-e4d2d857-84e2-48c5-bb88-01052a287004.png)\r +\r +## How to use\r +\r +Multiple systems are available to run WDL workflows such as Cromwell, miniWDL, and dxWDL. See further information in the [openwdl documentation](https://github.com/openwdl/wdl#execution-engines).\r +\r +To run a pipeline, first navigate to [Reads2Map releases page](https://github.com/Cristianetaniguti/Reads2Map/releases), search for the pipeline tag you which to run, and download the pipeline’s assets (the WDL workflow, the JSON, and the ZIP with accompanying dependencies).\r +\r +## Documentation\r +\r +Check the description of the inputs for the pipelines:\r +\r +* [EmpiricalReads2Map (EmpiricalSNPCalling and EmpiricalMaps)](https://cristianetaniguti.github.io/Tutorials/Reads2Map/EmpiricalReads.html)\r +\r +* [SimulatedReads2Map](https://cristianetaniguti.github.io/Tutorials/Reads2Map/simulatedreads.html)\r +\r +Check how to evaluate the workflows results in Reads2MapApp Shiny:\r +\r +* [Reads2MapApp](https://github.com/Cristianetaniguti/Reads2MapApp)\r +\r +Once you selected the best pipeline using a subset of your data, you can build a complete high-density linkage map:\r +\r +* [A Guide to Build High-Density Linkage 
Maps](https://cristianetaniguti.github.io/Tutorials/onemap/Quick_HighDens/High_density_maps.html)\r +\r +Check more information and examples of usage in:\r +\r +* [Taniguti, C. H., Taniguti, L. M., Amadeu, R. R., Mollinari, M., Da, G., Pereira, S., Riera-Lizarazu, O., Lau, J., Byrne, D., de Siqueira Gesteira, G., De, T., Oliveira, P., Ferreira, G. C., & Franco Garcia, A. A. Developing best practices for genotyping-by-sequencing analysis using linkage maps as benchmarks. BioRxiv. https://doi.org/10.1101/2022.11.24.517847](https://www.biorxiv.org/content/10.1101/2022.11.24.517847v2)\r +\r +## Third-party software and images\r +\r +- [BWA](https://github.com/lh3/bwa) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Used to align simulated reads to reference;\r +- [cutadapt](https://github.com/marcelm/cutadapt) in [cristaniguti/ pirs-ddrad-cutadapt:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/pirs-ddrad-cutadapt): Trim simulated reads;\r +- [ddRADseqTools](https://github.com/GGFHF/ddRADseqTools) in [cristaniguti/ pirs-ddrad-cutadapt:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/pirs-ddrad-cutadapt): Set of applications useful to in silico design and testing of double digest RADseq (ddRADseq) experiments;\r +- [Freebayes](https://github.com/ekg/freebayes) in [Cristaniguti/freebayes:0.0.1](): Variant call step;\r +- [GATK](https://github.com/broadinstitute/gatk) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Variant call step using Haplotype Caller, GenomicsDBImport and GenotypeGVCFs;\r +- [PedigreeSim](https://github.com/PBR/pedigreeSim?files=1) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Simulates progeny genotypes from parents genotypes for different 
types of populations;\r +- [picard](https://github.com/broadinstitute/picard) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Process alignment files;\r +- [pirs](https://github.com/galaxy001/pirs) in [cristaniguti/ pirs-ddrad-cutadapt:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/pirs-ddrad-cutadapt): To generate simulates paired-end reads from a reference genome;\r +- [samtools](https://github.com/samtools/samtools) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Process alignment files;\r +- [SimuSCoP](https://github.com/qasimyu/simuscop) in [cristaniguti/simuscopr:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/simuscopr): Exome and WGS Illumina reads simulations;\r +- [RADinitio](http://catchenlab.life.illinois.edu/radinitio/) in [ cristaniguti/radinitio:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/radinitio): RADseq Illumina reads simulation;\r +- [SuperMASSA](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0030906) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Efficient Exact Maximum a Posteriori Computation for Bayesian SNP Genotyping in Polyploids;\r +- [bcftools](https://github.com/samtools/bcftools) in [lifebitai/bcftools:1.10.2](https://hub.docker.com/r/lifebitai/bcftools): utilities for variant calling and manipulating VCFs and BCFs;\r +- [vcftools](http://vcftools.sourceforge.net/) in [cristaniguti/split_markers:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/split_markers): program package designed for working with VCF files.\r +- [MCHap](https://github.com/PlantandFoodResearch/MCHap) in [cristaniguti/mchap:0.7.0](https://hub.docker.com/repository/docker/cristaniguti/mchap): Polyploid 
micro-haplotype assembly using Markov chain Monte Carlo simulation.\r +\r +### R packages\r +\r +- [OneMap](https://github.com/augusto-garcia/onemap) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Is a software for constructing genetic maps in experimental crosses: full-sib, RILs, F2 and backcrosses;\r +- [Reads2MapTools](https://github.com/Cristianetaniguti/Reads2MapTools) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Support package to perform mapping populations simulations and genotyping for OneMap genetic map building\r +- [GUSMap](https://github.com/tpbilton/GUSMap): Genotyping Uncertainty with Sequencing data and linkage MAPping\r +- [updog](https://github.com/dcgerard/updog) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Flexible Genotyping of Polyploids using Next Generation Sequencing Data\r +- [polyRAD](https://github.com/lvclark/polyRAD) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Genotype Calling with Uncertainty from Sequencing Data in Polyploids\r +- [Reads2MapApp](https://github.com/Cristianetaniguti/Reads2MapApp) in [cristaniguti/reads2mapApp:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Shiny app to evaluate Reads2Map workflows results\r +- [simuscopR](https://github.com/Cristianetaniguti/simuscopR) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Wrap-up R package for SimusCop simulations.""" ; + schema1:keywords "WDL, linkage_map, variant_calling" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "EmpiricalReads2Map" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/409?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. 
Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. 
\r +- At the **biological level**, it is important to link observed **conformational ensembles**, to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDKe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.488.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_flexdyn/cwl" ; + schema1:license "other-open" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Protein conformational ensembles generation" ; + schema1:sdDatePublished "2024-08-05 10:30:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/488/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 203809 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 39465 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-06T10:10:37Z" ; + schema1:dateModified "2024-04-22T10:41:26Z" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. 
Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. 
\r +- At the **biological level**, it is important to link observed **conformational ensembles**, to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDKe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/488?version=1" ; + schema1:keywords "" ; + schema1:license "" ; + schema1:name "CWL Protein conformational ensembles generation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_flexdyn/cwl/workflow.cwl" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1027?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/viralrecon" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/viralrecon" ; + schema1:sdDatePublished "2024-08-05 10:23:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1027/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6372 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:20Z" ; + schema1:dateModified "2024-06-11T12:55:20Z" ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1027?version=10" ; + schema1:keywords "Amplicon, ARTIC, Assembly, covid-19, covid19, illumina, long-read-sequencing, Metagenomics, nanopore, ONT, oxford-nanopore, SARS-CoV-2, variant-calling, viral, Virus" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/viralrecon" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1027?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A Nanostring nCounter analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1003?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/nanostring" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nanostring" ; + schema1:sdDatePublished "2024-08-05 10:23:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1003/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8834 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:06Z" ; + schema1:dateModified "2024-06-11T12:55:06Z" ; + schema1:description "A Nanostring nCounter analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1003?version=4" ; + schema1:keywords "nanostring, nanostringnorm" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nanostring" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1003?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """RNAseq workflow UMG: Here we introduce a scientific workflow implementing several open-source software executed by Galaxy parallel scripting language in an high-performance computing environment. We have applied the workflow to a single-cardiomyocyte RNA-seq data retrieved from Gene Expression Omnibus database. The workflow allows for the analysis (alignment, QC, sort and count reads, statistics generation) of raw RNA-seq data and seamless integration of differential expression results into a configurable script code.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.412.1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for RNAseq_UMG_SDumont_v1" ; + schema1:sdDatePublished "2024-08-05 10:31:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/412/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 19967 ; + schema1:creator ; + schema1:dateCreated "2022-12-06T19:22:16Z" ; + schema1:dateModified "2023-02-13T14:05:45Z" ; + schema1:description """RNAseq workflow UMG: Here we introduce a scientific workflow implementing several open-source software executed by Galaxy parallel scripting language in an high-performance computing environment. We have applied the workflow to a single-cardiomyocyte RNA-seq data retrieved from Gene Expression Omnibus database. The workflow allows for the analysis (alignment, QC, sort and count reads, statistics generation) of raw RNA-seq data and seamless integration of differential expression results into a configurable script code.\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "RNAseq_UMG_SDumont_v1" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/412?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.821.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_dna_helparms/docker" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Docker Structural DNA helical parameters tutorial" ; + schema1:sdDatePublished "2024-08-05 10:24:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/821/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 864 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-22T10:34:40Z" ; + schema1:dateModified "2024-05-22T13:50:29Z" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Docker Structural DNA helical parameters tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_dna_helparms/docker/Dockerfile" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Mutations Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.290.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_md_setup_mutations/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein MD Setup tutorial with mutations" ; + schema1:sdDatePublished "2024-08-05 10:30:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/290/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7412 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T08:55:52Z" ; + schema1:dateModified "2023-04-14T08:56:54Z" ; + schema1:description """# Mutations Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/290?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein MD Setup tutorial with mutations" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_md_setup_mutations/python/workflow.py" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """![](https://github.com/AusARG/pipesnake/blob/main/docs/images/pipesnake_Logo.png)\r + \r +\r +Welcome to the *pipesnake*. \r +Let's get started. \r +\r +---\r +\r +# Introduction\r +\r +**pipesnake** is a bioinformatics best-practice analysis pipeline for phylogenomic reconstruction starting from short-read 'second-generation' sequencing data.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies.\r +\r +---\r +\r +# Motivation + Project Background\r +\r +We developed *pipesnake* as part of the [***Aus***tralian ***A***mphibian and ***R***eptile ***G***enomics](https://ausargenomics.com/) (*AusARG*) initiative. **AusARG** is a national collaborative project aiming to facilitate the development of genomics resources for Australia's unique amphibian and reptile fauna. This pipeline was developed specifically as part of the *AusARG Phylogenomics Working Group* with the goal of collecting a consistent set of phylogenomic data for all of Australia's frogs and reptiles, under similar assembly, alignment, and tree estimation procedures. \r +\r +*pipesnake* is however, applicable to much broader phylogenomic questions, and is appropriate for processing exon-capture or transcriptomic data, so long as the **input is second-generation (short-read) data**. 
""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/737?version=1" ; + schema1:isBasedOn "https://github.com/AusARG/pipesnake" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for pipesnake" ; + schema1:sdDatePublished "2024-08-05 10:25:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/737/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 3866 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1887 ; + schema1:creator , + ; + schema1:dateCreated "2024-02-02T05:15:00Z" ; + schema1:dateModified "2024-02-08T00:03:01Z" ; + schema1:description """![](https://github.com/AusARG/pipesnake/blob/main/docs/images/pipesnake_Logo.png)\r + \r +\r +Welcome to the *pipesnake*. \r +Let's get started. \r +\r +---\r +\r +# Introduction\r +\r +**pipesnake** is a bioinformatics best-practice analysis pipeline for phylogenomic reconstruction starting from short-read 'second-generation' sequencing data.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies.\r +\r +---\r +\r +# Motivation + Project Background\r +\r +We developed *pipesnake* as part of the [***Aus***tralian ***A***mphibian and ***R***eptile ***G***enomics](https://ausargenomics.com/) (*AusARG*) initiative. 
**AusARG** is a national collaborative project aiming to facilitate the development of genomics resources for Australia's unique amphibian and reptile fauna. This pipeline was developed specifically as part of the *AusARG Phylogenomics Working Group* with the goal of collecting a consistent set of phylogenomic data for all of Australia's frogs and reptiles, under similar assembly, alignment, and tree estimation procedures. \r +\r +*pipesnake* is however, applicable to much broader phylogenomic questions, and is appropriate for processing exon-capture or transcriptomic data, so long as the **input is second-generation (short-read) data**. """ ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "pipesnake" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/737?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-08-05 10:23:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7200 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:58Z" ; + schema1:dateModified "2024-06-11T12:54:58Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "This workflow is based on the idea of comparing different gene sets through their semantic interpretation. In many cases, the user studies a specific phenotype (e.g. disease) by analyzing lists of genes resulting from different samples or patients. Their pathway analysis could result in different semantic networks, revealing mechanistic and phenotypic divergence between these gene sets. The workflow of BioTranslator Comparative Analysis compares quantitatively the outputs of pathway analysis, based on the topology of the underlying ontological graph, in order to derive a semantic similarity value for each pair of the initial gene sets. The workflow is available in a Galaxy application and can be used for 14 species. The algorithm accepts as input a batch of gene sets, such as BioTranslator, for the same organism. It performs pathway analysis according to the user-selected ontology and then it compares the derived semantic networks and extracts a matrix with their distances, as well as a respective heatmap." 
; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/193?version=1" ; + schema1:isBasedOn "http://www.biotranslator.gr:8080/u/thodk/w/workflow-of-biotranslator-comparative-analysis" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Workflow of BioTranslator Comparative Analysis" ; + schema1:sdDatePublished "2024-08-05 10:33:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/193/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3984 ; + schema1:dateCreated "2021-09-26T18:20:05Z" ; + schema1:dateModified "2023-01-16T13:53:00Z" ; + schema1:description "This workflow is based on the idea of comparing different gene sets through their semantic interpretation. In many cases, the user studies a specific phenotype (e.g. disease) by analyzing lists of genes resulting from different samples or patients. Their pathway analysis could result in different semantic networks, revealing mechanistic and phenotypic divergence between these gene sets. The workflow of BioTranslator Comparative Analysis compares quantitatively the outputs of pathway analysis, based on the topology of the underlying ontological graph, in order to derive a semantic similarity value for each pair of the initial gene sets. The workflow is available in a Galaxy application and can be used for 14 species. The algorithm accepts as input a batch of gene sets, such as BioTranslator, for the same organism. It performs pathway analysis according to the user-selected ontology and then it compares the derived semantic networks and extracts a matrix with their distances, as well as a respective heatmap." 
; + schema1:image ; + schema1:keywords "Semantic Network Analysis, Semantic Comparison, Pathway Analysis" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Workflow of BioTranslator Comparative Analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/193?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 29724 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=14" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-08-05 10:22:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=14" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10290 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:39Z" ; + schema1:dateModified "2024-06-11T12:54:39Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=14" ; + schema1:version 14 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Simulations and figures supporting the manuscript \"Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake\"" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.511.1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake" ; + schema1:sdDatePublished "2024-08-05 10:29:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/511/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 305 ; + schema1:creator , + , + , + , + , + ; + schema1:dateCreated "2023-06-21T09:51:56Z" ; + schema1:dateModified "2023-08-18T11:04:05Z" ; + schema1:description "Simulations and figures supporting the manuscript \"Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake\"" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/511?version=4" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/511?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 75460 . 
+ + a schema1:Dataset ; + schema1:datePublished "2021-12-20T17:04:47.625180" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-consensus-from-variation" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-consensus-from-variation/COVID-19-CONSENSUS-CONSTRUCTION" ; + schema1:sdDatePublished "2021-12-21 03:01:01 +0000" ; + schema1:softwareVersion "v0.2.2" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/986?version=13" ; + schema1:isBasedOn "https://github.com/nf-core/fetchngs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fetchngs" ; + schema1:sdDatePublished "2024-08-05 10:23:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/986/ro_crate?version=13" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9686 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:53Z" ; + schema1:dateModified "2024-06-11T12:54:53Z" ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/986?version=13" ; + schema1:keywords "ddbj, download, ena, FASTQ, geo, sra, synapse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fetchngs" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/986?version=13" ; + schema1:version 13 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "The pangenome graph construction pipeline renders a collection of sequences into a pangenome graph. Its goal is to build a graph that is locally directed and acyclic while preserving large-scale variation. Maintaining local linearity is important for interpretation, visualization, mapping, comparative genomics, and reuse of pangenome graphs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1007?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/pangenome" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/pangenome" ; + schema1:sdDatePublished "2024-08-05 10:23:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1007/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11481 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:07Z" ; + schema1:dateModified "2024-06-11T12:55:07Z" ; + schema1:description "The pangenome graph construction pipeline renders a collection of sequences into a pangenome graph. Its goal is to build a graph that is locally directed and acyclic while preserving large-scale variation. Maintaining local linearity is important for interpretation, visualization, mapping, comparative genomics, and reuse of pangenome graphs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1007?version=3" ; + schema1:keywords "pangenome" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/pangenome" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1007?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-05-30T07:35:00.361997" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "cutandrun/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/963?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/airrflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/airrflow" ; + schema1:sdDatePublished "2024-08-05 10:24:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/963/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8509 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:37Z" ; + schema1:dateModified "2024-06-11T12:54:37Z" ; + schema1:description "B and T cell repertoire analysis pipeline with the Immcantation framework." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/963?version=13" ; + schema1:keywords "airr, b-cell, immcantation, immunorepertoire, repseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/airrflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/963?version=6" ; + schema1:version 6 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. 
Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. 
\r +- At the **biological level**, it is important to link observed **conformational ensembles**, to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDKe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.486.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_flexdyn" ; + schema1:license "other-open" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein conformational ensembles generation" ; + schema1:sdDatePublished "2024-08-05 10:30:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/486/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 108347 ; + schema1:creator , + ; + schema1:dateCreated "2023-05-31T13:19:38Z" ; + schema1:dateModified "2023-05-31T13:24:08Z" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from **3D structures** and analysing their **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionally active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. 
Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. 
\r +- At the **biological level**, it is important to link observed **conformational ensembles** to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDBe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic considerations** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster, more approximate methods such as **Elastic Network Models** and **Normal Mode Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/486?version=2" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "" ; + schema1:name "Jupyter Notebook Protein conformational ensembles generation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_flexdyn/blob/main/biobb_wf_flexdyn/notebooks/biobb_wf_flexdyn.ipynb" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2023-01-13T13:28:37.647185" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "atacseq/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.3" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "This workflow correspond to the Genome-wide alternative splicing analysis training. 
It allows to analyze isoform switching by making use of IsoformSwitchAnalyzeR." ; + schema1:hasPart , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.472.1" ; + schema1:license "CC-BY-NC-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genome-wide alternative splicing analysis" ; + schema1:sdDatePublished "2024-08-05 10:30:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/472/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 17028 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 65307 ; + schema1:creator ; + schema1:dateCreated "2023-05-17T11:21:57Z" ; + schema1:dateModified "2023-05-17T12:59:35Z" ; + schema1:description "This workflow correspond to the Genome-wide alternative splicing analysis training. It allows to analyze isoform switching by making use of IsoformSwitchAnalyzeR." ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/472?version=3" ; + schema1:keywords "Transcriptomics, alternative-splicing, GTN" ; + schema1:license "https://spdx.org/licenses/CC-BY-NC-4.0" ; + schema1:name "Genome-wide alternative splicing analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/472?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 46064 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-08-05 10:23:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4215 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:03Z" ; + schema1:dateModified "2024-06-11T12:55:03Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Differential abundance analysis" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/981?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/differentialabundance" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/differentialabundance" ; + schema1:sdDatePublished "2024-08-05 10:24:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/981/ro_crate?version=8" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15717 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:49Z" ; + schema1:dateModified "2024-06-11T12:54:49Z" ; + schema1:description "Differential abundance analysis" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/981?version=8" ; + schema1:keywords "ATAC-seq, ChIP-seq, deseq2, differential-abundance, differential-expression, gsea, limma, microarray, rna-seq, shiny" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/differentialabundance" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/981?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/965?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/atacseq" ; + schema1:sdDatePublished "2024-08-05 10:24:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/965/ro_crate?version=9" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11439 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:42Z" ; + schema1:dateModified "2024-06-11T12:54:42Z" ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/965?version=8" ; + schema1:keywords "ATAC-seq, chromatin-accessibiity" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/atacseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/965?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/986?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/fetchngs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fetchngs" ; + schema1:sdDatePublished "2024-08-05 10:23:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/986/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6338 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:52Z" ; + schema1:dateModified "2024-06-11T12:54:52Z" ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/986?version=13" ; + schema1:keywords "ddbj, download, ena, FASTQ, geo, sra, synapse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fetchngs" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/986?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.286.2" ; + schema1:isBasedOn 
"https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_dna_helparms/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Structural DNA helical parameters tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/286/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 22900 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-22T10:14:36Z" ; + schema1:dateModified "2023-01-16T13:58:32Z" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/286?version=3" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Structural DNA helical parameters tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_dna_helparms/python/workflow.py" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Conformational Transitions calculations tutorial using BioExcel Building Blocks (biobb) and GOdMD\r +\r +This tutorial aims to illustrate the process of computing a conformational transition between two known structural conformations of a protein, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.824.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_godmd/docker" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Docker Protein Conformational Transitions calculations tutorial" ; + schema1:sdDatePublished "2024-08-05 10:24:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/824/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 740 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-22T10:50:04Z" ; + schema1:dateModified "2024-05-22T13:47:51Z" ; + schema1:description """# Protein Conformational Transitions calculations tutorial using BioExcel Building Blocks (biobb) and GOdMD\r +\r +This tutorial aims to illustrate the process of computing a conformational transition between two known structural conformations of a protein, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Docker Protein Conformational Transitions calculations tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_godmd/docker/Dockerfile" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. 
With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.55.6" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_pmx_tutorial" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Mutation Free Energy Calculations" ; + schema1:sdDatePublished "2024-08-05 10:25:30 +0100" ; + schema1:url "https://workflowhub.eu/workflows/55/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 48304 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-04T14:26:13Z" ; + schema1:dateModified "2024-03-05T09:41:01Z" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. 
With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/55?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Mutation Free Energy Calculations" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_pmx_tutorial/blob/main/biobb_wf_pmx_tutorial/notebooks/biobb_wf_pmx_tutorial.ipynb" ; + schema1:version 6 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-08-05 10:23:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4215 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:03Z" ; + schema1:dateModified "2024-06-11T12:55:03Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=3" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# Cancer Invasion Workflow\r +\r +## Table of Contents\r +\r +- [Cancer Invasion Workflow](#cancer-invasion-workflow)\r + - [Table of Contents](#table-of-contents)\r + - [Description](#description)\r + - [Contents](#contents)\r + - [Building Blocks](#building-blocks)\r + - [Workflows](#workflows)\r + - [Resources](#resources)\r + - [Tests](#tests)\r + - [Instructions](#instructions)\r + - [Local machine](#local-machine)\r + - [Requirements](#requirements)\r + - [Usage steps](#usage-steps)\r + - [MareNostrum 4](#marenostrum-4)\r + - [Requirements in MN4](#requirements-in-mn4)\r + - [Usage steps in MN4](#usage-steps-in-mn4)\r + - [Mahti or Puhti](#mahti-or-puhti)\r + - [Requirements](#requirements)\r + - [Steps](#steps)\r + - [License](#license)\r + - [Contact](#contact)\r +\r +## Description\r +\r +Uses multiscale simulations to describe cancer progression into invasion.\r +\r +The workflow uses the following building blocks, described in order of execution:\r +\r +1. 
PhysiBoSS-Invasion\r +\r +For details on individual workflow steps, see the user documentation for each building block.\r +\r +[`GitHub repository`]()\r +\r +\r +## Contents\r +\r +### Building Blocks\r +\r +The ``BuildingBlocks`` folder contains the script to install the\r +Building Blocks used in the Cancer Invasion Workflow.\r +\r +### Workflows\r +\r +The ``Workflow`` folder contains the workflows implementations.\r +\r +Currently contains the implementation using PyCOMPSs and Snakemake (in progress).\r +\r +### Resources\r +\r +The ``Resources`` folder contains dataset files.\r +\r +### Tests\r +\r +The ``Tests`` folder contains the scripts that run each Building Block\r +used in the workflow for the given small dataset.\r +They can be executed individually for testing purposes.\r +\r +## Instructions\r +\r +### Local machine\r +\r +This section explains the requirements and usage for the Cancer Invasion Workflow in a laptop or desktop computer.\r +\r +#### Requirements\r +\r +- [`permedcoe`](https://github.com/PerMedCoE/permedcoe) package\r +- [PyCOMPSs](https://pycompss.readthedocs.io/en/stable/Sections/00_Quickstart.html) / [Snakemake](https://snakemake.readthedocs.io/en/stable/)\r +- [Singularity](https://sylabs.io/guides/3.0/user-guide/installation.html)\r +\r +#### Usage steps\r +\r +1. Clone this repository:\r +\r + ```bash\r + git clone https://github.com/PerMedCoE/cancer-invasion-workflow\r + ```\r +\r +2. Install the Building Blocks required for the Cancer Invasion Workflow:\r +\r + ```bash\r + cancer-invasion-workflow/BuildingBlocks/./install_BBs.sh\r + ```\r +\r +3. 
Get the required Building Block images from the project [B2DROP](https://b2drop.bsc.es/index.php/f/444350):\r +\r + - Required images:\r + - PhysiCell-Invasion.singularity\r +\r + The path where these files are stored **MUST be exported in the `PERMEDCOE_IMAGES`** environment variable.\r +\r + > :warning: **TIP**: These containers can be built manually as follows (be patient since some of them may take some time):\r + 1. Clone the `BuildingBlocks` repository\r + ```bash\r + git clone https://github.com/PerMedCoE/BuildingBlocks.git\r + ```\r + 2. Build the required Building Block images\r + ```bash\r + cd BuildingBlocks/Resources/images\r + sudo singularity build PhysiCell-Invasion.sif PhysiCell-Invasion.singularity\r + cd ../../..\r + ```\r +\r +**If using PyCOMPSs in local PC** (make sure that PyCOMPSs in installed):\r +\r +4. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflows/PyCOMPSs\r + ```\r +\r +5. Execute `./run.sh`\r +\r +**If using Snakemake in local PC** (make sure that SnakeMake is installed):\r +\r +4. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflows/SnakeMake\r + ```\r +\r +5. Execute `./run.sh`\r + > **TIP**: If you want to run the workflow with a different dataset, please update the `run.sh` script setting the `dataset` variable to the new dataset folder and their file names.\r +\r +\r +### MareNostrum 4\r +\r +This section explains the requirements and usage for the Cancer Invasion Workflow in the MareNostrum 4 supercomputer.\r +\r +#### Requirements in MN4\r +\r +- Access to MN4\r +\r +All Building Blocks are already installed in MN4, and the Cancer Invasion Workflow available.\r +\r +#### Usage steps in MN4\r +\r +1. 
Load the `COMPSs`, `Singularity` and `permedcoe` modules\r +\r + ```bash\r + export COMPSS_PYTHON_VERSION=3\r + module load COMPSs/3.1\r + module load singularity/3.5.2\r + module use /apps/modules/modulefiles/tools/COMPSs/libraries\r + module load permedcoe\r + ```\r +\r + > **TIP**: Include the loading into your `${HOME}/.bashrc` file to load it automatically on the session start.\r +\r + This commands will load COMPSs and the permedcoe package which provides all necessary dependencies, as well as the path to the singularity container images (`PERMEDCOE_IMAGES` environment variable) and testing dataset (`CANCERINVASIONWORKFLOW_DATASET` environment variable).\r +\r +2. Get a copy of the pilot workflow into your desired folder\r +\r + ```bash\r + mkdir desired_folder\r + cd desired_folder\r + get_cancerinvasionworkflow\r + ```\r +\r +3. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflow/PyCOMPSs\r + ```\r +\r +4. Execute `./launch.sh`\r +\r + This command will launch a job into the job queuing system (SLURM) requesting 2 nodes (one node acting half master and half worker, and other full worker node) for 20 minutes, and is prepared to use the singularity images that are already deployed in MN4 (located into the `PERMEDCOE_IMAGES` environment variable). It uses the dataset located into `../../Resources/data` folder.\r +\r + > :warning: **TIP**: If you want to run the workflow with a different dataset, please edit the `launch.sh` script and define the appropriate dataset path.\r +\r + After the execution, a `results` folder will be available with with Cancer Invasion Workflow results.\r +\r +### Mahti or Puhti\r +\r +This section explains how to run the Cancer Invasion workflow on CSC supercomputers using SnakeMake.\r +\r +#### Requirements\r +\r +- Install snakemake (or check if there is a version installed using `module spider snakemake`)\r +- Install workflow, using the same steps as for the local machine. 
With the exception that containers have to be built elsewhere.\r +\r +#### Steps\r +\r +\r +1. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflow/SnakeMake\r + ```\r +\r +2. Edit `launch.sh` with the correct partition, account, and resource specifications. \r +\r +3. Execute `./launch.sh`\r +\r + > :warning: Snakemake provides a `--cluster` flag, but this functionality should be avoided as it's really not suited for HPC systems.\r +\r +## License\r +\r +[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\r +\r +## Contact\r +\r +\r +\r +This software has been developed for the [PerMedCoE project](https://permedcoe.eu/), funded by the European Commission (EU H2020 [951773](https://cordis.europa.eu/project/id/951773)).\r +\r +![](https://permedcoe.eu/wp-content/uploads/2020/11/logo_1.png "PerMedCoE")\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/692?version=1" ; + schema1:isBasedOn "https://github.com/PerMedCoE/cancer-invasion-workflow" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PerMedCoE Cancer Diagnosis" ; + schema1:sdDatePublished "2024-08-05 10:26:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/692/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 963 ; + schema1:creator ; + schema1:dateCreated "2023-12-20T13:01:39Z" ; + schema1:dateModified "2024-01-24T09:48:16Z" ; + schema1:description """# Cancer Invasion Workflow\r +\r +## Table of Contents\r +\r +- [Cancer Invasion Workflow](#cancer-invasion-workflow)\r + - [Table of Contents](#table-of-contents)\r + - [Description](#description)\r + - [Contents](#contents)\r + - [Building Blocks](#building-blocks)\r + - [Workflows](#workflows)\r + - [Resources](#resources)\r + - [Tests](#tests)\r + - [Instructions](#instructions)\r + - [Local machine](#local-machine)\r + - [Requirements](#requirements)\r + - [Usage steps](#usage-steps)\r + - [MareNostrum 4](#marenostrum-4)\r + - [Requirements in MN4](#requirements-in-mn4)\r + - [Usage steps in MN4](#usage-steps-in-mn4)\r + - [Mahti or Puhti](#mahti-or-puhti)\r + - [Requirements](#requirements)\r + - [Steps](#steps)\r + - [License](#license)\r + - [Contact](#contact)\r +\r +## Description\r +\r +Uses multiscale simulations to describe cancer progression into invasion.\r +\r +The workflow uses the following building blocks, described in order of execution:\r +\r +1. 
PhysiBoSS-Invasion\r +\r +For details on individual workflow steps, see the user documentation for each building block.\r +\r +[`GitHub repository`]()\r +\r +\r +## Contents\r +\r +### Building Blocks\r +\r +The ``BuildingBlocks`` folder contains the script to install the\r +Building Blocks used in the Cancer Invasion Workflow.\r +\r +### Workflows\r +\r +The ``Workflow`` folder contains the workflows implementations.\r +\r +Currently contains the implementation using PyCOMPSs and Snakemake (in progress).\r +\r +### Resources\r +\r +The ``Resources`` folder contains dataset files.\r +\r +### Tests\r +\r +The ``Tests`` folder contains the scripts that run each Building Block\r +used in the workflow for the given small dataset.\r +They can be executed individually for testing purposes.\r +\r +## Instructions\r +\r +### Local machine\r +\r +This section explains the requirements and usage for the Cancer Invasion Workflow in a laptop or desktop computer.\r +\r +#### Requirements\r +\r +- [`permedcoe`](https://github.com/PerMedCoE/permedcoe) package\r +- [PyCOMPSs](https://pycompss.readthedocs.io/en/stable/Sections/00_Quickstart.html) / [Snakemake](https://snakemake.readthedocs.io/en/stable/)\r +- [Singularity](https://sylabs.io/guides/3.0/user-guide/installation.html)\r +\r +#### Usage steps\r +\r +1. Clone this repository:\r +\r + ```bash\r + git clone https://github.com/PerMedCoE/cancer-invasion-workflow\r + ```\r +\r +2. Install the Building Blocks required for the Cancer Invasion Workflow:\r +\r + ```bash\r + cancer-invasion-workflow/BuildingBlocks/./install_BBs.sh\r + ```\r +\r +3. 
Get the required Building Block images from the project [B2DROP](https://b2drop.bsc.es/index.php/f/444350):\r +\r + - Required images:\r + - PhysiCell-Invasion.singularity\r +\r + The path where these files are stored **MUST be exported in the `PERMEDCOE_IMAGES`** environment variable.\r +\r + > :warning: **TIP**: These containers can be built manually as follows (be patient since some of them may take some time):\r + 1. Clone the `BuildingBlocks` repository\r + ```bash\r + git clone https://github.com/PerMedCoE/BuildingBlocks.git\r + ```\r + 2. Build the required Building Block images\r + ```bash\r + cd BuildingBlocks/Resources/images\r + sudo singularity build PhysiCell-Invasion.sif PhysiCell-Invasion.singularity\r + cd ../../..\r + ```\r +\r +**If using PyCOMPSs in local PC** (make sure that PyCOMPSs in installed):\r +\r +4. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflows/PyCOMPSs\r + ```\r +\r +5. Execute `./run.sh`\r +\r +**If using Snakemake in local PC** (make sure that SnakeMake is installed):\r +\r +4. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflows/SnakeMake\r + ```\r +\r +5. Execute `./run.sh`\r + > **TIP**: If you want to run the workflow with a different dataset, please update the `run.sh` script setting the `dataset` variable to the new dataset folder and their file names.\r +\r +\r +### MareNostrum 4\r +\r +This section explains the requirements and usage for the Cancer Invasion Workflow in the MareNostrum 4 supercomputer.\r +\r +#### Requirements in MN4\r +\r +- Access to MN4\r +\r +All Building Blocks are already installed in MN4, and the Cancer Invasion Workflow available.\r +\r +#### Usage steps in MN4\r +\r +1. 
Load the `COMPSs`, `Singularity` and `permedcoe` modules\r +\r + ```bash\r + export COMPSS_PYTHON_VERSION=3\r + module load COMPSs/3.1\r + module load singularity/3.5.2\r + module use /apps/modules/modulefiles/tools/COMPSs/libraries\r + module load permedcoe\r + ```\r +\r + > **TIP**: Include the loading into your `${HOME}/.bashrc` file to load it automatically on the session start.\r +\r + This commands will load COMPSs and the permedcoe package which provides all necessary dependencies, as well as the path to the singularity container images (`PERMEDCOE_IMAGES` environment variable) and testing dataset (`CANCERINVASIONWORKFLOW_DATASET` environment variable).\r +\r +2. Get a copy of the pilot workflow into your desired folder\r +\r + ```bash\r + mkdir desired_folder\r + cd desired_folder\r + get_cancerinvasionworkflow\r + ```\r +\r +3. Go to `Workflow/PyCOMPSs` folder\r +\r + ```bash\r + cd Workflow/PyCOMPSs\r + ```\r +\r +4. Execute `./launch.sh`\r +\r + This command will launch a job into the job queuing system (SLURM) requesting 2 nodes (one node acting half master and half worker, and other full worker node) for 20 minutes, and is prepared to use the singularity images that are already deployed in MN4 (located into the `PERMEDCOE_IMAGES` environment variable). It uses the dataset located into `../../Resources/data` folder.\r +\r + > :warning: **TIP**: If you want to run the workflow with a different dataset, please edit the `launch.sh` script and define the appropriate dataset path.\r +\r + After the execution, a `results` folder will be available with with Cancer Invasion Workflow results.\r +\r +### Mahti or Puhti\r +\r +This section explains how to run the Cancer Invasion workflow on CSC supercomputers using SnakeMake.\r +\r +#### Requirements\r +\r +- Install snakemake (or check if there is a version installed using `module spider snakemake`)\r +- Install workflow, using the same steps as for the local machine. 
With the exception that containers have to be built elsewhere.\r +\r +#### Steps\r +\r +\r +1. Go to `Workflow/SnakeMake` folder\r +\r + ```bash\r + cd Workflow/SnakeMake\r + ```\r +\r +2. Edit `launch.sh` with the correct partition, account, and resource specifications. \r +\r +3. Execute `./launch.sh`\r +\r + > :warning: Snakemake provides a `--cluster` flag, but this functionality should be avoided as it's really not suited for HPC systems.\r +\r +## License\r +\r +[Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)\r +\r +## Contact\r +\r +\r +\r +This software has been developed for the [PerMedCoE project](https://permedcoe.eu/), funded by the European Commission (EU H2020 [951773](https://cordis.europa.eu/project/id/951773)).\r +\r +![](https://permedcoe.eu/wp-content/uploads/2020/11/logo_1.png "PerMedCoE")\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "PerMedCoE Cancer Diagnosis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/692?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:description """\r +\r +\r +GermlineStructuralV-nf is a pipeline for identifying structural variant events in human Illumina short read whole genome sequence data. GermlineStructuralV-nf identifies structural variant and copy number events from BAM files using [Manta](https://github.com/Illumina/manta/blob/master/docs/userGuide/README.md#de-novo-calling), [Smoove](https://github.com/brentp/smoove), and [TIDDIT](https://github.com/SciLifeLab/TIDDIT). Variants are then merged using [SURVIVOR](https://github.com/fritzsedlazeck/SURVIVOR), and annotated by [AnnotSV](https://pubmed.ncbi.nlm.nih.gov/29669011/). 
The pipeline is written in Nextflow and uses Singularity/Docker to run containerised tools.""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.431.1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/Germline-StructuralV-nf" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for GermlineStructuralV-nf" ; + schema1:sdDatePublished "2024-08-05 10:26:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/431/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4147 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2023-01-31T23:40:40Z" ; + schema1:dateModified "2023-12-18T05:36:07Z" ; + schema1:description """\r +\r +\r +GermlineStructuralV-nf is a pipeline for identifying structural variant events in human Illumina short read whole genome sequence data. GermlineStructuralV-nf identifies structural variant and copy number events from BAM files using [Manta](https://github.com/Illumina/manta/blob/master/docs/userGuide/README.md#de-novo-calling), [Smoove](https://github.com/brentp/smoove), and [TIDDIT](https://github.com/SciLifeLab/TIDDIT). Variants are then merged using [SURVIVOR](https://github.com/fritzsedlazeck/SURVIVOR), and annotated by [AnnotSV](https://pubmed.ncbi.nlm.nih.gov/29669011/). The pipeline is written in Nextflow and uses Singularity/Docker to run containerised tools.""" ; + schema1:keywords "Bioinformatics, Annotation, Genomics, Nextflow, rare diseases, variant_calling, structural variants, manta, smoove, tiddit, annotsv, survivor" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "GermlineStructuralV-nf" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/431?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# ![sanger-tol/insdcdownload](docs/images/sanger-tol-insdcdownload_logo.png)\r +\r +[![GitHub Actions CI Status](https://github.com/sanger-tol/insdcdownload/workflows/nf-core%20CI/badge.svg)](https://github.com/sanger-tol/insdcdownload/actions?query=workflow%3A%22nf-core+CI%22)\r +\r +\r +\r +[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.7155119-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.7155119)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.04.0-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +\r +[![Get help on Slack](http://img.shields.io/badge/slack-SangerTreeofLife%20%23pipelines-4A154B?labelColor=000000&logo=slack)](https://SangerTreeofLife.slack.com/channels/pipelines)\r +[![Follow on Twitter](http://img.shields.io/badge/twitter-%40sangertol-1DA1F2?labelColor=000000&logo=twitter)](https://twitter.com/sangertol)\r +[![Watch on YouTube](http://img.shields.io/badge/youtube-tree--of--life-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/channel/UCFeDpvjU58SA9V0ycRXejhA)\r +\r +## Introduction\r +\r +**sanger-tol/insdcdownload** is a pipeline that downloads assemblies from INSDC into a Tree of Life directory structure.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. 
It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +On release, automated continuous integration tests run the pipeline on a full-sized dataset on the GitHub CI infrastructure. This ensures that the pipeline runs in a third-party environment, and has sensible resource allocation defaults set to run on real-world datasets.\r +\r +## Pipeline summary\r +\r +## Overview\r +\r +The pipeline takes an assembly accession number, as well as the assembly name, and downloads it. It also builds a set of common indices (such as `samtools faidx`), and extracts the repeat-masking performed by the NCBI.\r +\r +Steps involved:\r +\r +- Download from the NCBI the genomic sequence (Fasta) and the assembly\r + stats and reports files.\r +- Turn the masked Fasta file into an unmasked one.\r +- Compress and index all Fasta files with `bgzip`, `samtools faidx`, and\r + `samtools dict`.\r +- Generate the `.sizes` file usually required for conversion of data\r + files to UCSC's "big" formats, e.g. bigBed.\r +- Extract the coordinates of the masked regions into a BED file.\r +- Compress and index the BED file with `bgzip` and `tabix`.\r +\r +## Quick Start\r +\r +1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=22.04.0`)\r +\r +2. 
Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) (you can follow [this tutorial](https://singularity-tutorial.github.io/01-installation/)), [`Podman`](https://podman.io/), [`Shifter`](https://nersc.gitlab.io/development/shifter/how-to-use/) or [`Charliecloud`](https://hpc.github.io/charliecloud/) for full pipeline reproducibility _(you can use [`Conda`](https://conda.io/miniconda.html) both to install Nextflow itself and also to manage software within pipelines. Please only use it within pipelines as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_.\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```bash\r + nextflow run sanger-tol/insdcdownload -profile test,YOURPROFILE --outdir \r + ```\r +\r + Note that some form of configuration will be needed so that Nextflow knows how to fetch the required software. This is usually done in the form of a config profile (`YOURPROFILE` in the example command above). You can chain multiple config profiles in a comma-separated string.\r +\r + > - The pipeline comes with config profiles called `docker`, `singularity`, `podman`, `shifter`, `charliecloud` and `conda` which instruct the pipeline to use the named tool for software management. For example, `-profile test,docker`.\r + > - Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment.\r + > - If you are using `singularity`, please use the [`nf-core download`](https://nf-co.re/tools/#downloading-pipelines-for-offline-use) command to download images first, before running the pipeline. 
Setting the [`NXF_SINGULARITY_CACHEDIR` or `singularity.cacheDir`](https://www.nextflow.io/docs/latest/singularity.html?#singularity-docker-hub) Nextflow options enables you to store and re-use the images from a central location for future pipeline runs.\r + > - If you are using `conda`, it is highly recommended to use the [`NXF_CONDA_CACHEDIR` or `conda.cacheDir`](https://www.nextflow.io/docs/latest/conda.html) settings to store the environments in a central location for future pipeline runs.\r +\r +4. Start running your own analysis!\r +\r + ```console\r + nextflow run sanger-tol/insdcdownload --assembly_accession GCA_927399515.1 --assembly_name gfLaeSulp1.1 --outdir results\r + ```\r +\r +## Documentation\r +\r +The sanger-tol/insdcdownload pipeline comes with documentation about the pipeline [usage](docs/usage.md) and [output](docs/output.md).\r +\r +## Credits\r +\r +sanger-tol/insdcdownload was mainly written by @muffato, with major borrowings from @priyanka-surana's [read-mapping](https://github.com/sanger-tol/readmapping) pipeline, e.g. the script to remove the repeat-masking, and the overall structure and layout of the sub-workflows.\r +\r +## Contributions and Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +For further information or help, don't hesitate to get in touch on the [Slack `#pipelines` channel](https://sangertreeoflife.slack.com/channels/pipelines). 
Please [create an issue](https://github.com/sanger-tol/insdcdownload/issues/new/choose) on GitHub if you are not on the Sanger slack channel.\r +\r +## Citations\r +\r +If you use sanger-tol/insdcdownload for your analysis, please cite it using the following doi: [10.5281/zenodo.7155119](https://doi.org/10.5281/zenodo.7155119)\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +This pipeline uses code and infrastructure developed and maintained by the [nf-core](https://nf-co.re) community, reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/638?version=1" ; + schema1:isBasedOn "https://github.com/sanger-tol/insdcdownload.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for sanger-tol/insdcdownload v1.1.0 - Deciduous ent" ; + schema1:sdDatePublished "2024-08-05 10:27:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/638/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1657 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-02T11:59:44Z" ; + schema1:dateModified "2023-11-14T11:58:44Z" ; + schema1:description """# ![sanger-tol/insdcdownload](docs/images/sanger-tol-insdcdownload_logo.png)\r +\r +[![GitHub Actions CI Status](https://github.com/sanger-tol/insdcdownload/workflows/nf-core%20CI/badge.svg)](https://github.com/sanger-tol/insdcdownload/actions?query=workflow%3A%22nf-core+CI%22)\r +\r +\r +\r +[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.7155119-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.7155119)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.04.0-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +\r +[![Get help on Slack](http://img.shields.io/badge/slack-SangerTreeofLife%20%23pipelines-4A154B?labelColor=000000&logo=slack)](https://SangerTreeofLife.slack.com/channels/pipelines)\r +[![Follow on Twitter](http://img.shields.io/badge/twitter-%40sangertol-1DA1F2?labelColor=000000&logo=twitter)](https://twitter.com/sangertol)\r +[![Watch on YouTube](http://img.shields.io/badge/youtube-tree--of--life-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/channel/UCFeDpvjU58SA9V0ycRXejhA)\r +\r +## Introduction\r +\r +**sanger-tol/insdcdownload** is a pipeline that downloads assemblies from INSDC into a Tree of Life directory structure.\r +\r +The pipeline is built using 
[Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +On release, automated continuous integration tests run the pipeline on a full-sized dataset on the GitHub CI infrastructure. This ensures that the pipeline runs in a third-party environment, and has sensible resource allocation defaults set to run on real-world datasets.\r +\r +## Pipeline summary\r +\r +## Overview\r +\r +The pipeline takes an assembly accession number, as well as the assembly name, and downloads it. It also builds a set of common indices (such as `samtools faidx`), and extracts the repeat-masking performed by the NCBI.\r +\r +Steps involved:\r +\r +- Download from the NCBI the genomic sequence (Fasta) and the assembly\r + stats and reports files.\r +- Turn the masked Fasta file into an unmasked one.\r +- Compress and index all Fasta files with `bgzip`, `samtools faidx`, and\r + `samtools dict`.\r +- Generate the `.sizes` file usually required for conversion of data\r + files to UCSC's "big" formats, e.g. bigBed.\r +- Extract the coordinates of the masked regions into a BED file.\r +- Compress and index the BED file with `bgzip` and `tabix`.\r +\r +## Quick Start\r +\r +1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=22.04.0`)\r +\r +2. 
Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) (you can follow [this tutorial](https://singularity-tutorial.github.io/01-installation/)), [`Podman`](https://podman.io/), [`Shifter`](https://nersc.gitlab.io/development/shifter/how-to-use/) or [`Charliecloud`](https://hpc.github.io/charliecloud/) for full pipeline reproducibility _(you can use [`Conda`](https://conda.io/miniconda.html) both to install Nextflow itself and also to manage software within pipelines. Please only use it within pipelines as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_.\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```bash\r + nextflow run sanger-tol/insdcdownload -profile test,YOURPROFILE --outdir \r + ```\r +\r + Note that some form of configuration will be needed so that Nextflow knows how to fetch the required software. This is usually done in the form of a config profile (`YOURPROFILE` in the example command above). You can chain multiple config profiles in a comma-separated string.\r +\r + > - The pipeline comes with config profiles called `docker`, `singularity`, `podman`, `shifter`, `charliecloud` and `conda` which instruct the pipeline to use the named tool for software management. For example, `-profile test,docker`.\r + > - Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment.\r + > - If you are using `singularity`, please use the [`nf-core download`](https://nf-co.re/tools/#downloading-pipelines-for-offline-use) command to download images first, before running the pipeline. 
Setting the [`NXF_SINGULARITY_CACHEDIR` or `singularity.cacheDir`](https://www.nextflow.io/docs/latest/singularity.html?#singularity-docker-hub) Nextflow options enables you to store and re-use the images from a central location for future pipeline runs.\r + > - If you are using `conda`, it is highly recommended to use the [`NXF_CONDA_CACHEDIR` or `conda.cacheDir`](https://www.nextflow.io/docs/latest/conda.html) settings to store the environments in a central location for future pipeline runs.\r +\r +4. Start running your own analysis!\r +\r + ```console\r + nextflow run sanger-tol/insdcdownload --assembly_accession GCA_927399515.1 --assembly_name gfLaeSulp1.1 --outdir results\r + ```\r +\r +## Documentation\r +\r +The sanger-tol/insdcdownload pipeline comes with documentation about the pipeline [usage](docs/usage.md) and [output](docs/output.md).\r +\r +## Credits\r +\r +sanger-tol/insdcdownload was mainly written by @muffato, with major borrowings from @priyanka-surana's [read-mapping](https://github.com/sanger-tol/readmapping) pipeline, e.g. the script to remove the repeat-masking, and the overall structure and layout of the sub-workflows.\r +\r +## Contributions and Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +For further information or help, don't hesitate to get in touch on the [Slack `#pipelines` channel](https://sangertreeoflife.slack.com/channels/pipelines). 
Please [create an issue](https://github.com/sanger-tol/insdcdownload/issues/new/choose) on GitHub if you are not on the Sanger slack channel.\r +\r +## Citations\r +\r +If you use sanger-tol/insdcdownload for your analysis, please cite it using the following doi: [10.5281/zenodo.7155119](https://doi.org/10.5281/zenodo.7155119)\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +This pipeline uses code and infrastructure developed and maintained by the [nf-core](https://nf-co.re) community, reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:keywords "Bioinformatics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "sanger-tol/insdcdownload v1.1.0 - Deciduous ent" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/638?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Genes and transcripts annotation with Isoseq using uLTRA and TAMA" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/993?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/isoseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/isoseq" ; + schema1:sdDatePublished "2024-08-05 10:23:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/993/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8180 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:57Z" ; + schema1:dateModified "2024-06-11T12:54:57Z" ; + schema1:description "Genes and transcripts annotation with Isoseq using uLTRA and TAMA" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/993?version=6" ; + schema1:keywords "isoseq, isoseq-3, rna, tama, ultra" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/isoseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/993?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:description """#### - Deprecated -\r +#### See our updated hybrid assembly workflow: https://workflowhub.eu/workflows/367\r +#### And other workflows: https://workflowhub.eu/projects/16#workflows\r +# \r +**Workflow for sequencing with ONT Nanopore data, from basecalled reads to (meta)assembly and binning**\r +- Workflow Nanopore Quality\r +- Kraken2 taxonomic classification of FASTQ reads\r +- Flye (de-novo assembly)\r +- Medaka (assembly polishing)\r +- metaQUAST (assembly quality reports)\r +\r +**When Illumina reads are provided:** \r + - Workflow Illumina Quality: https://workflowhub.eu/workflows/336?version=1 \r + - Assembly polishing with Pilon
\r + - Workflow binnning https://workflowhub.eu/workflows/64?version=11\r + - Metabat2\r + - CheckM\r + - BUSCO\r + - GTDB-Tk\r +\r +**All tool CWL files and other workflows can be found here:**
\r + Tools: https://git.wur.nl/unlock/cwl/-/tree/master/cwl
\r + Workflows: https://git.wur.nl/unlock/cwl/-/tree/master/cwl/workflows
""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/254?version=3" ; + schema1:isBasedOn "https://git.wur.nl/unlock/cwl/-/blob/master/cwl/workflows/workflow_nanopore_assembly.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Nanopore Assembly Workflow - Deprecated -" ; + schema1:sdDatePublished "2024-08-05 10:31:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/254/ro_crate?version=3" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 83285 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17085 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-04-20T09:12:09Z" ; + schema1:dateModified "2023-02-02T15:16:21Z" ; + schema1:description """#### - Deprecated -\r +#### See our updated hybrid assembly workflow: https://workflowhub.eu/workflows/367\r +#### And other workflows: https://workflowhub.eu/projects/16#workflows\r +# \r +**Workflow for sequencing with ONT Nanopore data, from basecalled reads to (meta)assembly and binning**\r +- Workflow Nanopore Quality\r +- Kraken2 taxonomic classification of FASTQ reads\r +- Flye (de-novo assembly)\r +- Medaka (assembly polishing)\r +- metaQUAST (assembly quality reports)\r +\r +**When Illumina reads are provided:** \r + - Workflow Illumina Quality: https://workflowhub.eu/workflows/336?version=1 \r + - Assembly polishing with Pilon
\r + - Workflow binnning https://workflowhub.eu/workflows/64?version=11\r + - Metabat2\r + - CheckM\r + - BUSCO\r + - GTDB-Tk\r +\r +**All tool CWL files and other workflows can be found here:**
\r + Tools: https://git.wur.nl/unlock/cwl/-/tree/master/cwl
\r + Workflows: https://git.wur.nl/unlock/cwl/-/tree/master/cwl/workflows
""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/254?version=2" ; + schema1:keywords "nanopore, Genomics, Metagenomics" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Nanopore Assembly Workflow - Deprecated -" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/254?version=3" ; + schema1:version 3 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=17" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-08-05 10:23:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=17" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17866 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:59Z" ; + schema1:dateModified "2024-06-11T12:54:59Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=17" ; + schema1:version 17 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# IGVreport-nf \r +\r +- [Description](#description)\r + - [Diagram](#diagram)\r + - [User guide](#user-guide)\r + - [Workflow summaries](#workflow-summaries)\r + - [Metadata](#metadata)\r + - [Component tools](#component-tools)\r + - [Required (minimum)\r + inputs/parameters](#required-minimum-inputsparameters)\r + - [Additional notes](#additional-notes)\r + - [Help/FAQ/Troubleshooting](#helpfaqtroubleshooting)\r + - [Acknowledgements/citations/credits](#acknowledgementscitationscredits)\r +\r +## Description \r +\r +Quickly generate [IGV `.html` reports](https://github.com/igvteam/igv-reports) for a genomic region of interest in the human genome (hg38). Bcftools is used to subset a VCF to a region of interest, the subset VCF is then passed to IGV-reports, which generates a report consisting of a table of genomic sites or regions and associated IGV views for each site. The reports can be opened by any web browser as a static page. \r +\r +### Diagram \r +\r +```mermaid\r +graph LR;\r + VCF-->|bcftools view|SubsetVCF;\r + SubsetVCF-->|IGVtools|HTMLreport;\r + AlignmentBAM-->|IGVtools|HTMLreport;\r +```\r +\r +### User guide\r +\r +This workflow uses containers for all steps and can run using Singularity or Docker. It requires Nextflow and either Singularity or Docker be installed. For instructions on installing Nextflow, see their [documentation](https://www.nextflow.io/docs/latest/getstarted.html).\r +\r +**This workflow currently only generates reports for the human reference genome assembly, Hg38.** \r +\r +The workflow runs three processes: \r +1. The provided VCF file is subset to a region of interest using Bcftools view \r +2. The Subset VCF file is then indexed using Bcftools index \r +3. The subset VCF and provided Bam file are used to generate the html report for the region of interest. 
\r +\r +To start clone this repository: \r +```\r +git clone https://github.com/Sydney-Informatics-Hub/IGVreport-nf.git\r +```\r +\r +From the IGVreport-nf directory, run the pipeline: \r +```\r +nextflow run main.nf --sample \\\r + --bam \\\r + --vcf \\\r + --chr --start --stop \r +```\r +\r +This will create a report in a directory titled `./Report`. You can rename this directory at runtime using the flag `--outDir`. All runtime summary reports will be available in the `./runInfo` directory. \r +\r +### Workflow summaries\r +\r +#### Metadata \r +\r +|metadata field | workflow_name / workflow_version |\r +|-------------------|:---------------------------------:|\r +|Version | 1.0 |\r +|Maturity | under development |\r +|Creators | Georgie Samaha |\r +|Source | NA |\r +|License | GPL-3.0 license |\r +|Workflow manager | NextFlow |\r +|Container | None |\r +|Install method | NA |\r +|GitHub | github.com/Sydney-Informatics-Hub/IGVreport-nf |\r +|bio.tools | NA |\r +|BioContainers | NA | \r +|bioconda | NA |\r +\r +#### Component tools\r +\r +* nextflow>=20.07.1\r +* singularity or docker\r +* bcftools/1.16\r +* igv-reports/1.6.1\r +\r +#### Required (minimum) inputs/parameters\r +\r +* An indexed alignment file in Bam format \r +* A gzipped and indexed vcf file\r +\r +## Additional notes\r +\r +## Help/FAQ/troubleshooting \r +\r +## Acknowledgements/citations/credits\r +\r +This workflow was developed by the Sydney Informatics Hub, a Core Research Facility of the University of Sydney and the Australian BioCommons which is enabled by NCRIS via Bioplatforms Australia. 
\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/440?version=1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/IGVreport-nf" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for IGVreport-nf" ; + schema1:sdDatePublished "2024-08-05 10:31:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/440/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3381 ; + schema1:creator , + ; + schema1:dateCreated "2023-03-21T05:17:30Z" ; + schema1:dateModified "2023-03-21T05:17:30Z" ; + schema1:description """# IGVreport-nf \r +\r +- [Description](#description)\r + - [Diagram](#diagram)\r + - [User guide](#user-guide)\r + - [Workflow summaries](#workflow-summaries)\r + - [Metadata](#metadata)\r + - [Component tools](#component-tools)\r + - [Required (minimum)\r + inputs/parameters](#required-minimum-inputsparameters)\r + - [Additional notes](#additional-notes)\r + - [Help/FAQ/Troubleshooting](#helpfaqtroubleshooting)\r + - [Acknowledgements/citations/credits](#acknowledgementscitationscredits)\r +\r +## Description \r +\r +Quickly generate [IGV `.html` reports](https://github.com/igvteam/igv-reports) for a genomic region of interest in the human genome (hg38). Bcftools is used to subset a VCF to a region of interest, the subset VCF is then passed to IGV-reports, which generates a report consisting of a table of genomic sites or regions and associated IGV views for each site. The reports can be opened by any web browser as a static page. \r +\r +### Diagram \r +\r +```mermaid\r +graph LR;\r + VCF-->|bcftools view|SubsetVCF;\r + SubsetVCF-->|IGVtools|HTMLreport;\r + AlignmentBAM-->|IGVtools|HTMLreport;\r +```\r +\r +### User guide\r +\r +This workflow uses containers for all steps and can run using Singularity or Docker. 
It requires Nextflow and either Singularity or Docker be installed. For instructions on installing Nextflow, see their [documentation](https://www.nextflow.io/docs/latest/getstarted.html).\r +\r +**This workflow currently only generates reports for the human reference genome assembly, Hg38.** \r +\r +The workflow runs three processes: \r +1. The provided VCF file is subset to a region of interest using Bcftools view \r +2. The Subset VCF file is then indexed using Bcftools index \r +3. The subset VCF and provided Bam file are used to generate the html report for the region of interest. \r +\r +To start clone this repository: \r +```\r +git clone https://github.com/Sydney-Informatics-Hub/IGVreport-nf.git\r +```\r +\r +From the IGVreport-nf directory, run the pipeline: \r +```\r +nextflow run main.nf --sample \\\r + --bam \\\r + --vcf \\\r + --chr --start --stop \r +```\r +\r +This will create a report in a directory titled `./Report`. You can rename this directory at runtime using the flag `--outDir`. All runtime summary reports will be available in the `./runInfo` directory. 
\r +\r +### Workflow summaries\r +\r +#### Metadata \r +\r +|metadata field | workflow_name / workflow_version |\r +|-------------------|:---------------------------------:|\r +|Version | 1.0 |\r +|Maturity | under development |\r +|Creators | Georgie Samaha |\r +|Source | NA |\r +|License | GPL-3.0 license |\r +|Workflow manager | NextFlow |\r +|Container | None |\r +|Install method | NA |\r +|GitHub | github.com/Sydney-Informatics-Hub/IGVreport-nf |\r +|bio.tools | NA |\r +|BioContainers | NA | \r +|bioconda | NA |\r +\r +#### Component tools\r +\r +* nextflow>=20.07.1\r +* singularity or docker\r +* bcftools/1.16\r +* igv-reports/1.6.1\r +\r +#### Required (minimum) inputs/parameters\r +\r +* An indexed alignment file in Bam format \r +* A gzipped and indexed vcf file\r +\r +## Additional notes\r +\r +## Help/FAQ/troubleshooting \r +\r +## Acknowledgements/citations/credits\r +\r +This workflow was developed by the Sydney Informatics Hub, a Core Research Facility of the University of Sydney and the Australian BioCommons which is enabled by NCRIS via Bioplatforms Australia. \r +""" ; + schema1:keywords "Alignment, Genomics, variant calling, mapping" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "IGVreport-nf" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/440?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-08-05 10:22:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=10" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10551 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:10Z" ; + schema1:dateModified "2024-06-11T12:55:10Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=10" ; + schema1:version 10 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ChIP-seq peak-calling and differential analysis pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/971?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/chipseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/chipseq" ; + schema1:sdDatePublished "2024-08-05 10:24:13 +0100" ; + schema1:url "https://workflowhub.eu/workflows/971/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9391 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:45Z" ; + schema1:dateModified "2024-06-11T12:54:45Z" ; + schema1:description "ChIP-seq peak-calling and differential analysis pipeline." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/971?version=5" ; + schema1:keywords "ChIP, ChIP-seq, chromatin-immunoprecipitation, macs2, peak-calling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/chipseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/971?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Germline-ShortV @ NCI-Gadi is an implementation of the BROAD Institute's best practice workflow for germline short variant discovery. This implementation is optimised for the National Compute Infrastucture's Gadi HPC, utilising scatter-gather parallelism to enable use of multiple nodes with high CPU or memory efficiency. This workflow requires sample BAM files, which can be generated using the [Fastq-to-bam @ NCI-Gadi](https://workflowhub.eu/workflows/146) pipeline. Germline-ShortV can be applied to model and non-model organisms (including non-diploid organisms). \r +\r +Infrastructure\\_deployment\\_metadata: Gadi (NCI)""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.143.1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/Germline-ShortV" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Germline-ShortV @ NCI-Gadi" ; + schema1:sdDatePublished "2024-08-05 10:33:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/143/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 728332 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 24981 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2021-08-17T04:35:21Z" ; + schema1:dateModified "2023-01-16T13:51:44Z" ; + schema1:description """Germline-ShortV @ NCI-Gadi is an implementation of the BROAD Institute's best practice workflow for germline short variant discovery. This implementation is optimised for the National Compute Infrastucture's Gadi HPC, utilising scatter-gather parallelism to enable use of multiple nodes with high CPU or memory efficiency. This workflow requires sample BAM files, which can be generated using the [Fastq-to-bam @ NCI-Gadi](https://workflowhub.eu/workflows/146) pipeline. Germline-ShortV can be applied to model and non-model organisms (including non-diploid organisms). \r +\r +Infrastructure\\_deployment\\_metadata: Gadi (NCI)""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "GATK4, variant_calling, WGS, SNPs, INDELs, HaplotyeCaller, Germline, BROAD, Genomics, genome, DNA, DNA-seq" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Germline-ShortV @ NCI-Gadi" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/143?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Takes fastqs and reference data, to produce a single cell counts matrix into and save in annData format - adding a column called sample with the sample name. 
" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/513?version=2" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq: Count and Load with starSOLO" ; + schema1:sdDatePublished "2024-08-05 10:24:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/513/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 28207 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-12-12T03:08:41Z" ; + schema1:dateModified "2023-12-12T03:08:41Z" ; + schema1:description "Takes fastqs and reference data, to produce a single cell counts matrix into and save in annData format - adding a column called sample with the sample name. " ; + schema1:isBasedOn "https://workflowhub.eu/workflows/513?version=2" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "scRNAseq: Count and Load with starSOLO" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/513?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2023-01-20T16:24:30.927470" ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + , + , + ; + schema1:name "hic-hicup-cooler/hic-fastq-to-cool-hicup-cooler" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "hic-hicup-cooler/chic-fastq-to-cool-hicup-cooler" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:version "0.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.3" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "hic-hicup-cooler/hic-fastq-to-pairs-hicup" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:version "0.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "hic-hicup-cooler/hic-juicermediumtabix-to-cool-cooler" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:version "0.1" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """This workflow can be used to fit dose-response curves from normalised cell-based assay data (%confluence) using the KNIME HCS extension. The workflow expects triplicates for each of eight test concentrations. This workflow needs R-Server to run in the back-end. Start R and run the following command: library(Rserve); Rserve(args = "--vanilla"). 
\r +Three types of outliers can be removed: 1 - Outliers from triplicate measurement (standard deviation cut-off can be selected), 2 - inactive and weekly active compounds (% confluence cut-offs can be selected), 3 - toxic concentrations (cut-off for reduction in confluence with stepwise increasing concentration can be selected)\r +Output are two dose-response curve fits per compound for pre and post outlier removal with graphical representation and numerical fit parameters. \r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/388?version=1" ; + schema1:isBasedOn "https://hub.knime.com/fraunhoferitmp/spaces/Public/latest/Dose_Response_Cell-based-Assay/DRC_template_WithOutlierDetection~SwmoxbJewJ1k8Dcd" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for DRC_cellbased_OutlierDetection" ; + schema1:sdDatePublished "2024-08-05 10:31:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/388/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3093204 ; + schema1:creator ; + schema1:dateCreated "2022-09-26T09:58:39Z" ; + schema1:dateModified "2023-01-16T14:02:32Z" ; + schema1:description """This workflow can be used to fit dose-response curves from normalised cell-based assay data (%confluence) using the KNIME HCS extension. The workflow expects triplicates for each of eight test concentrations. This workflow needs R-Server to run in the back-end. Start R and run the following command: library(Rserve); Rserve(args = "--vanilla"). 
\r +Three types of outliers can be removed: 1 - Outliers from triplicate measurement (standard deviation cut-off can be selected), 2 - inactive and weekly active compounds (% confluence cut-offs can be selected), 3 - toxic concentrations (cut-off for reduction in confluence with stepwise increasing concentration can be selected)\r +Output are two dose-response curve fits per compound for pre and post outlier removal with graphical representation and numerical fit parameters. \r +""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "DRC_cellbased_OutlierDetection" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/388?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 70668 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.293.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python CNS/XPLOR MD Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-08-05 10:32:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/293/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1728 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-17T14:49:18Z" ; + schema1:dateModified "2022-03-23T10:05:23Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/293?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python CNS/XPLOR MD Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/293?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-08-05 10:22:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9290 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:10Z" ; + schema1:dateModified "2024-06-11T12:55:10Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + schema1:datePublished "2024-07-09T15:30:12.396930" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . + + a schema1:Dataset ; + schema1:datePublished "2023-11-03T14:51:35.106883" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Scaffolding-Bionano-VGP7" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Scaffolding-Bionano-VGP7/main" . 
+ + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.17" . + + a schema1:Dataset ; + schema1:datePublished "2021-12-20T17:04:47.617205" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/gromacs-mmgbsa" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "gromacs-mmgbsa/main" ; + schema1:sdDatePublished "2021-12-21 03:01:01 +0000" ; + schema1:softwareVersion "v0.1.2" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# CNVand\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥8.0.0-brightgreen.svg?style=flat-square)](https://snakemake.bitbucket.io)\r +[![Conda](https://img.shields.io/badge/conda-≥23.11.0-brightgreen.svg?style=flat-square)](https://anaconda.org/conda-forge/mamba)\r +![Docker](https://img.shields.io/badge/docker-≥26.1.4-brightgreen.svg?style=flat-square)\r +![License](https://img.shields.io/badge/license-MIT-blue.svg?style=flat-square)\r +[![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-2.1-4baaaa.svg)](code_of_conduct.md) \r +\r +CNVand is a snakemake workflow for CNV analysis, tailored for preparing data used by the [CNVizard](https://github.com/IHGGM-Aachen/CNVizard) CNV visualization tool. Given a set of BAM and VCF files, it utilizes the tools `CNVkit` and `AnnotSV` to analyze and annotate copy number variations.\r +\r +## General Settings and Samplesheet\r +To configure this pipeline, modify the config under `config/config.yaml` as needed. Detailed explanations for each setting are provided within the file.\r +\r +Add samples to the pipeline by completing `config/samplesheet.tsv`. 
Each `sample` should be associated with a `path` to the corresponding BAM and VCF file.\r +\r +For detailed instructions on how to configure CNVand see `config/README.md`.\r +\r +## Reference Files\r +To use CNVand some external reference files are needed alongside your sample data.\r +\r +### Genome\r +\r +For `cnvkit_fix` to work, you need to specify a reference genome in the config file. Take care to use the same reference file for your entire workflow!\r +\r +### Annotations\r +\r +For AnnotSV to work, the annotation files must be downloaded separately and be referenced in the config file under the respective key. For human annotations, this can be done [here](https://www.lbgi.fr/~geoffroy/Annotations/Annotations_Human_3.4.2.tar.gz). In case this link is not working, check the original [AnnotSV](https://github.com/lgmgeo/AnnotSV/tree/master) repository for updates on how to obtain the annotations.\r +\r +## Pipeline Setup\r +CNVand can be executed using mamba environments or a pre-built docker container.\r +\r +### Mamba (Snakedeploy)\r +For a one-click installation, snakedeploy can be used. For further information, see the entry for CNVand in the [Snakemake Workflow Catalog](https://snakemake.github.io/snakemake-workflow-catalog/?repo=IHGGM-Aachen/CNVand)\r +\r +### Mamba (Manual)\r +This workflow can easily setup manually with the given environment file. Install Snakemake and dependencies using the command:\r +\r +```bash\r +mamba env create -f environment.yml\r +```\r +\r +Then activate the newly created environment with: \r +\r +```bash\r +mamba activate cnvand\r +```\r +\r +Now configure the pipeline and download the needed annotation and refenrece files. 
When everything is set up, Execute the pipeline with:\r +\r +```bash\r +snakemake --cores all --use-conda\r +```\r +\r +Generate a comprehensive execution report by running:\r +\r +```bash\r +snakemake --report report.zip\r +```\r +\r +\r +### Docker\r +\r +CNVand can also be used inside a Docker container. To do so, first pull the Docker image with:\r +\r +```bash\r +docker pull ghcr.io/ihggm-aachen/cnvand:latest\r +```\r +\r +Then run the container with the bind mounts needed in your setup:\r +\r +```bash\r +docker run -it -v /path/to/your/data:/data ghcr.io/ihggm-aachen/cnvand:latest /bin/bash\r +```\r +\r +This command opens an interactive shell inside the Docker container. Once inside the container, you are placed inside the `/cnvand` the directory. From there then run the pipeline once you set an appropriate configuration:\r +\r +```bash\r +snakemake --cores all --use-conda\r +```\r +\r +## Contributing\r +\r +We welcome contributions to improve CNVand. Please see our [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to get started.\r +\r +## Code of Conduct\r +\r +We are committed to fostering an open and welcoming environment. Please see our [CODE_OF_CONDUCT.md](CODE_OF_CONDUCT.md) for our community guidelines.\r +\r +## Documentation\r +\r +Detailed documentation for the workflow can be found in `workflow/documentation.md`.\r +\r +## Testing\r +\r +To ensure the pipeline runs correctly, we have set up both unit and integration tests. Unit tests are generated from successful workflow runs, and integration tests are configured to run the entire workflow with test data.\r +\r +### Integration Tests\r +\r +The integration test can be run using the data and config provided. 
Remember to download the correct reference/annotations (GRCh38 in case of the bundled NIST data) by yourself and adjust your local paths as necessary!\r +\r +### Unit Tests\r +\r +Run the unit tests with:\r +\r +```bash\r +pytest -v .tests/unit\r +```\r +\r +This will check for the correct CNVand output per rule.\r +\r +## License\r +\r +This project is licensed under the MIT License. See the [LICENSE](LICENSE.md) file for details.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.1039.1" ; + schema1:isBasedOn "https://github.com/IHGGM-Aachen/CNVand" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNVand" ; + schema1:sdDatePublished "2024-08-05 10:23:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1039/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 20573 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1367 ; + schema1:creator ; + schema1:dateCreated "2024-06-10T15:56:02Z" ; + schema1:dateModified "2024-06-10T15:57:31Z" ; + schema1:description """# CNVand\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥8.0.0-brightgreen.svg?style=flat-square)](https://snakemake.bitbucket.io)\r +[![Conda](https://img.shields.io/badge/conda-≥23.11.0-brightgreen.svg?style=flat-square)](https://anaconda.org/conda-forge/mamba)\r +![Docker](https://img.shields.io/badge/docker-≥26.1.4-brightgreen.svg?style=flat-square)\r +![License](https://img.shields.io/badge/license-MIT-blue.svg?style=flat-square)\r +[![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-2.1-4baaaa.svg)](code_of_conduct.md) \r +\r +CNVand is a snakemake workflow for CNV analysis, tailored for preparing data used by the [CNVizard](https://github.com/IHGGM-Aachen/CNVizard) CNV visualization tool. 
Given a set of BAM and VCF files, it utilizes the tools `CNVkit` and `AnnotSV` to analyze and annotate copy number variations.\r +\r +## General Settings and Samplesheet\r +To configure this pipeline, modify the config under `config/config.yaml` as needed. Detailed explanations for each setting are provided within the file.\r +\r +Add samples to the pipeline by completing `config/samplesheet.tsv`. Each `sample` should be associated with a `path` to the corresponding BAM and VCF file.\r +\r +For detailed instructions on how to configure CNVand see `config/README.md`.\r +\r +## Reference Files\r +To use CNVand some external reference files are needed alongside your sample data.\r +\r +### Genome\r +\r +For `cnvkit_fix` to work, you need to specify a reference genome in the config file. Take care to use the same reference file for your entire workflow!\r +\r +### Annotations\r +\r +For AnnotSV to work, the annotation files must be downloaded separately and be referenced in the config file under the respective key. For human annotations, this can be done [here](https://www.lbgi.fr/~geoffroy/Annotations/Annotations_Human_3.4.2.tar.gz). In case this link is not working, check the original [AnnotSV](https://github.com/lgmgeo/AnnotSV/tree/master) repository for updates on how to obtain the annotations.\r +\r +## Pipeline Setup\r +CNVand can be executed using mamba environments or a pre-built docker container.\r +\r +### Mamba (Snakedeploy)\r +For a one-click installation, snakedeploy can be used. For further information, see the entry for CNVand in the [Snakemake Workflow Catalog](https://snakemake.github.io/snakemake-workflow-catalog/?repo=IHGGM-Aachen/CNVand)\r +\r +### Mamba (Manual)\r +This workflow can easily setup manually with the given environment file. 
Install Snakemake and dependencies using the command:\r +\r +```bash\r +mamba env create -f environment.yml\r +```\r +\r +Then activate the newly created environment with: \r +\r +```bash\r +mamba activate cnvand\r +```\r +\r +Now configure the pipeline and download the needed annotation and refenrece files. When everything is set up, Execute the pipeline with:\r +\r +```bash\r +snakemake --cores all --use-conda\r +```\r +\r +Generate a comprehensive execution report by running:\r +\r +```bash\r +snakemake --report report.zip\r +```\r +\r +\r +### Docker\r +\r +CNVand can also be used inside a Docker container. To do so, first pull the Docker image with:\r +\r +```bash\r +docker pull ghcr.io/ihggm-aachen/cnvand:latest\r +```\r +\r +Then run the container with the bind mounts needed in your setup:\r +\r +```bash\r +docker run -it -v /path/to/your/data:/data ghcr.io/ihggm-aachen/cnvand:latest /bin/bash\r +```\r +\r +This command opens an interactive shell inside the Docker container. Once inside the container, you are placed inside the `/cnvand` the directory. From there then run the pipeline once you set an appropriate configuration:\r +\r +```bash\r +snakemake --cores all --use-conda\r +```\r +\r +## Contributing\r +\r +We welcome contributions to improve CNVand. Please see our [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to get started.\r +\r +## Code of Conduct\r +\r +We are committed to fostering an open and welcoming environment. Please see our [CODE_OF_CONDUCT.md](CODE_OF_CONDUCT.md) for our community guidelines.\r +\r +## Documentation\r +\r +Detailed documentation for the workflow can be found in `workflow/documentation.md`.\r +\r +## Testing\r +\r +To ensure the pipeline runs correctly, we have set up both unit and integration tests. 
Unit tests are generated from successful workflow runs, and integration tests are configured to run the entire workflow with test data.\r +\r +### Integration Tests\r +\r +The integration test can be run using the data and config provided. Remember to download the correct reference/annotations (GRCh38 in case of the bundled NIST data) by yourself and adjust your local paths as necessary!\r +\r +### Unit Tests\r +\r +Run the unit tests with:\r +\r +```bash\r +pytest -v .tests/unit\r +```\r +\r +This will check for the correct CNVand output per rule.\r +\r +## License\r +\r +This project is licensed under the MIT License. See the [LICENSE](LICENSE.md) file for details.\r +""" ; + schema1:image ; + schema1:keywords "Bioinformatics, annotsv, CNVkit, Copy Number Variation, Snakemake, Magic" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "CNVand" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1039?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "An example workflow to allow users to run the Specimen Data Refinery tools on data provided in an input CSV file." ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.373.1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for De novo digitisation" ; + schema1:sdDatePublished "2024-08-05 10:31:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/373/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 21688 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-07-08T13:00:03Z" ; + schema1:dateModified "2022-07-08T13:08:19Z" ; + schema1:description "An example workflow to allow users to run the Specimen Data Refinery tools on data provided in an input CSV file." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/373?version=1" ; + schema1:keywords "Default-SDR" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "De novo digitisation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/373?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """Data QC step, can run alone or as part of a combined workflow for large genome assembly. \r +\r +* What it does: Reports statistics from sequencing reads.\r +* Inputs: long reads (fastq.gz format), short reads (R1 and R2) (fastq.gz format).\r +* Outputs: For long reads: a nanoplot report (the HTML report summarizes all the information). For short reads: a MultiQC report.\r +* Tools used: Nanoplot, FastQC, MultiQC.\r +* Input parameters: None required.\r +* Workflow steps: Long reads are analysed by Nanoplot; Short reads (R1 and R2) are analysed by FastQC; the resulting reports are processed by MultiQC.\r +* Options: see the tool settings options at runtime and change as required. 
Alternative tool option: fastp\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.222.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Data QC" ; + schema1:sdDatePublished "2024-08-05 10:32:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/222/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 417031 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15475 ; + schema1:creator ; + schema1:dateCreated "2021-11-08T04:34:47Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """Data QC step, can run alone or as part of a combined workflow for large genome assembly. \r +\r +* What it does: Reports statistics from sequencing reads.\r +* Inputs: long reads (fastq.gz format), short reads (R1 and R2) (fastq.gz format).\r +* Outputs: For long reads: a nanoplot report (the HTML report summarizes all the information). For short reads: a MultiQC report.\r +* Tools used: Nanoplot, FastQC, MultiQC.\r +* Input parameters: None required.\r +* Workflow steps: Long reads are analysed by Nanoplot; Short reads (R1 and R2) are analysed by FastQC; the resulting reports are processed by MultiQC.\r +* Options: see the tool settings options at runtime and change as required. 
Alternative tool option: fastp\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)\r +""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "Large-genome-assembly" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Data QC" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/222?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Summary\r +This notebook demonstrates how to retrieve metadata associated to the paper [A SARS-CoV-2 cytopathicity dataset generated by high-content screening of a large drug repurposing collection](https://doi.org/10.1038/s41597-021-00848-4) and available in IDR at [idr0094-ellinger-sarscov2](https://idr.openmicroscopy.org/search/?query=Name:idr0094).\r +Over 300 compounds were used in this investigation. This notebook allows the user to calculate the half maximal inhibitory concentration (IC50) for each compound. IC50 is a measure of the potency of a substance in inhibiting a specific biological or biochemical function. IC50 is a quantitative measure that indicates how much of a particular inhibitory substance (e.g. 
drug) is needed to inhibit, in vitro, a given biological process or biological component by 50%.\r +User can download the IC50 for each compound used in that study\r +\r +The notebook can be launched in [My Binder](https://mybinder.org/v2/gh/IDR/idr0094-ellinger-sarscov2/master?urlpath=notebooks%2Fnotebooks%2Fidr0094-ic50.ipynb%3FscreenId%3D2603).\r +\r +A shiny app is also available for dynamic plotting of the IC50 curve for each compound.\r +This R shiny app can be launched in [My Binder](https://mybinder.org/v2/gh/IDR/idr0094-ellinger-sarscov2/master?urlpath=shiny/apps/)\r +\r +\r +# Inputs\r +Parameters needed to configure the workflow:\r +\r +**screenId**: Identifier of a screen in IDR.\r +\r +# Ouputs\r +Output file generated:\r +\r +**ic50.csv**: Comma separate value file containing the IC50 for each compound.\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.238.1" ; + schema1:isBasedOn "https://github.com/IDR/idr0094-ellinger-sarscov2" ; + schema1:license "BSD-2-Clause" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Calculate the half maximal inhibitory concentration (IC50) for each compound used in a SARS-CoV-2 study" ; + schema1:sdDatePublished "2024-08-05 10:32:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/238/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 233368 ; + schema1:creator , + ; + schema1:dateCreated "2021-11-16T10:19:16Z" ; + schema1:dateModified "2023-01-16T13:55:00Z" ; + schema1:description """# Summary\r +This notebook demonstrates how to retrieve metadata associated to the paper [A SARS-CoV-2 cytopathicity dataset generated by high-content screening of a large drug repurposing collection](https://doi.org/10.1038/s41597-021-00848-4) and available in IDR at [idr0094-ellinger-sarscov2](https://idr.openmicroscopy.org/search/?query=Name:idr0094).\r +Over 300 compounds were used in this investigation. This notebook allows the user to calculate the half maximal inhibitory concentration (IC50) for each compound. IC50 is a measure of the potency of a substance in inhibiting a specific biological or biochemical function. IC50 is a quantitative measure that indicates how much of a particular inhibitory substance (e.g. 
drug) is needed to inhibit, in vitro, a given biological process or biological component by 50%.\r +User can download the IC50 for each compound used in that study\r +\r +The notebook can be launched in [My Binder](https://mybinder.org/v2/gh/IDR/idr0094-ellinger-sarscov2/master?urlpath=notebooks%2Fnotebooks%2Fidr0094-ic50.ipynb%3FscreenId%3D2603).\r +\r +A shiny app is also available for dynamic plotting of the IC50 curve for each compound.\r +This R shiny app can be launched in [My Binder](https://mybinder.org/v2/gh/IDR/idr0094-ellinger-sarscov2/master?urlpath=shiny/apps/)\r +\r +\r +# Inputs\r +Parameters needed to configure the workflow:\r +\r +**screenId**: Identifier of a screen in IDR.\r +\r +# Ouputs\r +Output file generated:\r +\r +**ic50.csv**: Comma separate value file containing the IC50 for each compound.\r +\r +""" ; + schema1:isPartOf ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/BSD-2-Clause" ; + schema1:name "Calculate the half maximal inhibitory concentration (IC50) for each compound used in a SARS-CoV-2 study" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/238?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-se-illumina-wgs-variant-calling" ; + schema1:mainEntity ; + schema1:name "COVID-19-SE-WGS-ILLUMINA (v0.1)" ; + schema1:sdDatePublished "2021-03-12 13:41:28 +0000" ; + schema1:softwareVersion "v0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 30245 ; + schema1:name "COVID-19-SE-WGS-ILLUMINA" ; + schema1:programmingLanguage . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "This workflow converts the top-ranking predicted pathways from the \"RetroSynthesis\" and \"Pathway Analysis\" workflows to plasmids intended to be expressed in the specified organism" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/23?version=1" ; + schema1:isBasedOn "https://galaxy-synbiocad.org/u/mdulac/w/genetic-design-1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genetic Design" ; + schema1:sdDatePublished "2024-08-05 10:33:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/23/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9991 ; + schema1:creator ; + schema1:dateCreated "2020-05-29T10:05:20Z" ; + schema1:dateModified "2023-01-16T13:41:35Z" ; + schema1:description "This workflow converts the top-ranking predicted pathways from the \"RetroSynthesis\" and \"Pathway Analysis\" workflows to plasmids intended to be expressed in the specified organism" ; + schema1:keywords "Retrosynthesis, genetic design, pathway prediction" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Genetic Design" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/23?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + ; + ns1:output , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Introduction\r +\r +`katdetectr` is an *R* package for the detection, characterization and visualization of localized hypermutated regions, often referred to as *kataegis*.\r +\r +Please see the [Application Note](https://www.biorxiv.org/content/10.1101/2022.07.11.499364v1) (under submission) for additional background, details and performance evaluations of `katdetectr`.\r +\r +The general workflow of `katdetectr` can be summarized as follows:\r +\r +1. Import of genomic variants; VCF, MAF or VRanges objects.\r +2. Detection of kataegis foci.\r +3. Visualization of segmentation and kataegis foci.\r +\r +Please see the [vignette](https://bioconductor.org/packages/release/bioc/vignettes/katdetectr/inst/doc/General_overview.html) for an overview of the workflow in a step-by-step manner on publicly-available datasets which are included within this package.\r +\r +\r +## Installation\r +\r +Download katdetectr from BioConductor:\r +```R\r +if (!requireNamespace("BiocManager", quietly = TRUE))\r + install.packages("BiocManager")\r +\r +BiocManager::install("katdetectr")\r +\r +```\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.463.1" ; + schema1:isBasedOn "https://github.com/ErasmusMC-CCBC/katdetectr" ; + schema1:license "GPL-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Katdetectr" ; + schema1:sdDatePublished "2024-08-05 10:30:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/463/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1203 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10674 ; + schema1:creator , + ; + schema1:dateCreated "2023-05-02T11:51:45Z" ; + schema1:dateModified "2023-05-02T11:58:38Z" ; + schema1:description """# Introduction\r +\r +`katdetectr` is an *R* package for the detection, characterization and visualization of localized hypermutated regions, often referred to as *kataegis*.\r +\r +Please see the [Application Note](https://www.biorxiv.org/content/10.1101/2022.07.11.499364v1) (under submission) for additional background, details and performance evaluations of `katdetectr`.\r +\r +The general workflow of `katdetectr` can be summarized as follows:\r +\r +1. Import of genomic variants; VCF, MAF or VRanges objects.\r +2. Detection of kataegis foci.\r +3. Visualization of segmentation and kataegis foci.\r +\r +Please see the [vignette](https://bioconductor.org/packages/release/bioc/vignettes/katdetectr/inst/doc/General_overview.html) for an overview of the workflow in a step-by-step manner on publicly-available datasets which are included within this package.\r +\r +\r +## Installation\r +\r +Download katdetectr from BioConductor:\r +```R\r +if (!requireNamespace("BiocManager", quietly = TRUE))\r + install.packages("BiocManager")\r +\r +BiocManager::install("katdetectr")\r +\r +```\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/GPL-2.0" ; + schema1:name "Katdetectr" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/463?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-10-03T10:01:02.570398" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/average-bigwig-between-replicates" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "average-bigwig-between-replicates/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.12" . + + a schema1:Dataset ; + schema1:datePublished "2024-01-12T14:56:00.228274" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-only-VGP3/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.19" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-08-05 10:23:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4227 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:03Z" ; + schema1:dateModified "2024-06-11T12:55:03Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# CMIP MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of computing **classical molecular interaction potentials** from **protein structures**, step by step, using the **BioExcel Building Blocks library (biobb)**. Examples shown are **Molecular Interaction Potentials (MIPs) grids, protein-protein/ligand interaction potentials, and protein titration**. The particular structures used are the **Lysozyme** protein (PDB code [1AKI](https://www.rcsb.org/structure/1aki)), and a MD simulation of the complex formed by the **SARS-CoV-2 Receptor Binding Domain and the human Angiotensin Converting Enzyme 2** (PDB code [6VW1](https://www.rcsb.org/structure/6vw1)).\r +\r +The code wrapped is the ***Classical Molecular Interaction Potentials (CMIP)*** code:\r +\r +**Classical molecular interaction potentials: Improved setup procedure in molecular dynamics simulations of proteins.**\r +*Gelpí, J.L., Kalko, S.G., Barril, X., Cirera, J., de la Cruz, X., Luque, F.J. and Orozco, M. 
(2001)*\r +*Proteins, 45: 428-437. [https://doi.org/10.1002/prot.1159](https://doi.org/10.1002/prot.1159)*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.773.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_cmip" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Classical Molecular Interaction Potentials" ; + schema1:sdDatePublished "2024-08-05 10:25:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/773/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 78658 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-04T14:46:28Z" ; + schema1:dateModified "2024-03-04T14:48:19Z" ; + schema1:description """# CMIP MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of computing **classical molecular interaction potentials** from **protein structures**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
Examples shown are **Molecular Interaction Potentials (MIPs) grids, protein-protein/ligand interaction potentials, and protein titration**. The particular structures used are the **Lysozyme** protein (PDB code [1AKI](https://www.rcsb.org/structure/1aki)), and a MD simulation of the complex formed by the **SARS-CoV-2 Receptor Binding Domain and the human Angiotensin Converting Enzyme 2** (PDB code [6VW1](https://www.rcsb.org/structure/6vw1)).\r +\r +The code wrapped is the ***Classical Molecular Interaction Potentials (CMIP)*** code:\r +\r +**Classical molecular interaction potentials: Improved setup procedure in molecular dynamics simulations of proteins.**\r +*Gelpí, J.L., Kalko, S.G., Barril, X., Cirera, J., de la Cruz, X., Luque, F.J. and Orozco, M. (2001)*\r +*Proteins, 45: 428-437. [https://doi.org/10.1002/prot.1159](https://doi.org/10.1002/prot.1159)*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/773?version=1" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Classical Molecular Interaction Potentials" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + 
schema1:url "https://github.com/bioexcel/biobb_wf_cmip/blob/main/biobb_wf_cmip/notebooks/biobb_wf_cmip.ipynb" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-08-05 10:22:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4396 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:09Z" ; + schema1:dateModified "2024-06-11T12:55:09Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A pipeline to investigate horizontal gene transfer from NGS data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/988?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/hgtseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hgtseq" ; + schema1:sdDatePublished "2024-08-05 10:23:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/988/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9471 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:54Z" ; + schema1:dateModified "2024-06-11T12:54:54Z" ; + schema1:description "A pipeline to investigate horizontal gene transfer from NGS data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/988?version=1" ; + schema1:keywords "BWA-mem, bwa-mem2, FastQC, genomics-visualization, ggbio, horizontal-gene-transfer, kraken2, krona, MultiQC, NGS, SAMTools, taxonomies, tidyverse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hgtseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/988?version=2" ; + schema1:version 2 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 31175 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Analysis of RNA-seq data starting from BAM and focusing on mRNA, lncRNA and miRNA" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/199?version=1" ; + schema1:isBasedOn "http://biotranslator.gr:8080/workflows/run?id=e14715ce11d8be59" ; + schema1:license "notspecified" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for lncRNA" ; + schema1:sdDatePublished "2024-08-05 10:33:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/199/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 307614 ; + schema1:dateCreated "2021-09-29T08:25:31Z" ; + schema1:dateModified "2023-01-16T13:53:17Z" ; + schema1:description "Analysis of RNA-seq data starting from BAM and focusing on mRNA, lncRNA and miRNA" ; + schema1:keywords "" ; + schema1:license "https://choosealicense.com/no-permission/" ; + schema1:name "lncRNA" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/199?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """## Learning objectives\r +- Read data to analyse from an object store.\r +- Analyse data in parallel using Dask.\r +- Show how to use public resources to train neural network.\r +- Load labels associated to the original data\r +- Compare results with ground truth.\r +\r +The authors of the PLOS Biology paper, "Nessys: A new set of tools for the automated detection of nuclei within intact tissues and dense 3D cultures" published in August 2019: https://doi.org/10.1371/journal.pbio.3000388, considered several image segmenation packages, but they did not use the approach described in this notebook.\r +\r +We will analyse the data using [Cellpose](https://www.cellpose.org/) and compare the output with the original segmentation produced by the authors. Cellpose was not considered by the authors. 
Our workflow shows how public repository can be accessed and data inside it used to validate software tools or new algorithms.\r +\r +We will use a predefined model from Cellpose as a starting point.\r +\r +## Launch\r +This notebook uses the [environment.yml](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_5/environment.yml) file.\r +\r +See [Setup](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_5/setup.md).""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.495.1" ; + schema1:isBasedOn "https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_5/Cellpose_parallel.ipynb" ; + schema1:license "BSD-2-Clause" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Load ome.zarr Image with labels from public S3 repositories, analyze in parallel using Cellpose and compare results" ; + schema1:sdDatePublished "2024-08-05 10:30:13 +0100" ; + schema1:url "https://workflowhub.eu/workflows/495/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 517664 ; + schema1:url "https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_5/includes/CellposeParallel.png" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17325 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-01T10:30:51Z" ; + schema1:dateModified "2023-07-04T00:40:25Z" ; + schema1:description """## Learning objectives\r +- Read data to analyse from an object store.\r +- Analyse data in parallel using Dask.\r +- Show how to use public resources to train neural network.\r +- Load labels associated to the original data\r +- Compare results with ground truth.\r +\r +The authors of the PLOS Biology paper, "Nessys: A new set of tools for the automated detection of nuclei within intact tissues and dense 3D cultures" published in August 2019: https://doi.org/10.1371/journal.pbio.3000388, considered several image segmenation packages, but they did not use the approach described in this notebook.\r +\r +We will analyse the data using [Cellpose](https://www.cellpose.org/) and compare the output with the original segmentation produced by the authors. Cellpose was not considered by the authors. 
Our workflow shows how public repository can be accessed and data inside it used to validate software tools or new algorithms.\r +\r +We will use a predefined model from Cellpose as a starting point.\r +\r +## Launch\r +This notebook uses the [environment.yml](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_5/environment.yml) file.\r +\r +See [Setup](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_5/setup.md).""" ; + schema1:image ; + schema1:keywords "OME-Zarr, Python, imaging, image processing, Machine Learning, S3" ; + schema1:license "https://spdx.org/licenses/BSD-2-Clause" ; + schema1:name "Load ome.zarr Image with labels from public S3 repositories, analyze in parallel using Cellpose and compare results" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_5/Cellpose_parallel.ipynb" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/protein-ligand-docking).\r +***\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.296.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_virtual_screening/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Protein-ligand Docking tutorial (Fpocket)" ; + 
schema1:sdDatePublished "2024-08-05 10:30:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/296/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 23696 ; + schema1:creator , + ; + schema1:dateCreated "2023-05-03T13:48:26Z" ; + schema1:dateModified "2023-05-03T13:49:48Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/protein-ligand-docking).\r +***\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 
[675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/296?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Protein-ligand Docking tutorial (Fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_virtual_screening/galaxy/biobb_wf_virtual_screening.ga" ; + schema1:version 3 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2023-09-15T09:40:24.299054" ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + , + , + ; + schema1:name "hic-hicup-cooler/hic-fastq-to-cool-hicup-cooler" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.11" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "hic-hicup-cooler/chic-fastq-to-cool-hicup-cooler" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:version "0.3" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.11" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.11" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "hic-hicup-cooler/hic-fastq-to-pairs-hicup" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:version "0.3" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.11" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "hic-hicup-cooler/hic-juicermediumtabix-to-cool-cooler" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:version "0.3" . + + a schema1:Dataset ; + schema1:datePublished "2023-01-16T17:00:18.505110" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "atacseq/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.3" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=20" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-08-05 10:23:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=20" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 18600 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:59Z" ; + schema1:dateModified "2024-06-11T12:54:59Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=20" ; + schema1:version 20 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=21" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-08-05 10:22:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=21" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13915 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:40Z" ; + schema1:dateModified "2024-06-11T12:54:40Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=21" ; + schema1:version 21 . + + a schema1:Dataset ; + schema1:hasPart , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-ivar-analysis" ; + schema1:mainEntity ; + schema1:name "sars-cov-2-pe-illumina-artic-ivar-analysis/SARS-COV-2-ILLUMINA-AMPLICON-IVAR-PANGOLIN-NEXTCLADE (v0.1)" ; + schema1:sdDatePublished "2021-08-31 03:00:45 +0100" ; + schema1:softwareVersion "v0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 48495 ; + schema1:name "SARS-COV-2-ILLUMINA-AMPLICON-IVAR-PANGOLIN-NEXTCLADE" ; + schema1:programmingLanguage . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Protype demonstrator of a workflow reducing HESS and INTEGRAL/SPI-ACS data to common Light Curve format and combining the lightcurves into a multi-wavelength observation." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/766?version=1" ; + schema1:isBasedOn "https://galaxy.odahub.fr/u/volodymyr/w/workflow-constructed-from-history-unnamed-history-1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Example Multi-Wavelength Light-Curve Analysis" ; + schema1:sdDatePublished "2024-08-05 10:25:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/766/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10761 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-02-21T15:36:09Z" ; + schema1:dateModified "2024-03-01T11:48:35Z" ; + schema1:description "Protype demonstrator of a workflow reducing HESS and INTEGRAL/SPI-ACS data to common Light Curve format and combining the lightcurves into a multi-wavelength observation." ; + schema1:isPartOf ; + schema1:keywords "astronomy" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Example Multi-Wavelength Light-Curve Analysis" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/766?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2022-10-20T13:31:26.677961" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.11" . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-09-14T22:03:45.618652" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "rnaseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.11" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/986?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/fetchngs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fetchngs" ; + schema1:sdDatePublished "2024-08-05 10:23:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/986/ro_crate?version=8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7284 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:53Z" ; + schema1:dateModified "2024-06-11T12:54:53Z" ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/986?version=13" ; + schema1:keywords "ddbj, download, ena, FASTQ, geo, sra, synapse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fetchngs" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/986?version=8" ; + schema1:version 8 . + + a schema1:MediaObject ; + schema1:about ; + schema1:author , + ; + schema1:encodingFormat "text/markdown" . 
+ + a schema1:Dataset ; + schema1:creator , + , + , + , + , + , + ; + schema1:datePublished "2023-04-13" ; + schema1:description "Study protocol" ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.7551181" ; + schema1:name "BY-COVID - WP5 - Baseline Use Case: SARS-CoV-2 vaccine effectiveness assessment - Study protocol" ; + schema1:url "https://zenodo.org/record/7825979" ; + schema1:version "1.0.3" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# workflow-partial-ustacks-only\r +\r +\r +These workflows are part of a set designed to work for RAD-seq data on the Galaxy platform, using the tools from the Stacks program. \r +\r +Galaxy Australia: https://usegalaxy.org.au/\r +\r +Stacks: http://catchenlab.life.illinois.edu/stacks/\r +\r +\r +For the full de novo workflow see https://workflowhub.eu/workflows/348\r +\r +You may want to run ustacks with different batches of samples. \r +* To be able to combine these later, there are some necessary steps - we need to keep track of how many samples have already run in ustacks, so that new samples can be labelled with different identifying numbers. \r +* In ustacks, under "Processing options" there is an option called "Start identifier at". \r +* The default for this is 1, which can be used for the first batch of samples. These will then be labelled as sample 1, sample 2 and so on. \r +* For any new batches of samples to process in ustacks, we will want to start numbering these at the next available number. e.g. if there were 10 samples in batch 1, this should then be set to start at 11. \r +\r +To combine multiple outputs from ustacks, providing these have been given appropriate starting identifiers:\r +* Find the ustacks output in the Galaxy history. This will be a list of samples. \r +* Click on the cross button next to the filename to delete, but select "Collection only". 
This releases the items from the list, but they will now be hidden in the Galaxy history.\r +* In the history panel, click on "hidden" to reveal any hidden files. Unhide the samples. \r +* Do this for all the batches of ustacks outputs that are needed. \r +* Click on the tick button, tick all the samples needed, then "For all selected" choose "Build dataset list"\r +* This is now a combined set of samples for input into cstacks. \r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/349?version=1" ; + schema1:isBasedOn "https://github.com/AnnaSyme/workflow-partial-ustacks-only.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Partial de novo workflow: ustacks only" ; + schema1:sdDatePublished "2024-08-05 10:32:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/349/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3854 ; + schema1:creator ; + schema1:dateCreated "2022-05-31T07:50:13Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """# workflow-partial-ustacks-only\r +\r +\r +These workflows are part of a set designed to work for RAD-seq data on the Galaxy platform, using the tools from the Stacks program. \r +\r +Galaxy Australia: https://usegalaxy.org.au/\r +\r +Stacks: http://catchenlab.life.illinois.edu/stacks/\r +\r +\r +For the full de novo workflow see https://workflowhub.eu/workflows/348\r +\r +You may want to run ustacks with different batches of samples. \r +* To be able to combine these later, there are some necessary steps - we need to keep track of how many samples have already run in ustacks, so that new samples can be labelled with different identifying numbers. \r +* In ustacks, under "Processing options" there is an option called "Start identifier at". 
\r +* The default for this is 1, which can be used for the first batch of samples. These will then be labelled as sample 1, sample 2 and so on. \r +* For any new batches of samples to process in ustacks, we will want to start numbering these at the next available number. e.g. if there were 10 samples in batch 1, this should then be set to start at 11. \r +\r +To combine multiple outputs from ustacks, providing these have been given appropriate starting identifiers:\r +* Find the ustacks output in the Galaxy history. This will be a list of samples. \r +* Click on the cross button next to the filename to delete, but select "Collection only". This releases the items from the list, but they will now be hidden in the Galaxy history.\r +* In the history panel, click on "hidden" to reveal any hidden files. Unhide the samples. \r +* Do this for all the batches of ustacks outputs that are needed. \r +* Click on the tick button, tick all the samples needed, then "For all selected" choose "Build dataset list"\r +* This is now a combined set of samples for input into cstacks. \r +""" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Partial de novo workflow: ustacks only" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/349?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "A prototype implementation of the Air Quality Prediction pipeline in Galaxy, using CWL tools." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/380?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Air Quality Prediction Prototype" ; + schema1:sdDatePublished "2024-08-05 10:31:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/380/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 25311 ; + schema1:creator , + ; + schema1:dateCreated "2022-07-29T11:16:20Z" ; + schema1:dateModified "2023-01-16T14:02:21Z" ; + schema1:description "A prototype implementation of the Air Quality Prediction pipeline in Galaxy, using CWL tools." ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Air Quality Prediction Prototype" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/380?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Mapping against all plant virus then make contig out of the mapped reads then blast them." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/102?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for 2: Plant virus confirmation" ; + schema1:sdDatePublished "2024-08-05 10:31:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/102/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 30174 ; + schema1:dateCreated "2021-02-04T09:10:04Z" ; + schema1:dateModified "2023-02-13T14:06:45Z" ; + schema1:description "Mapping against all plant virus then make contig out of the mapped reads then blast them." ; + schema1:keywords "Virology, mapping, Assembly, reads_selection, blast" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "2: Plant virus confirmation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/102?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2024-04-29T08:49:48.242493" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "atacseq/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1027?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/viralrecon" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/viralrecon" ; + schema1:sdDatePublished "2024-08-05 10:23:04 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1027/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6114 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:20Z" ; + schema1:dateModified "2024-06-11T12:55:20Z" ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1027?version=10" ; + schema1:keywords "Amplicon, ARTIC, Assembly, covid-19, covid19, illumina, long-read-sequencing, Metagenomics, nanopore, ONT, oxford-nanopore, SARS-CoV-2, variant-calling, viral, Virus" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/viralrecon" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1027?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/987?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/funcscan" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/funcscan" ; + schema1:sdDatePublished "2024-08-05 10:23:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/987/ro_crate?version=8" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16630 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:54Z" ; + schema1:dateModified "2024-06-11T12:54:54Z" ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/987?version=8" ; + schema1:keywords "amp, AMR, antibiotic-resistance, antimicrobial-peptides, antimicrobial-resistance-genes, arg, Assembly, bgc, biosynthetic-gene-clusters, contigs, function, Metagenomics, natural-products, screening, secondary-metabolites" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/funcscan" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/987?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """A porting of the Trinity RNA assembly pipeline, https://trinityrnaseq.github.io, that uses Nextflow to handle the underlying sub-tasks.\r +This enables additional capabilities to better use HPC resources, such as packing of tasks to fill up nodes and use of node-local disks to improve I/O.\r +By design, the pipeline separates the workflow logic (main file) and the cluster-specific configuration (config files), improving portability.\r +\r +Based on a pipeline by Sydney Informatics Hub: https://github.com/Sydney-Informatics-Hub/SIH-Raijin-Trinity""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/114?version=1" ; + schema1:isBasedOn "https://github.com/marcodelapierre/trinity-nf" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Trinity RNA Assembly" ; + schema1:sdDatePublished "2024-08-05 10:33:17 +0100" ; + 
schema1:url "https://workflowhub.eu/workflows/114/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8368 ; + schema1:creator ; + schema1:dateCreated "2021-03-17T03:01:55Z" ; + schema1:dateModified "2021-03-17T03:31:12Z" ; + schema1:description """A porting of the Trinity RNA assembly pipeline, https://trinityrnaseq.github.io, that uses Nextflow to handle the underlying sub-tasks.\r +This enables additional capabilities to better use HPC resources, such as packing of tasks to fill up nodes and use of node-local disks to improve I/O.\r +By design, the pipeline separates the workflow logic (main file) and the cluster-specific configuration (config files), improving portability.\r +\r +Based on a pipeline by Sydney Informatics Hub: https://github.com/Sydney-Informatics-Hub/SIH-Raijin-Trinity""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/114?version=1" ; + schema1:isPartOf ; + schema1:keywords "Assembly, Transcriptomics, RNASEQ, Nextflow" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Trinity RNA Assembly" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/114?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "This is a Galaxy workflow that uses to convert the16S BIOM file to table and figures. It is part of the metaDEGalaxy workflow MetaDEGalaxy: Galaxy workflow for differential abundance analysis of 16s metagenomic data. 
" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/142?version=1" ; + schema1:isBasedOn "https://github.com/QFAB-Bioinformatics/metaDEGalaxy" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for 16S_biodiversity_BIOM" ; + schema1:sdDatePublished "2024-08-05 10:33:10 +0100" ; + schema1:url "https://workflowhub.eu/workflows/142/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 102835 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 19403 ; + schema1:creator ; + schema1:dateCreated "2021-08-11T04:12:04Z" ; + schema1:dateModified "2024-04-17T04:16:05Z" ; + schema1:description "This is a Galaxy workflow that uses to convert the16S BIOM file to table and figures. It is part of the metaDEGalaxy workflow MetaDEGalaxy: Galaxy workflow for differential abundance analysis of 16s metagenomic data. " ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "MetaDEGalaxy" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "16S_biodiversity_BIOM" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/142?version=1" ; + schema1:version 1 ; + ns1:output , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# eQTL-Catalogue/qtlmap\r +**Portable eQTL analysis and statistical fine mapping workflow used by the eQTL Catalogue**\r +\r +### Introduction\r +\r +**eQTL-Catalogue/qtlmap** is a bioinformatics analysis pipeline used for QTL Analysis.\r +\r +The workflow takes phenotype count matrix (normalized and quality controlled) and genotype data as input, and finds associations between them with the help of sample metadata and phenotype metadata files (See [Input formats and preparation](docs/inputs_expl.md) for required input file details). To map QTLs, pipeline uses [QTLTools's](https://qtltools.github.io/qtltools/) PCA and RUN methods. For manipulation of files [BcfTools](https://samtools.github.io/bcftools/bcftools.html), [Tabix](http://www.htslib.org/doc/tabix.html) and custom [Rscript](https://www.rdocumentation.org/packages/utils/versions/3.5.3/topics/Rscript) scripts are used.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a bioinformatics workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with docker / singularity containers making installation trivial and results highly reproducible.\r +\r +\r +### Documentation\r +The eQTL-Catalogue/qtlmap pipeline comes with documentation about the pipeline, found in the `docs/` directory:\r +\r +1. [Installation](docs/installation.md)\r +2. Pipeline configuration\r + * [Local installation](docs/configuration/local.md)\r + * [Adding your own system](docs/configuration/adding_your_own.md)\r +3. [Input formats and preparation](docs/inputs_expl.md)\r +4. [Running the pipeline](docs/usage.md)\r +5. 
[Troubleshooting](docs/troubleshooting.md)\r +\r +\r +\r +### Pipeline Description\r +Mapping QTLs is a process of finding statistically significant associations between phenotypes and genetic variants located nearby (within a specific window around phenotype, a.k.a cis window)\r +This pipeline is designed to perform QTL mapping. It is intended to add this pipeline to the nf-core framework in the future.\r +High level representation of the pipeline is shown below:\r +\r +### Results\r +The output directory of the workflow contains the following subdirectories:\r +\r +1. PCA - genotype and gene expression PCA values used as covariates for QTL analysis.\r +2. sumstats - QTL summary statistics from nominal and permutation passes.\r +3. susie - SuSiE fine mapping credible sets.\r +4. susie_full - full set of susie results for all tested variants (very large files).\r +5. susie_merged - susie credible sets merged with summary statistics from univariate QTL analysis.\r +\r +Column names of the output files are explained [here](https://github.com/eQTL-Catalogue/eQTL-Catalogue-resources/blob/master/tabix/Columns.md).\r +\r +\r +# Contributors\r +* Nurlan Kerimov\r +* Kaur Alasoo\r +* Masahiro Kanai\r +* Ralf Tambets\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/300?version=1" ; + schema1:isBasedOn "https://github.com/eQTL-Catalogue/qtlmap.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for eQTL-Catalogue/qtlmap" ; + schema1:sdDatePublished "2024-08-05 10:32:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/300/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 706230 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15721 ; + schema1:dateCreated "2022-03-29T16:10:54Z" ; + schema1:dateModified "2023-01-16T13:59:09Z" ; + schema1:description """# eQTL-Catalogue/qtlmap\r +**Portable eQTL analysis and statistical fine mapping workflow used by the eQTL Catalogue**\r +\r +### Introduction\r +\r +**eQTL-Catalogue/qtlmap** is a bioinformatics analysis pipeline used for QTL Analysis.\r +\r +The workflow takes phenotype count matrix (normalized and quality controlled) and genotype data as input, and finds associations between them with the help of sample metadata and phenotype metadata files (See [Input formats and preparation](docs/inputs_expl.md) for required input file details). To map QTLs, pipeline uses [QTLTools's](https://qtltools.github.io/qtltools/) PCA and RUN methods. For manipulation of files [BcfTools](https://samtools.github.io/bcftools/bcftools.html), [Tabix](http://www.htslib.org/doc/tabix.html) and custom [Rscript](https://www.rdocumentation.org/packages/utils/versions/3.5.3/topics/Rscript) scripts are used.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a bioinformatics workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with docker / singularity containers making installation trivial and results highly reproducible.\r +\r +\r +### Documentation\r +The eQTL-Catalogue/qtlmap pipeline comes with documentation about the pipeline, found in the `docs/` directory:\r +\r +1. [Installation](docs/installation.md)\r +2. Pipeline configuration\r + * [Local installation](docs/configuration/local.md)\r + * [Adding your own system](docs/configuration/adding_your_own.md)\r +3. [Input formats and preparation](docs/inputs_expl.md)\r +4. [Running the pipeline](docs/usage.md)\r +5. 
[Troubleshooting](docs/troubleshooting.md)\r +\r +\r +\r +### Pipeline Description\r +Mapping QTLs is a process of finding statistically significant associations between phenotypes and genetic variants located nearby (within a specific window around phenotype, a.k.a cis window)\r +This pipeline is designed to perform QTL mapping. It is intended to add this pipeline to the nf-core framework in the future.\r +High level representation of the pipeline is shown below:\r +\r +### Results\r +The output directory of the workflow contains the following subdirectories:\r +\r +1. PCA - genotype and gene expression PCA values used as covariates for QTL analysis.\r +2. sumstats - QTL summary statistics from nominal and permutation passes.\r +3. susie - SuSiE fine mapping credible sets.\r +4. susie_full - full set of susie results for all tested variants (very large files).\r +5. susie_merged - susie credible sets merged with summary statistics from univariate QTL analysis.\r +\r +Column names of the output files are explained [here](https://github.com/eQTL-Catalogue/eQTL-Catalogue-resources/blob/master/tabix/Columns.md).\r +\r +\r +# Contributors\r +* Nurlan Kerimov\r +* Kaur Alasoo\r +* Masahiro Kanai\r +* Ralf Tambets\r +""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "eQTL-Catalogue/qtlmap" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/300?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# 1. About TF-Prioritizer\r +\r +This pipeline gives you a full analysis of nfcore chromatine accessibility peak data (ChIP-Seq, ATAC-Seq or DNAse-Seq)\r +and nfcore RNA-seq count data. It performs\r +DESeq2, TEPIC and DYNAMITE including all preprocessing and postprocessing steps necessary to transform the data. 
It also\r +gives you plots for deep analysis of the data. The general workflow is sketched in the images below:\r +\r +## Graphical abstract:\r +\r +![Graphical abstrat](https://raw.githubusercontent.com/biomedbigdata/TF-Prioritizer/master/media/graphicalAbstract.png)\r +\r +## Technical workflow:\r +\r +![Technical workflow](https://github.com/biomedbigdata/TF-Prioritizer/raw/master/media/technicalWorkflow.png)\r +\r +# 2. License and Citing\r +\r +TF-Prioritizer is distributed under the [GNU General Public License](https://www.gnu.org/licenses/gpl-3.0.en.html). The\r +Graphical Abstract and the Technical Workflow\r +was created using [biorender.com](https://biorender.com/).\r +\r +# 3. Usage\r +\r +The software can be executed using docker. For the following command, only [python3](https://www.python.org/downloads/),\r +[curl](https://curl.se/download.html) and [docker](https://docs.docker.com/get-docker/) are required.\r +Explanations about the configs can be found in\r +the [config readme](https://github.com/biomedbigdata/TF-Prioritizer/blob/master/configTemplates/README.md).\r +\r +```bash\r +curl -s https://raw.githubusercontent.com/biomedbigdata/TF-Prioritizer/master/docker.py | python3 - -c [config_file] -o [output_dir] -t [threads]\r +```\r +\r +Note, that for this approach an internet connection is required. The docker image will be downloaded\r +from [DockerHub](https://hub.docker.com/r/nicotru/tf-prioritizer) on the first execution as well as with every update we\r +release. 
Furthermore, the wrapper script\r +will be fetched from GitHub with every execution.\r +\r +If curl is not available (for example if you are using windows), or you want to be able to execute the software without\r +an internet connection, you can download the wrapper script\r +from [here](https://raw.githubusercontent.com/biomedbigdata/TF-Prioritizer/pipeJar/docker.py).\r +\r +You can then execute the script using\r +\r +```bash\r +python3 [script_path] -c [config_file] -o [output_dir] -t [threads]\r +```\r +\r +## If you want to use the pipeline without docker\r +\r +We do not recommend using the pipeline without docker, because the dependencies are very complex, and it is very hard to\r +install them correctly. However, if you want to use the pipeline without docker, you can do so by installing the\r +dependencies manually. The dependencies and their correct installation process can be derived from\r +the [Dockerfile](https://github.com/biomedbigdata/TF-Prioritizer/blob/master/Dockerfile) and the environment scripts\r +which can be found in\r +the [environment directory](https://github.com/biomedbigdata/TF-Prioritizer/tree/master/environment).""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/433?version=1" ; + schema1:isBasedOn "https://github.com/biomedbigdata/TF-Prioritizer.git" ; + schema1:license "LGPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for TF-Prioritizer" ; + schema1:sdDatePublished "2024-08-05 10:31:22 +0100" ; + schema1:url "https://workflowhub.eu/workflows/433/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2005 ; + schema1:dateCreated "2023-02-02T14:42:11Z" ; + schema1:dateModified "2023-02-02T14:42:11Z" ; + schema1:description """# 1. 
About TF-Prioritizer\r +\r +This pipeline gives you a full analysis of nfcore chromatine accessibility peak data (ChIP-Seq, ATAC-Seq or DNAse-Seq)\r +and nfcore RNA-seq count data. It performs\r +DESeq2, TEPIC and DYNAMITE including all preprocessing and postprocessing steps necessary to transform the data. It also\r +gives you plots for deep analysis of the data. The general workflow is sketched in the images below:\r +\r +## Graphical abstract:\r +\r +![Graphical abstrat](https://raw.githubusercontent.com/biomedbigdata/TF-Prioritizer/master/media/graphicalAbstract.png)\r +\r +## Technical workflow:\r +\r +![Technical workflow](https://github.com/biomedbigdata/TF-Prioritizer/raw/master/media/technicalWorkflow.png)\r +\r +# 2. License and Citing\r +\r +TF-Prioritizer is distributed under the [GNU General Public License](https://www.gnu.org/licenses/gpl-3.0.en.html). The\r +Graphical Abstract and the Technical Workflow\r +was created using [biorender.com](https://biorender.com/).\r +\r +# 3. Usage\r +\r +The software can be executed using docker. For the following command, only [python3](https://www.python.org/downloads/),\r +[curl](https://curl.se/download.html) and [docker](https://docs.docker.com/get-docker/) are required.\r +Explanations about the configs can be found in\r +the [config readme](https://github.com/biomedbigdata/TF-Prioritizer/blob/master/configTemplates/README.md).\r +\r +```bash\r +curl -s https://raw.githubusercontent.com/biomedbigdata/TF-Prioritizer/master/docker.py | python3 - -c [config_file] -o [output_dir] -t [threads]\r +```\r +\r +Note, that for this approach an internet connection is required. The docker image will be downloaded\r +from [DockerHub](https://hub.docker.com/r/nicotru/tf-prioritizer) on the first execution as well as with every update we\r +release. 
Furthermore, the wrapper script\r +will be fetched from GitHub with every execution.\r +\r +If curl is not available (for example if you are using windows), or you want to be able to execute the software without\r +an internet connection, you can download the wrapper script\r +from [here](https://raw.githubusercontent.com/biomedbigdata/TF-Prioritizer/pipeJar/docker.py).\r +\r +You can then execute the script using\r +\r +```bash\r +python3 [script_path] -c [config_file] -o [output_dir] -t [threads]\r +```\r +\r +## If you want to use the pipeline without docker\r +\r +We do not recommend using the pipeline without docker, because the dependencies are very complex, and it is very hard to\r +install them correctly. However, if you want to use the pipeline without docker, you can do so by installing the\r +dependencies manually. The dependencies and their correct installation process can be derived from\r +the [Dockerfile](https://github.com/biomedbigdata/TF-Prioritizer/blob/master/Dockerfile) and the environment scripts\r +which can be found in\r +the [environment directory](https://github.com/biomedbigdata/TF-Prioritizer/tree/master/environment).""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/LGPL-3.0" ; + schema1:name "TF-Prioritizer" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/433?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 1828513 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "The workflow takes raw ONT reads and trimmed Illumina WGS paired reads collections, the ONT raw stats table (calculated from WF1) and the estimated genome size (calculated from WF1) to run NextDenovo and subsequently polish the assembly with HyPo. 
It produces collapsed assemblies (unpolished and polished) and runs all the QC analyses (gfastats, BUSCO, and Merqury)." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/789?version=1" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/galaxy" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ERGA ONT+Illumina Assembly+QC NextDenovo+HyPo v2403 (WF2)" ; + schema1:sdDatePublished "2024-08-05 10:25:13 +0100" ; + schema1:url "https://workflowhub.eu/workflows/789/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 232811 ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/2.Contigging/pics/Cont_ONTnextdenovo_2403.png" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 65262 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-11T14:45:27Z" ; + schema1:dateModified "2024-03-11T14:45:27Z" ; + schema1:description "The workflow takes raw ONT reads and trimmed Illumina WGS paired reads collections, the ONT raw stats table (calculated from WF1) and the estimated genome size (calculated from WF1) to run NextDenovo and subsequently polish the assembly with HyPo. It produces collapsed assemblies (unpolished and polished) and runs all the QC analyses (gfastats, BUSCO, and Merqury)." 
; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "name:ERGA, name:ASSEMBLY+QC, name:ONT, name:ILLUMINA" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ERGA ONT+Illumina Assembly+QC NextDenovo+HyPo v2403 (WF2)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/2.Contigging/Galaxy-Workflow-ERGA_ONT_Illumina_Assembly_QC_NextDenovo_HyPo_v2403_(WF2).ga" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and modern ancient DNA pipeline in Nextflow and with cloud support." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-08-05 10:23:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3451 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:49Z" ; + schema1:dateModified "2024-06-11T12:54:49Z" ; + schema1:description "A fully reproducible and modern ancient DNA pipeline in Nextflow and with cloud support." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + schema1:datePublished "2024-03-26T20:00:55.315103" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Scaffolding-HiC-VGP8/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """![bacpage](https://raw.githubusercontent.com/CholGen/bacpage/split_into_command/.github/logo_dark.png){width=500}\r +\r +This repository contains an easy-to-use pipeline for the assembly and analysis of bacterial genomes using ONT long-read or Illumina short-read technology.\r +\r +# Introduction\r +Advances in sequencing technology during the COVID-19 pandemic has led to massive increases in the generation of sequencing data. Many bioinformatics tools have been developed to analyze this data, but very few tools can be utilized by individuals without prior bioinformatics training.\r +\r +This pipeline was designed to encapsulate pre-existing tools to automate analysis of whole genome sequencing of bacteria. Installation is fast and straightfoward. 
The pipeline is easy to setup and contains rationale defaults, but is highly modular and configurable by more advance users.\r +A successful run generates consensus sequences, typing information, phylogenetic tree, and quality control report.\r +\r +# Features\r +We anticipate the pipeline will be able to perform the following functions:\r +- [x] Reference-based assembly of Illumina paired-end reads\r +- [x] *De novo* assembly of Illumina paired-end reads\r +- [ ] *De novo* assembly of ONT long reads\r +- [x] Run quality control checks\r +- [x] Variant calling using [bcftools](https://github.com/samtools/bcftools)\r +- [x] Maximum-likelihood phylogenetic inference of processed samples and background dataset using [iqtree](https://github.com/iqtree/iqtree2) \r +- [x] MLST profiling and virulence factor detection\r +- [x] Antimicrobial resistance genes detection\r +- [ ] Plasmid detection\r +\r +# Installation\r +1. Install `miniconda` by running the following two command:\r +```commandline\r +curl -L -O "https://github.com/conda-forge/miniforge/releases/latest/download/Mambaforge-$(uname)-$(uname -m).sh"\r +bash Mambaforge-$(uname)-$(uname -m).sh\r +```\r +\r +2. Clone the repository:\r +```commandline\r +git clone https://github.com/CholGen/bacpage.git\r +```\r +\r +3. Install and activate the pipeline's conda environment:\r +```commandline\r +cd bacpage/\r +mamba env create -f environment.yaml\r +mamba activate bacpage\r +```\r +\r +4. Install the `bacpage` command:\r +```commandline\r +pip install .\r +```\r +\r +5. Test the installation:\r +```commandline\r +bacpage -h\r +bacpage version\r +```\r +These command should print the help and version of the program. Please create an issue if this is not the case.\r +\r +# Usage\r +0. Navigate to the pipeline's directory.\r +1. Copy the `example/` directory to create a directory specifically for each batch of samples.\r +```commandline\r +cp example/ \r +```\r +2. 
Place raw sequencing reads in the `input/` directory of your project directory.\r +3. Record the name and absolute path of raw sequencing reads in the `sample_data.csv` found within your project directory.\r +4. Replace the values `` and `` in `config.yaml` found within your project directory, with the absolute path of your project directory and pipeline directory, respectively.\r +5. Determine how many cores are available on your computer:\r +```commandline\r +cat /proc/cpuinfo | grep processor\r +```\r +6. From the pipeline's directory, run the entire pipeline on your samples using the following command:\r +```commandline\r +snakemake --configfile /config.yaml --cores \r +```\r +This will generate a consensus sequence in FASTA format for each of your samples and place them in `/results/consensus_sequences/.masked.fasta`. An HTML report containing alignment and quality metrics for your samples can be found at `/results/reports/qc_report.html`. A phylogeny comparing your sequences to the background dataset can be found at `/results/phylogeny/phylogeny.tree`\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/693?version=1" ; + schema1:isBasedOn "https://github.com/CholGen/bacpage.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Reference-based assembly with bacpage" ; + schema1:sdDatePublished "2024-08-05 10:26:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/693/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 11384 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4757 ; + schema1:dateCreated "2023-12-20T17:45:10Z" ; + schema1:dateModified "2023-12-20T17:49:26Z" ; + schema1:description """![bacpage](https://raw.githubusercontent.com/CholGen/bacpage/split_into_command/.github/logo_dark.png){width=500}\r +\r +This repository contains an easy-to-use pipeline for the assembly and analysis of bacterial genomes using ONT long-read or Illumina short-read technology.\r +\r +# Introduction\r +Advances in sequencing technology during the COVID-19 pandemic has led to massive increases in the generation of sequencing data. Many bioinformatics tools have been developed to analyze this data, but very few tools can be utilized by individuals without prior bioinformatics training.\r +\r +This pipeline was designed to encapsulate pre-existing tools to automate analysis of whole genome sequencing of bacteria. Installation is fast and straightfoward. The pipeline is easy to setup and contains rationale defaults, but is highly modular and configurable by more advance users.\r +A successful run generates consensus sequences, typing information, phylogenetic tree, and quality control report.\r +\r +# Features\r +We anticipate the pipeline will be able to perform the following functions:\r +- [x] Reference-based assembly of Illumina paired-end reads\r +- [x] *De novo* assembly of Illumina paired-end reads\r +- [ ] *De novo* assembly of ONT long reads\r +- [x] Run quality control checks\r +- [x] Variant calling using [bcftools](https://github.com/samtools/bcftools)\r +- [x] Maximum-likelihood phylogenetic inference of processed samples and background dataset using [iqtree](https://github.com/iqtree/iqtree2) \r +- [x] MLST profiling and virulence factor detection\r +- [x] Antimicrobial resistance genes detection\r +- [ ] Plasmid detection\r +\r +# Installation\r +1. 
Install `miniconda` by running the following two command:\r +```commandline\r +curl -L -O "https://github.com/conda-forge/miniforge/releases/latest/download/Mambaforge-$(uname)-$(uname -m).sh"\r +bash Mambaforge-$(uname)-$(uname -m).sh\r +```\r +\r +2. Clone the repository:\r +```commandline\r +git clone https://github.com/CholGen/bacpage.git\r +```\r +\r +3. Install and activate the pipeline's conda environment:\r +```commandline\r +cd bacpage/\r +mamba env create -f environment.yaml\r +mamba activate bacpage\r +```\r +\r +4. Install the `bacpage` command:\r +```commandline\r +pip install .\r +```\r +\r +5. Test the installation:\r +```commandline\r +bacpage -h\r +bacpage version\r +```\r +These command should print the help and version of the program. Please create an issue if this is not the case.\r +\r +# Usage\r +0. Navigate to the pipeline's directory.\r +1. Copy the `example/` directory to create a directory specifically for each batch of samples.\r +```commandline\r +cp example/ \r +```\r +2. Place raw sequencing reads in the `input/` directory of your project directory.\r +3. Record the name and absolute path of raw sequencing reads in the `sample_data.csv` found within your project directory.\r +4. Replace the values `` and `` in `config.yaml` found within your project directory, with the absolute path of your project directory and pipeline directory, respectively.\r +5. Determine how many cores are available on your computer:\r +```commandline\r +cat /proc/cpuinfo | grep processor\r +```\r +6. From the pipeline's directory, run the entire pipeline on your samples using the following command:\r +```commandline\r +snakemake --configfile /config.yaml --cores \r +```\r +This will generate a consensus sequence in FASTA format for each of your samples and place them in `/results/consensus_sequences/.masked.fasta`. An HTML report containing alignment and quality metrics for your samples can be found at `/results/reports/qc_report.html`. 
A phylogeny comparing your sequences to the background dataset can be found at `/results/phylogeny/phylogeny.tree`\r +""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Reference-based assembly with bacpage" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/693?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# scRNA-Seq pipelines\r +\r +Here we forge the tools to analyze single cell RNA-Seq experiments. The analysis workflow is based on the Bioconductor packages [*scater*](https://bioconductor.org/packages/devel/bioc/vignettes/scater/inst/doc/overview.html) and [*scran*](https://bioconductor.org/packages/devel/bioc/vignettes/scran/inst/doc/scran.html) as well as the Bioconductor workflows by Lun ATL, McCarthy DJ, & Marioni JC [*A step-by-step workflow for low-level analysis of single-cell RNA-seq data.*](http://doi.org/10.12688/f1000research.9501.1) F1000Res. 2016 Aug 31 [revised 2016 Oct 31];5:2122 and Amezquita RA, Lun ATL et al. [*Orchestrating Single-Cell Analysis with Bioconductor*](https://osca.bioconductor.org/index.html) Nat Methods. 2020 Feb;17(2):137-145.\r +\r +## Implemented protocols\r + - MARS-Seq (massively parallel single-cell RNA-sequencing): The protocol is based on the publications of Jaitin DA, et al. (2014). *Massively parallel single-cell RNA-seq for marker-free decomposition of tissues into cell types.* Science (New York, N.Y.), 343(6172), 776–779. https://doi.org/10.1126/science.1247651 and Keren-Shaul H., et al. (2019). *MARS-seq2.0: an experimental and analytical pipeline for indexed sorting combined with single-cell RNA sequencing.* Nature Protocols. https://doi.org/10.1038/s41596-019-0164-4. 
The MARS-Seq library preparation protocol is given [here](https://github.com/imbforge/NGSpipe2go/blob/master/resources/MARS-Seq_protocol_Step-by-Step_MML.pdf). The sequencing reads are demultiplexed according to the respective pool barcodes before they are used as input for the analysis pipeline. \r +- Smart-seq2: Libraries are generated using the [Smart-seq2 kit](http://www.nature.com/nmeth/journal/v10/n11/full/nmeth.2639.html). \r +\r +## Pipeline Workflow\r +All analysis steps are illustrated in the pipeline [flowchart](https://www.draw.io/?lightbox=1&highlight=0000ff&edit=_blank&layers=1&nav=1&title=scRNA-Seq#R7R3ZcpvK8mtUlTxIxSIh6dF2oiyVODm2Uzk5LykEI4kYAWGxrXz9nZ6FdUBIQoDjm%2BTeIwYYZqZ7eu%2BegXq1fXrn697ms2sie6BI5tNAfTNQFFmdTPB%2FoGVHW6bKjDasfctkDyUNt9YfxBol1hpZJgoyD4aua4eWl200XMdBRphp033ffcw%2BtnLt7Fc9fY0KDbeGbhdbv1tmuGGtsjZPbrxH1nrDPj1TpvTGUjfu174bOex7jusgemer827YHIONbrqPqSb17UC98l03pL%2B2T1fIhmXlK0bfW5TcjYfsIyes88J76eNqcz0L7YfNzfL6%2Fe3i55%2F3Qw6AB92O2FoMFM3GHV6a1gOsrm2tHXJD%2Bx3BUC99sgzxJf61Zv8lry39fAseEumLt5LFCHd87Tfh1sa%2FZHzP1pfIvoyX9Mq1XZ88pC7IH%2FxIEPrufQwkvIiXK9cJGUbJGoxbDzbIZD2SfuKrlWXbqU4R%2BRN3yu8QGKqXhru1DHwp4d9rWw8C9juGo0T6D%2FXQcmGFhnMpnl4aKgxQD8gP0VOqiUHpHXK3KPR3%2BBF2dzbR6CtsMw1nDAsfU6g5ZW2bFFaOJZVtCbYd1nHfCV7gHww1xGjyYFz%2Fc%2Fs9%2Bhi9%2BfPf3Ye7xY%2Fg4b%2Bh%2FKLx5K0Gf8vwZO3rpoWhfQz6NIAtykzKYEtMU1PYMpYE2KLxB0%2FBlj%2Ffwzd30r%2F%2FfVzcvPW3snn3bvdhOJELwEMmprfs0vXDjbt2Hd1%2Bm7SmIAALkzzzyXU9BpZfKAx3DIR6FLq4KYUU6MkK%2F4XXRxN29SN1580T65lc7PiFg%2Bebegkuf6TvJa%2BRq%2BQ98wLYDr40ALYAaWhcWDYfThEBMwhXCvnAjXwDVexFtsdD3V%2BjsOo59iAsfSUi%2BcjGNOwhywpFSEFexfPWd6kHPNdywiDV81doSPBzPJtk8VMZZ9lU7vnJbFb1PP5BR5CgZzyVWhj75e7L%2Fa%2FN7Ofvf%2B9CZfHz%2BrusLYez54KwRaw6PyKNZ00j0km8SN3DiRIgctYDlH8YEDBc4AdkzXsq50vwcK1exsJerpTBxeVWtxwAiuUh28LcADdeptkc%2FcY%2Bflh7JJXzCTzdybfpwAp8tMr0uglDkLYvADrKYm2FmMuOfNsbORg7lIW1Xa5cjCn45%2FW7W5iasnbxxRD%2Fb2m7S%2BDe6AH0ggWfN6YKi8C4ub4I0G%2F4udX9EP8c8fsjzL7dh1167JLBWGXSiFF%2BGPqRY%2Bgh3m2FeVb0Gq%2FrIlkAKfUvhocuEEVE61YCurzg0gU2VvbSDbwxYILGwV
3e6UnQbh6sYvLQCJEZEmJshfoSv50iM1j%2FRivLsYhCokivlnDndaXA%2Fezmbbu6GRBeE3jIINwGa%2BEwUdC5XTOyAQ2f%2F%2FwQxnIntHRgh7pjxpMbwrStFRY4CdPXMU9F%2FlETbgAHn5ARhbDeKRwsGUm5MncGHUgTqEBc20mrQPK4ARVILDMJhJTcEmBF0IOfWFH0sMoIk6cCI5fuZrwhtjyNhctVKSXVXkO5sGSKYMV422E6Q0HIHypZpWCa7cBdrQJgQjkgHCboiw0Zky4lff77R0bq36eaDtKKaaKnlqimTWoIslJX12xJQ%2FgWIP%2FL8heYgDGpBCtQPQ1g6T4BRbOcNaVpS9c3kT%2FEzSAaEEmBIT6heBLG8kl8J23jvSCihIrG8Dd%2BwtNNM%2B5b6Z8kRuEXjMKnjEHucOkr29FJEpeAI3GBpgi%2FcuDkmRXaZmeIUQdL8gaBTqpDqfBUDMPUU0o5IMOla%2B4KjQURKzRjPUXfejYaVAljycPF13mLxRsMSu14t5KDRYFUX1at%2FqGxOOaKWSx133DN5qcR9yu98tHvyPIB%2BzBwcf%2FS54ub2%2BEtIHL57GoO5OD5eq5rNz5Z2qn04U3ldF93Ml9MnxuELuuOTdWJtkvkdzIvsNU2PivSqfTKxry%2Fo2kZTWInnxbuNNo6HQOMEO7m5iZhfWkhk%2F%2BDn9IrA%2FMNn2zDR%2FhSJ3MEocJrbo6sOwn8EWtni5ywi0mNRqPmpqQ7IDW64QZ0XMnUQ%2F1cU4JGEUsn0hMRS9KabLWOoaSEX6aUpPQNUMksQ7cvmMc0BCXikvtPbbSCz7j4qZVNHEwr4lNKqxOPGytEt1iqgR4ffd2Lx3WaFq1kdDJ5pglUQpFOKM8PFuTxZUqWP0CBUzp11Tw3BY6D9NkocJ5Q%2FN%2Fi4VsOlfAlL9HZaPuQ7CC4p6XuYaQPh2xXXRAZ36GWsrJ4BN7wNTGnigxsy1pUx8u3bQpPHTVVSh7g5kysZl6mNK9ulM3Ycjl60P2gEaN%2FWZcnKaAFEJ0HIHkhy3cfLKLopC28IkSrZG%2BXsUH4Gr%2BYWRZmoyj9Ln0N5iq0JEuvXC8cHS4PpZZzL4dUqjnkqbyQKfyfyOuNxdlkueNYntbjjvIRQVnHc0eRc7xVuzP%2FYG8Nz%2BMsGM9geBZGmMjqy5Nbmgp4qh3xpPVEiollhIUehP9cEZJcR4D4HWHSF%2B6IDs7VU3cFA9YfybCJbxAGi%2Fnu%2FYB50nL8fBMtR3hvV%2FByxse3eHBA8Rex2xQeA88dvvHbGNHmmNvupeoUTqVUXUR3cpQ%2BS7hVrYS%2Bk0hO1rGURfgGCP1EygWgjYuUXtYEJEo73JlYh9ALQyy5ylVCTpC9dB9boCRKNSk5H0kY1yQJE6lpknBkEGTWQT3WpBxWnBbUKMSReDVfNo40DPpiwKqcg62a82ZTJGVvJUA%2FFYfU6bRRHBKKLT1yl%2B%2FBozKxRe1MbKlLo8Z9CdTO0ygljV%2F78XE8OT8%2Bys8mtaDRSO3aqNSWBFyAlqb52q%2FVxtve6xvVfHKt%2B2t1qPWHehzKhFoAljrvxb6fqrl9n%2BUr%2B59vOEHj9uHi8Z186%2F78cjn%2F8k19%2BrrZ3g97pD3%2FvXxo2gt8PJUPTWYn8aHauv3t3cVNbc3%2BBqvuxGjseRBkdE4dPsm3wBA%2FXIkfpyBzTiW%2B4QjguZxFmo6VdvHaMmk6ZZ0lfhMa3CR9TfwpOVoHvqzUQjNXVtYQAguZdy1vLdMkpDAHk6Tlhq2EOijJbiUJzsQBTskU2714WJNL%2FA%2Bv3hUw1wke5hW%2BlpNr%2FA8e98Mr18Fd6xYBLsII%2B4iCcFBMyG0ADdQZfDxrvSnggdBMr44PxoN6MtALlVi1ukyncQPNacmFWmGL1ncPkkQJ5iSUjnBFf4lC
LwK2A%2FUrYINfFJgOvvnbqGI53fmeY7%2BiG0%2FDIhyJxTUm467jPzw9YCZNXnj%2B%2F0LfWjbg%2B3tkPyDoddBGZM2Uc6MKk7KQKh3BnWri%2BbRneC4VUN1HQWSHQT%2BR3cAEOo3i4DS3gELbOwJWDFVFirYWVLGh6WpE%2FjPR0Iw8G%2BMdPF2dmtdqfMKL35Iaz3XZuyUPD3aruSVnvd%2BStrvu6X7EIxvyvWi4to0l81RIPdIN2I%2BcQ%2FULwc%2BAzBO1Rf4ilnqVLqXerkx%2FXJjdK%2FXOO7PTVo67N6SnKPX6CFTJnlKfVsMumUixiFdkEWwsZ%2FczMHxHD9Dvn%2FQGVu5%2FhiTRaBEYI9o2utmap0Vi5rsqDbDMByv2eXHII%2FjGaINsD%2FnB6ObkRRJ2WViYfsWqfsXjxUQv3mywJX0wQqYrTBAmO0qNOI4O%2Feya1gpoFsEM8mAiChtREEJXmr4FRuosAyrHJZGnwLlpoYMQ40ewwcQgoLROkRyETJRGN1F2Yy%2BYeVvS6nxWj8HLSgMcXlzk7fREjF%2FR1uPPM3NjazGOcf029uGkeNtZnTFn8vUXnXS84h93zk%2FqBaU06B%2BxnBXy0ROmfhbJRavrKjGBGmwptQG7MaEJPFadRkeSmEjbWvo6Q%2BSz%2B1JykznYrTJuKzbyvLGQqtKtW%2BX%2B92P4aG23MlI%2Baab0dbwcXg6nz0XVODayOkN7yvSVHEURAL5xIqNpWa5UiHyj5LBAZIrUapyteKtMzxNCN82hMx9w6bjyocCnhdzVJp1BtASDoYJl1nDnoeAo2klMlKQ2E0nrwaQMZKudh9qhmPk5HE4yJ38FyRxLPQwf597O%2Fjslj63Q26hZp7Yzs%2FGQ45OMcMqzib%2FrQ650bSirPYmT4gSDG31Z2fjSOKnc89q4VWb2k7rMDgyYIi%2BRiCmw3mMNIGXVT9xrZ%2BRn6fq1dCKHMzLtWTKyfEjVRCnWg2iTkYnjwV9gOYimTBHTmvRuJveC3k3lrEljMt8jvOeeH8%2FboXdLfXsFxkRy5ExNamdgwkJqLy2t9Xdr3UaSJx6mwYZ5OEWbPkuKlhfNJ1q3ormQolUndv5fMhceTZCmYcJVbUswr00lLkzz2%2BcPl7Ts4R3esBLkiv%2FOBl6IiEa6Lr1uEh8J7mjAnSdJIUXmu7kh6wMfIGndBLN1k5gEiBn1Rq6MAcqWwW9H0MLTwrIdm0noknEfTqNm7dOo3LlBqwn8HRSPCML3luTvIB91je9o5A%2FsY%2Fh%2BCNtm0lQNk5xIpwhKfHVuzv3rJbr%2BmnOztVEm8tHmXDWLZi2Zc%2FmA%2B2bOXSMHXbrmDouFSm2R0OcKMBMlMbl%2BIFQbeoMPImcNrvU2bLnwSSiOiAejHEyIJ9KgdUJ8BmFxPOtWWBRb%2BP5631ej0mLtzNx%2BWvg0tZ8WPiysHWbcw9hHKtXS0DAeS8%2FKGxUE2cCzrTA8d9pkSv7k8zlc5uwgeTInJVaJmmXiabkIegaZU5O6lTkrTVy9CYAtxt6Hvm7c9zT%2BlVqtYMpskMxC%2F2AFUNEsoEfk9j7VpAFszwfqjQUmprNF4gstHz2qnNB%2FCWFWW0JoWkA4KX6fD%2Fso8nX2gso3CGLnHAN4%2BTvkuJlzSiqkhO7pGpVPSKEHDAG8JZ6I%2B3GdmUQ%2B0n2F9DDyc6fxkW%2Bsw1XbMd2FKO2kDjAL0LZdmowYkCkCTlcVWx4cWQ64weq%2BTcgkSoZIT%2BW60dRSA1RavIOLpSEKC3fewr0xDXm5hXvFKnaPHDIxzzzIISO3yEDngiAacYJgvzLg5uV1H3p%2FplwSaRMnZxFMqlYVXtr5a3RJssUpBuT8LbxT2flb1GOWy3HjKUz3mJXDWUj8GvN2Urs%2FrlNA%2FWk6QTZRJWdq19DxW7vACsg6
Iy84%2FbScc53k8%2F%2BBnX5Q0HM492fOSxrE4lCxZpI8E7D22fR0aix0AT7%2F7PH97rs6x6%2Fy02rrqaNd8c54mJ1Yzz44dbLH9cfc4WE9sp7xuIy4lswfy%2FOQ%2BbpqZK3oio6%2BpaG3mIc9gE5IjpWnpj1PZ4dWIibC1M3NbVPXdV1bHt3IIxpBsv7T51EqR4zyWZtKc0q4ILq4ZR2c1%2BPtsRMA6odmEuF7RMaW%2BjYhYi%2FC1J9NWlTnNUtNnq%2FoTqdZXU3Y%2BqdtGvu5Y7KDGpWNhANMp4eFA0xnsxyKnSnhB2qf3LACLHVDAhi0QNag1mkiZWDKc2%2B6j84gqUkCE2oh14fVb4E5HBwGME%2Bjx%2FOJeMp76qfjolm8TU%2F9e%2BnjanM9C%2B2Hzc3y%2Bv3t4uef98NyR30BuYAPCPlc3mY1FrGzT2gNom2CvLS7AjtblrIzVrU5BSRhheV0VAZrKhR6Li3sLMKKLEVvhNNlCYlAVBvLFab%2FUxidEAmKpxwuYX8PUodbxgU5oa8VFUxsolTV8KoctxubWOlZdgvynZXegWPBSvPakY2v9CmOZS0vEqcsvnyFk0ay1iJtKVMxKlWJKgFY56LvkfLuIba%2FMyCXzGsE7cMudrBP89jVptOzEr1frs9TuCzP0BnGbO8L6tap2I4vze9VuRgv1ovT24F16F5qgMXMtWzmz0SQM8EjJDMcRjkTh%2BmyzL1FPRMLN2fa62Wg2wsw2OWRUyz%2FiLBTOxN2HlrxHSTfIR8ZQVHKkyp8DHV6EXfyIcSANNIR54k3grwWBSQfjaF5sUhrAZfLhIfCgDTxiN7QxEli%2F0E0XtGEoxSAd4sCLkh8RTXrL4yxEt9P1%2BjZnhBvnP3K%2FMb1rT94fnr8%2FVjGUJvZJAUlYVzcJMpsXtwkZ7Nq%2F%2FX5wEWbdrow7rratpC2cv%2BY2x%2FvrA9vl7e3T%2B7O3X388eXbsK6Rm5sQuq7ykjtdcpo9VXbv87ymx7mN3OyQCWKQ%2BATHYtQu9eJ6OwKP4B4%2BQU7UoPUZYjmBNXJx4YwW760VgA6iO8iN4JrNiljTYru3IrR8lyPbszN851N9px2XbBSegNrxMXIHEUJ88ZVUSibW64aPRT2eFAqSe6pIZvuhyULAF21izxPwchbwyplcvXUhX%2BkSbh%2F0wjjIjqu0ngv0x0L%2B0KDKuqggXPu2QiqrBlnLMB759u4S8m5hOvu4ZYId9Cqkqbnqm%2BH8wD1Xn72Op2ruUNaxoO6Qohb5qzptwPMgrhspdbu1zn4qa1UVtP26QGd1jIXD7lMd4x6f%2Fn4ayM9zMMnBMU6T3LklMyWNPnufV7STDn%2BvZ5GY9Acdz3K2XV1EElNWpReIlLcLqPuqxeafnyinIFJtO8JVFOqm7oW17Qe8JV2Y0Udb9wGsoKQrYpVluWyspk7ccRflHA02xao4unKUe3bWhHwYndrHKrN8l%2FY41j3EysS2r8Hu8eCklxf2nq1woyoyl%2B67C3wXBazkVjm1CoQeX8ZBJ6nCVwvyR8g508ut5eWzrFqVq7P1VoO%2FgxJnUb5gF2vGVM0yGBtfswMoCUvf6CbBAimrvzUU6jmf5tigKAJxKnD%2FqHzJGwcuZ8yl0UgHxBKStyI732LHns1%2F8m5EcoCS7dL8bohDxzwtQo5BE%2BNYkU6bnj5asfvtQiZX0nIHtIT0d0Xoj%2BfRzmm%2BePbIeILT%2BFcQZl2zVf0X5pSfkkGhwGbBvvvPFSVrmIKT4Rw5uWvX3%2BLP%2F9Ez2YPx2X0SCaGq19UHE3IQV8lKsIR681cUhOSIQZqCXzhjFE97BRuULuuD7lsQSUNSLS3HsEfJEhg7g0SCeRs9QK9rjusNySyV0BNeVJ%2BPjRUJ%2BHp1QcEaDm%2Bv38Zj%2Fvb54msChVpfubKjgMOC%2Bo
ssuh67ZPhBtBx6rhfZvKLPIeu6o8zvPi4ym9Rx46tDh1Cz2wvHAfKE9g7vYJx6Y61IRSkoUkQX3kdBwHEiLq9AyjFAkMKQFGIi3JqNBAi%2Fd%2BgAcGORdOTDCnJc56%2FJAZhP8vpRkTEImb7aRBaA0ELbIzX8SKtQI2e%2FdFApfDzOWl20ec5OW79SeNYw3FalcD7gvlUK%2FxzZoYU5b%2B2suWiLSTZhoTHDRu0diFUzSW6vf6VV5b5A82rtmLJqtjOBfNz1CQpyp7ECXdb8PNRDKFw%2BRtt6U3SlapD9rPj5NZ2l5aysdeQXJNIeGXNo3U7%2BBZDlNz5aZT5QIKGYio982xs5kIFTSkaHCSk1qXa1oAsC7fDYiF4mVTZTySFM608aMdYPQz9yIOTVLE5D3CFfW73Oep9%2FdbMG6HZXHKsJ%2BhY0v0aWW9Bb1VqLJp4vHdv2gpDjMxtZjFxPPUC6OAK%2BpIHqhMC5Xf%2FedokdZ4u1xzXRf1PjFemB5d0e0YCVV8KSggO%2B%2B6q5AeKF0LHYCQyHjYCD3I78TFYbCJgklxor9oMk4F8K9n8EUBQr6sjAYoOftdW5BmjxKQDWmtfr2JYiuptUOY4TwuPtGr%2BIBcGAFT6mDelSyLSWY2JMoAiOf3x2A2poChHYG%2B4RAvxNFjFA5KyKpK5jyUxa3QZnTEw2fde746ISDK48Nf40yVvJCN4TVZSKK6oXNW0ge0csZosM07mVPCl9ec96lcuFpYv43NOX8SVmMWH6cYxgm8%2BuCSr12%2F8B). Specify desired analysis details for your data in the respective *essential.vars.groovy* file (see below) and run the selected pipeline *marsseq.pipeline.groovy* or *smartsseq.pipeline.groovy* as described [here](https://gitlab.rlp.net/imbforge/NGSpipe2go/-/blob/master/README.md). The analysis allows further parameter fine-tuning subsequent the initial analysis e.g. for plotting and QC thresholding. Therefore, a customisable *sc.report.Rmd* file will be generated in the output reports folder after running the pipeline. Go through the steps and modify the default settings where appropriate. Subsequently, the *sc.report.Rmd* file can be converted to a final html report using the *knitr* R-package.\r +\r +### The pipelines includes:\r +- FastQC, MultiQC and other tools for rawdata quality control\r +- Adapter trimming with Cutadapt\r +- Mapping to the genome using STAR\r +- generation of bigWig tracks for visualisation of alignment\r +- Quantification with featureCounts (Subread) and UMI-tools (if UMIs are used for deduplication)\r +- Downstream analysis in R using a pre-designed markdown report file (*sc.report.Rmd*). 
Modify this file to fit your custom parameter and thresholds and render it to your final html report. The Rmd file uses, among others, the following tools and methods:\r + - QC: the [scater](http://bioconductor.org/packages/release/bioc/html/scater.html) package.\r + - Normalization: the [scran](http://bioconductor.org/packages/release/bioc/html/scran.html) package.\r + - Differential expression analysis: the [scde](http://bioconductor.org/packages/release/bioc/html/scde.html) package.\r + - Trajectory analysis (pseudotime): the [monocle](https://bioconductor.org/packages/release/bioc/html/monocle.html) package.\r +\r +### Pipeline parameter settings\r +- essential.vars.groovy: essential parameter describing the experiment \r + - project folder name\r + - reference genome\r + - experiment design\r + - adapter sequence, etc.\r +- additional (more specialized) parameter can be given in the var.groovy-files of the individual pipeline modules \r +- targets.txt: comma-separated txt-file giving information about the analysed samples. The following columns are required \r + - sample: sample identifier. Must be a unique substring of the input sample file name (e.g. common prefixes and suffixes may be removed). These names are grebbed against the count file names to merge targets.txt to the count data.\r + - plate: plate ID (number) \r + - row: plate row (letter)\r + - col: late column (number)\r + - cells: 0c/1c/10c (control wells)\r + - group: default variable for cell grouping (e.g. by condition)\r + \r + for pool-based libraries like MARSseq required additionally:\r + - pool: the pool ID comprises all cells from 1 library pool (i.e. a set of unique cell barcodes; the cell barcodes are re-used in other pools). Must be a unique substring of the input sample file name. For pool-based design, the pool ID is grebbed against the respective count data filename instead of the sample name as stated above.\r + - barcode: cell barcodes used as cell identifier in the count files. 
After merging the count data with targets.txt, the barcodes are replaced with sample IDs given in the sample column (i.e. here, sample names need not be a substring of input sample file name).\r +\r +### Programs required\r +- FastQC\r +- STAR\r +- Samtools\r +- Bedtools\r +- Subread\r +- Picard\r +- UCSC utilities\r +- RSeQC\r +- UMI-tools\r +- R\r +\r +## Resources\r +- QC: the [scater](http://bioconductor.org/packages/release/bioc/html/scater.html) package.\r +- Normalization: the [scran](http://bioconductor.org/packages/release/bioc/html/scran.html) package.\r +- Trajectory analysis (pseudotime): the [monocle](https://bioconductor.org/packages/release/bioc/html/monocle.html) package.\r +- A [tutorial](https://scrnaseq-course.cog.sanger.ac.uk/website/index.html) from Hemberg lab\r +- Luecken and Theis 2019 [Current best practices in single‐cell RNA‐seq analysis: a tutorial](https://www.embopress.org/doi/10.15252/msb.20188746)\r +\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/61?version=1" ; + schema1:isBasedOn "https://gitlab.rlp.net/imbforge/NGSpipe2go/-/tree/master/pipelines/scRNAseq" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNA-seq MARS-seq" ; + schema1:sdDatePublished "2024-08-05 10:32:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/61/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2104 ; + schema1:creator ; + schema1:dateCreated "2020-10-07T07:46:11Z" ; + schema1:dateModified "2023-01-16T13:44:52Z" ; + schema1:description """# scRNA-Seq pipelines\r +\r +Here we forge the tools to analyze single cell RNA-Seq experiments. 
The analysis workflow is based on the Bioconductor packages [*scater*](https://bioconductor.org/packages/devel/bioc/vignettes/scater/inst/doc/overview.html) and [*scran*](https://bioconductor.org/packages/devel/bioc/vignettes/scran/inst/doc/scran.html) as well as the Bioconductor workflows by Lun ATL, McCarthy DJ, & Marioni JC [*A step-by-step workflow for low-level analysis of single-cell RNA-seq data.*](http://doi.org/10.12688/f1000research.9501.1) F1000Res. 2016 Aug 31 [revised 2016 Oct 31];5:2122 and Amezquita RA, Lun ATL et al. [*Orchestrating Single-Cell Analysis with Bioconductor*](https://osca.bioconductor.org/index.html) Nat Methods. 2020 Feb;17(2):137-145.\r +\r +## Implemented protocols\r + - MARS-Seq (massively parallel single-cell RNA-sequencing): The protocol is based on the publications of Jaitin DA, et al. (2014). *Massively parallel single-cell RNA-seq for marker-free decomposition of tissues into cell types.* Science (New York, N.Y.), 343(6172), 776–779. https://doi.org/10.1126/science.1247651 and Keren-Shaul H., et al. (2019). *MARS-seq2.0: an experimental and analytical pipeline for indexed sorting combined with single-cell RNA sequencing.* Nature Protocols. https://doi.org/10.1038/s41596-019-0164-4. The MARS-Seq library preparation protocol is given [here](https://github.com/imbforge/NGSpipe2go/blob/master/resources/MARS-Seq_protocol_Step-by-Step_MML.pdf). The sequencing reads are demultiplexed according to the respective pool barcodes before they are used as input for the analysis pipeline. \r +- Smart-seq2: Libraries are generated using the [Smart-seq2 kit](http://www.nature.com/nmeth/journal/v10/n11/full/nmeth.2639.html). 
\r +\r +## Pipeline Workflow\r +All analysis steps are illustrated in the pipeline [flowchart](https://www.draw.io/?lightbox=1&highlight=0000ff&edit=_blank&layers=1&nav=1&title=scRNA-Seq#R7R3ZcpvK8mtUlTxIxSIh6dF2oiyVODm2Uzk5LykEI4kYAWGxrXz9nZ6FdUBIQoDjm%2BTeIwYYZqZ7eu%2BegXq1fXrn697ms2sie6BI5tNAfTNQFFmdTPB%2FoGVHW6bKjDasfctkDyUNt9YfxBol1hpZJgoyD4aua4eWl200XMdBRphp033ffcw%2BtnLt7Fc9fY0KDbeGbhdbv1tmuGGtsjZPbrxH1nrDPj1TpvTGUjfu174bOex7jusgemer827YHIONbrqPqSb17UC98l03pL%2B2T1fIhmXlK0bfW5TcjYfsIyes88J76eNqcz0L7YfNzfL6%2Fe3i55%2F3Qw6AB92O2FoMFM3GHV6a1gOsrm2tHXJD%2Bx3BUC99sgzxJf61Zv8lry39fAseEumLt5LFCHd87Tfh1sa%2FZHzP1pfIvoyX9Mq1XZ88pC7IH%2FxIEPrufQwkvIiXK9cJGUbJGoxbDzbIZD2SfuKrlWXbqU4R%2BRN3yu8QGKqXhru1DHwp4d9rWw8C9juGo0T6D%2FXQcmGFhnMpnl4aKgxQD8gP0VOqiUHpHXK3KPR3%2BBF2dzbR6CtsMw1nDAsfU6g5ZW2bFFaOJZVtCbYd1nHfCV7gHww1xGjyYFz%2Fc%2Fs9%2Bhi9%2BfPf3Ye7xY%2Fg4b%2Bh%2FKLx5K0Gf8vwZO3rpoWhfQz6NIAtykzKYEtMU1PYMpYE2KLxB0%2FBlj%2Ffwzd30r%2F%2FfVzcvPW3snn3bvdhOJELwEMmprfs0vXDjbt2Hd1%2Bm7SmIAALkzzzyXU9BpZfKAx3DIR6FLq4KYUU6MkK%2F4XXRxN29SN1580T65lc7PiFg%2Bebegkuf6TvJa%2BRq%2BQ98wLYDr40ALYAaWhcWDYfThEBMwhXCvnAjXwDVexFtsdD3V%2BjsOo59iAsfSUi%2BcjGNOwhywpFSEFexfPWd6kHPNdywiDV81doSPBzPJtk8VMZZ9lU7vnJbFb1PP5BR5CgZzyVWhj75e7L%2Fa%2FN7Ofvf%2B9CZfHz%2BrusLYez54KwRaw6PyKNZ00j0km8SN3DiRIgctYDlH8YEDBc4AdkzXsq50vwcK1exsJerpTBxeVWtxwAiuUh28LcADdeptkc%2FcY%2Bflh7JJXzCTzdybfpwAp8tMr0uglDkLYvADrKYm2FmMuOfNsbORg7lIW1Xa5cjCn45%2FW7W5iasnbxxRD%2Fb2m7S%2BDe6AH0ggWfN6YKi8C4ub4I0G%2F4udX9EP8c8fsjzL7dh1167JLBWGXSiFF%2BGPqRY%2Bgh3m2FeVb0Gq%2FrIlkAKfUvhocuEEVE61YCurzg0gU2VvbSDbwxYILGwV3e6UnQbh6sYvLQCJEZEmJshfoSv50iM1j%2FRivLsYhCokivlnDndaXA%2Fezmbbu6GRBeE3jIINwGa%2BEwUdC5XTOyAQ2f%2F%2FwQxnIntHRgh7pjxpMbwrStFRY4CdPXMU9F%2FlETbgAHn5ARhbDeKRwsGUm5MncGHUgTqEBc20mrQPK4ARVILDMJhJTcEmBF0IOfWFH0sMoIk6cCI5fuZrwhtjyNhctVKSXVXkO5sGSKYMV422E6Q0HIHypZpWCa7cBdrQJgQjkgHCboiw0Zky4lff77R0bq36eaDtKKaaKnlqimTWoIslJX12xJQ%2FgWIP%2FL8heYgDGpBCtQPQ1g6T4BRbOcNaVpS9c3kT%2FEzSAaEEmBIT6heBLG8kl8J23jvSCihIrG8Dd%2BwtNNM%2B5b6Z8kRuEXjMKnjE
HucOkr29FJEpeAI3GBpgi%2FcuDkmRXaZmeIUQdL8gaBTqpDqfBUDMPUU0o5IMOla%2B4KjQURKzRjPUXfejYaVAljycPF13mLxRsMSu14t5KDRYFUX1at%2FqGxOOaKWSx133DN5qcR9yu98tHvyPIB%2BzBwcf%2FS54ub2%2BEtIHL57GoO5OD5eq5rNz5Z2qn04U3ldF93Ml9MnxuELuuOTdWJtkvkdzIvsNU2PivSqfTKxry%2Fo2kZTWInnxbuNNo6HQOMEO7m5iZhfWkhk%2F%2BDn9IrA%2FMNn2zDR%2FhSJ3MEocJrbo6sOwn8EWtni5ywi0mNRqPmpqQ7IDW64QZ0XMnUQ%2F1cU4JGEUsn0hMRS9KabLWOoaSEX6aUpPQNUMksQ7cvmMc0BCXikvtPbbSCz7j4qZVNHEwr4lNKqxOPGytEt1iqgR4ffd2Lx3WaFq1kdDJ5pglUQpFOKM8PFuTxZUqWP0CBUzp11Tw3BY6D9NkocJ5Q%2FN%2Fi4VsOlfAlL9HZaPuQ7CC4p6XuYaQPh2xXXRAZ36GWsrJ4BN7wNTGnigxsy1pUx8u3bQpPHTVVSh7g5kysZl6mNK9ulM3Ycjl60P2gEaN%2FWZcnKaAFEJ0HIHkhy3cfLKLopC28IkSrZG%2BXsUH4Gr%2BYWRZmoyj9Ln0N5iq0JEuvXC8cHS4PpZZzL4dUqjnkqbyQKfyfyOuNxdlkueNYntbjjvIRQVnHc0eRc7xVuzP%2FYG8Nz%2BMsGM9geBZGmMjqy5Nbmgp4qh3xpPVEiollhIUehP9cEZJcR4D4HWHSF%2B6IDs7VU3cFA9YfybCJbxAGi%2Fnu%2FYB50nL8fBMtR3hvV%2FByxse3eHBA8Rex2xQeA88dvvHbGNHmmNvupeoUTqVUXUR3cpQ%2BS7hVrYS%2Bk0hO1rGURfgGCP1EygWgjYuUXtYEJEo73JlYh9ALQyy5ylVCTpC9dB9boCRKNSk5H0kY1yQJE6lpknBkEGTWQT3WpBxWnBbUKMSReDVfNo40DPpiwKqcg62a82ZTJGVvJUA%2FFYfU6bRRHBKKLT1yl%2B%2FBozKxRe1MbKlLo8Z9CdTO0ygljV%2F78XE8OT8%2Bys8mtaDRSO3aqNSWBFyAlqb52q%2FVxtve6xvVfHKt%2B2t1qPWHehzKhFoAljrvxb6fqrl9n%2BUr%2B59vOEHj9uHi8Z186%2F78cjn%2F8k19%2BrrZ3g97pD3%2FvXxo2gt8PJUPTWYn8aHauv3t3cVNbc3%2BBqvuxGjseRBkdE4dPsm3wBA%2FXIkfpyBzTiW%2B4QjguZxFmo6VdvHaMmk6ZZ0lfhMa3CR9TfwpOVoHvqzUQjNXVtYQAguZdy1vLdMkpDAHk6Tlhq2EOijJbiUJzsQBTskU2714WJNL%2FA%2Bv3hUw1wke5hW%2BlpNr%2FA8e98Mr18Fd6xYBLsII%2B4iCcFBMyG0ADdQZfDxrvSnggdBMr44PxoN6MtALlVi1ukyncQPNacmFWmGL1ncPkkQJ5iSUjnBFf4lCLwK2A%2FUrYINfFJgOvvnbqGI53fmeY7%2BiG0%2FDIhyJxTUm467jPzw9YCZNXnj%2B%2F0LfWjbg%2B3tkPyDoddBGZM2Uc6MKk7KQKh3BnWri%2BbRneC4VUN1HQWSHQT%2BR3cAEOo3i4DS3gELbOwJWDFVFirYWVLGh6WpE%2FjPR0Iw8G%2BMdPF2dmtdqfMKL35Iaz3XZuyUPD3aruSVnvd%2BStrvu6X7EIxvyvWi4to0l81RIPdIN2I%2BcQ%2FULwc%2BAzBO1Rf4ilnqVLqXerkx%2FXJjdK%2FXOO7PTVo67N6SnKPX6CFTJnlKfVsMumUixiFdkEWwsZ%2FczMHxHD9Dvn%2FQGVu5%2FhiTRaBEYI9o2utmap0Vi5rsqDbDMByv2eXHII%2FjGaINsD%2FnB6ObkRRJ2WViY
fsWqfsXjxUQv3mywJX0wQqYrTBAmO0qNOI4O%2Feya1gpoFsEM8mAiChtREEJXmr4FRuosAyrHJZGnwLlpoYMQ40ewwcQgoLROkRyETJRGN1F2Yy%2BYeVvS6nxWj8HLSgMcXlzk7fREjF%2FR1uPPM3NjazGOcf029uGkeNtZnTFn8vUXnXS84h93zk%2FqBaU06B%2BxnBXy0ROmfhbJRavrKjGBGmwptQG7MaEJPFadRkeSmEjbWvo6Q%2BSz%2B1JykznYrTJuKzbyvLGQqtKtW%2BX%2B92P4aG23MlI%2Baab0dbwcXg6nz0XVODayOkN7yvSVHEURAL5xIqNpWa5UiHyj5LBAZIrUapyteKtMzxNCN82hMx9w6bjyocCnhdzVJp1BtASDoYJl1nDnoeAo2klMlKQ2E0nrwaQMZKudh9qhmPk5HE4yJ38FyRxLPQwf597O%2Fjslj63Q26hZp7Yzs%2FGQ45OMcMqzib%2FrQ650bSirPYmT4gSDG31Z2fjSOKnc89q4VWb2k7rMDgyYIi%2BRiCmw3mMNIGXVT9xrZ%2BRn6fq1dCKHMzLtWTKyfEjVRCnWg2iTkYnjwV9gOYimTBHTmvRuJveC3k3lrEljMt8jvOeeH8%2FboXdLfXsFxkRy5ExNamdgwkJqLy2t9Xdr3UaSJx6mwYZ5OEWbPkuKlhfNJ1q3ormQolUndv5fMhceTZCmYcJVbUswr00lLkzz2%2BcPl7Ts4R3esBLkiv%2FOBl6IiEa6Lr1uEh8J7mjAnSdJIUXmu7kh6wMfIGndBLN1k5gEiBn1Rq6MAcqWwW9H0MLTwrIdm0noknEfTqNm7dOo3LlBqwn8HRSPCML3luTvIB91je9o5A%2FsY%2Fh%2BCNtm0lQNk5xIpwhKfHVuzv3rJbr%2BmnOztVEm8tHmXDWLZi2Zc%2FmA%2B2bOXSMHXbrmDouFSm2R0OcKMBMlMbl%2BIFQbeoMPImcNrvU2bLnwSSiOiAejHEyIJ9KgdUJ8BmFxPOtWWBRb%2BP5631ej0mLtzNx%2BWvg0tZ8WPiysHWbcw9hHKtXS0DAeS8%2FKGxUE2cCzrTA8d9pkSv7k8zlc5uwgeTInJVaJmmXiabkIegaZU5O6lTkrTVy9CYAtxt6Hvm7c9zT%2BlVqtYMpskMxC%2F2AFUNEsoEfk9j7VpAFszwfqjQUmprNF4gstHz2qnNB%2FCWFWW0JoWkA4KX6fD%2Fso8nX2gso3CGLnHAN4%2BTvkuJlzSiqkhO7pGpVPSKEHDAG8JZ6I%2B3GdmUQ%2B0n2F9DDyc6fxkW%2Bsw1XbMd2FKO2kDjAL0LZdmowYkCkCTlcVWx4cWQ64weq%2BTcgkSoZIT%2BW60dRSA1RavIOLpSEKC3fewr0xDXm5hXvFKnaPHDIxzzzIISO3yEDngiAacYJgvzLg5uV1H3p%2FplwSaRMnZxFMqlYVXtr5a3RJssUpBuT8LbxT2flb1GOWy3HjKUz3mJXDWUj8GvN2Urs%2FrlNA%2FWk6QTZRJWdq19DxW7vACsg6Iy84%2FbScc53k8%2F%2BBnX5Q0HM492fOSxrE4lCxZpI8E7D22fR0aix0AT7%2F7PH97rs6x6%2Fy02rrqaNd8c54mJ1Yzz44dbLH9cfc4WE9sp7xuIy4lswfy%2FOQ%2BbpqZK3oio6%2BpaG3mIc9gE5IjpWnpj1PZ4dWIibC1M3NbVPXdV1bHt3IIxpBsv7T51EqR4zyWZtKc0q4ILq4ZR2c1%2BPtsRMA6odmEuF7RMaW%2BjYhYi%2FC1J9NWlTnNUtNnq%2FoTqdZXU3Y%2BqdtGvu5Y7KDGpWNhANMp4eFA0xnsxyKnSnhB2qf3LACLHVDAhi0QNag1mkiZWDKc2%2B6j84gqUkCE2oh14fVb4E5HBwGME%2Bjx%2FOJeMp76qfjolm8TU%2F9e%2BnjanM9C%2B2Hzc3y%2Bv3t4u
ef98NyR30BuYAPCPlc3mY1FrGzT2gNom2CvLS7AjtblrIzVrU5BSRhheV0VAZrKhR6Li3sLMKKLEVvhNNlCYlAVBvLFab%2FUxidEAmKpxwuYX8PUodbxgU5oa8VFUxsolTV8KoctxubWOlZdgvynZXegWPBSvPakY2v9CmOZS0vEqcsvnyFk0ay1iJtKVMxKlWJKgFY56LvkfLuIba%2FMyCXzGsE7cMudrBP89jVptOzEr1frs9TuCzP0BnGbO8L6tap2I4vze9VuRgv1ovT24F16F5qgMXMtWzmz0SQM8EjJDMcRjkTh%2BmyzL1FPRMLN2fa62Wg2wsw2OWRUyz%2FiLBTOxN2HlrxHSTfIR8ZQVHKkyp8DHV6EXfyIcSANNIR54k3grwWBSQfjaF5sUhrAZfLhIfCgDTxiN7QxEli%2F0E0XtGEoxSAd4sCLkh8RTXrL4yxEt9P1%2BjZnhBvnP3K%2FMb1rT94fnr8%2FVjGUJvZJAUlYVzcJMpsXtwkZ7Nq%2F%2FX5wEWbdrow7rratpC2cv%2BY2x%2FvrA9vl7e3T%2B7O3X388eXbsK6Rm5sQuq7ykjtdcpo9VXbv87ymx7mN3OyQCWKQ%2BATHYtQu9eJ6OwKP4B4%2BQU7UoPUZYjmBNXJx4YwW760VgA6iO8iN4JrNiljTYru3IrR8lyPbszN851N9px2XbBSegNrxMXIHEUJ88ZVUSibW64aPRT2eFAqSe6pIZvuhyULAF21izxPwchbwyplcvXUhX%2BkSbh%2F0wjjIjqu0ngv0x0L%2B0KDKuqggXPu2QiqrBlnLMB759u4S8m5hOvu4ZYId9Cqkqbnqm%2BH8wD1Xn72Op2ruUNaxoO6Qohb5qzptwPMgrhspdbu1zn4qa1UVtP26QGd1jIXD7lMd4x6f%2Fn4ayM9zMMnBMU6T3LklMyWNPnufV7STDn%2BvZ5GY9Acdz3K2XV1EElNWpReIlLcLqPuqxeafnyinIFJtO8JVFOqm7oW17Qe8JV2Y0Udb9wGsoKQrYpVluWyspk7ccRflHA02xao4unKUe3bWhHwYndrHKrN8l%2FY41j3EysS2r8Hu8eCklxf2nq1woyoyl%2B67C3wXBazkVjm1CoQeX8ZBJ6nCVwvyR8g508ut5eWzrFqVq7P1VoO%2FgxJnUb5gF2vGVM0yGBtfswMoCUvf6CbBAimrvzUU6jmf5tigKAJxKnD%2FqHzJGwcuZ8yl0UgHxBKStyI732LHns1%2F8m5EcoCS7dL8bohDxzwtQo5BE%2BNYkU6bnj5asfvtQiZX0nIHtIT0d0Xoj%2BfRzmm%2BePbIeILT%2BFcQZl2zVf0X5pSfkkGhwGbBvvvPFSVrmIKT4Rw5uWvX3%2BLP%2F9Ez2YPx2X0SCaGq19UHE3IQV8lKsIR681cUhOSIQZqCXzhjFE97BRuULuuD7lsQSUNSLS3HsEfJEhg7g0SCeRs9QK9rjusNySyV0BNeVJ%2BPjRUJ%2BHp1QcEaDm%2Bv38Zj%2Fvb54msChVpfubKjgMOC%2Bossuh67ZPhBtBx6rhfZvKLPIeu6o8zvPi4ym9Rx46tDh1Cz2wvHAfKE9g7vYJx6Y61IRSkoUkQX3kdBwHEiLq9AyjFAkMKQFGIi3JqNBAi%2Fd%2BgAcGORdOTDCnJc56%2FJAZhP8vpRkTEImb7aRBaA0ELbIzX8SKtQI2e%2FdFApfDzOWl20ec5OW79SeNYw3FalcD7gvlUK%2FxzZoYU5b%2B2suWiLSTZhoTHDRu0diFUzSW6vf6VV5b5A82rtmLJqtjOBfNz1CQpyp7ECXdb8PNRDKFw%2BRtt6U3SlapD9rPj5NZ2l5aysdeQXJNIeGXNo3U7%2BBZDlNz5aZT5QIKGYio982xs5kIFTSkaHCSk1qXa1oAsC7fDYiF4mVTZTySFM608aMdYPQz9yIOTVLE5D3CFfW73Oep9%2FdbMG6H
ZXHKsJ%2BhY0v0aWW9Bb1VqLJp4vHdv2gpDjMxtZjFxPPUC6OAK%2BpIHqhMC5Xf%2FedokdZ4u1xzXRf1PjFemB5d0e0YCVV8KSggO%2B%2B6q5AeKF0LHYCQyHjYCD3I78TFYbCJgklxor9oMk4F8K9n8EUBQr6sjAYoOftdW5BmjxKQDWmtfr2JYiuptUOY4TwuPtGr%2BIBcGAFT6mDelSyLSWY2JMoAiOf3x2A2poChHYG%2B4RAvxNFjFA5KyKpK5jyUxa3QZnTEw2fde746ISDK48Nf40yVvJCN4TVZSKK6oXNW0ge0csZosM07mVPCl9ec96lcuFpYv43NOX8SVmMWH6cYxgm8%2BuCSr12%2F8B). Specify desired analysis details for your data in the respective *essential.vars.groovy* file (see below) and run the selected pipeline *marsseq.pipeline.groovy* or *smartsseq.pipeline.groovy* as described [here](https://gitlab.rlp.net/imbforge/NGSpipe2go/-/blob/master/README.md). The analysis allows further parameter fine-tuning subsequent the initial analysis e.g. for plotting and QC thresholding. Therefore, a customisable *sc.report.Rmd* file will be generated in the output reports folder after running the pipeline. Go through the steps and modify the default settings where appropriate. Subsequently, the *sc.report.Rmd* file can be converted to a final html report using the *knitr* R-package.\r +\r +### The pipelines includes:\r +- FastQC, MultiQC and other tools for rawdata quality control\r +- Adapter trimming with Cutadapt\r +- Mapping to the genome using STAR\r +- generation of bigWig tracks for visualisation of alignment\r +- Quantification with featureCounts (Subread) and UMI-tools (if UMIs are used for deduplication)\r +- Downstream analysis in R using a pre-designed markdown report file (*sc.report.Rmd*). Modify this file to fit your custom parameter and thresholds and render it to your final html report. 
The Rmd file uses, among others, the following tools and methods:\r + - QC: the [scater](http://bioconductor.org/packages/release/bioc/html/scater.html) package.\r + - Normalization: the [scran](http://bioconductor.org/packages/release/bioc/html/scran.html) package.\r + - Differential expression analysis: the [scde](http://bioconductor.org/packages/release/bioc/html/scde.html) package.\r + - Trajectory analysis (pseudotime): the [monocle](https://bioconductor.org/packages/release/bioc/html/monocle.html) package.\r +\r +### Pipeline parameter settings\r +- essential.vars.groovy: essential parameter describing the experiment \r + - project folder name\r + - reference genome\r + - experiment design\r + - adapter sequence, etc.\r +- additional (more specialized) parameter can be given in the var.groovy-files of the individual pipeline modules \r +- targets.txt: comma-separated txt-file giving information about the analysed samples. The following columns are required \r + - sample: sample identifier. Must be a unique substring of the input sample file name (e.g. common prefixes and suffixes may be removed). These names are grebbed against the count file names to merge targets.txt to the count data.\r + - plate: plate ID (number) \r + - row: plate row (letter)\r + - col: late column (number)\r + - cells: 0c/1c/10c (control wells)\r + - group: default variable for cell grouping (e.g. by condition)\r + \r + for pool-based libraries like MARSseq required additionally:\r + - pool: the pool ID comprises all cells from 1 library pool (i.e. a set of unique cell barcodes; the cell barcodes are re-used in other pools). Must be a unique substring of the input sample file name. For pool-based design, the pool ID is grebbed against the respective count data filename instead of the sample name as stated above.\r + - barcode: cell barcodes used as cell identifier in the count files. 
After merging the count data with targets.txt, the barcodes are replaced with sample IDs given in the sample column (i.e. here, sample names need not be a substring of input sample file name).\r +\r +### Programs required\r +- FastQC\r +- STAR\r +- Samtools\r +- Bedtools\r +- Subread\r +- Picard\r +- UCSC utilities\r +- RSeQC\r +- UMI-tools\r +- R\r +\r +## Resources\r +- QC: the [scater](http://bioconductor.org/packages/release/bioc/html/scater.html) package.\r +- Normalization: the [scran](http://bioconductor.org/packages/release/bioc/html/scran.html) package.\r +- Trajectory analysis (pseudotime): the [monocle](https://bioconductor.org/packages/release/bioc/html/monocle.html) package.\r +- A [tutorial](https://scrnaseq-course.cog.sanger.ac.uk/website/index.html) from Hemberg lab\r +- Luecken and Theis 2019 [Current best practices in single‐cell RNA‐seq analysis: a tutorial](https://www.embopress.org/doi/10.15252/msb.20188746)\r +\r +\r +""" ; + schema1:keywords "scRNA-seq, MARS-seq, bpipe, groovy" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "scRNA-seq MARS-seq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/61?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/972?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/circdna" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/circdna" ; + schema1:sdDatePublished "2024-08-05 10:24:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/972/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8586 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:45Z" ; + schema1:dateModified "2024-06-11T12:54:45Z" ; + schema1:description "Pipeline for the identification of circular DNAs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/972?version=7" ; + schema1:keywords "ampliconarchitect, ampliconsuite, circle-seq, circular, DNA, eccdna, ecdna, extrachromosomal-circular-dna, Genomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/circdna" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/972?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2021-12-20T17:04:47.628302" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-ont-artic-variant-calling" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-ont-artic-variant-calling/COVID-19-ARTIC-ONT" ; + schema1:sdDatePublished "2021-12-21 03:01:00 +0000" ; + schema1:softwareVersion "v0.3.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """\r +# BridgeDb tutorial: Gene HGNC name to Ensembl identifier\r +\r +This tutorial explains how to use the BridgeDb identifier mapping service to translate HGNC names to Ensembl identifiers. 
This step is part of the OpenRiskNet use case to link Adverse Outcome Pathways to [WikiPathways](https://wikipathways.org/).\r +\r +First we need to load the Python library to allow calls to the [BridgeDb REST webservice](http://bridgedb.prod.openrisknet.org/swagger/):\r +\r +\r +```python\r +import requests\r +```\r +\r +Let's assume we're interested in the gene with HGNC MECP2 (FIXME: look up a gene in AOPWiki), the API call to make mappings is given below as `callUrl`. Here, the `H` indicates that the query (`MECP2`) is an HGNC symbol:\r +\r +\r +```python\r +callUrl = 'http://bridgedb.prod.openrisknet.org/Human/xrefs/H/MECP2'\r +```\r +\r +The default call returns all identifiers, not just for Ensembl:\r +\r +\r +```python\r +response = requests.get(callUrl)\r +response.text\r +```\r +\r +\r +\r +\r + 'GO:0001964\\tGeneOntology\\nuc065cav.1\\tUCSC Genome Browser\\n312750\\tOMIM\\nGO:0042551\\tGeneOntology\\nuc065car.1\\tUCSC Genome Browser\\nA0A087X1U4\\tUniprot-TrEMBL\\n4204\\tWikiGenes\\nGO:0043524\\tGeneOntology\\nILMN_1702715\\tIllumina\\n34355_at\\tAffy\\nGO:0007268\\tGeneOntology\\nMECP2\\tHGNC\\nuc065caz.1\\tUCSC Genome Browser\\nA_33_P3339036\\tAgilent\\nGO:0006576\\tGeneOntology\\nuc065cbg.1\\tUCSC Genome Browser\\nGO:0006342\\tGeneOntology\\n300496\\tOMIM\\nGO:0035176\\tGeneOntology\\nuc065cbc.1\\tUCSC Genome Browser\\nGO:0033555\\tGeneOntology\\nGO:0045892\\tGeneOntology\\nA_23_P114361\\tAgilent\\nGO:0045893\\tGeneOntology\\nENSG00000169057\\tEnsembl\\nGO:0090063\\tGeneOntology\\nGO:0005515\\tGeneOntology\\nGO:0002087\\tGeneOntology\\nGO:0005634\\tGeneOntology\\nGO:0007416\\tGeneOntology\\nGO:0008104\\tGeneOntology\\nGO:0042826\\tGeneOntology\\nGO:0007420\\tGeneOntology\\nGO:0035067\\tGeneOntology\\n300005\\tOMIM\\nNP_001104262\\tRefSeq\\nA0A087WVW7\\tUniprot-TrEMBL\\nNP_004983\\tRefSeq\\nGO:0046470\\tGeneOntology\\nGO:0010385\\tGeneOntology\\n11722682_at\\tAffy\\nGO:0051965\\tGeneOntology\\nNM_001316337\\tRefSeq\\nuc065caw.1\\tUCSC Genome 
Browser\\nA0A0D9SFX7\\tUniprot-TrEMBL\\nA0A140VKC4\\tUniprot-TrEMBL\\nGO:0003723\\tGeneOntology\\nGO:0019233\\tGeneOntology\\nGO:0001666\\tGeneOntology\\nGO:0003729\\tGeneOntology\\nGO:0021591\\tGeneOntology\\nuc065cas.1\\tUCSC Genome Browser\\nGO:0019230\\tGeneOntology\\nGO:0003682\\tGeneOntology\\nGO:0001662\\tGeneOntology\\nuc065cbh.1\\tUCSC Genome Browser\\nX99687_at\\tAffy\\nGO:0008344\\tGeneOntology\\nGO:0009791\\tGeneOntology\\nuc065cbd.1\\tUCSC Genome Browser\\nGO:0019904\\tGeneOntology\\nGO:0030182\\tGeneOntology\\nGO:0035197\\tGeneOntology\\n8175998\\tAffy\\nGO:0016358\\tGeneOntology\\nNM_004992\\tRefSeq\\nGO:0003714\\tGeneOntology\\nGO:0005739\\tGeneOntology\\nGO:0005615\\tGeneOntology\\nGO:0005737\\tGeneOntology\\nuc004fjv.3\\tUCSC Genome Browser\\n202617_s_at\\tAffy\\nGO:0050905\\tGeneOntology\\nGO:0008327\\tGeneOntology\\nD3YJ43\\tUniprot-TrEMBL\\nGO:0003677\\tGeneOntology\\nGO:0006541\\tGeneOntology\\nGO:0040029\\tGeneOntology\\nA_33_P3317211\\tAgilent\\nNP_001303266\\tRefSeq\\n11722683_a_at\\tAffy\\nGO:0008211\\tGeneOntology\\nGO:0051151\\tGeneOntology\\nNM_001110792\\tRefSeq\\nX89430_at\\tAffy\\nGO:2000820\\tGeneOntology\\nuc065cat.1\\tUCSC Genome Browser\\nGO:0003700\\tGeneOntology\\nGO:0047485\\tGeneOntology\\n4204\\tEntrez Gene\\nGO:0009405\\tGeneOntology\\nA0A0D9SEX1\\tUniprot-TrEMBL\\nGO:0098794\\tGeneOntology\\n3C2I\\tPDB\\nHs.200716\\tUniGene\\nGO:0000792\\tGeneOntology\\nuc065cax.1\\tUCSC Genome Browser\\n300055\\tOMIM\\n5BT2\\tPDB\\nGO:0006020\\tGeneOntology\\nGO:0031175\\tGeneOntology\\nuc065cbe.1\\tUCSC Genome Browser\\nGO:0008284\\tGeneOntology\\nuc065cba.1\\tUCSC Genome Browser\\nGO:0060291\\tGeneOntology\\n202618_s_at\\tAffy\\nGO:0016573\\tGeneOntology\\n17115453\\tAffy\\nA0A1B0GTV0\\tUniprot-TrEMBL\\nuc065cbi.1\\tUCSC Genome Browser\\nGO:0048167\\tGeneOntology\\nGO:0007616\\tGeneOntology\\nGO:0016571\\tGeneOntology\\nuc004fjw.3\\tUCSC Genome 
Browser\\nGO:0007613\\tGeneOntology\\nGO:0007612\\tGeneOntology\\nGO:0021549\\tGeneOntology\\n11722684_a_at\\tAffy\\nGO:0001078\\tGeneOntology\\nX94628_rna1_s_at\\tAffy\\nGO:0007585\\tGeneOntology\\nGO:0010468\\tGeneOntology\\nGO:0031061\\tGeneOntology\\nA_24_P237486\\tAgilent\\nGO:0050884\\tGeneOntology\\nGO:0000930\\tGeneOntology\\nGO:0005829\\tGeneOntology\\nuc065cau.1\\tUCSC Genome Browser\\nH7BY72\\tUniprot-TrEMBL\\n202616_s_at\\tAffy\\nGO:0006355\\tGeneOntology\\nuc065cay.1\\tUCSC Genome Browser\\nGO:0010971\\tGeneOntology\\n300673\\tOMIM\\nGO:0008542\\tGeneOntology\\nGO:0060079\\tGeneOntology\\nuc065cbf.1\\tUCSC Genome Browser\\nGO:0006122\\tGeneOntology\\nuc065cbb.1\\tUCSC Genome Browser\\nGO:0007052\\tGeneOntology\\nC9JH89\\tUniprot-TrEMBL\\nB5MCB4\\tUniprot-TrEMBL\\nGO:0032048\\tGeneOntology\\nGO:0050432\\tGeneOntology\\nGO:0001976\\tGeneOntology\\nI6LM39\\tUniprot-TrEMBL\\nGO:0005813\\tGeneOntology\\nILMN_1682091\\tIllumina\\nP51608\\tUniprot-TrEMBL\\n1QK9\\tPDB\\nGO:0006349\\tGeneOntology\\nGO:1900114\\tGeneOntology\\nGO:0000122\\tGeneOntology\\nGO:0006351\\tGeneOntology\\nGO:0008134\\tGeneOntology\\nILMN_1824898\\tIllumina\\n300260\\tOMIM\\n0006510725\\tIllumina\\n'\r +\r +\r +\r +You can also see the results are returned as a TSV file, consisting of two columns, the identifier and the matching database.\r +\r +We will want to convert this reply into a Python dictionary (with the identifier as key, as one database may have multiple identifiers):\r +\r +\r +```python\r +lines = response.text.split("\\n")\r +mappings = {}\r +for line in lines:\r + if ('\\t' in line):\r + tuple = line.split('\\t')\r + identifier = tuple[0]\r + database = tuple[1]\r + if (database == "Ensembl"):\r + mappings[identifier] = database\r +\r +print(mappings)\r +```\r +\r + {'ENSG00000169057': 'Ensembl'}\r +\r +\r +Alternatively, we can restrivct the return values from the BridgeDb webservice to just return Ensembl identifiers (system code `En`). 
For this, we add the `?dataSource=En` call parameter:\r +\r +\r +```python\r +callUrl = 'http://bridgedb-swagger.prod.openrisknet.org/Human/xrefs/H/MECP2?dataSource=En'\r +response = requests.get(callUrl)\r +response.text\r +```\r +\r +\r +\r +\r + 'ENSG00000169057\\tEnsembl\\n'\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/326?version=2" ; + schema1:isBasedOn "https://github.com/OpenRiskNet/notebooks.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for BridgeDb tutorial: Gene HGNC name to Ensembl identifier" ; + schema1:sdDatePublished "2024-08-05 10:32:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/326/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8122 ; + schema1:creator , + ; + schema1:dateCreated "2022-04-06T13:09:58Z" ; + schema1:dateModified "2022-04-06T13:09:58Z" ; + schema1:description """\r +# BridgeDb tutorial: Gene HGNC name to Ensembl identifier\r +\r +This tutorial explains how to use the BridgeDb identifier mapping service to translate HGNC names to Ensembl identifiers. This step is part of the OpenRiskNet use case to link Adverse Outcome Pathways to [WikiPathways](https://wikipathways.org/).\r +\r +First we need to load the Python library to allow calls to the [BridgeDb REST webservice](http://bridgedb.prod.openrisknet.org/swagger/):\r +\r +\r +```python\r +import requests\r +```\r +\r +Let's assume we're interested in the gene with HGNC MECP2 (FIXME: look up a gene in AOPWiki), the API call to make mappings is given below as `callUrl`. 
Here, the `H` indicates that the query (`MECP2`) is an HGNC symbol:\r +\r +\r +```python\r +callUrl = 'http://bridgedb.prod.openrisknet.org/Human/xrefs/H/MECP2'\r +```\r +\r +The default call returns all identifiers, not just for Ensembl:\r +\r +\r +```python\r +response = requests.get(callUrl)\r +response.text\r +```\r +\r +\r +\r +\r + 'GO:0001964\\tGeneOntology\\nuc065cav.1\\tUCSC Genome Browser\\n312750\\tOMIM\\nGO:0042551\\tGeneOntology\\nuc065car.1\\tUCSC Genome Browser\\nA0A087X1U4\\tUniprot-TrEMBL\\n4204\\tWikiGenes\\nGO:0043524\\tGeneOntology\\nILMN_1702715\\tIllumina\\n34355_at\\tAffy\\nGO:0007268\\tGeneOntology\\nMECP2\\tHGNC\\nuc065caz.1\\tUCSC Genome Browser\\nA_33_P3339036\\tAgilent\\nGO:0006576\\tGeneOntology\\nuc065cbg.1\\tUCSC Genome Browser\\nGO:0006342\\tGeneOntology\\n300496\\tOMIM\\nGO:0035176\\tGeneOntology\\nuc065cbc.1\\tUCSC Genome Browser\\nGO:0033555\\tGeneOntology\\nGO:0045892\\tGeneOntology\\nA_23_P114361\\tAgilent\\nGO:0045893\\tGeneOntology\\nENSG00000169057\\tEnsembl\\nGO:0090063\\tGeneOntology\\nGO:0005515\\tGeneOntology\\nGO:0002087\\tGeneOntology\\nGO:0005634\\tGeneOntology\\nGO:0007416\\tGeneOntology\\nGO:0008104\\tGeneOntology\\nGO:0042826\\tGeneOntology\\nGO:0007420\\tGeneOntology\\nGO:0035067\\tGeneOntology\\n300005\\tOMIM\\nNP_001104262\\tRefSeq\\nA0A087WVW7\\tUniprot-TrEMBL\\nNP_004983\\tRefSeq\\nGO:0046470\\tGeneOntology\\nGO:0010385\\tGeneOntology\\n11722682_at\\tAffy\\nGO:0051965\\tGeneOntology\\nNM_001316337\\tRefSeq\\nuc065caw.1\\tUCSC Genome Browser\\nA0A0D9SFX7\\tUniprot-TrEMBL\\nA0A140VKC4\\tUniprot-TrEMBL\\nGO:0003723\\tGeneOntology\\nGO:0019233\\tGeneOntology\\nGO:0001666\\tGeneOntology\\nGO:0003729\\tGeneOntology\\nGO:0021591\\tGeneOntology\\nuc065cas.1\\tUCSC Genome Browser\\nGO:0019230\\tGeneOntology\\nGO:0003682\\tGeneOntology\\nGO:0001662\\tGeneOntology\\nuc065cbh.1\\tUCSC Genome Browser\\nX99687_at\\tAffy\\nGO:0008344\\tGeneOntology\\nGO:0009791\\tGeneOntology\\nuc065cbd.1\\tUCSC Genome 
Browser\\nGO:0019904\\tGeneOntology\\nGO:0030182\\tGeneOntology\\nGO:0035197\\tGeneOntology\\n8175998\\tAffy\\nGO:0016358\\tGeneOntology\\nNM_004992\\tRefSeq\\nGO:0003714\\tGeneOntology\\nGO:0005739\\tGeneOntology\\nGO:0005615\\tGeneOntology\\nGO:0005737\\tGeneOntology\\nuc004fjv.3\\tUCSC Genome Browser\\n202617_s_at\\tAffy\\nGO:0050905\\tGeneOntology\\nGO:0008327\\tGeneOntology\\nD3YJ43\\tUniprot-TrEMBL\\nGO:0003677\\tGeneOntology\\nGO:0006541\\tGeneOntology\\nGO:0040029\\tGeneOntology\\nA_33_P3317211\\tAgilent\\nNP_001303266\\tRefSeq\\n11722683_a_at\\tAffy\\nGO:0008211\\tGeneOntology\\nGO:0051151\\tGeneOntology\\nNM_001110792\\tRefSeq\\nX89430_at\\tAffy\\nGO:2000820\\tGeneOntology\\nuc065cat.1\\tUCSC Genome Browser\\nGO:0003700\\tGeneOntology\\nGO:0047485\\tGeneOntology\\n4204\\tEntrez Gene\\nGO:0009405\\tGeneOntology\\nA0A0D9SEX1\\tUniprot-TrEMBL\\nGO:0098794\\tGeneOntology\\n3C2I\\tPDB\\nHs.200716\\tUniGene\\nGO:0000792\\tGeneOntology\\nuc065cax.1\\tUCSC Genome Browser\\n300055\\tOMIM\\n5BT2\\tPDB\\nGO:0006020\\tGeneOntology\\nGO:0031175\\tGeneOntology\\nuc065cbe.1\\tUCSC Genome Browser\\nGO:0008284\\tGeneOntology\\nuc065cba.1\\tUCSC Genome Browser\\nGO:0060291\\tGeneOntology\\n202618_s_at\\tAffy\\nGO:0016573\\tGeneOntology\\n17115453\\tAffy\\nA0A1B0GTV0\\tUniprot-TrEMBL\\nuc065cbi.1\\tUCSC Genome Browser\\nGO:0048167\\tGeneOntology\\nGO:0007616\\tGeneOntology\\nGO:0016571\\tGeneOntology\\nuc004fjw.3\\tUCSC Genome Browser\\nGO:0007613\\tGeneOntology\\nGO:0007612\\tGeneOntology\\nGO:0021549\\tGeneOntology\\n11722684_a_at\\tAffy\\nGO:0001078\\tGeneOntology\\nX94628_rna1_s_at\\tAffy\\nGO:0007585\\tGeneOntology\\nGO:0010468\\tGeneOntology\\nGO:0031061\\tGeneOntology\\nA_24_P237486\\tAgilent\\nGO:0050884\\tGeneOntology\\nGO:0000930\\tGeneOntology\\nGO:0005829\\tGeneOntology\\nuc065cau.1\\tUCSC Genome Browser\\nH7BY72\\tUniprot-TrEMBL\\n202616_s_at\\tAffy\\nGO:0006355\\tGeneOntology\\nuc065cay.1\\tUCSC Genome 
Browser\\nGO:0010971\\tGeneOntology\\n300673\\tOMIM\\nGO:0008542\\tGeneOntology\\nGO:0060079\\tGeneOntology\\nuc065cbf.1\\tUCSC Genome Browser\\nGO:0006122\\tGeneOntology\\nuc065cbb.1\\tUCSC Genome Browser\\nGO:0007052\\tGeneOntology\\nC9JH89\\tUniprot-TrEMBL\\nB5MCB4\\tUniprot-TrEMBL\\nGO:0032048\\tGeneOntology\\nGO:0050432\\tGeneOntology\\nGO:0001976\\tGeneOntology\\nI6LM39\\tUniprot-TrEMBL\\nGO:0005813\\tGeneOntology\\nILMN_1682091\\tIllumina\\nP51608\\tUniprot-TrEMBL\\n1QK9\\tPDB\\nGO:0006349\\tGeneOntology\\nGO:1900114\\tGeneOntology\\nGO:0000122\\tGeneOntology\\nGO:0006351\\tGeneOntology\\nGO:0008134\\tGeneOntology\\nILMN_1824898\\tIllumina\\n300260\\tOMIM\\n0006510725\\tIllumina\\n'\r +\r +\r +\r +You can also see the results are returned as a TSV file, consisting of two columns, the identifier and the matching database.\r +\r +We will want to convert this reply into a Python dictionary (with the identifier as key, as one database may have multiple identifiers):\r +\r +\r +```python\r +lines = response.text.split("\\n")\r +mappings = {}\r +for line in lines:\r + if ('\\t' in line):\r + tuple = line.split('\\t')\r + identifier = tuple[0]\r + database = tuple[1]\r + if (database == "Ensembl"):\r + mappings[identifier] = database\r +\r +print(mappings)\r +```\r +\r + {'ENSG00000169057': 'Ensembl'}\r +\r +\r +Alternatively, we can restrivct the return values from the BridgeDb webservice to just return Ensembl identifiers (system code `En`). 
For this, we add the `?dataSource=En` call parameter:\r +\r +\r +```python\r +callUrl = 'http://bridgedb-swagger.prod.openrisknet.org/Human/xrefs/H/MECP2?dataSource=En'\r +response = requests.get(callUrl)\r +response.text\r +```\r +\r +\r +\r +\r + 'ENSG00000169057\\tEnsembl\\n'\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/326?version=2" ; + schema1:keywords "Toxicology, jupyter" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "BridgeDb tutorial: Gene HGNC name to Ensembl identifier" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/326?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + schema1:datePublished "2024-03-06T19:03:19.804236" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Purge-duplicate-contigs-VGP6/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicate-contigs-VGP6/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:version "0.3.5" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + ; + schema1:description "This workflow generates a file describing the active site of the protein for each of the fragment screening crystal structures using rDock s rbcavity. It also creates a single hybrid molecule that contains all the ligands - the \"frankenstein\" ligand. 
More info can be found at https://covid19.galaxyproject.org/cheminformatics/" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/13?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Cheminformatics - Active site generation" ; + schema1:sdDatePublished "2024-08-05 10:33:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/13/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 1441 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5287 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T14:25:46Z" ; + schema1:dateModified "2023-01-16T13:40:43Z" ; + schema1:description "This workflow generates a file describing the active site of the protein for each of the fragment screening crystal structures using rDock s rbcavity. It also creates a single hybrid molecule that contains all the ligands - the \"frankenstein\" ligand. More info can be found at https://covid19.galaxyproject.org/cheminformatics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Cheminformatics - Active site generation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/13?version=1" ; + schema1:version 1 ; + ns1:input , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 3534 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """
\r +\r +drawing\r +\r +
\r +\r +MultiAffinity enables the study of how gene dysregulation propagates on a multilayer network on a disease of interest, uncovering key genes. Find the detailed documentation for the tool [here](https://marbatlle.github.io/multiAffinity/).\r +\r +![alt](https://github.com/marbatlle/multiAffinity/raw/main/docs/img/multiAffinity_workflow.png)""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/250?version=1" ; + schema1:isBasedOn "https://github.com/inab/ipc_workflows/tree/multiaffinity-20220318/multiAffinity" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for multiAffinity" ; + schema1:sdDatePublished "2024-08-05 10:30:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/250/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2562 ; + schema1:creator , + ; + schema1:dateCreated "2021-12-14T10:01:12Z" ; + schema1:dateModified "2023-04-21T11:42:40Z" ; + schema1:description """
\r +\r +drawing\r +\r +
\r +\r +MultiAffinity enables the study of how gene dysregulation propagates on a multilayer network on a disease of interest, uncovering key genes. Find the detailed documentation for the tool [here](https://marbatlle.github.io/multiAffinity/).\r +\r +![alt](https://github.com/marbatlle/multiAffinity/raw/main/docs/img/multiAffinity_workflow.png)""" ; + schema1:image ; + schema1:keywords "cancer, pediatric, rna-seq, networks, community-detection" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "multiAffinity" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/250?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 13027 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r 
+Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.260.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_md_setup/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Amber Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:32:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/260/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 153854 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 25801 ; + schema1:creator , + ; + schema1:dateCreated "2022-01-10T13:24:19Z" ; + schema1:dateModified "2023-06-08T07:26:22Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona 
Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/260?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Amber Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/260?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 6240 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """\r +\r +# MoMofy\r +Module for integrative Mobilome prediction\r +\r +\r +\r +Bacteria can acquire genetic material through horizontal gene transfer, allowing them to rapidly adapt to changing environmental conditions. These mobile genetic elements can be classified into three main categories: plasmids, phages, and integrons. 
Autonomous elements are those capable of excising themselves from the chromosome, reintegrating elsewhere, and potentially modifying the host's physiology. Small integrative elements like insertion sequences usually contain one or two genes and are frequently present in multiple copies in the genome, whereas large elements like integrative conjugative elements, often carry multiple cargo genes. The acquisition of large mobile genetic elements may provide genes for defence against other mobile genetic elements or impart new metabolic capabilities to the host.\r +\r +MoMofy is a wraper that integrates the ouptput of different tools designed for the prediction of autonomous integrative mobile genetic elements in prokaryotic genomes and metagenomes. \r +\r +## Contents\r +- [ Workflow ](#wf)\r +- [ Setup ](#sp)\r +- [ MoMofy install and dependencies ](#install)\r +- [ Usage ](#usage)\r +- [ Inputs ](#in)\r +- [ Outputs ](#out)\r +- [ Tests ](#test)\r +- [ Performance ](#profile)\r +- [ Citation ](#cite)\r +\r +\r +\r +## Workflow\r +\r +\r +\r +\r +\r +## Setup\r +\r +This workflow is built using [Nextflow](https://www.nextflow.io/). It uses Singularity containers making installation trivial and results highly reproducible.\r +Explained in this section section, there is one manual step required to build the singularity image for [ICEfinder](https://bioinfo-mml.sjtu.edu.cn/ICEfinder/index.php), as we can't distribute that software due to license issues.\r +\r +- Install [Nextflow version >=21.10](https://www.nextflow.io/docs/latest/getstarted.html#installation)\r +- Install [Singularity](https://github.com/apptainer/singularity/blob/master/INSTALL.md)\r +\r +\r +## MoMofy install and dependencies\r +\r +To install MoMofy, clone this repo by:\r +\r +```bash\r +$ git clone https://github.com/EBI-Metagenomics/momofy.git\r +```\r +\r +The mobileOG-database is required to run an extra step of annotation on the mobilome coding sequences. 
The first time you run MoMofy, you will need to download the [Beatrix 1.6 v1](https://mobileogdb.flsi.cloud.vt.edu/entries/database_download) database, move the tarball to `/PATH/momofy/databases`, decompress it, and run the script to format the db for diamond:\r +\r +```bash\r +$ mv beatrix-1-6_v1_all.zip /PATH/momofy/databases\r +$ cd /PATH/momofy/databases\r +$ unzip beatrix-1-6_v1_all.zip\r +$ nextflow run /PATH/momofy/format_mobileOG.nf\r +```\r +\r +Most of the tools are available on [quay.io](https://quay.io) and no install is needed. \r +\r +In the case of ICEfinder, you will need to contact the author to get a copy of the software, visit the [ICEfinder website](https://bioinfo-mml.sjtu.edu.cn/ICEfinder/download.html) for more information. Once you have the `ICEfinder_linux.tar.gz` tarball, move it to `momofy/templates` and build the singularity image using the following command:\r +\r +```bash\r +$ mv ICEfinder_linux.tar.gz /PATH/momofy/templates/\r +$ cd /PATH/momofy/templates/\r +$ sudo singularity build ../../singularity/icefinder-v1.0-local.sif icefinder-v1.0-local.def\r +```\r +\r +PaliDIS is an optional step on the workflow and the install is optional as well. 
Visit [PaliDIS repo](https://github.com/blue-moon22/PaliDIS) for installing instructions.\r +\r +If you are aim to run the pipeline in a system with jobs scheduler as LSF or SGE, set up a config file and provide it as part of the arguments as follows:\r +\r +```bash\r +$ nextflow run /PATH/momofy/momofy.nf --assembly contigs.fasta -c /PATH/configs/some_cluster.config\r +```\r +\r +You can find an example in the `configs` directory of this repo.\r +\r +\r +\r +## Usage\r +\r +Running the tool with `--help` option will display the following message:\r +\r +```bash\r +$ nextflow run /PATH/momofy/momofy.nf --help\r +N E X T F L O W ~ version 21.10.0\r +Launching `momofy.nf` [gigantic_pare] - revision: XXXXX\r +\r + MoMofy is a wraper that integrates the ouptput of different tools designed for the prediction of autonomous integrative mobile genetic elements in prokaryotic genomes and metagenomes.\r +\r + Usage:\r + The basic command for running the pipeline is as follows:\r +\r + nextflow run momofy.nf --assembly contigs.fasta\r +\r + Mandatory arguments:\r + --assembly (Meta)genomic assembly in fasta format (uncompress)\r +\r + Optional arguments:\r + --user_genes User annotation files. See --prot_fasta and --prot_gff [ default = false ]\r + --prot_gff Annotation file in GFF3 format. Mandatory with --user_genes true\r + --prot_fasta Fasta file of aminoacids. Mandatory with --user_genes true\r + --palidis Incorporate PaliDIS predictions to final output [ default = false ]\r + --palidis_fasta Fasta file of PaliDIS insertion sequences. Mandatory with --palidis true\r + --palidis_info Information file of PaliDIS insertion sequences. 
Mandatory with --palidis true\r + --gff_validation Run a step of format validation on the GFF3 file output [ default = true ]\r + --outdir Output directory to place final MoMofy results [ default = MoMofy_results ]\r + --help This usage statement [ default = false ]\r +```\r +\r +\r +## Inputs\r +\r +To run MoMofy in multiple samples, create a directory per sample and launch the tool from the sample directory. The only mandatory input is the (meta)genomic assembly file in fasta format (uncompress).\r +\r +Basic run:\r +\r +```bash\r +$ nextflow run /PATH/momofy/momofy.nf --assembly contigs.fasta\r +```\r +\r +Note that the final output in gff format is created by adding information to PROKKA output. If you have your own protein prediction files, provide the gff and the fasta file of amino acid sequences (both uncompressed files are mandatory with this option). These files will be used for Diamond annotation and CDS coordinates mapping to the MGEs boundaries. If any original annotation is present in the gff file, it will remained untouched.\r +\r +Running MoMofy with user's genes prediction:\r +\r +```bash\r +$ nextflow run /PATH/momofy/momofy.nf --assembly contigs.fasta \\\r + --user_genes true \\\r + --prot_fasta proteins.faa \\\r + --prot_gff annotation.gff \\\r +```\r +\r +If you want to incorporate PaliDIS predictions to the final output, provide the path of the two outputs of PaliDIS (fasta file of insertion sequences and the information for each insertion sequence file).\r +\r +To run MoMofy incorporating PaliDIS results:\r +\r +```bash\r +$ nextflow run /PATH/momofy/momofy.nf --assembly contigs.fasta \\\r + --palidis true \\\r + --palidis_fasta insertion_sequences.fasta \\\r + --palidis_info insertion_sequences_info.txt \\\r +```\r +\r +Then, if you have protein files and PaliDIS outputs, you can run:\r +\r +```bash\r +$ nextflow run /PATH/momofy/momofy.nf --assembly contigs.fasta \\\r + --user_genes true \\\r + --prot_fasta proteins.faa \\\r + --prot_gff 
annotation.gff \\\r + --palidis true \\\r + --palidis_fasta insertion_sequences.fasta \\\r + --palidis_info insertion_sequences_info.txt \\\r +```\r +\r +A GFF validation process is used to detect formatting errors in the final GFF3 output. This process can be skipped adding `--gff_validation false` to the command.\r +\r +\r +\r +## Outputs\r +\r +Results will be written by default in the `MoMofy_results` directory inside the sample dir unless the user define `--outdir` option. There you will find the following output files:\r +\r +```bash\r +MoMofy_results/\r +├── discarded_mge.txt\r +├── momofy_predictions.fna\r +├── momofy_predictions.gff\r +└── nested_integrons.txt\r +```\r +\r +The main MoMofy output files are the `momofy_predictions.fna` containing the nucleotide sequences of every prediction, and the `momofy_predictions.gff` containing the mobilome annotation plus any other feature annotated by PROKKA or in the gff file provided by the user with the option `--user_genes`. The labels used in the Type column of the gff file corresponds to the following nomenclature according to the [Sequence Ontology resource](http://www.sequenceontology.org/browser/current_svn/term/SO:0000001):\r +\r +| Type in gff file | Sequence ontology ID | Element description | Reporting tool |\r +| ------------- | ------------- | ------------- | ------------- |\r +| insertion_sequence | [SO:0000973](http://www.sequenceontology.org/browser/current_svn/term/SO:0000973) | Insertion sequence | ISEScan, PaliDIS |\r +| terminal_inverted_repeat_element | [SO:0000481](http://www.sequenceontology.org/browser/current_svn/term/SO:0000481) | Terminal Inverted Repeat (TIR) flanking insertion sequences | ISEScan, PaliDIS |\r +| integron | [SO:0000365](http://www.sequenceontology.org/browser/current_svn/term/SO:0000365) | Integrative mobilizable element | IntegronFinder, ICEfinder |\r +| attC_site | [SO:0000950](http://www.sequenceontology.org/browser/current_svn/term/SO:0000950) | Integration site of 
DNA integron | IntegronFinder |\r +| conjugative_transposon | [SO:0000371](http://www.sequenceontology.org/browser/current_svn/term/SO:0000371) | Integrative Conjugative Element | ICEfinder |\r +| direct_repeat | [SO:0000314](http://www.sequenceontology.org/browser/current_svn/term/SO:0000371) | Flanking regions on mobilizable elements | ICEfinder |\r +| CDS | [SO:0000316](http://www.sequenceontology.org/browser/current_svn/term/SO:0000316) | Coding sequence | Prodigal |\r +\r +\r +The file `discarded_mge.txt` contains a list of predictions that were discarded, along with the reason for their exclusion. Possible reasons include:\r +\r +1. overlapping For insertion sequences only, ISEScan prediction is discarded if an overlap with PaliDIS is found. \r +2. mge<500bp Discarded by length.\r +3. no_cds If there are no genes encoded in the prediction.\r +\r +The file `nested_integrons.txt` is a report of overlapping predictions reported by IntegronFinder and ICEfinder. No predictions are discarded in this case.\r +\r +Additionally, you will see the directories containing the main outputs of each tool.\r +\r +\r +## Tests\r +\r +Nextflow tests are executed with [nf-test](https://github.com/askimed/nf-test). It takes around 3 min in executing.\r +\r +Run:\r +\r +```bash\r +$ cd /PATH/momofy\r +$ nf-test test *.nf.test\r +```\r +\r +\r +## Performance\r +\r +MoMofy performance was profiled using 460 public metagenomic assemblies and co-assemblies of chicken gut (ERP122587, ERP125074, and ERP131894) with sizes ranging from ~62 K to ~893 M assembled bases. We used the metagenomic assemblies, CDS prediction and annotation files generated by MGnify v5 pipeline, and PaliDIS outputs generated after downsampling the number of reads to 10 M. MoMofy was run adding the following options: `-with-report -with-trace -with-timeline timeline.out`.\r +\r +\r +

\r + \r +

\r +

\r + \r + \r +

\r +\r +\r +\r +## Citation\r +\r +If you use MoMofy on your data analysis, please cite:\r +\r +XXXXX\r +\r +\r +MoMofy is a wrapper that integrates the output of the following tools and DBs:\r +\r +1) ISEScan v1.7.2.3 [Xie et al., Bioinformatics, 2017](https://doi.org/10.1093/bioinformatics/btx433)\r +2) IntegronFinder2 v2.0.2 [Néron et al., Microorganisms, 2022](https://doi.org/10.3390/microorganisms10040700)\r +3) ICEfinder v1.0 [Liu et al., Nucleic Acids Research, 2019](https://doi.org/10.1093/nar/gky1123)\r +4) PaliDIS [Carr et al., biorxiv, 2022](https://doi.org/10.1101/2022.06.27.497710)\r +\r +Databases:\r +- MobileOG-DB Beatrix 1.6 v1 [Brown et al., Appl Environ Microbiol, 2022](https://doi.org/10.1128/aem.00991-22)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/452?version=1" ; + schema1:isBasedOn "https://github.com/EBI-Metagenomics/momofy.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for MoMofy: Module for integrative Mobilome prediction" ; + schema1:sdDatePublished "2024-08-05 10:31:08 +0100" ; + schema1:url "https://workflowhub.eu/workflows/452/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 121232 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3473 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-06T09:40:57Z" ; + schema1:dateModified "2023-04-12T08:14:09Z" ; + schema1:description """\r +\r +# MoMofy\r +Module for integrative Mobilome prediction\r +\r +\r +\r +Bacteria can acquire genetic material through horizontal gene transfer, allowing them to rapidly adapt to changing environmental conditions. These mobile genetic elements can be classified into three main categories: plasmids, phages, and integrons. 
Autonomous elements are those capable of excising themselves from the chromosome, reintegrating elsewhere, and potentially modifying the host's physiology. Small integrative elements like insertion sequences usually contain one or two genes and are frequently present in multiple copies in the genome, whereas large elements like integrative conjugative elements, often carry multiple cargo genes. The acquisition of large mobile genetic elements may provide genes for defence against other mobile genetic elements or impart new metabolic capabilities to the host.\r +\r +MoMofy is a wraper that integrates the ouptput of different tools designed for the prediction of autonomous integrative mobile genetic elements in prokaryotic genomes and metagenomes. \r +\r +## Contents\r +- [ Workflow ](#wf)\r +- [ Setup ](#sp)\r +- [ MoMofy install and dependencies ](#install)\r +- [ Usage ](#usage)\r +- [ Inputs ](#in)\r +- [ Outputs ](#out)\r +- [ Tests ](#test)\r +- [ Performance ](#profile)\r +- [ Citation ](#cite)\r +\r +\r +\r +## Workflow\r +\r +\r +\r +\r +\r +## Setup\r +\r +This workflow is built using [Nextflow](https://www.nextflow.io/). It uses Singularity containers making installation trivial and results highly reproducible.\r +Explained in this section section, there is one manual step required to build the singularity image for [ICEfinder](https://bioinfo-mml.sjtu.edu.cn/ICEfinder/index.php), as we can't distribute that software due to license issues.\r +\r +- Install [Nextflow version >=21.10](https://www.nextflow.io/docs/latest/getstarted.html#installation)\r +- Install [Singularity](https://github.com/apptainer/singularity/blob/master/INSTALL.md)\r +\r +\r +## MoMofy install and dependencies\r +\r +To install MoMofy, clone this repo by:\r +\r +```bash\r +$ git clone https://github.com/EBI-Metagenomics/momofy.git\r +```\r +\r +The mobileOG-database is required to run an extra step of annotation on the mobilome coding sequences. 
The first time you run MoMofy, you will need to download the [Beatrix 1.6 v1](https://mobileogdb.flsi.cloud.vt.edu/entries/database_download) database, move the tarball to `/PATH/momofy/databases`, decompress it, and run the script to format the db for diamond:\r +\r +```bash\r +$ mv beatrix-1-6_v1_all.zip /PATH/momofy/databases\r +$ cd /PATH/momofy/databases\r +$ unzip beatrix-1-6_v1_all.zip\r +$ nextflow run /PATH/momofy/format_mobileOG.nf\r +```\r +\r +Most of the tools are available on [quay.io](https://quay.io) and no install is needed. \r +\r +In the case of ICEfinder, you will need to contact the author to get a copy of the software, visit the [ICEfinder website](https://bioinfo-mml.sjtu.edu.cn/ICEfinder/download.html) for more information. Once you have the `ICEfinder_linux.tar.gz` tarball, move it to `momofy/templates` and build the singularity image using the following command:\r +\r +```bash\r +$ mv ICEfinder_linux.tar.gz /PATH/momofy/templates/\r +$ cd /PATH/momofy/templates/\r +$ sudo singularity build ../../singularity/icefinder-v1.0-local.sif icefinder-v1.0-local.def\r +```\r +\r +PaliDIS is an optional step on the workflow and the install is optional as well. 
Visit [PaliDIS repo](https://github.com/blue-moon22/PaliDIS) for installing instructions.\r +\r +If you are aim to run the pipeline in a system with jobs scheduler as LSF or SGE, set up a config file and provide it as part of the arguments as follows:\r +\r +```bash\r +$ nextflow run /PATH/momofy/momofy.nf --assembly contigs.fasta -c /PATH/configs/some_cluster.config\r +```\r +\r +You can find an example in the `configs` directory of this repo.\r +\r +\r +\r +## Usage\r +\r +Running the tool with `--help` option will display the following message:\r +\r +```bash\r +$ nextflow run /PATH/momofy/momofy.nf --help\r +N E X T F L O W ~ version 21.10.0\r +Launching `momofy.nf` [gigantic_pare] - revision: XXXXX\r +\r + MoMofy is a wraper that integrates the ouptput of different tools designed for the prediction of autonomous integrative mobile genetic elements in prokaryotic genomes and metagenomes.\r +\r + Usage:\r + The basic command for running the pipeline is as follows:\r +\r + nextflow run momofy.nf --assembly contigs.fasta\r +\r + Mandatory arguments:\r + --assembly (Meta)genomic assembly in fasta format (uncompress)\r +\r + Optional arguments:\r + --user_genes User annotation files. See --prot_fasta and --prot_gff [ default = false ]\r + --prot_gff Annotation file in GFF3 format. Mandatory with --user_genes true\r + --prot_fasta Fasta file of aminoacids. Mandatory with --user_genes true\r + --palidis Incorporate PaliDIS predictions to final output [ default = false ]\r + --palidis_fasta Fasta file of PaliDIS insertion sequences. Mandatory with --palidis true\r + --palidis_info Information file of PaliDIS insertion sequences. 
Mandatory with --palidis true\r + --gff_validation Run a step of format validation on the GFF3 file output [ default = true ]\r + --outdir Output directory to place final MoMofy results [ default = MoMofy_results ]\r + --help This usage statement [ default = false ]\r +```\r +\r +\r +## Inputs\r +\r +To run MoMofy in multiple samples, create a directory per sample and launch the tool from the sample directory. The only mandatory input is the (meta)genomic assembly file in fasta format (uncompress).\r +\r +Basic run:\r +\r +```bash\r +$ nextflow run /PATH/momofy/momofy.nf --assembly contigs.fasta\r +```\r +\r +Note that the final output in gff format is created by adding information to PROKKA output. If you have your own protein prediction files, provide the gff and the fasta file of amino acid sequences (both uncompressed files are mandatory with this option). These files will be used for Diamond annotation and CDS coordinates mapping to the MGEs boundaries. If any original annotation is present in the gff file, it will remained untouched.\r +\r +Running MoMofy with user's genes prediction:\r +\r +```bash\r +$ nextflow run /PATH/momofy/momofy.nf --assembly contigs.fasta \\\r + --user_genes true \\\r + --prot_fasta proteins.faa \\\r + --prot_gff annotation.gff \\\r +```\r +\r +If you want to incorporate PaliDIS predictions to the final output, provide the path of the two outputs of PaliDIS (fasta file of insertion sequences and the information for each insertion sequence file).\r +\r +To run MoMofy incorporating PaliDIS results:\r +\r +```bash\r +$ nextflow run /PATH/momofy/momofy.nf --assembly contigs.fasta \\\r + --palidis true \\\r + --palidis_fasta insertion_sequences.fasta \\\r + --palidis_info insertion_sequences_info.txt \\\r +```\r +\r +Then, if you have protein files and PaliDIS outputs, you can run:\r +\r +```bash\r +$ nextflow run /PATH/momofy/momofy.nf --assembly contigs.fasta \\\r + --user_genes true \\\r + --prot_fasta proteins.faa \\\r + --prot_gff 
annotation.gff \\\r + --palidis true \\\r + --palidis_fasta insertion_sequences.fasta \\\r + --palidis_info insertion_sequences_info.txt \\\r +```\r +\r +A GFF validation process is used to detect formatting errors in the final GFF3 output. This process can be skipped adding `--gff_validation false` to the command.\r +\r +\r +\r +## Outputs\r +\r +Results will be written by default in the `MoMofy_results` directory inside the sample dir unless the user define `--outdir` option. There you will find the following output files:\r +\r +```bash\r +MoMofy_results/\r +├── discarded_mge.txt\r +├── momofy_predictions.fna\r +├── momofy_predictions.gff\r +└── nested_integrons.txt\r +```\r +\r +The main MoMofy output files are the `momofy_predictions.fna` containing the nucleotide sequences of every prediction, and the `momofy_predictions.gff` containing the mobilome annotation plus any other feature annotated by PROKKA or in the gff file provided by the user with the option `--user_genes`. The labels used in the Type column of the gff file corresponds to the following nomenclature according to the [Sequence Ontology resource](http://www.sequenceontology.org/browser/current_svn/term/SO:0000001):\r +\r +| Type in gff file | Sequence ontology ID | Element description | Reporting tool |\r +| ------------- | ------------- | ------------- | ------------- |\r +| insertion_sequence | [SO:0000973](http://www.sequenceontology.org/browser/current_svn/term/SO:0000973) | Insertion sequence | ISEScan, PaliDIS |\r +| terminal_inverted_repeat_element | [SO:0000481](http://www.sequenceontology.org/browser/current_svn/term/SO:0000481) | Terminal Inverted Repeat (TIR) flanking insertion sequences | ISEScan, PaliDIS |\r +| integron | [SO:0000365](http://www.sequenceontology.org/browser/current_svn/term/SO:0000365) | Integrative mobilizable element | IntegronFinder, ICEfinder |\r +| attC_site | [SO:0000950](http://www.sequenceontology.org/browser/current_svn/term/SO:0000950) | Integration site of 
DNA integron | IntegronFinder |\r +| conjugative_transposon | [SO:0000371](http://www.sequenceontology.org/browser/current_svn/term/SO:0000371) | Integrative Conjugative Element | ICEfinder |\r +| direct_repeat | [SO:0000314](http://www.sequenceontology.org/browser/current_svn/term/SO:0000371) | Flanking regions on mobilizable elements | ICEfinder |\r +| CDS | [SO:0000316](http://www.sequenceontology.org/browser/current_svn/term/SO:0000316) | Coding sequence | Prodigal |\r +\r +\r +The file `discarded_mge.txt` contains a list of predictions that were discarded, along with the reason for their exclusion. Possible reasons include:\r +\r +1. overlapping For insertion sequences only, ISEScan prediction is discarded if an overlap with PaliDIS is found. \r +2. mge<500bp Discarded by length.\r +3. no_cds If there are no genes encoded in the prediction.\r +\r +The file `nested_integrons.txt` is a report of overlapping predictions reported by IntegronFinder and ICEfinder. No predictions are discarded in this case.\r +\r +Additionally, you will see the directories containing the main outputs of each tool.\r +\r +\r +## Tests\r +\r +Nextflow tests are executed with [nf-test](https://github.com/askimed/nf-test). It takes around 3 min in executing.\r +\r +Run:\r +\r +```bash\r +$ cd /PATH/momofy\r +$ nf-test test *.nf.test\r +```\r +\r +\r +## Performance\r +\r +MoMofy performance was profiled using 460 public metagenomic assemblies and co-assemblies of chicken gut (ERP122587, ERP125074, and ERP131894) with sizes ranging from ~62 K to ~893 M assembled bases. We used the metagenomic assemblies, CDS prediction and annotation files generated by MGnify v5 pipeline, and PaliDIS outputs generated after downsampling the number of reads to 10 M. MoMofy was run adding the following options: `-with-report -with-trace -with-timeline timeline.out`.\r +\r +\r +

\r + \r +

\r +

\r + \r + \r +

\r +\r +\r +\r +## Citation\r +\r +If you use MoMofy on your data analysis, please cite:\r +\r +XXXXX\r +\r +\r +MoMofy is a wrapper that integrates the output of the following tools and DBs:\r +\r +1) ISEScan v1.7.2.3 [Xie et al., Bioinformatics, 2017](https://doi.org/10.1093/bioinformatics/btx433)\r +2) IntegronFinder2 v2.0.2 [Néron et al., Microorganisms, 2022](https://doi.org/10.3390/microorganisms10040700)\r +3) ICEfinder v1.0 [Liu et al., Nucleic Acids Research, 2019](https://doi.org/10.1093/nar/gky1123)\r +4) PaliDIS [Carr et al., biorxiv, 2022](https://doi.org/10.1101/2022.06.27.497710)\r +\r +Databases:\r +- MobileOG-DB Beatrix 1.6 v1 [Brown et al., Appl Environ Microbiol, 2022](https://doi.org/10.1128/aem.00991-22)\r +""" ; + schema1:image ; + schema1:keywords "Mobilome, Genomics, Metagenomics, Nextflow, MGE" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "MoMofy: Module for integrative Mobilome prediction" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/452?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for the analysis of CRISPR data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/975?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/crisprseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/crisprseq" ; + schema1:sdDatePublished "2024-08-05 10:22:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/975/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12239 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-07-24T03:02:45Z" ; + schema1:dateModified "2024-07-24T03:02:45Z" ; + schema1:description "Pipeline for the analysis of CRISPR data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/975?version=5" ; + schema1:keywords "crispr, crispr-analysis, crispr-cas, NGS" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/crisprseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/975?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Pre-processing of mass spectrometry-based metabolomics data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/57?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/metaboigniter" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/metaboigniter" ; + schema1:sdDatePublished "2024-08-05 10:24:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/57/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 23140 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:00Z" ; + schema1:dateModified "2024-06-11T12:55:00Z" ; + schema1:description "Pre-processing of mass spectrometry-based metabolomics data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/57?version=3" ; + schema1:keywords "Metabolomics, identification, quantification, mass-spectrometry, ms1, ms2" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/metaboigniter" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/57?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "B/T cell repertoire analysis pipeline with immcantation framework. WIP, currently requires a bunch of changes first." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/963?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/airrflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bcellmagic" ; + schema1:sdDatePublished "2024-08-05 10:24:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/963/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4303 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:36Z" ; + schema1:dateModified "2024-06-11T12:54:36Z" ; + schema1:description "B/T cell repertoire analysis pipeline with immcantation framework. WIP, currently requires a bunch of changes first." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/963?version=13" ; + schema1:keywords "airr, b-cell, immcantation, immunorepertoire, repseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bcellmagic" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/963?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-04-29T07:22:37.314835" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "cutandrun/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 
2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.285.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_abc_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python ABC MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/285/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8001 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-23T12:41:27Z" ; + schema1:dateModified "2023-04-14T08:43:57Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in 
Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/285?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python ABC MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_abc_md_setup/python/workflow.py" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-ligand-parameterization).\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.294.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_ligand_parameterization/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy GMX Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/294/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8541 ; + schema1:creator , + ; + schema1:dateCreated "2023-05-03T13:44:01Z" ; + schema1:dateModified "2023-05-03T13:45:13Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. 
Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-ligand-parameterization).\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/294?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy GMX Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_ligand_parameterization/galaxy/biobb_wf_ligand_parameterization.ga" ; + schema1:version 3 ; + ns1:output , + , + , 
+ , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/976?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/cutandrun" ; + schema1:sdDatePublished "2024-08-05 10:24:08 +0100" ; + schema1:url "https://workflowhub.eu/workflows/976/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13316 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:46Z" ; + schema1:dateModified "2024-06-11T12:54:46Z" ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/976?version=7" ; + schema1:keywords "cutandrun, cutandrun-seq, cutandtag, cutandtag-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/cutandrun" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/976?version=5" ; + schema1:version 5 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1025?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/taxprofiler" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/taxprofiler" ; + schema1:sdDatePublished "2024-08-05 10:23:09 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1025/ro_crate?version=7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15607 ; + schema1:creator , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:19Z" ; + schema1:dateModified "2024-06-11T12:55:19Z" ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1025?version=10" ; + schema1:keywords "Classification, illumina, long-reads, Metagenomics, microbiome, nanopore, pathogen, Profiling, shotgun, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/taxprofiler" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1025?version=7" ; + schema1:version 7 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/987?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/funcscan" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/funcscan" ; + schema1:sdDatePublished "2024-08-05 10:23:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/987/ro_crate?version=9" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16687 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-07-09T03:02:51Z" ; + schema1:dateModified "2024-07-09T03:02:51Z" ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/987?version=8" ; + schema1:keywords "amp, AMR, antibiotic-resistance, antimicrobial-peptides, antimicrobial-resistance-genes, arg, Assembly, bgc, biosynthetic-gene-clusters, contigs, function, Metagenomics, natural-products, screening, secondary-metabolites" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/funcscan" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/987?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. 
Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-ligand-parameterization).\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.294.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_ligand_parameterization/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy GMX Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:37 +0100" ; + schema1:url 
"https://workflowhub.eu/workflows/294/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9016 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-24T14:09:28Z" ; + schema1:dateModified "2022-11-22T09:50:02Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-ligand-parameterization).\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/294?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy GMX Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_ligand_parameterization/galaxy/biobb_wf_ligand_parameterization.ga" ; + schema1:version 1 ; + ns1:output , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """\r +\r +![Logo](https://cbg-ethz.github.io/V-pipe/img/logo.svg)\r +\r +[![bio.tools](https://img.shields.io/badge/bio-tools-blue.svg)](https://bio.tools/V-Pipe)\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥7.11.0-blue.svg)](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe)\r +[![Deploy Docker image](https://github.com/cbg-ethz/V-pipe/actions/workflows/deploy-docker.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe)\r +[![Tests](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml)\r +[![Mega-Linter](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml)\r +[![License: Apache-2.0](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\r +\r +V-pipe is a workflow designed for the analysis of next generation sequencing (NGS) data from viral pathogens. It produces a number of results in a curated format (e.g., consensus sequences, SNV calls, local/global haplotypes).\r +V-pipe is written using the Snakemake workflow management system.\r +\r +## Usage\r +\r +Different ways of initializing V-pipe are presented below. We strongly encourage you to deploy it [using the quick install script](#using-quick-install-script), as this is our preferred method.\r +\r +To configure V-pipe refer to the documentation present in [config/README.md](config/README.md).\r +\r +V-pipe expects the input samples to be organized in a [two-level](config/README.md#samples) directory hierarchy,\r +and the sequencing reads must be provided in a sub-folder named `raw_data`. 
Further details can be found on the [website](https://cbg-ethz.github.io/V-pipe/usage/).\r +Check the utils subdirectory for [mass-importers tools](utils/README.md#samples-mass-importers) that can assist you in generating this hierarchy.\r +\r +We provide [virus-specific base configuration files](config/README.md#virus-base-config) which contain handy defaults for, e.g., HIV and SARS-CoV-2. Set the virus in the general section of the configuration file:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +```\r +\r +Also see [snakemake's documentation](https://snakemake.readthedocs.io/en/stable/executing/cli.html) to learn more about the command-line options available when executing the workflow.\r +\r +Tutorials introducing usage of V-pipe are available in the [docs/](docs/README.md) subdirectory.\r +\r +### Using quick install script\r +\r +To deploy V-pipe, use the [installation script](utils/README.md#quick-installer) with the following parameters:\r +\r +```bash\r +curl -O 'https://raw.githubusercontent.com/cbg-ethz/V-pipe/master/utils/quick_install.sh'\r +./quick_install.sh -w work\r +```\r +\r +This script will download and install miniconda, checkout the V-pipe git repository (use `-b` to specify which branch/tag) and setup a work directory (specified with `-w`) with an executable script that will execute the workflow:\r +\r +```bash\r +cd work\r +# edit config.yaml and provide samples/ directory\r +./vpipe --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Docker\r +\r +Note: the [docker image](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe) is only setup with components to run the workflow for HIV and SARS-CoV-2 virus base configurations.\r +Using V-pipe with other viruses or configurations might require internet connectivity for additional software components.\r +\r +Create `config.yaml` or `vpipe.config` and then populate the `samples/` directory.\r +For example, the following config file could be used:\r +\r +```yaml\r 
+general:\r + virus_base_config: hiv\r +\r +output:\r + snv: true\r + local: true\r + global: false\r + visualization: true\r + QA: true\r +```\r +\r +Then execute:\r +\r +```bash\r +docker run --rm -it -v $PWD:/work ghcr.io/cbg-ethz/v-pipe:master --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Snakedeploy\r +\r +First install [mamba](https://github.com/conda-forge/miniforge#mambaforge), then create and activate an environment with Snakemake and Snakedeploy:\r +\r +```bash\r +mamba create -c bioconda -c conda-forge --name snakemake snakemake snakedeploy\r +conda activate snakemake\r +```\r +\r +Snakemake's [official workflow installer Snakedeploy](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe) can now be used:\r +\r +```bash\r +snakedeploy deploy-workflow https://github.com/cbg-ethz/V-pipe --tag master .\r +# edit config/config.yaml and provide samples/ directory\r +snakemake --use-conda --jobs 4 --printshellcmds --dry-run\r +```\r +\r +## Dependencies\r +\r +- **[Conda](https://conda.io/docs/index.html)**\r +\r + Conda is a cross-platform package management system and an environment manager application. Snakemake uses mamba as a package manager.\r +\r +- **[Snakemake](https://snakemake.readthedocs.io/)**\r +\r + Snakemake is the central workflow and dependency manager of V-pipe. It determines the order in which individual tools are invoked and checks that programs do not exit unexpectedly.\r +\r +- **[VICUNA](https://www.broadinstitute.org/viral-genomics/vicuna)**\r +\r + VICUNA is a *de novo* assembly software designed for populations with high mutation rates. It is used to build an initial reference for mapping reads with ngshmmalign aligner when a `references/cohort_consensus.fasta` file is not provided. 
Further details can be found in the [wiki](https://github.com/cbg-ethz/V-pipe/wiki/getting-started#input-files) pages.\r +\r +### Computational tools\r +\r +Other dependencies are managed by using isolated conda environments per rule, and below we list some of the computational tools integrated in V-pipe:\r +\r +- **[FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)**\r +\r + FastQC gives an overview of the raw sequencing data. Flowcells that have been overloaded or otherwise fail during sequencing can easily be determined with FastQC.\r +\r +- **[PRINSEQ](http://prinseq.sourceforge.net/)**\r +\r + Trimming and clipping of reads is performed by PRINSEQ. It is currently the most versatile raw read processor with many customization options.\r +\r +- **[ngshmmalign](https://github.com/cbg-ethz/ngshmmalign)**\r +\r + We perform the alignment of the curated NGS data using our custom ngshmmalign that takes structural variants into account. It produces multiple consensus sequences that include either majority bases or ambiguous bases.\r +\r +- **[bwa](https://github.com/lh3/bwa)**\r +\r + In order to detect specific cross-contaminations with other probes, the Burrows-Wheeler aligner is used. It quickly yields estimates for foreign genomic material in an experiment.\r + Additionally, It can be used as an alternative aligner to ngshmmalign.\r +\r +- **[MAFFT](http://mafft.cbrc.jp/alignment/software/)**\r +\r + To standardise multiple samples to the same reference genome (say HXB2 for HIV-1), the multiple sequence aligner MAFFT is employed. The multiple sequence alignment helps in determining regions of low conservation and thus makes standardisation of alignments more robust.\r +\r +- **[Samtools and bcftools](https://www.htslib.org/)**\r +\r + The Swiss Army knife of alignment postprocessing and diagnostics. 
bcftools is also used to generate consensus sequence with indels.\r +\r +- **[SmallGenomeUtilities](https://github.com/cbg-ethz/smallgenomeutilities)**\r +\r + We perform genomic liftovers to standardised reference genomes using our in-house developed python library of utilities for rewriting alignments.\r +\r +- **[ShoRAH](https://github.com/cbg-ethz/shorah)**\r +\r + ShoRAh performs SNV calling and local haplotype reconstruction by using bayesian clustering.\r +\r +- **[LoFreq](https://csb5.github.io/lofreq/)**\r +\r + LoFreq (version 2) is SNVs and indels caller from next-generation sequencing data, and can be used as an alternative engine for SNV calling.\r +\r +- **[SAVAGE](https://bitbucket.org/jbaaijens/savage) and [Haploclique](https://github.com/cbg-ethz/haploclique)**\r +\r + We use HaploClique or SAVAGE to perform global haplotype reconstruction for heterogeneous viral populations by using an overlap graph.\r +\r +## Citation\r +\r +If you use this software in your research, please cite:\r +\r +Posada-Céspedes S., Seifert D., Topolsky I., Jablonski K.P., Metzner K.J., and Beerenwinkel N. 2021.\r +"V-pipe: a computational pipeline for assessing viral genetic diversity from high-throughput sequencing data."\r +_Bioinformatics_, January. 
doi:[10.1093/bioinformatics/btab015](https://doi.org/10.1093/bioinformatics/btab015).\r +\r +## Contributions\r +\r +- [Ivan Topolsky\\* ![orcid]](https://orcid.org/0000-0002-7561-0810), [![github]](https://github.com/dryak)\r +- [Kim Philipp Jablonski ![orcid]](https://orcid.org/0000-0002-4166-4343), [![github]](https://github.com/kpj)\r +- [Lara Fuhrmann ![orcid]](https://orcid.org/0000-0001-6405-0654), [![github]](https://github.com/LaraFuhrmann)\r +- [Uwe Schmitt ![orcid]](https://orcid.org/0000-0002-4658-0616), [![github]](https://github.com/uweschmitt)\r +- [Michal Okoniewski ![orcid]](https://orcid.org/0000-0003-4722-4506), [![github]](https://github.com/michalogit)\r +- [Monica Dragan ![orcid]](https://orcid.org/0000-0002-7719-5892), [![github]](https://github.com/monicadragan)\r +- [Susana Posada Céspedes ![orcid]](https://orcid.org/0000-0002-7459-8186), [![github]](https://github.com/sposadac)\r +- [David Seifert ![orcid]](https://orcid.org/0000-0003-4739-5110), [![github]](https://github.com/SoapZA)\r +- Tobias Marschall\r +- [Niko Beerenwinkel\\*\\* ![orcid]](https://orcid.org/0000-0002-0573-6119)\r +\r +\\* software maintainer ;\r +\\** group leader\r +\r +[github]: https://cbg-ethz.github.io/V-pipe/img/mark-github.svg\r +[orcid]: https://cbg-ethz.github.io/V-pipe/img/ORCIDiD_iconvector.svg\r +\r +## Contact\r +\r +We encourage users to use the [issue tracker](https://github.com/cbg-ethz/V-pipe/issues). For further enquiries, you can also contact the V-pipe Dev Team .\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/301?version=3" ; + schema1:isBasedOn "https://github.com/cbg-ethz/V-pipe.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for V-pipe (main multi-virus version)" ; + schema1:sdDatePublished "2024-08-05 10:23:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/301/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1467 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-06T11:12:18Z" ; + schema1:dateModified "2023-01-16T13:59:09Z" ; + schema1:description """\r +\r +![Logo](https://cbg-ethz.github.io/V-pipe/img/logo.svg)\r +\r +[![bio.tools](https://img.shields.io/badge/bio-tools-blue.svg)](https://bio.tools/V-Pipe)\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥7.11.0-blue.svg)](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe)\r +[![Deploy Docker image](https://github.com/cbg-ethz/V-pipe/actions/workflows/deploy-docker.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe)\r +[![Tests](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml)\r +[![Mega-Linter](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml)\r +[![License: Apache-2.0](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\r +\r +V-pipe is a workflow designed for the analysis of next generation sequencing (NGS) data from viral pathogens. It produces a number of results in a curated format (e.g., consensus sequences, SNV calls, local/global haplotypes).\r +V-pipe is written using the Snakemake workflow management system.\r +\r +## Usage\r +\r +Different ways of initializing V-pipe are presented below. 
We strongly encourage you to deploy it [using the quick install script](#using-quick-install-script), as this is our preferred method.\r +\r +To configure V-pipe refer to the documentation present in [config/README.md](config/README.md).\r +\r +V-pipe expects the input samples to be organized in a [two-level](config/README.md#samples) directory hierarchy,\r +and the sequencing reads must be provided in a sub-folder named `raw_data`. Further details can be found on the [website](https://cbg-ethz.github.io/V-pipe/usage/).\r +Check the utils subdirectory for [mass-importers tools](utils/README.md#samples-mass-importers) that can assist you in generating this hierarchy.\r +\r +We provide [virus-specific base configuration files](config/README.md#virus-base-config) which contain handy defaults for, e.g., HIV and SARS-CoV-2. Set the virus in the general section of the configuration file:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +```\r +\r +Also see [snakemake's documentation](https://snakemake.readthedocs.io/en/stable/executing/cli.html) to learn more about the command-line options available when executing the workflow.\r +\r +Tutorials introducing usage of V-pipe are available in the [docs/](docs/README.md) subdirectory.\r +\r +### Using quick install script\r +\r +To deploy V-pipe, use the [installation script](utils/README.md#quick-installer) with the following parameters:\r +\r +```bash\r +curl -O 'https://raw.githubusercontent.com/cbg-ethz/V-pipe/master/utils/quick_install.sh'\r +./quick_install.sh -w work\r +```\r +\r +This script will download and install miniconda, checkout the V-pipe git repository (use `-b` to specify which branch/tag) and setup a work directory (specified with `-w`) with an executable script that will execute the workflow:\r +\r +```bash\r +cd work\r +# edit config.yaml and provide samples/ directory\r +./vpipe --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Docker\r +\r +Note: the [docker 
image](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe) is only setup with components to run the workflow for HIV and SARS-CoV-2 virus base configurations.\r +Using V-pipe with other viruses or configurations might require internet connectivity for additional software components.\r +\r +Create `config.yaml` or `vpipe.config` and then populate the `samples/` directory.\r +For example, the following config file could be used:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +\r +output:\r + snv: true\r + local: true\r + global: false\r + visualization: true\r + QA: true\r +```\r +\r +Then execute:\r +\r +```bash\r +docker run --rm -it -v $PWD:/work ghcr.io/cbg-ethz/v-pipe:master --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Snakedeploy\r +\r +First install [mamba](https://github.com/conda-forge/miniforge#mambaforge), then create and activate an environment with Snakemake and Snakedeploy:\r +\r +```bash\r +mamba create -c bioconda -c conda-forge --name snakemake snakemake snakedeploy\r +conda activate snakemake\r +```\r +\r +Snakemake's [official workflow installer Snakedeploy](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe) can now be used:\r +\r +```bash\r +snakedeploy deploy-workflow https://github.com/cbg-ethz/V-pipe --tag master .\r +# edit config/config.yaml and provide samples/ directory\r +snakemake --use-conda --jobs 4 --printshellcmds --dry-run\r +```\r +\r +## Dependencies\r +\r +- **[Conda](https://conda.io/docs/index.html)**\r +\r + Conda is a cross-platform package management system and an environment manager application. Snakemake uses mamba as a package manager.\r +\r +- **[Snakemake](https://snakemake.readthedocs.io/)**\r +\r + Snakemake is the central workflow and dependency manager of V-pipe. 
It determines the order in which individual tools are invoked and checks that programs do not exit unexpectedly.\r +\r +- **[VICUNA](https://www.broadinstitute.org/viral-genomics/vicuna)**\r +\r + VICUNA is a *de novo* assembly software designed for populations with high mutation rates. It is used to build an initial reference for mapping reads with ngshmmalign aligner when a `references/cohort_consensus.fasta` file is not provided. Further details can be found in the [wiki](https://github.com/cbg-ethz/V-pipe/wiki/getting-started#input-files) pages.\r +\r +### Computational tools\r +\r +Other dependencies are managed by using isolated conda environments per rule, and below we list some of the computational tools integrated in V-pipe:\r +\r +- **[FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)**\r +\r + FastQC gives an overview of the raw sequencing data. Flowcells that have been overloaded or otherwise fail during sequencing can easily be determined with FastQC.\r +\r +- **[PRINSEQ](http://prinseq.sourceforge.net/)**\r +\r + Trimming and clipping of reads is performed by PRINSEQ. It is currently the most versatile raw read processor with many customization options.\r +\r +- **[ngshmmalign](https://github.com/cbg-ethz/ngshmmalign)**\r +\r + We perform the alignment of the curated NGS data using our custom ngshmmalign that takes structural variants into account. It produces multiple consensus sequences that include either majority bases or ambiguous bases.\r +\r +- **[bwa](https://github.com/lh3/bwa)**\r +\r + In order to detect specific cross-contaminations with other probes, the Burrows-Wheeler aligner is used. 
It quickly yields estimates for foreign genomic material in an experiment.\r + Additionally, It can be used as an alternative aligner to ngshmmalign.\r +\r +- **[MAFFT](http://mafft.cbrc.jp/alignment/software/)**\r +\r + To standardise multiple samples to the same reference genome (say HXB2 for HIV-1), the multiple sequence aligner MAFFT is employed. The multiple sequence alignment helps in determining regions of low conservation and thus makes standardisation of alignments more robust.\r +\r +- **[Samtools and bcftools](https://www.htslib.org/)**\r +\r + The Swiss Army knife of alignment postprocessing and diagnostics. bcftools is also used to generate consensus sequence with indels.\r +\r +- **[SmallGenomeUtilities](https://github.com/cbg-ethz/smallgenomeutilities)**\r +\r + We perform genomic liftovers to standardised reference genomes using our in-house developed python library of utilities for rewriting alignments.\r +\r +- **[ShoRAH](https://github.com/cbg-ethz/shorah)**\r +\r + ShoRAh performs SNV calling and local haplotype reconstruction by using bayesian clustering.\r +\r +- **[LoFreq](https://csb5.github.io/lofreq/)**\r +\r + LoFreq (version 2) is SNVs and indels caller from next-generation sequencing data, and can be used as an alternative engine for SNV calling.\r +\r +- **[SAVAGE](https://bitbucket.org/jbaaijens/savage) and [Haploclique](https://github.com/cbg-ethz/haploclique)**\r +\r + We use HaploClique or SAVAGE to perform global haplotype reconstruction for heterogeneous viral populations by using an overlap graph.\r +\r +## Citation\r +\r +If you use this software in your research, please cite:\r +\r +Posada-Céspedes S., Seifert D., Topolsky I., Jablonski K.P., Metzner K.J., and Beerenwinkel N. 2021.\r +"V-pipe: a computational pipeline for assessing viral genetic diversity from high-throughput sequencing data."\r +_Bioinformatics_, January. 
doi:[10.1093/bioinformatics/btab015](https://doi.org/10.1093/bioinformatics/btab015).\r +\r +## Contributions\r +\r +- [Ivan Topolsky\\* ![orcid]](https://orcid.org/0000-0002-7561-0810), [![github]](https://github.com/dryak)\r +- [Kim Philipp Jablonski ![orcid]](https://orcid.org/0000-0002-4166-4343), [![github]](https://github.com/kpj)\r +- [Lara Fuhrmann ![orcid]](https://orcid.org/0000-0001-6405-0654), [![github]](https://github.com/LaraFuhrmann)\r +- [Uwe Schmitt ![orcid]](https://orcid.org/0000-0002-4658-0616), [![github]](https://github.com/uweschmitt)\r +- [Michal Okoniewski ![orcid]](https://orcid.org/0000-0003-4722-4506), [![github]](https://github.com/michalogit)\r +- [Monica Dragan ![orcid]](https://orcid.org/0000-0002-7719-5892), [![github]](https://github.com/monicadragan)\r +- [Susana Posada Céspedes ![orcid]](https://orcid.org/0000-0002-7459-8186), [![github]](https://github.com/sposadac)\r +- [David Seifert ![orcid]](https://orcid.org/0000-0003-4739-5110), [![github]](https://github.com/SoapZA)\r +- Tobias Marschall\r +- [Niko Beerenwinkel\\*\\* ![orcid]](https://orcid.org/0000-0002-0573-6119)\r +\r +\\* software maintainer ;\r +\\** group leader\r +\r +[github]: https://cbg-ethz.github.io/V-pipe/img/mark-github.svg\r +[orcid]: https://cbg-ethz.github.io/V-pipe/img/ORCIDiD_iconvector.svg\r +\r +## Contact\r +\r +We encourage users to use the [issue tracker](https://github.com/cbg-ethz/V-pipe/issues). For further enquiries, you can also contact the V-pipe Dev Team .\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/301?version=4" ; + schema1:keywords "Alignment, Assembly, covid-19, Genomics, INDELs, rna, SNPs, variant_calling, workflow" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "V-pipe (main multi-virus version)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/301?version=3" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/amber-protein-ligand-complex-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.298.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_complex_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:34 +0100" ; + schema1:url 
"https://workflowhub.eu/workflows/298/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 100823 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-25T11:42:23Z" ; + schema1:dateModified "2022-11-23T08:43:28Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/amber-protein-ligand-complex-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/298?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Amber Protein Ligand Complex 
MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_complex_md_setup/galaxy/biobb_wf_amber_complex_md_setup.ga" ; + schema1:version 1 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Workflow for Metagenomics from raw reads to annotated bins.\r +Steps:\r + - workflow_quality.cwl:\r + - FastQC (control)\r + - fastp (trimming)\r + - Kraken2 (Taxonomic Read Classification\r + - SPAdes (Assembly)\r + - QUAST (Assembly quality report)\r + - BBmap (Read mapping to assembly)\r + - sam_to_bam (sam to indexed bam)\r + - metabatContigDepths (jgi_summarize_bam_contig_depths)\r + - MetaBat2 (binning)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/64?version=7" ; + schema1:isBasedOn "https://gitlab.com/m-unlock/cwl/-/blob/master/cwl/workflows/workflow_metagenomics_binning.cwl" ; + schema1:license "CC0-1.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Metagenomics workflow" ; + schema1:sdDatePublished "2024-08-05 10:31:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/64/ro_crate?version=7" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 43887 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11066 ; + schema1:creator , + ; + schema1:dateCreated "2021-01-08T10:15:11Z" ; + schema1:dateModified "2021-02-12T12:29:29Z" ; + schema1:description """Workflow for Metagenomics from raw reads to annotated bins.\r +Steps:\r + - workflow_quality.cwl:\r + - FastQC (control)\r + - fastp (trimming)\r + - Kraken2 (Taxonomic Read Classification\r + - SPAdes (Assembly)\r + - QUAST (Assembly quality report)\r + - BBmap (Read mapping to assembly)\r + - sam_to_bam (sam to indexed bam)\r + - metabatContigDepths (jgi_summarize_bam_contig_depths)\r + - MetaBat2 (binning)\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/64?version=10" ; + schema1:keywords "Metagenomics, microbial, metagenome, binning" ; + schema1:license "https://spdx.org/licenses/CC0-1.0" ; + schema1:name "Metagenomics workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/64?version=7" ; + schema1:version 7 ; + ns1:input , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.132.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Amber Constant pH MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/132/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 54708 ; + schema1:creator , + ; + schema1:dateCreated "2022-09-15T12:33:15Z" ; + schema1:dateModified "2023-01-16T13:50:20Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/132?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Amber Constant pH MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://raw.githubusercontent.com/bioexcel/biobb_wf_amber_md_setup/master/biobb_wf_amber_md_setup/notebooks/mdsetup_ph/biobb_amber_CpHMD_notebook.ipynb" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """Objective. Biomarkers have become important for the prognosis and diagnosis of various diseases. High-throughput methods such as RNA-sequencing facilitate the detection of differentially expressed genes (DEGs), hence potential biomarker candidates. Individual studies suggest long lists of DEGs, hampering the identification of clinically relevant ones. Concerning preeclampsia, a major obstetric burden with high risk for adverse maternal and/or neonatal outcomes, limitations in diagnosis and prediction are still important issues. Therefore, we developed a workflow to facilitate the screening for biomarkers.\r +Methods. Based on the tool DeSeq2, we established a comprehensive workflow for the identification of DEGs, analyzing data from multiple publicly available RNA-sSequencing studies. We applied it to four RNA-sSequencing datasets (one blood, three placenta) analyzing patients with preeclampsia and normotensive controls. We compared our results with other published approaches and evaluated their performance. \r +Results. We identified 110 genes dysregulated in preeclampsia, observed in ≥3 of the analyzed studies, six even in all four studies. Among them were FLT-1, TREM-1, and FN1 which either represent established biomarkers on protein level, or promising candidates based on recent studies. In comparison, using a published meta-analysis approach we obtained 5,240 DEGs.\r +Conclusions. We present a data analysis workflow for preeclampsia biomarker screening, capable of identifying significant biomarker candidates, while drastically decreasing the numbers of candidates. Moreover, we were also able to confirm its performance for heart failure. 
Our approach can be applied to additional diseases for biomarker identification and the set of identified DEGs in preeclampsia represents a resource for further studies.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.338.1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Biomarker screening in preeclampsia" ; + schema1:sdDatePublished "2024-08-05 10:32:10 +0100" ; + schema1:url "https://workflowhub.eu/workflows/338/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 22749 ; + schema1:creator ; + schema1:dateCreated "2022-05-03T13:05:01Z" ; + schema1:dateModified "2023-02-13T14:06:45Z" ; + schema1:description """Objective. Biomarkers have become important for the prognosis and diagnosis of various diseases. High-throughput methods such as RNA-sequencing facilitate the detection of differentially expressed genes (DEGs), hence potential biomarker candidates. Individual studies suggest long lists of DEGs, hampering the identification of clinically relevant ones. Concerning preeclampsia, a major obstetric burden with high risk for adverse maternal and/or neonatal outcomes, limitations in diagnosis and prediction are still important issues. Therefore, we developed a workflow to facilitate the screening for biomarkers.\r +Methods. Based on the tool DeSeq2, we established a comprehensive workflow for the identification of DEGs, analyzing data from multiple publicly available RNA-sSequencing studies. We applied it to four RNA-sSequencing datasets (one blood, three placenta) analyzing patients with preeclampsia and normotensive controls. We compared our results with other published approaches and evaluated their performance. \r +Results. 
We identified 110 genes dysregulated in preeclampsia, observed in ≥3 of the analyzed studies, six even in all four studies. Among them were FLT-1, TREM-1, and FN1 which either represent established biomarkers on protein level, or promising candidates based on recent studies. In comparison, using a published meta-analysis approach we obtained 5,240 DEGs.\r +Conclusions. We present a data analysis workflow for preeclampsia biomarker screening, capable of identifying significant biomarker candidates, while drastically decreasing the numbers of candidates. Moreover, we were also able to confirm its performance for heart failure. Our approach can be applied to additional diseases for biomarker identification and the set of identified DEGs in preeclampsia represents a resource for further studies.\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Biomarker screening in preeclampsia" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/338?version=1" ; + schema1:version 1 ; + ns1:output , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# WRF/EMEP Linear Workflow\r +\r +Example Common Workflow Language (CWL) workflow and tool descriptors for running the \r +Weather Research and Forecase (WRF) and EMEP models.\r +\r +This workflow is designed for a single model domain. 
Example datasets for testing this \r +workflow can be downloaded from Zenodo.\r +\r +\r +## Requirements:\r +\r +* docker or singularity\r +* conda\r +* cwltool\r +* Toil - optional, useful for running on HPC or distributed computing systems\r +\r +### CWL / Toil Installation:\r +\r +The workflow runner (either cwltool, or Toil) can be installed using either conda or pip.\r +Environment files for conda are included, and can be used as shown below:\r +* cwltool only:\r + * `conda env create --file install/env_cwlrunner.yml --name cwl`\r +* Toil & cwltool:\r + * `conda env create --file install/env_toil.yml --name toil`\r +\r +### Setup for Example Workflow\r +\r +* Download the example dataset from Zenodo: https://doi.org/10.5281/zenodo.7817216\r +* Extract into the `input_files` directory:\r + * `tar -zxvf wrf_emep_UK_example_inputs.tar.gz -C input_files --strip-components=1`\r +\r +## Running the Workflow\r +\r +The full workflow is broken into several logical steps:\r +1. ERA5 download\r +2. WPS 1st step: Geogrid geography file creation\r +3. WPS process: ungribbing of ERA5 data, and running of metgrid to produce meteorology files.\r +4. WRF process: generation of WRF input files by REAL, and running of WRF model\r +5. EMEP model: running of EMEP chemistry and transport model\r +\r +Steps 1 and 3 require you to register with the CDS service, in order to download ERA5 data\r +before using in the WPS process.\r +Steps 2 and 5 require you to download extra input data - the instructions on how to do this\r +are included in the README.txt files in the relevant input data directories.\r +\r +A full workflow for all steps is provided here. But each separate step can by run on it's \r +own too, following the instructions given below. We recommend running step 4 first, to \r +explore how the REAL & WRF workflow works, before trying the other steps.\r +\r +### 1. 
ERA5 download.\r +\r +Before running the ERA5 download tool, ensure that you have reqistered for the CDS service, \r +signed the ERA5 licensing agreement, and saved the CDS API key (`.cdsapirc`) in your \r +working directory.\r +\r +To run the ERA5 download tool use the following command:\r +```\r +cwltool [--cachdir CACHE] [--singularity] workflows/era5_workflow.cwl example_workflow_configurations/era5_download_settings.yaml\r +```\r +Note that the `--cachedir CACHE` option sets the working directory cache, which enables the\r +reuse of any steps previously run (and the restarting of the workflow from this point).\r +The `--singularity` option is needed if you are using singularity instead of docker.\r +\r +### 2. WPS: Geogrid geography file creation\r +\r +Before running the geogrid tool you will need to download the geography data from the\r +[UCAR website](https://www2.mmm.ucar.edu/wrf/users/download/get_sources_wps_geog.html).\r +These should be extracted into the `input_files/geogrid_geog_input` directory.\r +\r +To run the geogrid program use the following command:\r +```\r +cwltool [--cachdir CACHE] [--singularity] workflows/geogrid_workflow.cwl example_workflow_configurations/wps_geogrid_cwl_settings.yaml\r +```\r +\r +### 3. WPS: Creation of meteorology input files\r +\r +Before running the WPS process you will have to download the ERA5 datafiles (which will be\r +called `preslev_[YYYYMMDD].grib` and `surface_[YYYYMMDD].grib`) and copy these to the directory\r +`input_files/wps_era5_input`. If you have also run geogrid in step 2 you can replace the \r +`geo_em.d01.nc` file in the `input_files/wps_geogrid_input` directory with the file that \r +geogrid created.\r +\r +To run the wps metgrid process use the following command:\r +```\r +cwltool [--cachdir CACHE] [--singularity] workflows/wps_workflow.cwl example_workflow_configurations/wps_metgrid_cwl_settings.yaml\r +```\r +\r +### 4. 
WRF: Creation of WRF input files, and running WRF model\r +\r +The WRF model can be run without any prepreparation, except for the downloading of the \r +input data from Zenodo. However, if you have created new meteorology files (`met_em*`) using\r +WPS you can replace the files in the `input_files/wrf_met_input` directory with these.\r +\r +To run the WRF process (including REAL) use the following command:\r +```\r +cwltool [--cachdir CACHE] [--singularity] workflows/wrf_workflow.cwl example_workflow_configurations/wrf_real_cwl_settings.yaml\r +``` \r +\r +### 5. EMEP: Running EMEP chemistry and transport model\r +\r +Before running the EMEP model you will need to download the EMEP input dataset. This can be\r +done using the `catalog.py` tool, following the instructions in the `input_files/emep_input/README.txt`\r +file. If you have run WRF you can also replace the `wrfout*` data files in the \r +`input_Files/emep_wrf_input` directory with those you have created.\r +\r +To run the EMEP model use the following command:\r +```\r +cwltool [--cachdir CACHE] [--singularity] workflows/emep_workflow.cwl example_workflow_configurations/emep_cwl_settings.yaml\r +```\r +\r +### Full Workflow\r +\r +Before running the full workflow make sure you have carried out the setup tasks described\r +above.\r +\r +To run the full workflow use the following command:\r +```\r +cwltool [--cachdir CACHE] [--singularity] wrf_emep_full_workflow.cwl example_workflow_configurations/wrf_emep_full_workflow_cwl_settings.yaml\r +```\r +\r +## Notes\r +\r +### WRF filenames\r +\r +In order to work with singularity, all filenames need to exclude special characters.\r +To ensure that all WRF filenames comply with this requirement, you will need to add the \r +`nocolons = .true.` option to your WPS, REAL and WRF namelists to ensure this.\r +\r +### MPI parallel processing\r +\r +The WPS processes all run in single thread mode. REAL, WRF and EMEP have been compiled with\r +MPI support. 
The default cores for each of these is 2, 9 and 9, respectively. The \r +settings file can be edited to modify these requirements.\r +\r +### Caching intermediate workflow steps\r +\r +To cache the data from individual steps you can use the `--cachedir ` optional flag.\r +\r +\r +## License and Copyright \r +\r +These workflow scripts have been developed by the [Research IT](https://research-it.manchester.ac.uk/) \r +at the [University of Manchester](https://www.manchester.ac.uk/).\r +\r +Copyright 2023 [University of Manchester, UK](https://www.manchester.ac.uk/).\r +\r +Licensed under the MIT license, see the LICENSE file for details.""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/455?version=1" ; + schema1:isBasedOn "https://github.com/UoMResearchIT/wrf_emep_cwl_linear_workflow.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for WRF / EMEP Linear Workflow" ; + schema1:sdDatePublished "2024-08-05 10:31:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/455/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 37708 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4306 ; + schema1:creator ; + schema1:dateCreated "2023-04-12T12:04:44Z" ; + schema1:dateModified "2023-04-12T12:45:16Z" ; + schema1:description """# WRF/EMEP Linear Workflow\r +\r +Example Common Workflow Language (CWL) workflow and tool descriptors for running the \r +Weather Research and Forecase (WRF) and EMEP models.\r +\r +This workflow is designed for a single model domain. 
Example datasets for testing this \r +workflow can be downloaded from Zenodo.\r +\r +\r +## Requirements:\r +\r +* docker or singularity\r +* conda\r +* cwltool\r +* Toil - optional, useful for running on HPC or distributed computing systems\r +\r +### CWL / Toil Installation:\r +\r +The workflow runner (either cwltool, or Toil) can be installed using either conda or pip.\r +Environment files for conda are included, and can be used as shown below:\r +* cwltool only:\r + * `conda env create --file install/env_cwlrunner.yml --name cwl`\r +* Toil & cwltool:\r + * `conda env create --file install/env_toil.yml --name toil`\r +\r +### Setup for Example Workflow\r +\r +* Download the example dataset from Zenodo: https://doi.org/10.5281/zenodo.7817216\r +* Extract into the `input_files` directory:\r + * `tar -zxvf wrf_emep_UK_example_inputs.tar.gz -C input_files --strip-components=1`\r +\r +## Running the Workflow\r +\r +The full workflow is broken into several logical steps:\r +1. ERA5 download\r +2. WPS 1st step: Geogrid geography file creation\r +3. WPS process: ungribbing of ERA5 data, and running of metgrid to produce meteorology files.\r +4. WRF process: generation of WRF input files by REAL, and running of WRF model\r +5. EMEP model: running of EMEP chemistry and transport model\r +\r +Steps 1 and 3 require you to register with the CDS service, in order to download ERA5 data\r +before using in the WPS process.\r +Steps 2 and 5 require you to download extra input data - the instructions on how to do this\r +are included in the README.txt files in the relevant input data directories.\r +\r +A full workflow for all steps is provided here. But each separate step can by run on it's \r +own too, following the instructions given below. We recommend running step 4 first, to \r +explore how the REAL & WRF workflow works, before trying the other steps.\r +\r +### 1. 
ERA5 download.\r +\r +Before running the ERA5 download tool, ensure that you have reqistered for the CDS service, \r +signed the ERA5 licensing agreement, and saved the CDS API key (`.cdsapirc`) in your \r +working directory.\r +\r +To run the ERA5 download tool use the following command:\r +```\r +cwltool [--cachdir CACHE] [--singularity] workflows/era5_workflow.cwl example_workflow_configurations/era5_download_settings.yaml\r +```\r +Note that the `--cachedir CACHE` option sets the working directory cache, which enables the\r +reuse of any steps previously run (and the restarting of the workflow from this point).\r +The `--singularity` option is needed if you are using singularity instead of docker.\r +\r +### 2. WPS: Geogrid geography file creation\r +\r +Before running the geogrid tool you will need to download the geography data from the\r +[UCAR website](https://www2.mmm.ucar.edu/wrf/users/download/get_sources_wps_geog.html).\r +These should be extracted into the `input_files/geogrid_geog_input` directory.\r +\r +To run the geogrid program use the following command:\r +```\r +cwltool [--cachdir CACHE] [--singularity] workflows/geogrid_workflow.cwl example_workflow_configurations/wps_geogrid_cwl_settings.yaml\r +```\r +\r +### 3. WPS: Creation of meteorology input files\r +\r +Before running the WPS process you will have to download the ERA5 datafiles (which will be\r +called `preslev_[YYYYMMDD].grib` and `surface_[YYYYMMDD].grib`) and copy these to the directory\r +`input_files/wps_era5_input`. If you have also run geogrid in step 2 you can replace the \r +`geo_em.d01.nc` file in the `input_files/wps_geogrid_input` directory with the file that \r +geogrid created.\r +\r +To run the wps metgrid process use the following command:\r +```\r +cwltool [--cachdir CACHE] [--singularity] workflows/wps_workflow.cwl example_workflow_configurations/wps_metgrid_cwl_settings.yaml\r +```\r +\r +### 4. 
WRF: Creation of WRF input files, and running WRF model\r +\r +The WRF model can be run without any prepreparation, except for the downloading of the \r +input data from Zenodo. However, if you have created new meteorology files (`met_em*`) using\r +WPS you can replace the files in the `input_files/wrf_met_input` directory with these.\r +\r +To run the WRF process (including REAL) use the following command:\r +```\r +cwltool [--cachdir CACHE] [--singularity] workflows/wrf_workflow.cwl example_workflow_configurations/wrf_real_cwl_settings.yaml\r +``` \r +\r +### 5. EMEP: Running EMEP chemistry and transport model\r +\r +Before running the EMEP model you will need to download the EMEP input dataset. This can be\r +done using the `catalog.py` tool, following the instructions in the `input_files/emep_input/README.txt`\r +file. If you have run WRF you can also replace the `wrfout*` data files in the \r +`input_Files/emep_wrf_input` directory with those you have created.\r +\r +To run the EMEP model use the following command:\r +```\r +cwltool [--cachdir CACHE] [--singularity] workflows/emep_workflow.cwl example_workflow_configurations/emep_cwl_settings.yaml\r +```\r +\r +### Full Workflow\r +\r +Before running the full workflow make sure you have carried out the setup tasks described\r +above.\r +\r +To run the full workflow use the following command:\r +```\r +cwltool [--cachdir CACHE] [--singularity] wrf_emep_full_workflow.cwl example_workflow_configurations/wrf_emep_full_workflow_cwl_settings.yaml\r +```\r +\r +## Notes\r +\r +### WRF filenames\r +\r +In order to work with singularity, all filenames need to exclude special characters.\r +To ensure that all WRF filenames comply with this requirement, you will need to add the \r +`nocolons = .true.` option to your WPS, REAL and WRF namelists to ensure this.\r +\r +### MPI parallel processing\r +\r +The WPS processes all run in single thread mode. REAL, WRF and EMEP have been compiled with\r +MPI support. 
The default cores for each of these is 2, 9 and 9, respectively. The \r +settings file can be edited to modify these requirements.\r +\r +### Caching intermediate workflow steps\r +\r +To cache the data from individual steps you can use the `--cachedir ` optional flag.\r +\r +\r +## License and Copyright \r +\r +These workflow scripts have been developed by the [Research IT](https://research-it.manchester.ac.uk/) \r +at the [University of Manchester](https://www.manchester.ac.uk/).\r +\r +Copyright 2023 [University of Manchester, UK](https://www.manchester.ac.uk/).\r +\r +Licensed under the MIT license, see the LICENSE file for details.""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "WRF / EMEP Linear Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/455?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + . + + a schema1:Dataset ; + schema1:datePublished "2024-06-24T13:46:01.976755" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/amr_gene_detection" ; + schema1:license "GPL-3.0-or-later" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "amr_gene_detection/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:MediaObject ; + schema1:about ; + schema1:author , + ; + schema1:encodingFormat "text/markdown" . 
+ + a schema1:Dataset ; + schema1:creator , + , + , + , + , + , + ; + schema1:datePublished "2023-04-13" ; + schema1:description "Study protocol" ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.7551181" ; + schema1:name "BY-COVID - WP5 - Baseline Use Case: SARS-CoV-2 vaccine effectiveness assessment - Study protocol" ; + schema1:url "https://zenodo.org/record/7825979" ; + schema1:version "1.0.3" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# prepareChIPs\r +\r +This is a simple `snakemake` workflow template for preparing **single-end** ChIP-Seq data.\r +The steps implemented are:\r +\r +1. Download raw fastq files from SRA\r +2. Trim and Filter raw fastq files using `AdapterRemoval`\r +3. Align to the supplied genome using `bowtie2`\r +4. Deduplicate Alignments using `Picard MarkDuplicates`\r +5. Call Macs2 Peaks using `macs2`\r +\r +A pdf of the rulegraph is available [here](workflow/rules/rulegraph.pdf)\r +\r +Full details for each step are given below.\r +Any additional parameters for tools can be specified using `config/config.yml`, along with many of the requisite paths\r +\r +To run the workflow with default settings, simply run as follows (after editing `config/samples.tsv`)\r +\r +```bash\r +snakemake --use-conda --cores 16\r +```\r +\r +If running on an HPC cluster, a snakemake profile will required for submission to the queueing system and appropriate resource allocation.\r +Please discuss this will your HPC support team.\r +Nodes may also have restricted internet access and rules which download files may not work on many HPCs.\r +Please see below or discuss this with your support team\r +\r +Whilst no snakemake wrappers are explicitly used in this workflow, the underlying scripts are utilised where possible to minimise any issues with HPC clusters with restrictions on internet access.\r +These scripts are based on 
`v1.31.1` of the snakemake wrappers\r +\r +### Important Note Regarding OSX Systems\r +\r +It should be noted that this workflow is **currently incompatible with OSX-based systems**. \r +There are two unsolved issues\r +\r +1. `fasterq-dump` has a bug which is specific to conda environments. This has been updated in v3.0.3 but this patch has not yet been made available to conda environments for OSX. Please check [here](https://anaconda.org/bioconda/sra-tools) to see if this has been updated.\r +2. The following error appears in some OSX-based R sessions, in a system-dependent manner:\r +```\r +Error in grid.Call(C_textBounds, as.graphicsAnnot(x$label), x$x, x$y, : \r + polygon edge not found\r +```\r +\r +The fix for this bug is currently unknown\r +\r +## Download Raw Data\r +\r +### Outline\r +\r +The file `samples.tsv` is used to specify all steps for this workflow.\r +This file must contain the columns: `accession`, `target`, `treatment` and `input`\r +\r +1. `accession` must be an SRA accession. Only single-end data is currently supported by this workflow\r +2. `target` defines the ChIP target. All files common to a target and treatment will be used to generate summarised coverage in bigWig Files\r +3. `treatment` defines the treatment group each file belongs to. If only one treatment exists, simply use the value 'control' or similar for every file\r +4. `input` should contain the accession for the relevant input sample. These will only be downloaded once. 
Valid input samples are *required* for this workflow\r +\r +As some HPCs restrict internet access for submitted jobs, *it may be prudent to run the initial rules in an interactive session* if at all possible.\r +This can be performed using the following (with 2 cores provided as an example)\r +\r +```bash\r +snakemake --use-conda --until get_fastq --cores 2\r +```\r +\r +### Outputs\r +\r +- Downloaded files will be gzipped and written to `data/fastq/raw`.\r +- `FastQC` and `MultiQC` will also be run, with output in `docs/qc/raw`\r +\r +Both of these directories are able to be specified as relative paths in `config.yml`\r +\r +## Read Filtering\r +\r +### Outline\r +\r +Read trimming is performed using [AdapterRemoval](https://adapterremoval.readthedocs.io/en/stable/).\r +Default settings are customisable using config.yml, with the defaults set to discard reads shorter than 50nt, and to trim using quality scores with a threshold of Q30.\r +\r +### Outputs\r +\r +- Trimmed fastq.gz files will be written to `data/fastq/trimmed`\r +- `FastQC` and `MultiQC` will also be run, with output in `docs/qc/trimmed`\r +- AdapterRemoval 'settings' files will be written to `output/adapterremoval`\r +\r +## Alignments\r +\r +### Outline\r +\r +Alignment is performed using [`bowtie2`](https://bowtie-bio.sourceforge.net/bowtie2/manual.shtml) and it is assumed that this index is available before running this workflow.\r +The path and prefix must be provided using config.yml\r +\r +This index will also be used to produce the file `chrom.sizes` which is essential for conversion of bedGraph files to the more efficient bigWig files.\r +\r +### Outputs\r +\r +- Alignments will be written to `data/aligned`\r +- `bowtie2` log files will be written to `output/bowtie2` (not the conenvtional log directory)\r +- The file `chrom.sizes` will be written to `output/annotations`\r +\r +Both sorted and the original unsorted alignments will be returned.\r +However, the unsorted alignments are marked 
with `temp()` and can be deleted using \r +\r +```bash\r +snakemake --delete-temp-output --cores 1\r +```\r +\r +## Deduplication\r +\r +### Outline\r +\r +Deduplication is performed using [MarkDuplicates](https://gatk.broadinstitute.org/hc/en-us/articles/360037052812-MarkDuplicates-Picard-) from the Picard set of tools.\r +By default, deduplication will remove the duplicates from the set of alignments.\r +All resultant bam files will be sorted and indexed.\r +\r +### Outputs\r +\r +- Deduplicated alignments are written to `data/deduplicated` and are indexed\r +- DuplicationMetrics files are written to `output/markDuplicates`\r +\r +## Peak Calling\r +\r +### Outline\r +\r +This is performed using [`macs2 callpeak`](https://pypi.org/project/MACS2/).\r +\r +- Peak calling will be performed on:\r + a. each sample individually, and \r + b. merged samples for those sharing a common ChIP target and treatment group.\r +- Coverage bigWig files for each individual sample are produced using CPM values (i.e. 
Signal Per Million Reads, SPMR)\r +- For all combinations of target and treatment coverage bigWig files are also produced, along with fold-enrichment bigWig files\r +\r +### Outputs\r +\r +- Individual outputs are written to `output/macs2/{accession}`\r + + Peaks are written in `narrowPeak` format along with `summits.bed`\r + + bedGraph files are automatically converted to bigWig files, and the originals are marked with `temp()` for subsequent deletion\r + + callpeak log files are also added to this directory\r +- Merged outputs are written to `output/macs2/{target}/`\r + + bedGraph Files are also converted to bigWig and marked with `temp()`\r + + Fold-Enrichment bigWig files are also created with the original bedGraph files marked with `temp()`\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.528.1" ; + schema1:isBasedOn "https://github.com/smped/prepareChIPs.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for prepareChIPs:" ; + schema1:sdDatePublished "2024-08-05 10:29:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/528/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3868 ; + schema1:creator ; + schema1:dateCreated "2023-07-09T08:54:36Z" ; + schema1:dateModified "2023-07-09T09:01:09Z" ; + schema1:description """# prepareChIPs\r +\r +This is a simple `snakemake` workflow template for preparing **single-end** ChIP-Seq data.\r +The steps implemented are:\r +\r +1. Download raw fastq files from SRA\r +2. Trim and Filter raw fastq files using `AdapterRemoval`\r +3. Align to the supplied genome using `bowtie2`\r +4. Deduplicate Alignments using `Picard MarkDuplicates`\r +5. 
Call Macs2 Peaks using `macs2`\r +\r +A pdf of the rulegraph is available [here](workflow/rules/rulegraph.pdf)\r +\r +Full details for each step are given below.\r +Any additional parameters for tools can be specified using `config/config.yml`, along with many of the requisite paths\r +\r +To run the workflow with default settings, simply run as follows (after editing `config/samples.tsv`)\r +\r +```bash\r +snakemake --use-conda --cores 16\r +```\r +\r +If running on an HPC cluster, a snakemake profile will be required for submission to the queueing system and appropriate resource allocation.\r +Please discuss this with your HPC support team.\r +Nodes may also have restricted internet access and rules which download files may not work on many HPCs.\r +Please see below or discuss this with your support team\r +\r +Whilst no snakemake wrappers are explicitly used in this workflow, the underlying scripts are utilised where possible to minimise any issues with HPC clusters with restrictions on internet access.\r +These scripts are based on `v1.31.1` of the snakemake wrappers\r +\r +### Important Note Regarding OSX Systems\r +\r +It should be noted that this workflow is **currently incompatible with OSX-based systems**. \r +There are two unsolved issues\r +\r +1. `fasterq-dump` has a bug which is specific to conda environments. This has been updated in v3.0.3 but this patch has not yet been made available to conda environments for OSX. Please check [here](https://anaconda.org/bioconda/sra-tools) to see if this has been updated.\r +2. 
The following error appears in some OSX-based R sessions, in a system-dependent manner:\r +```\r +Error in grid.Call(C_textBounds, as.graphicsAnnot(x$label), x$x, x$y, : \r + polygon edge not found\r +```\r +\r +The fix for this bug is currently unknown\r +\r +## Download Raw Data\r +\r +### Outline\r +\r +The file `samples.tsv` is used to specify all steps for this workflow.\r +This file must contain the columns: `accession`, `target`, `treatment` and `input`\r +\r +1. `accession` must be an SRA accession. Only single-end data is currently supported by this workflow\r +2. `target` defines the ChIP target. All files common to a target and treatment will be used to generate summarised coverage in bigWig Files\r +3. `treatment` defines the treatment group each file belongs to. If only one treatment exists, simply use the value 'control' or similar for every file\r +4. `input` should contain the accession for the relevant input sample. These will only be downloaded once. Valid input samples are *required* for this workflow\r +\r +As some HPCs restrict internet access for submitted jobs, *it may be prudent to run the initial rules in an interactive session* if at all possible.\r +This can be performed using the following (with 2 cores provided as an example)\r +\r +```bash\r +snakemake --use-conda --until get_fastq --cores 2\r +```\r +\r +### Outputs\r +\r +- Downloaded files will be gzipped and written to `data/fastq/raw`.\r +- `FastQC` and `MultiQC` will also be run, with output in `docs/qc/raw`\r +\r +Both of these directories are able to be specified as relative paths in `config.yml`\r +\r +## Read Filtering\r +\r +### Outline\r +\r +Read trimming is performed using [AdapterRemoval](https://adapterremoval.readthedocs.io/en/stable/).\r +Default settings are customisable using config.yml, with the defaults set to discard reads shorter than 50nt, and to trim using quality scores with a threshold of Q30.\r +\r +### Outputs\r +\r +- Trimmed fastq.gz files will be 
written to `data/fastq/trimmed`\r +- `FastQC` and `MultiQC` will also be run, with output in `docs/qc/trimmed`\r +- AdapterRemoval 'settings' files will be written to `output/adapterremoval`\r +\r +## Alignments\r +\r +### Outline\r +\r +Alignment is performed using [`bowtie2`](https://bowtie-bio.sourceforge.net/bowtie2/manual.shtml) and it is assumed that this index is available before running this workflow.\r +The path and prefix must be provided using config.yml\r +\r +This index will also be used to produce the file `chrom.sizes` which is essential for conversion of bedGraph files to the more efficient bigWig files.\r +\r +### Outputs\r +\r +- Alignments will be written to `data/aligned`\r +- `bowtie2` log files will be written to `output/bowtie2` (not the conventional log directory)\r +- The file `chrom.sizes` will be written to `output/annotations`\r +\r +Both sorted and the original unsorted alignments will be returned.\r +However, the unsorted alignments are marked with `temp()` and can be deleted using \r +\r +```bash\r +snakemake --delete-temp-output --cores 1\r +```\r +\r +## Deduplication\r +\r +### Outline\r +\r +Deduplication is performed using [MarkDuplicates](https://gatk.broadinstitute.org/hc/en-us/articles/360037052812-MarkDuplicates-Picard-) from the Picard set of tools.\r +By default, deduplication will remove the duplicates from the set of alignments.\r +All resultant bam files will be sorted and indexed.\r +\r +### Outputs\r +\r +- Deduplicated alignments are written to `data/deduplicated` and are indexed\r +- DuplicationMetrics files are written to `output/markDuplicates`\r +\r +## Peak Calling\r +\r +### Outline\r +\r +This is performed using [`macs2 callpeak`](https://pypi.org/project/MACS2/).\r +\r +- Peak calling will be performed on:\r + a. each sample individually, and \r + b. 
merged samples for those sharing a common ChIP target and treatment group.\r +- Coverage bigWig files for each individual sample are produced using CPM values (i.e. Signal Per Million Reads, SPMR)\r +- For all combinations of target and treatment coverage bigWig files are also produced, along with fold-enrichment bigWig files\r +\r +### Outputs\r +\r +- Individual outputs are written to `output/macs2/{accession}`\r + + Peaks are written in `narrowPeak` format along with `summits.bed`\r + + bedGraph files are automatically converted to bigWig files, and the originals are marked with `temp()` for subsequent deletion\r + + callpeak log files are also added to this directory\r +- Merged outputs are written to `output/macs2/{target}/`\r + + bedGraph Files are also converted to bigWig and marked with `temp()`\r + + Fold-Enrichment bigWig files are also created with the original bedGraph files marked with `temp()`\r +""" ; + schema1:keywords "Bioinformatics, Genomics, Transcriptomics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "prepareChIPs:" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/528?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Take an anndata file, and perform basic QC with scanpy. Produces a filtered AnnData object." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/467?version=3" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq_CellQC" ; + schema1:sdDatePublished "2024-08-05 10:24:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/467/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 21389 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-06-22T06:28:47Z" ; + schema1:dateModified "2023-11-09T03:49:00Z" ; + schema1:description "Take an anndata file, and perform basic QC with scanpy. Produces a filtered AnnData object." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/467?version=3" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "scRNAseq_CellQC" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/467?version=3" ; + schema1:version 3 ; + ns1:input , + , + , + , + ; + ns1:output , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Simple bacterial assembly and annotation" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/966?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/bacass" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bacass" ; + schema1:sdDatePublished "2024-08-05 10:24:18 +0100" ; + schema1:url "https://workflowhub.eu/workflows/966/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11764 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:43Z" ; + schema1:dateModified "2024-06-11T12:54:43Z" ; + schema1:description "Simple bacterial assembly and annotation" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/966?version=7" ; + schema1:keywords "Assembly, bacterial-genomes, denovo, denovo-assembly, genome-assembly, hybrid-assembly, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bacass" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/966?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + schema1:datePublished "2023-10-30T19:22:11.103969" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/kmer-profiling-hifi-VGP1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "kmer-profiling-hifi-VGP1/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "kmer-profiling-hifi-VGP1/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/kmer-profiling-hifi-VGP1" ; + schema1:version "0.1.1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 5647 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-03-06T11:16:42.537263" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:description """This is the nested workflow of the "parent" nanopore workflow without the guppy basecalling step\r +\r +Workflow for sequencing with ONT nanopore, from basecalling to assembly quality.\r +Steps:\r + - Kraken2 (taxonomic classification of FASTQ reads)\r + - Krona (classification visualization)\r + - Flye (de novo assembly)\r + - Medaka (assembly polishing)\r + - QUAST (assembly quality reports)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/254?version=1" ; + schema1:isBasedOn "https://git.wur.nl/unlock/cwl/-/blob/master/cwl/workflows/workflow_nanopore_assembly.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nanopore workflow with direct FASTQ reads inputs (without Guppy)" ; + schema1:sdDatePublished "2024-08-05 10:31:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/254/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 27556 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8124 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-01-06T07:38:04Z" ; + schema1:dateModified "2022-01-07T09:30:01Z" ; + schema1:description """This is the nested workflow of the "parent" nanopore workflow without the guppy basecalling step\r +\r +Workflow for sequencing with ONT nanopore, from basecalling to assembly quality.\r +Steps:\r + - Kraken2 (taxonomic classification of FASTQ reads)\r + - Krona (classification visualization)\r + - Flye (de novo assembly)\r + - Medaka (assembly polishing)\r + - QUAST (assembly quality reports)\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/254?version=2" ; + schema1:keywords "nanopore, Genomics, Metagenomics" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "nanopore workflow with direct FASTQ reads inputs (without Guppy)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/254?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + ; + ns1:output , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "The simplest workflow among a collection of workflows intended to solve tasks up to CTF estimation." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/598?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for SCIPION: acquire -> motionCorr -> ctf -> report" ; + schema1:sdDatePublished "2024-08-05 10:22:30 +0100" ; + schema1:url "https://workflowhub.eu/workflows/598/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 34990 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4994 ; + schema1:dateCreated "2023-10-04T10:58:43Z" ; + schema1:dateModified "2024-07-10T12:30:33Z" ; + schema1:description "The simplest workflow among a collection of workflows intended to solve tasks up to CTF estimation." ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "scipion, cryoem, spa, image processing, TalosArctica, TitanKrios, Glacios" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "SCIPION: acquire -> motionCorr -> ctf -> report" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/598?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Metabolome Annotation Workflow (MAW) takes liquid chromatography tandem mass spectrometry data (LC-MS2) as .mzML format data files. It performs spectral database dereplication using R Package Spectra and compound database dereplication using MetFrag/SIRIUS. Final candidate selection is executed in Python using RDKit and PubChemPy. The classification of the tentative candidates from the input data are classified using ChemONT chemical ontology.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.510.1" ; + schema1:isBasedOn "https://github.com/zmahnoor14/MAW" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Metabolome Annotation Workflow (MAW)" ; + schema1:sdDatePublished "2024-08-05 10:29:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/510/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1569 ; + schema1:creator , + , + , + , + , + ; + schema1:dateCreated "2023-06-19T20:09:24Z" ; + schema1:dateModified "2023-06-19T20:10:21Z" ; + schema1:description """Metabolome Annotation Workflow (MAW) takes liquid chromatography tandem mass spectrometry data (LC-MS2) as .mzML format data files. It performs spectral database dereplication using R Package Spectra and compound database dereplication using MetFrag/SIRIUS. Final candidate selection is executed in Python using RDKit and PubChemPy. The classification of the tentative candidates from the input data are classified using ChemONT chemical ontology.\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/510?version=1" ; + schema1:keywords "Metabolomics, Annotation, mass-spectrometry, identification, Bioinformatics, FAIR workflows, workflow, gnps, massbank, hmdb, spectra, rdkit, Cheminformatics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Metabolome Annotation Workflow (MAW)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/510?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 12178 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# workflow-ref-guided-stacks\r +\r +These workflows are part of a set designed to work for RAD-seq data on the Galaxy platform, using the tools from the Stacks program. \r +\r +Galaxy Australia: https://usegalaxy.org.au/\r +\r +Stacks: http://catchenlab.life.illinois.edu/stacks/\r +\r +## Inputs\r +* demultiplexed reads in fastq format, may be output from the QC workflow. Files are in a collection. 
\r +* population map in text format\r +* reference genome in fasta format\r +\r +## Steps and outputs\r +\r +BWA MEM 2:\r +* The reads are mapped to the reference genome; output in BAM format\r +* The collection of bam files is named something like Map with BWA-MEM on collection 5 (mapped reads in BAM format)\r +* Each of the bam files in the collection is named something like sample_CAAC\r +\r +Samtools stats before filtering:\r +* These bam files are sent to Samtools stats to get statistics; these are then sent to MultiQC to provide a nice output. This is tagged as "bam stats before filtering" in the Galaxy history. \r +* The "General Statistics" show how many reads were mapped - if there is a low mapping rate, it may be worth re-checking or repeating QC on the raw reads, or considering a different reference genome, or using a de novo approach. To see if many reads have been soft-clipped by Bwa mem (which may affect how well gstacks can work), look at the "Alignment Metrics" section, and the row with "Mapped bases (Cigar)". Hover over the dots to see sample names especially towards the left of the row - these have the least mapped reads.\r +\r +Samtools view:\r +* This step filters out certain reads from the bam files. The default settings are to exclude reads if they are unmapped, if the alignment is not primary or is supplementary, if the read fails platform/vendor quality checks, and if the read is a PCR or optical duplicate. \r +* The output bams are tagged with "filtered bams" in the Galaxy history.\r +\r +Samtools stats after filtering:\r +* Filtered bams are sent again to samtools stats, and statistics to MultiQC, with the report tagged as "bam stats after filtering" in the Galaxy history. \r +\r +gstacks:\r +* Filtered bams and a population map are sent to gstacks. The outputs are:\r +* Catalog of loci in fasta format\r +* Variant calls in VCF format\r +* Note: some bam files cause errors here with gstacks. 
For example, the log file may say "Error, all records discard with file SampleXYZ.FASTQ.bam, Aborted". If this occurs, check the bam stats (as described above). Some of the options are to re-do QC on the raw reads, change settings for mapping reads in BWA MEM, and/or delete this sample/s from the population map and proceed to gstacks. \r +The sample can still remain in the list of bam files but gstacks will only consider what is listed in the pop map. \r +\r +populations:\r +* gstacks outputs and a population map are sent to the "populations" module. The outputs are:\r +* Locus consensus sequences in fasta format\r +* Snp calls, in VCF format\r +* Haplotypes, in VCF format\r +* Summary statistics\r +\r +![qc-wf](wf-ref-guided.png)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/347?version=1" ; + schema1:isBasedOn "https://github.com/AnnaSyme/workflow-ref-guided-stacks.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Stacks RAD-seq reference-guided workflow" ; + schema1:sdDatePublished "2024-08-05 10:32:08 +0100" ; + schema1:url "https://workflowhub.eu/workflows/347/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 33550 ; + schema1:creator ; + schema1:dateCreated "2022-05-31T07:20:38Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """# workflow-ref-guided-stacks\r +\r +These workflows are part of a set designed to work for RAD-seq data on the Galaxy platform, using the tools from the Stacks program. \r +\r +Galaxy Australia: https://usegalaxy.org.au/\r +\r +Stacks: http://catchenlab.life.illinois.edu/stacks/\r +\r +## Inputs\r +* demultiplexed reads in fastq format, may be output from the QC workflow. Files are in a collection. 
\r +* population map in text format\r +* reference genome in fasta format\r +\r +## Steps and outputs\r +\r +BWA MEM 2:\r +* The reads are mapped to the reference genome; output in BAM format\r +* The collection of bam files is named something like Map with BWA-MEM on collection 5 (mapped reads in BAM format)\r +* Each of the bam files in the collection is named something like sample_CAAC\r +\r +Samtools stats before filtering:\r +* These bam files are sent to Samtools stats to get statistics; these are then sent to MultiQC to provide a nice output. This is tagged as "bam stats before filtering" in the Galaxy history. \r +* The "General Statistics" show how many reads were mapped - if there is a low mapping rate, it may be worth re-checking or repeating QC on the raw reads, or considering a different reference genome, or using a de novo approach. To see if many reads have been soft-clipped by Bwa mem (which may affect how well gstacks can work), look at the "Alignment Metrics" section, and the row with "Mapped bases (Cigar)". Hover over the dots to see sample names especially towards the left of the row - these have the least mapped reads.\r +\r +Samtools view:\r +* This step filters out certain reads from the bam files. The default settings are to exclude reads if they are unmapped, if the alignment is not primary or is supplementary, if the read fails platform/vendor quality checks, and if the read is a PCR or optical duplicate. \r +* The output bams are tagged with "filtered bams" in the Galaxy history.\r +\r +Samtools stats after filtering:\r +* Filtered bams are sent again to samtools stats, and statistics to MultiQC, with the report tagged as "bam stats after filtering" in the Galaxy history. \r +\r +gstacks:\r +* Filtered bams and a population map are sent to gstacks. The outputs are:\r +* Catalog of loci in fasta format\r +* Variant calls in VCF format\r +* Note: some bam files cause errors here with gstacks. 
For example, the log file may say "Error, all records discard with file SampleXYZ.FASTQ.bam, Aborted". If this occurs, check the bam stats (as described above). Some of the options are to re-do QC on the raw reads, change settings for mapping reads in BWA MEM, and/or delete this sample/s from the population map and proceed to gstacks. \r +The sample can still remain in the list of bam files but gstacks will only consider what is listed in the pop map. \r +\r +populations:\r +* gstacks outputs and a population map are sent to the "populations" module. The outputs are:\r +* Locus consensus sequences in fasta format\r +* Snp calls, in VCF format\r +* Haplotypes, in VCF format\r +* Summary statistics\r +\r +![qc-wf](wf-ref-guided.png)\r +""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Stacks RAD-seq reference-guided workflow" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/347?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 317842 . + + a schema1:Dataset ; + schema1:datePublished "2024-06-21T20:31:16.897292" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/openms-metaprosip" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "openms-metaprosip/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. 
\r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +For this workflow:\r +\r +Inputs:\r +* assembled-genome.fasta\r +* hard-repeat-masked-genome.fasta\r +* If using the mRNAs option, the additional inputs required are .cdna, .pro and .dat files. \r +\r +What it does:\r +* This workflow splits the input genomes into single sequences (to decrease computation time), annotates using FgenesH++, and merges the output. \r +\r +Outputs:\r +* genome annotation in gff3 format\r +* fasta files of mRNAs, cDNAs and proteins\r +* Busco report\r +\r +\r +\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.881.2" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Fgenesh annotation -TSI" ; + schema1:sdDatePublished "2024-08-05 10:22:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/881/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17572 ; + schema1:creator ; + schema1:dateCreated "2024-06-18T08:56:16Z" ; + schema1:dateModified "2024-06-18T09:02:37Z" ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. 
\r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +For this workflow:\r +\r +Inputs:\r +* assembled-genome.fasta\r +* hard-repeat-masked-genome.fasta\r +* If using the mRNAs option, the additional inputs required are .cdna, .pro and .dat files. \r +\r +What it does:\r +* This workflow splits the input genomes into single sequences (to decrease computation time), annotates using FgenesH++, and merges the output. \r +\r +Outputs:\r +* genome annotation in gff3 format\r +* fasta files of mRNAs, cDNAs and proteins\r +* Busco report\r +\r +\r +\r +\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/881?version=3" ; + schema1:isPartOf ; + schema1:keywords "TSI-annotation" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Fgenesh annotation -TSI" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/881?version=2" ; + schema1:version 2 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 1040040 . + + a schema1:Dataset ; + schema1:datePublished "2023-10-18T08:01:03.789178" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/openms-metaprosip" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "openms-metaprosip/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.12" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """\r +

+nf-core/vipr +

\r +

Build Status Nextflow Gitter

\r +

install with bioconda Docker Container available https://www.singularity-hub.org/static/img/hosted-singularity--hub-%23e32929.svg

\r +

nf-core/vipr is a bioinformatics best-practice analysis pipeline for assembly and intrahost / low-frequency variant calling for viral samples.

\r +

The pipeline is built using Nextflow, a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with docker / singularity containers making installation trivial and results highly reproducible.

\r +

+Pipeline Steps

\r +\r +\r +\r +Step\r +Main program/s\r +\r +\r +\r +\r +Trimming, combining of read-pairs per sample and QC\r +Skewer, FastQC\r +\r +\r +Decontamination\r +decont\r +\r +\r +Metagenomics classification / Sample purity\r +Kraken\r +\r +\r +Assembly to contigs\r +BBtools’ Tadpole\r +\r +\r +Assembly polishing\r +ViPR Tools\r +\r +\r +Mapping to assembly\r +BWA, LoFreq\r +\r +\r +Low frequency variant calling\r +LoFreq\r +\r +\r +Coverage and variant AF plots (two processes)\r +Bedtools, ViPR Tools\r +\r +\r +\r +

+Documentation

\r +

Documentation about the pipeline can be found in the docs/ directory:

\r +
    \r +
  1. Installation and configuration
  2. \r +
  3. Running the pipeline
  4. \r +
  5. Output and how to interpret the results
  6. \r +
\r +

+Credits

\r +

This pipeline was originally developed by Andreas Wilm (andreas-wilm) at Genome Institute of Singapore.
\r +It started out as an ecosystem around LoFreq and went through a couple of iterations.
\r +The current version had three predecessors ViPR 1, ViPR 2 and ViPR 3.

\r +

An incomplete list of publications using (previous versions of) ViPR:

\r +\r +

Plenty of people provided essential feedback, including:

\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/20?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/vipr/blob/master/main.nf" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/vipr" ; + schema1:sdDatePublished "2024-08-05 10:33:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/20/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14256 ; + schema1:creator ; + schema1:dateCreated "2020-05-14T14:42:23Z" ; + schema1:dateModified "2023-01-16T13:41:25Z" ; + schema1:description """\r +

+nf-core/vipr +

\r +

Build Status Nextflow Gitter

\r +

install with bioconda Docker Container available https://www.singularity-hub.org/static/img/hosted-singularity--hub-%23e32929.svg

\r +

nf-core/vipr is a bioinformatics best-practice analysis pipeline for assembly and intrahost / low-frequency variant calling for viral samples.

\r +

The pipeline is built using Nextflow, a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with docker / singularity containers making installation trivial and results highly reproducible.

\r +

+Pipeline Steps

\r +\r +\r +\r +Step\r +Main program/s\r +\r +\r +\r +\r +Trimming, combining of read-pairs per sample and QC\r +Skewer, FastQC\r +\r +\r +Decontamination\r +decont\r +\r +\r +Metagenomics classification / Sample purity\r +Kraken\r +\r +\r +Assembly to contigs\r +BBtools’ Tadpole\r +\r +\r +Assembly polishing\r +ViPR Tools\r +\r +\r +Mapping to assembly\r +BWA, LoFreq\r +\r +\r +Low frequency variant calling\r +LoFreq\r +\r +\r +Coverage and variant AF plots (two processes)\r +Bedtools, ViPR Tools\r +\r +\r +\r +

+Documentation

\r +

Documentation about the pipeline can be found in the docs/ directory:

\r +
    \r +
  1. Installation and configuration
  2. \r +
  3. Running the pipeline
  4. \r +
  5. Output and how to interpret the results
  6. \r +
\r +

+Credits

\r +

This pipeline was originally developed by Andreas Wilm (andreas-wilm) at Genome Institute of Singapore.
\r +It started out as an ecosystem around LoFreq and went through a couple of iterations.
\r +The current version had three predecessors ViPR 1, ViPR 2 and ViPR 3.

\r +

An incomplete list of publications using (previous versions of) ViPR:

\r +\r +

Plenty of people provided essential feedback, including:

\r +\r +""" ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/vipr" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/20?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/987?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/funcscan" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/funcscan" ; + schema1:sdDatePublished "2024-08-05 10:23:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/987/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14851 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:53Z" ; + schema1:dateModified "2024-06-11T12:54:53Z" ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/987?version=8" ; + schema1:keywords "amp, AMR, antibiotic-resistance, antimicrobial-peptides, antimicrobial-resistance-genes, arg, Assembly, bgc, biosynthetic-gene-clusters, contigs, function, Metagenomics, natural-products, screening, secondary-metabolites" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/funcscan" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/987?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/984?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/epitopeprediction" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/epitopeprediction" ; + schema1:sdDatePublished "2024-08-05 10:23:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/984/ro_crate?version=8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11589 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:52Z" ; + schema1:dateModified "2024-06-11T12:54:52Z" ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/984?version=7" ; + schema1:keywords "epitope, epitope-prediction, mhc-binding-prediction" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/epitopeprediction" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/984?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Simulations and figures supporting the manuscript \"Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake\"" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.511.5" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake" ; + schema1:sdDatePublished "2024-08-05 10:29:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/511/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 332 ; + schema1:creator , + , + , + , + , + ; + schema1:dateCreated "2024-01-16T13:18:14Z" ; + schema1:dateModified "2024-01-16T13:19:15Z" ; + schema1:description "Simulations and figures supporting the manuscript \"Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake\"" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/511?version=4" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/511?version=5" ; + schema1:version 5 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 75460 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Macromolecular Coarse-Grained Flexibility (FlexServ) tutorial using BioExcel Building Blocks (biobb)\r +\r +This tutorial aims to illustrate the process of generating protein conformational ensembles from 3D structures and analysing its molecular flexibility, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.823.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_flexserv/docker" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Docker Macromolecular Coarse-Grained Flexibility tutorial" ; + schema1:sdDatePublished "2024-08-05 10:24:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/823/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 749 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-22T10:45:48Z" ; + schema1:dateModified "2024-05-22T13:47:12Z" ; + schema1:description """# Macromolecular Coarse-Grained Flexibility (FlexServ) tutorial using BioExcel Building Blocks (biobb)\r +\r +This tutorial aims to illustrate the process of generating protein conformational ensembles from 3D structures and analysing its molecular flexibility, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Docker Macromolecular Coarse-Grained Flexibility tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_flexserv/docker/Dockerfile" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + schema1:datePublished "2022-02-04T12:20:39.138022" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "parallel-accession-download/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 9406 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """## Summary\r +HPPIDiscovery is a scientific workflow to augment, predict and perform an insilico curation of host-pathogen Protein-Protein Interactions (PPIs) using graph theory to build new candidate ppis and machine learning to predict and evaluate them by combining multiple PPI detection methods of proteins according to three categories: structural, based on primary aminoacid sequence and functional annotations.
\r +\r +HPPIDiscovery contains three main steps: (i) acquirement of pathogen and host proteins information from seed ppis provided by HPIDB search methods, (ii) Model training and generation of new candidate ppis from HPIDB seed proteins' partners, and (iii) Evaluation of new candidate ppis and results exportation.\r +\r +(i) The first step acquires the identification of the taxonomy ids of the host and pathogen organisms in the result files. Then it proceeds parsing and cleaning the HPIDB results and downloading the protein interactions of the found organisms from the STRING database. The string protein identifiers are also mapped using the id mapping tool of uniprot API and we retrieve the uniprot entry ids along with the functional annotations, sequence, domain and kegg enzymes.\r +\r +(ii) The second step builds the training dataset using the non redundant hpidb validated interactions of each genome as positive set and random string low confidence ppis from each genome as negative set. Then, PredPrin tool is executed in the training mode to obtain the model that will evaluate the new candidate PPIs. The new ppis are then generated by performing a pairwise combination of string partners of host and pathogen hpidb proteins. \r +\r +Finally, (iii) in the third step, the predprin tool is used in the test mode to evaluate the new ppis and generate the reports and list of positively predicted ppis.\r +\r +The figure below illustrates the steps of this workflow.\r +\r +## Requirements:\r +* Edit the configuration file (config.yaml) according to your own data, filling out the following fields:\r + - base_data: location of the organism folders directory, example: /home/user/data/genomes \r + - parameters_file: Since this workflow may perform parallel processing of multiple organisms at the same time, you must prepate a tabulated file containng the genome folder names located in base data, where the hpidb files are located. Example: /home/user/data/params.tsv. 
It must have the following columns: genome (folder name), hpidb_seed_network (the result exported by one of the search methods available in hpidb database), hpidb_search_method (the type of search used to generate the results) and target_taxon (the target taxon id). The column hpidb_source may have two values: keyword or homology. In the keyword mode, you provide a taxonomy, protein name, publication id or detection method and you save all results (mitab.zip) in the genome folder. Finally, in the homology mode allows the user to search for host pathogen ppis giving as input fasta sequences of a set of proteins of the target pathgen for enrichment (so you have to select the search for a pathogen set) and you save the zip folder results (interaction data) in the genome folder. This option is extremely useful when you are not sure that your organism has validated protein interactions, then it finds validated interactions from the closest proteins in the database. In case of using the homology mode, the identifiers of the pathogens' query fasta sequences must be a Uniprot ID. All the query protein IDs must belong to the same target organism (taxon id).\r + - model_file: path of a previously trained model in joblib format (if you want to train from the known validated PPIs given as seeds, just put a 'None' value)\r +\r +## Usage Instructions\r +The steps below consider the creation of a sqlite database file with all he tasks events which can be used after to retrieve the execution time taken by the tasks. It is possible run locally too (see luigi's documentation to change the running command).

\r +* Preparation:\r + 1. ````git clone https://github.com/YasCoMa/hppidiscovery.git````\r + 2. ````cd hppidiscovery````\r + 3. ````mkdir luigi_log```` \r + 4. ````luigid --background --logdir luigi_log```` (start luigi server)\r + 5. conda env create -f hp_ppi_augmentation.yml\r + 6. conda activate hp_ppi_augmentation\r + 6.1. (execute ````pip3 install wget```` (it is not installed in the environment))\r + 7. run ````pwd```` command and get the full path\r + 8. Substitute in config_example.yaml with the full path obtained in the previous step\r + 9. Download SPRINT pre-computed similarities in https://www.csd.uwo.ca/~ilie/SPRINT/precomputed_similarities.zip and unzip it inside workflow_hpAugmentation/predprin/core/sprint/HSP/\r + 10. ````cd workflow_hpAugmentation/predprin/````\r + 11. Uncompress annotation_data.zip\r + 12. Uncompress sequence_data.zip\r + 13. ````cd ../../````\r + 14. ````cd workflow_hpAugmentation````\r + 15. snake -n (check the plan of jobs, it should return no errors and exceptions)\r + 16. snakemake -j 4 (change this number according the number of genomes to analyse and the amount of cores available in your machine)""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/611?version=1" ; + schema1:isBasedOn "https://github.com/YasCoMa/hppidiscovery" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for HPPIDiscovery - Scientific workflow to augment, predict and evaluate host-pathogen protein-protein interactions" ; + schema1:sdDatePublished "2024-08-05 10:27:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/611/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 54971 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10251 ; + schema1:creator ; + schema1:dateCreated "2023-10-19T23:56:34Z" ; + schema1:dateModified "2023-10-19T23:56:34Z" ; + schema1:description """## Summary\r +HPPIDiscovery is a scientific workflow to augment, predict and perform an insilico curation of host-pathogen Protein-Protein Interactions (PPIs) using graph theory to build new candidate ppis and machine learning to predict and evaluate them by combining multiple PPI detection methods of proteins according to three categories: structural, based on primary aminoacid sequence and functional annotations.
\r +\r +HPPIDiscovery contains three main steps: (i) acquirement of pathogen and host proteins information from seed ppis provided by HPIDB search methods, (ii) Model training and generation of new candidate ppis from HPIDB seed proteins' partners, and (iii) Evaluation of new candidate ppis and results exportation.\r +\r +(i) The first step acquires the identification of the taxonomy ids of the host and pathogen organisms in the result files. Then it proceeds parsing and cleaning the HPIDB results and downloading the protein interactions of the found organisms from the STRING database. The string protein identifiers are also mapped using the id mapping tool of uniprot API and we retrieve the uniprot entry ids along with the functional annotations, sequence, domain and kegg enzymes.\r +\r +(ii) The second step builds the training dataset using the non redundant hpidb validated interactions of each genome as positive set and random string low confidence ppis from each genome as negative set. Then, PredPrin tool is executed in the training mode to obtain the model that will evaluate the new candidate PPIs. The new ppis are then generated by performing a pairwise combination of string partners of host and pathogen hpidb proteins. \r +\r +Finally, (iii) in the third step, the predprin tool is used in the test mode to evaluate the new ppis and generate the reports and list of positively predicted ppis.\r +\r +The figure below illustrates the steps of this workflow.\r +\r +## Requirements:\r +* Edit the configuration file (config.yaml) according to your own data, filling out the following fields:\r + - base_data: location of the organism folders directory, example: /home/user/data/genomes \r + - parameters_file: Since this workflow may perform parallel processing of multiple organisms at the same time, you must prepate a tabulated file containng the genome folder names located in base data, where the hpidb files are located. Example: /home/user/data/params.tsv. 
It must have the following columns: genome (folder name), hpidb_seed_network (the result exported by one of the search methods available in hpidb database), hpidb_search_method (the type of search used to generate the results) and target_taxon (the target taxon id). The column hpidb_source may have two values: keyword or homology. In the keyword mode, you provide a taxonomy, protein name, publication id or detection method and you save all results (mitab.zip) in the genome folder. Finally, in the homology mode allows the user to search for host pathogen ppis giving as input fasta sequences of a set of proteins of the target pathgen for enrichment (so you have to select the search for a pathogen set) and you save the zip folder results (interaction data) in the genome folder. This option is extremely useful when you are not sure that your organism has validated protein interactions, then it finds validated interactions from the closest proteins in the database. In case of using the homology mode, the identifiers of the pathogens' query fasta sequences must be a Uniprot ID. All the query protein IDs must belong to the same target organism (taxon id).\r + - model_file: path of a previously trained model in joblib format (if you want to train from the known validated PPIs given as seeds, just put a 'None' value)\r +\r +## Usage Instructions\r +The steps below consider the creation of a sqlite database file with all he tasks events which can be used after to retrieve the execution time taken by the tasks. It is possible run locally too (see luigi's documentation to change the running command).

\r +* Preparation:\r + 1. ````git clone https://github.com/YasCoMa/hppidiscovery.git````\r + 2. ````cd hppidiscovery````\r + 3. ````mkdir luigi_log```` \r + 4. ````luigid --background --logdir luigi_log```` (start luigi server)\r + 5. conda env create -f hp_ppi_augmentation.yml\r + 6. conda activate hp_ppi_augmentation\r + 6.1. (execute ````pip3 install wget```` (it is not installed in the environment))\r + 7. run ````pwd```` command and get the full path\r + 8. Substitute in config_example.yaml with the full path obtained in the previous step\r + 9. Download SPRINT pre-computed similarities in https://www.csd.uwo.ca/~ilie/SPRINT/precomputed_similarities.zip and unzip it inside workflow_hpAugmentation/predprin/core/sprint/HSP/\r + 10. ````cd workflow_hpAugmentation/predprin/````\r + 11. Uncompress annotation_data.zip\r + 12. Uncompress sequence_data.zip\r + 13. ````cd ../../````\r + 14. ````cd workflow_hpAugmentation````\r + 15. snake -n (check the plan of jobs, it should return no errors and exceptions)\r + 16. snakemake -j 4 (change this number according the number of genomes to analyse and the amount of cores available in your machine)""" ; + schema1:image ; + schema1:keywords "Bioinformatics, Protein-Protein interaction prediction, host-pathogen PPIs, proteins network augmentation" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "HPPIDiscovery - Scientific workflow to augment, predict and evaluate host-pathogen protein-protein interactions" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/611?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + ; + schema1:description """# ![IMPaCT program](https://github.com/EGA-archive/sarek-IMPaCT-data-QC/blob/master/impact_qc/docs/png/impact_data_logo_pink_horitzontal.png)\r +\r +[![IMPaCT](https://img.shields.io/badge/Web%20-IMPaCT-blue)](https://impact.isciii.es/)\r +[![IMPaCT-isciii](https://img.shields.io/badge/Web%20-IMPaCT--isciii-red)](https://www.isciii.es/QueHacemos/Financiacion/IMPaCT/Paginas/default.aspx)\r +[![IMPaCT-Data](https://img.shields.io/badge/Web%20-IMPaCT--Data-1d355c.svg?labelColor=000000)](https://impact-data.bsc.es/)\r +\r +## Introduction of the project\r +\r +IMPaCT-Data is the IMPaCT program that aims to support the development of a common, interoperable and integrated system for the collection and analysis of clinical and molecular data by providing the knowledge and resources available in the Spanish Science and Technology System. This development will make it possible to answer research questions based on the different clinical and molecular information systems available. Fundamentally, it aims to provide researchers with a population perspective based on individual data.\r +\r +The IMPaCT-Data project is divided into different work packages (WP). In the context of IMPaCT-Data WP3 (Genomics), a working group of experts worked on the generation of a specific quality control (QC) workflow for germline exome samples.\r +\r +To achieve this, a set of metrics related to human genomic data was decided upon, and the toolset or software to extract these metrics was implemented in an existing variant calling workflow called Sarek, part of the nf-core community. 
The final outcome is a Nextflow subworkflow, called IMPaCT-QC implemented in the Sarek pipeline.\r +\r +Below you can find the explanation of this workflow (raw pipeline), the link to the documentation of the IMPaCT QC subworkflow and a linked documentation associated to the QC metrics added in the mentioned workflow.\r +\r +- [IMPaCT-data subworkflow documentation](https://github.com/EGA-archive/sarek-IMPaCT-data-QC/tree/master/impact_qc)\r +\r +- [Metrics documentation](https://github.com/EGA-archive/sarek-IMPaCT-data-QC/blob/master/impact_qc/docs/QC_Sarek_supporing_documentation.pdf)\r +\r +

\r + \r + \r + nf-core/sarek\r + \r +

\r +\r +[![GitHub Actions CI Status](https://github.com/nf-core/sarek/actions/workflows/ci.yml/badge.svg)](https://github.com/nf-core/sarek/actions/workflows/ci.yml)\r +[![GitHub Actions Linting Status](https://github.com/nf-core/sarek/actions/workflows/linting.yml/badge.svg)](https://github.com/nf-core/sarek/actions/workflows/linting.yml)\r +[![AWS CI](https://img.shields.io/badge/CI%20tests-full%20size-FF9900?labelColor=000000&logo=Amazon%20AWS)](https://nf-co.re/sarek/results)\r +[![nf-test](https://img.shields.io/badge/unit_tests-nf--test-337ab7.svg)](https://www.nf-test.com)\r +[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.3476425-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.3476425)\r +[![nf-test](https://img.shields.io/badge/unit_tests-nf--test-337ab7.svg)](https://www.nf-test.com)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A523.04.0-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![Launch on Seqera Platform](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Seqera%20Platform-%234256e7)](https://tower.nf/launch?pipeline=https://github.com/nf-core/sarek)\r +\r +[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23sarek-4A154B?labelColor=000000&logo=slack)](https://nfcore.slack.com/channels/sarek)\r +[![Follow on Twitter](http://img.shields.io/badge/twitter-%40nf__core-1DA1F2?labelColor=000000&logo=twitter)](https://twitter.com/nf_core)\r +[![Follow on 
Mastodon](https://img.shields.io/badge/mastodon-nf__core-6364ff?labelColor=FFFFFF&logo=mastodon)](https://mstdn.science/@nf_core)\r +[![Watch on YouTube](http://img.shields.io/badge/youtube-nf--core-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/c/nf-core)\r +\r +## Introduction\r +\r +**nf-core/sarek** is a workflow designed to detect variants on whole genome or targeted sequencing data. Initially designed for Human, and Mouse, it can work on any species with a reference genome. Sarek can also handle tumour / normal pairs and could include additional relapses.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +On release, automated continuous integration tests run the pipeline on a full-sized dataset on the AWS cloud infrastructure. This ensures that the pipeline runs on AWS, has sensible resource allocation defaults set to run on real-world datasets, and permits the persistent storage of results to benchmark between pipeline releases and other analysis sources. The results obtained from the full-sized test can be viewed on the [nf-core website](https://nf-co.re/sarek/results).\r +\r +It's listed on [Elixir - Tools and Data Services Registry](https://bio.tools/nf-core-sarek) and [Dockstore](https://dockstore.org/workflows/github.com/nf-core/sarek).\r +\r +

\r + \r +

\r +\r +## Pipeline summary\r +\r +Depending on the options and samples provided, the pipeline can currently perform the following:\r +\r +- Form consensus reads from UMI sequences (`fgbio`)\r +- Sequencing quality control and trimming (enabled by `--trim_fastq`) (`FastQC`, `fastp`)\r +- Map Reads to Reference (`BWA-mem`, `BWA-mem2`, `dragmap` or `Sentieon BWA-mem`)\r +- Process BAM file (`GATK MarkDuplicates`, `GATK BaseRecalibrator` and `GATK ApplyBQSR` or `Sentieon LocusCollector` and `Sentieon Dedup`)\r +- Summarise alignment statistics (`samtools stats`, `mosdepth`)\r +- Variant calling (enabled by `--tools`, see [compatibility](https://nf-co.re/sarek/latest/docs/usage#which-variant-calling-tool-is-implemented-for-which-data-type)):\r + - `ASCAT`\r + - `CNVkit`\r + - `Control-FREEC`\r + - `DeepVariant`\r + - `freebayes`\r + - `GATK HaplotypeCaller`\r + - `Manta`\r + - `mpileup`\r + - `MSIsensor-pro`\r + - `Mutect2`\r + - `Sentieon Haplotyper`\r + - `Strelka2`\r + - `TIDDIT`\r +- Variant filtering and annotation (`SnpEff`, `Ensembl VEP`, `BCFtools annotate`)\r +- Summarise and represent QC (`MultiQC`)\r +\r +

\r + \r +

\r +\r +## Usage\r +\r +> [!NOTE]\r +> If you are new to Nextflow and nf-core, please refer to [this page](https://nf-co.re/docs/usage/installation) on how to set-up Nextflow. Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline) with `-profile test` before running the workflow on actual data.\r +\r +First, prepare a samplesheet with your input data that looks as follows:\r +\r +`samplesheet.csv`:\r +\r +```csv\r +patient,sample,lane,fastq_1,fastq_2\r +ID1,S1,L002,ID1_S1_L002_R1_001.fastq.gz,ID1_S1_L002_R2_001.fastq.gz\r +```\r +\r +Each row represents a pair of fastq files (paired end).\r +\r +Now, you can run the pipeline using:\r +\r +```bash\r +nextflow run nf-core/sarek \\\r + -profile \\\r + --input samplesheet.csv \\\r + --outdir \r +```\r +\r +> [!WARNING]\r +> Please provide pipeline parameters via the CLI or Nextflow `-params-file` option. Custom config files including those provided by the `-c` Nextflow option can be used to provide any configuration _**except for parameters**_;\r +> see [docs](https://nf-co.re/usage/configuration#custom-configuration-files).\r +\r +For more details and further functionality, please refer to the [usage documentation](https://nf-co.re/sarek/usage) and the [parameter documentation](https://nf-co.re/sarek/parameters).\r +\r +## Pipeline output\r +\r +To see the results of an example test run with a full size dataset refer to the [results](https://nf-co.re/sarek/results) tab on the nf-core website pipeline page.\r +For more details about the output files and reports, please refer to the\r +[output documentation](https://nf-co.re/sarek/output).\r +\r +## Benchmarking\r +\r +On each release, the pipeline is run on 3 full size tests:\r +\r +- `test_full` runs tumor-normal data for one patient from the SEQ2C consortium\r +- `test_full_germline` runs a WGS 30X Genome-in-a-Bottle(NA12878) dataset\r +- `test_full_germline_ncbench_agilent` runs two WES samples with 75M and 200M reads (data 
available [here](https://github.com/ncbench/ncbench-workflow#contributing-callsets)). The results are uploaded to Zenodo, evaluated against a truth dataset, and results are made available via the [NCBench dashboard](https://ncbench.github.io/report/report.html#).\r +\r +## Credits\r +\r +Sarek was originally written by Maxime U Garcia and Szilveszter Juhos at the [National Genomics Infastructure](https://ngisweden.scilifelab.se) and [National Bioinformatics Infastructure Sweden](https://nbis.se) which are both platforms at [SciLifeLab](https://scilifelab.se), with the support of [The Swedish Childhood Tumor Biobank (Barntumörbanken)](https://ki.se/forskning/barntumorbanken).\r +Friederike Hanssen and Gisela Gabernet at [QBiC](https://www.qbic.uni-tuebingen.de/) later joined and helped with further development.\r +\r +The Nextflow DSL2 conversion of the pipeline was lead by Friederike Hanssen and Maxime U Garcia.\r +\r +Maintenance is now lead by Friederike Hanssen and Maxime U Garcia (now at [Seqera Labs](https://seqera/io))\r +\r +Main developers:\r +\r +- [Maxime U Garcia](https://github.com/maxulysse)\r +- [Friederike Hanssen](https://github.com/FriederikeHanssen)\r +\r +We thank the following people for their extensive assistance in the development of this pipeline:\r +\r +- [Abhinav Sharma](https://github.com/abhi18av)\r +- [Adam Talbot](https://github.com/adamrtalbot)\r +- [Adrian Lärkeryd](https://github.com/adrlar)\r +- [Alexander Peltzer](https://github.com/apeltzer)\r +- [Alison Meynert](https://github.com/ameynert)\r +- [Anders Sune Pedersen](https://github.com/asp8200)\r +- [arontommi](https://github.com/arontommi)\r +- [BarryDigby](https://github.com/BarryDigby)\r +- [Bekir Ergüner](https://github.com/berguner)\r +- [bjornnystedt](https://github.com/bjornnystedt)\r +- [cgpu](https://github.com/cgpu)\r +- [Chela James](https://github.com/chelauk)\r +- [David Mas-Ponte](https://github.com/davidmasp)\r +- [Edmund Miller](https://github.com/edmundmiller)\r 
+- [Francesco Lescai](https://github.com/lescai)\r +- [Gavin Mackenzie](https://github.com/GCJMackenzie)\r +- [Gisela Gabernet](https://github.com/ggabernet)\r +- [Grant Neilson](https://github.com/grantn5)\r +- [gulfshores](https://github.com/gulfshores)\r +- [Harshil Patel](https://github.com/drpatelh)\r +- [James A. Fellows Yates](https://github.com/jfy133)\r +- [Jesper Eisfeldt](https://github.com/J35P312)\r +- [Johannes Alneberg](https://github.com/alneberg)\r +- [José Fernández Navarro](https://github.com/jfnavarro)\r +- [Júlia Mir Pedrol](https://github.com/mirpedrol)\r +- [Ken Brewer](https://github.com/kenibrewer)\r +- [Lasse Westergaard Folkersen](https://github.com/lassefolkersen)\r +- [Lucia Conde](https://github.com/lconde-ucl)\r +- [Malin Larsson](https://github.com/malinlarsson)\r +- [Marcel Martin](https://github.com/marcelm)\r +- [Nick Smith](https://github.com/nickhsmith)\r +- [Nicolas Schcolnicov](https://github.com/nschcolnicov)\r +- [Nilesh Tawari](https://github.com/nilesh-tawari)\r +- [Nils Homer](https://github.com/nh13)\r +- [Olga Botvinnik](https://github.com/olgabot)\r +- [Oskar Wacker](https://github.com/WackerO)\r +- [pallolason](https://github.com/pallolason)\r +- [Paul Cantalupo](https://github.com/pcantalupo)\r +- [Phil Ewels](https://github.com/ewels)\r +- [Sabrina Krakau](https://github.com/skrakau)\r +- [Sam Minot](https://github.com/sminot)\r +- [Sebastian-D](https://github.com/Sebastian-D)\r +- [Silvia Morini](https://github.com/silviamorins)\r +- [Simon Pearce](https://github.com/SPPearce)\r +- [Solenne Correard](https://github.com/scorreard)\r +- [Susanne Jodoin](https://github.com/SusiJo)\r +- [Szilveszter Juhos](https://github.com/szilvajuhos)\r +- [Tobias Koch](https://github.com/KochTobi)\r +- [Winni Kretzschmar](https://github.com/winni2k)\r +\r +## Acknowledgements\r +\r +| [![Barntumörbanken](docs/images/BTB_logo.png)](https://ki.se/forskning/barntumorbanken) | 
[![SciLifeLab](docs/images/SciLifeLab_logo.png)](https://scilifelab.se) |\r +| :-----------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------: |\r +| [![National Genomics Infrastructure](docs/images/NGI_logo.png)](https://ngisweden.scilifelab.se/) | [![National Bioinformatics Infrastructure Sweden](docs/images/NBIS_logo.png)](https://nbis.se) |\r +| [![QBiC](docs/images/QBiC_logo.png)](https://www.qbic.uni-tuebingen.de) | [![GHGA](docs/images/GHGA_logo.png)](https://www.ghga.de/) |\r +| [![DNGC](docs/images/DNGC_logo.png)](https://eng.ngc.dk/) | |\r +\r +## Contributions & Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +For further information or help, don't hesitate to get in touch on the [Slack `#sarek` channel](https://nfcore.slack.com/channels/sarek) (you can join with [this invite](https://nf-co.re/join/slack)), or contact us: [Maxime U Garcia](mailto:maxime.garcia@seqera.io?subject=[GitHub]%20nf-core/sarek), [Friederike Hanssen](mailto:friederike.hanssen@qbic.uni-tuebingen.de?subject=[GitHub]%20nf-core/sarek)\r +\r +## Citations\r +\r +If you use `nf-core/sarek` for your analysis, please cite the `Sarek` article as follows:\r +\r +> Friederike Hanssen, Maxime U Garcia, Lasse Folkersen, Anders Sune Pedersen, Francesco Lescai, Susanne Jodoin, Edmund Miller, Oskar Wacker, Nicholas Smith, nf-core community, Gisela Gabernet, Sven Nahnsen **Scalable and efficient DNA sequencing analysis on different compute infrastructures aiding variant discovery** _NAR Genomics and Bioinformatics_ Volume 6, Issue 2, June 2024, lqae031, [doi: 10.1093/nargab/lqae031](https://doi.org/10.1093/nargab/lqae031).\r +\r +> Garcia M, Juhos S, Larsson M et al. 
**Sarek: A portable workflow for whole-genome sequencing analysis of germline and somatic variants [version 2; peer review: 2 approved]** _F1000Research_ 2020, 9:63 [doi: 10.12688/f1000research.16665.2](http://dx.doi.org/10.12688/f1000research.16665.2).\r +\r +You can cite the sarek zenodo record for a specific version using the following [doi: 10.5281/zenodo.3476425](https://doi.org/10.5281/zenodo.3476425)\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +You can cite the `nf-core` publication as follows:\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +\r +## CHANGELOG\r +\r +- [CHANGELOG](CHANGELOG.md)\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1030?version=3" ; + schema1:isBasedOn "https://github.com/EGA-archive/sarek-IMPaCT-data-QC.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for IMPaCT-Data quality control workflow implementation in nf-core/Sarek" ; + schema1:sdDatePublished "2024-08-05 10:22:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1030/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 19394 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-25T06:53:21Z" ; + schema1:dateModified "2024-06-25T07:30:55Z" ; + schema1:description """# ![IMPaCT program](https://github.com/EGA-archive/sarek-IMPaCT-data-QC/blob/master/impact_qc/docs/png/impact_data_logo_pink_horitzontal.png)\r +\r +[![IMPaCT](https://img.shields.io/badge/Web%20-IMPaCT-blue)](https://impact.isciii.es/)\r +[![IMPaCT-isciii](https://img.shields.io/badge/Web%20-IMPaCT--isciii-red)](https://www.isciii.es/QueHacemos/Financiacion/IMPaCT/Paginas/default.aspx)\r +[![IMPaCT-Data](https://img.shields.io/badge/Web%20-IMPaCT--Data-1d355c.svg?labelColor=000000)](https://impact-data.bsc.es/)\r +\r +## Introduction of the project\r +\r +IMPaCT-Data is the IMPaCT program that aims to support the development of a common, interoperable and integrated system for the collection and analysis of clinical and molecular data by providing the knowledge and resources available in the Spanish Science and Technology System. This development will make it possible to answer research questions based on the different clinical and molecular information systems available. Fundamentally, it aims to provide researchers with a population perspective based on individual data.\r +\r +The IMPaCT-Data project is divided into different work packages (WP). In the context of IMPaCT-Data WP3 (Genomics), a working group of experts worked on the generation of a specific quality control (QC) workflow for germline exome samples.\r +\r +To achieve this, a set of metrics related to human genomic data was decided upon, and the toolset or software to extract these metrics was implemented in an existing variant calling workflow called Sarek, part of the nf-core community. 
The final outcome is a Nextflow subworkflow, called IMPaCT-QC implemented in the Sarek pipeline.\r +\r +Below you can find the explanation of this workflow (raw pipeline), the link to the documentation of the IMPaCT QC subworkflow and a linked documentation associated to the QC metrics added in the mentioned workflow.\r +\r +- [IMPaCT-data subworkflow documentation](https://github.com/EGA-archive/sarek-IMPaCT-data-QC/tree/master/impact_qc)\r +\r +- [Metrics documentation](https://github.com/EGA-archive/sarek-IMPaCT-data-QC/blob/master/impact_qc/docs/QC_Sarek_supporing_documentation.pdf)\r +\r +

\r + \r + \r + nf-core/sarek\r + \r +

\r +\r +[![GitHub Actions CI Status](https://github.com/nf-core/sarek/actions/workflows/ci.yml/badge.svg)](https://github.com/nf-core/sarek/actions/workflows/ci.yml)\r +[![GitHub Actions Linting Status](https://github.com/nf-core/sarek/actions/workflows/linting.yml/badge.svg)](https://github.com/nf-core/sarek/actions/workflows/linting.yml)\r +[![AWS CI](https://img.shields.io/badge/CI%20tests-full%20size-FF9900?labelColor=000000&logo=Amazon%20AWS)](https://nf-co.re/sarek/results)\r +[![nf-test](https://img.shields.io/badge/unit_tests-nf--test-337ab7.svg)](https://www.nf-test.com)\r +[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.3476425-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.3476425)\r +[![nf-test](https://img.shields.io/badge/unit_tests-nf--test-337ab7.svg)](https://www.nf-test.com)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A523.04.0-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +[![Launch on Seqera Platform](https://img.shields.io/badge/Launch%20%F0%9F%9A%80-Seqera%20Platform-%234256e7)](https://tower.nf/launch?pipeline=https://github.com/nf-core/sarek)\r +\r +[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23sarek-4A154B?labelColor=000000&logo=slack)](https://nfcore.slack.com/channels/sarek)\r +[![Follow on Twitter](http://img.shields.io/badge/twitter-%40nf__core-1DA1F2?labelColor=000000&logo=twitter)](https://twitter.com/nf_core)\r +[![Follow on 
Mastodon](https://img.shields.io/badge/mastodon-nf__core-6364ff?labelColor=FFFFFF&logo=mastodon)](https://mstdn.science/@nf_core)\r +[![Watch on YouTube](http://img.shields.io/badge/youtube-nf--core-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/c/nf-core)\r +\r +## Introduction\r +\r +**nf-core/sarek** is a workflow designed to detect variants on whole genome or targeted sequencing data. Initially designed for Human, and Mouse, it can work on any species with a reference genome. Sarek can also handle tumour / normal pairs and could include additional relapses.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +On release, automated continuous integration tests run the pipeline on a full-sized dataset on the AWS cloud infrastructure. This ensures that the pipeline runs on AWS, has sensible resource allocation defaults set to run on real-world datasets, and permits the persistent storage of results to benchmark between pipeline releases and other analysis sources. The results obtained from the full-sized test can be viewed on the [nf-core website](https://nf-co.re/sarek/results).\r +\r +It's listed on [Elixir - Tools and Data Services Registry](https://bio.tools/nf-core-sarek) and [Dockstore](https://dockstore.org/workflows/github.com/nf-core/sarek).\r +\r +

\r + \r +

\r +\r +## Pipeline summary\r +\r +Depending on the options and samples provided, the pipeline can currently perform the following:\r +\r +- Form consensus reads from UMI sequences (`fgbio`)\r +- Sequencing quality control and trimming (enabled by `--trim_fastq`) (`FastQC`, `fastp`)\r +- Map Reads to Reference (`BWA-mem`, `BWA-mem2`, `dragmap` or `Sentieon BWA-mem`)\r +- Process BAM file (`GATK MarkDuplicates`, `GATK BaseRecalibrator` and `GATK ApplyBQSR` or `Sentieon LocusCollector` and `Sentieon Dedup`)\r +- Summarise alignment statistics (`samtools stats`, `mosdepth`)\r +- Variant calling (enabled by `--tools`, see [compatibility](https://nf-co.re/sarek/latest/docs/usage#which-variant-calling-tool-is-implemented-for-which-data-type)):\r + - `ASCAT`\r + - `CNVkit`\r + - `Control-FREEC`\r + - `DeepVariant`\r + - `freebayes`\r + - `GATK HaplotypeCaller`\r + - `Manta`\r + - `mpileup`\r + - `MSIsensor-pro`\r + - `Mutect2`\r + - `Sentieon Haplotyper`\r + - `Strelka2`\r + - `TIDDIT`\r +- Variant filtering and annotation (`SnpEff`, `Ensembl VEP`, `BCFtools annotate`)\r +- Summarise and represent QC (`MultiQC`)\r +\r +

\r + \r +

\r +\r +## Usage\r +\r +> [!NOTE]\r +> If you are new to Nextflow and nf-core, please refer to [this page](https://nf-co.re/docs/usage/installation) on how to set-up Nextflow. Make sure to [test your setup](https://nf-co.re/docs/usage/introduction#how-to-run-a-pipeline) with `-profile test` before running the workflow on actual data.\r +\r +First, prepare a samplesheet with your input data that looks as follows:\r +\r +`samplesheet.csv`:\r +\r +```csv\r +patient,sample,lane,fastq_1,fastq_2\r +ID1,S1,L002,ID1_S1_L002_R1_001.fastq.gz,ID1_S1_L002_R2_001.fastq.gz\r +```\r +\r +Each row represents a pair of fastq files (paired end).\r +\r +Now, you can run the pipeline using:\r +\r +```bash\r +nextflow run nf-core/sarek \\\r + -profile \\\r + --input samplesheet.csv \\\r + --outdir \r +```\r +\r +> [!WARNING]\r +> Please provide pipeline parameters via the CLI or Nextflow `-params-file` option. Custom config files including those provided by the `-c` Nextflow option can be used to provide any configuration _**except for parameters**_;\r +> see [docs](https://nf-co.re/usage/configuration#custom-configuration-files).\r +\r +For more details and further functionality, please refer to the [usage documentation](https://nf-co.re/sarek/usage) and the [parameter documentation](https://nf-co.re/sarek/parameters).\r +\r +## Pipeline output\r +\r +To see the results of an example test run with a full size dataset refer to the [results](https://nf-co.re/sarek/results) tab on the nf-core website pipeline page.\r +For more details about the output files and reports, please refer to the\r +[output documentation](https://nf-co.re/sarek/output).\r +\r +## Benchmarking\r +\r +On each release, the pipeline is run on 3 full size tests:\r +\r +- `test_full` runs tumor-normal data for one patient from the SEQ2C consortium\r +- `test_full_germline` runs a WGS 30X Genome-in-a-Bottle(NA12878) dataset\r +- `test_full_germline_ncbench_agilent` runs two WES samples with 75M and 200M reads (data 
available [here](https://github.com/ncbench/ncbench-workflow#contributing-callsets)). The results are uploaded to Zenodo, evaluated against a truth dataset, and results are made available via the [NCBench dashboard](https://ncbench.github.io/report/report.html#).\r +\r +## Credits\r +\r +Sarek was originally written by Maxime U Garcia and Szilveszter Juhos at the [National Genomics Infrastructure](https://ngisweden.scilifelab.se) and [National Bioinformatics Infrastructure Sweden](https://nbis.se) which are both platforms at [SciLifeLab](https://scilifelab.se), with the support of [The Swedish Childhood Tumor Biobank (Barntumörbanken)](https://ki.se/forskning/barntumorbanken).\r +Friederike Hanssen and Gisela Gabernet at [QBiC](https://www.qbic.uni-tuebingen.de/) later joined and helped with further development.\r +\r +The Nextflow DSL2 conversion of the pipeline was led by Friederike Hanssen and Maxime U Garcia.\r +\r +Maintenance is now led by Friederike Hanssen and Maxime U Garcia (now at [Seqera Labs](https://seqera.io))\r +\r +Main developers:\r +\r +- [Maxime U Garcia](https://github.com/maxulysse)\r +- [Friederike Hanssen](https://github.com/FriederikeHanssen)\r +\r +We thank the following people for their extensive assistance in the development of this pipeline:\r +\r +- [Abhinav Sharma](https://github.com/abhi18av)\r +- [Adam Talbot](https://github.com/adamrtalbot)\r +- [Adrian Lärkeryd](https://github.com/adrlar)\r +- [Alexander Peltzer](https://github.com/apeltzer)\r +- [Alison Meynert](https://github.com/ameynert)\r +- [Anders Sune Pedersen](https://github.com/asp8200)\r +- [arontommi](https://github.com/arontommi)\r +- [BarryDigby](https://github.com/BarryDigby)\r +- [Bekir Ergüner](https://github.com/berguner)\r +- [bjornnystedt](https://github.com/bjornnystedt)\r +- [cgpu](https://github.com/cgpu)\r +- [Chela James](https://github.com/chelauk)\r +- [David Mas-Ponte](https://github.com/davidmasp)\r +- [Edmund Miller](https://github.com/edmundmiller)\r 
+- [Francesco Lescai](https://github.com/lescai)\r +- [Gavin Mackenzie](https://github.com/GCJMackenzie)\r +- [Gisela Gabernet](https://github.com/ggabernet)\r +- [Grant Neilson](https://github.com/grantn5)\r +- [gulfshores](https://github.com/gulfshores)\r +- [Harshil Patel](https://github.com/drpatelh)\r +- [James A. Fellows Yates](https://github.com/jfy133)\r +- [Jesper Eisfeldt](https://github.com/J35P312)\r +- [Johannes Alneberg](https://github.com/alneberg)\r +- [José Fernández Navarro](https://github.com/jfnavarro)\r +- [Júlia Mir Pedrol](https://github.com/mirpedrol)\r +- [Ken Brewer](https://github.com/kenibrewer)\r +- [Lasse Westergaard Folkersen](https://github.com/lassefolkersen)\r +- [Lucia Conde](https://github.com/lconde-ucl)\r +- [Malin Larsson](https://github.com/malinlarsson)\r +- [Marcel Martin](https://github.com/marcelm)\r +- [Nick Smith](https://github.com/nickhsmith)\r +- [Nicolas Schcolnicov](https://github.com/nschcolnicov)\r +- [Nilesh Tawari](https://github.com/nilesh-tawari)\r +- [Nils Homer](https://github.com/nh13)\r +- [Olga Botvinnik](https://github.com/olgabot)\r +- [Oskar Wacker](https://github.com/WackerO)\r +- [pallolason](https://github.com/pallolason)\r +- [Paul Cantalupo](https://github.com/pcantalupo)\r +- [Phil Ewels](https://github.com/ewels)\r +- [Sabrina Krakau](https://github.com/skrakau)\r +- [Sam Minot](https://github.com/sminot)\r +- [Sebastian-D](https://github.com/Sebastian-D)\r +- [Silvia Morini](https://github.com/silviamorins)\r +- [Simon Pearce](https://github.com/SPPearce)\r +- [Solenne Correard](https://github.com/scorreard)\r +- [Susanne Jodoin](https://github.com/SusiJo)\r +- [Szilveszter Juhos](https://github.com/szilvajuhos)\r +- [Tobias Koch](https://github.com/KochTobi)\r +- [Winni Kretzschmar](https://github.com/winni2k)\r +\r +## Acknowledgements\r +\r +| [![Barntumörbanken](docs/images/BTB_logo.png)](https://ki.se/forskning/barntumorbanken) | 
[![SciLifeLab](docs/images/SciLifeLab_logo.png)](https://scilifelab.se) |\r +| :-----------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------: |\r +| [![National Genomics Infrastructure](docs/images/NGI_logo.png)](https://ngisweden.scilifelab.se/) | [![National Bioinformatics Infrastructure Sweden](docs/images/NBIS_logo.png)](https://nbis.se) |\r +| [![QBiC](docs/images/QBiC_logo.png)](https://www.qbic.uni-tuebingen.de) | [![GHGA](docs/images/GHGA_logo.png)](https://www.ghga.de/) |\r +| [![DNGC](docs/images/DNGC_logo.png)](https://eng.ngc.dk/) | |\r +\r +## Contributions & Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +For further information or help, don't hesitate to get in touch on the [Slack `#sarek` channel](https://nfcore.slack.com/channels/sarek) (you can join with [this invite](https://nf-co.re/join/slack)), or contact us: [Maxime U Garcia](mailto:maxime.garcia@seqera.io?subject=[GitHub]%20nf-core/sarek), [Friederike Hanssen](mailto:friederike.hanssen@qbic.uni-tuebingen.de?subject=[GitHub]%20nf-core/sarek)\r +\r +## Citations\r +\r +If you use `nf-core/sarek` for your analysis, please cite the `Sarek` article as follows:\r +\r +> Friederike Hanssen, Maxime U Garcia, Lasse Folkersen, Anders Sune Pedersen, Francesco Lescai, Susanne Jodoin, Edmund Miller, Oskar Wacker, Nicholas Smith, nf-core community, Gisela Gabernet, Sven Nahnsen **Scalable and efficient DNA sequencing analysis on different compute infrastructures aiding variant discovery** _NAR Genomics and Bioinformatics_ Volume 6, Issue 2, June 2024, lqae031, [doi: 10.1093/nargab/lqae031](https://doi.org/10.1093/nargab/lqae031).\r +\r +> Garcia M, Juhos S, Larsson M et al. 
**Sarek: A portable workflow for whole-genome sequencing analysis of germline and somatic variants [version 2; peer review: 2 approved]** _F1000Research_ 2020, 9:63 [doi: 10.12688/f1000research.16665.2](http://dx.doi.org/10.12688/f1000research.16665.2).\r +\r +You can cite the sarek zenodo record for a specific version using the following [doi: 10.5281/zenodo.3476425](https://doi.org/10.5281/zenodo.3476425)\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +You can cite the `nf-core` publication as follows:\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +\r +## CHANGELOG\r +\r +- [CHANGELOG](CHANGELOG.md)\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1030?version=2" ; + schema1:keywords "Bioinformatics, Nextflow, variant calling, wes, WGS, NGS, EGA-archive, quality control" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "IMPaCT-Data quality control workflow implementation in nf-core/Sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1030?version=3" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1027?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/viralrecon" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/viralrecon" ; + schema1:sdDatePublished "2024-08-05 10:23:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1027/ro_crate?version=7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10146 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:20Z" ; + schema1:dateModified "2024-06-11T12:55:20Z" ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1027?version=10" ; + schema1:keywords "Amplicon, ARTIC, Assembly, covid-19, covid19, illumina, long-read-sequencing, Metagenomics, nanopore, ONT, oxford-nanopore, SARS-CoV-2, variant-calling, viral, Virus" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/viralrecon" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1027?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/989?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/hic" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hic" ; + schema1:sdDatePublished "2024-08-05 10:23:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/989/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4842 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:54Z" ; + schema1:dateModified "2024-06-11T12:54:54Z" ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/989?version=7" ; + schema1:keywords "chromosome-conformation-capture, Hi-C" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hic" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/989?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-08-05 10:23:16 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5714 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:14Z" ; + schema1:dateModified "2024-06-11T12:55:14Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly and annotation of metatranscriptomic data, both prokaryotic and eukaryotic" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/997?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/metatdenovo" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/metatdenovo" ; + schema1:sdDatePublished "2024-08-05 10:23:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/997/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11793 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:00Z" ; + schema1:dateModified "2024-06-11T12:55:00Z" ; + schema1:description "Assembly and annotation of metatranscriptomic data, both prokaryotic and eukaryotic" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/997?version=1" ; + schema1:keywords "eukaryotes, Metagenomics, metatranscriptomics, prokaryotes, viruses" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/metatdenovo" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/997?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A pipeline to demultiplex, QC and map Nanopore data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1002?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/nanoseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nanoseq" ; + schema1:sdDatePublished "2024-08-05 10:23:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1002/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9734 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:05Z" ; + schema1:dateModified "2024-06-11T12:55:05Z" ; + schema1:description "A pipeline to demultiplex, QC and map Nanopore data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1002?version=5" ; + schema1:keywords "Alignment, demultiplexing, nanopore, QC" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nanoseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1002?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. 
We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:identifier 
"https://doi.org/10.48546/workflowhub.workflow.328.4" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_pmx_tutorial/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)" ; + schema1:sdDatePublished "2024-08-05 10:32:13 +0100" ; + schema1:url "https://workflowhub.eu/workflows/328/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8390 ; + schema1:creator , + ; + schema1:dateCreated "2023-07-31T07:33:55Z" ; + schema1:dateModified "2023-07-31T07:37:03Z" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. 
Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/328?version=3" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_pmx_tutorial/python/workflow.py" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# BACPAGE\r +\r +This repository contains an easy-to-use pipeline for the assembly and analysis of bacterial genomes using ONT long-read or Illumina short-read technology. \r +Read the complete documentation and instructions for bacpage and each of its functions [here](https://cholgen.github.io/sequencing-resources/bacpage-command.html)\r +\r +# Introduction\r +Advances in sequencing technology during the COVID-19 pandemic has led to massive increases in the generation of sequencing data. Many bioinformatics tools have been developed to analyze this data, but very few tools can be utilized by individuals without prior bioinformatics training.\r +\r +This pipeline was designed to encapsulate pre-existing tools to automate analysis of whole genome sequencing of bacteria. \r +Installation is fast and straightfoward. \r +The pipeline is easy to setup and contains rationale defaults, but is highly modular and configurable by more advance users.\r +Bacpage has individual commands to generate consensus sequences, perform *de novo* assembly, construct phylogenetic tree, and generate quality control reports.\r +\r +# Features\r +We anticipate the pipeline will be able to perform the following functions:\r +- [x] Reference-based assembly of Illumina paired-end reads\r +- [x] *De novo* assembly of Illumina paired-end reads\r +- [ ] *De novo* assembly of ONT long reads\r +- [x] Run quality control checks\r +- [x] Variant calling using [bcftools](https://github.com/samtools/bcftools)\r +- [x] Maximum-likelihood phylogenetic inference of processed samples and background dataset using [iqtree](https://github.com/iqtree/iqtree2) \r +- [x] MLST profiling and virulence factor detection\r +- [x] Antimicrobial resistance genes detection\r +- [ ] Plasmid detection\r +\r +# Installation\r +1. 
Install `mamba` by running the following two command:\r +```commandline\r +curl -L -O "https://github.com/conda-forge/miniforge/releases/latest/download/Mambaforge-$(uname)-$(uname -m).sh"\r +bash Mambaforge-$(uname)-$(uname -m).sh\r +```\r +\r +2. Clone the bacpage repository:\r +```commandline\r +git clone https://github.com/CholGen/bacpage.git\r +```\r +\r +3. Switch to the development branch of the pipeline:\r +```commandline\r +cd bacpage/\r +git checkout -b split_into_command\r +```\r +\r +3. Install and activate the pipeline's conda environment:\r +```commandline\r +mamba env create -f environment.yaml\r +mamba activate bacpage\r +```\r +\r +4. Install the `bacpage` command:\r +```commandline\r +pip install .\r +```\r +\r +5. Test the installation:\r +```commandline\r +bacpage -h\r +bacpage version\r +```\r +These command should print the help and version of the program. Please create an issue if this is not the case.\r +\r +# Updating\r +\r +1. Navigate to the directory where you cloned the bacpage repository on the command line:\r +```commandline\r +cd bacpage/\r +```\r +2. Activate the bacpage conda environment:\r +```commandline\r +mamba activate bacpage\r +```\r +3. Pull the lastest changes from GitHub:\r +```commandline\r +git pull\r +```\r +4. Update the bacpage conda environemnt:\r +```commandline\r +mamba env update -f environment.yaml\r +```\r +5. Reinstall the `bacpage` command:\r +```commandline\r +pip install .\r +```\r +\r +# Usage\r +0. Activate the bacpage conda environment:\r +```commandline\r +mamba activate bacpage\r +```\r +1. Create a directory specifically for the batch of samples you would like to analyze (called a project directory).\r +```commandline\r +bacpage setup [your-project-directory-name]\r +```\r +2. Place paired sequencing reads in the `input/` directory of your project directory.\r +3. 
From the pipeline's directory, run the reference-based assembly pipeline on your samples using the following command:\r +```commandline\r +bacpage assemble [your-project-directory-name]\r +```\r +This will generate a consensus sequence in FASTA format for each of your samples and place them in \r +`/results/consensus_sequences/.masked.fasta`. An HTML report containing alignment \r +and quality metrics for your samples can be found at `/results/reports/qc_report.html`.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/695?version=2" ; + schema1:isBasedOn "https://github.com/CholGen/bacpage.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Phylogeny reconstruction using bacpage" ; + schema1:sdDatePublished "2024-08-05 10:26:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/695/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 13473 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3380 ; + schema1:dateCreated "2024-01-09T17:12:32Z" ; + schema1:dateModified "2024-01-09T17:12:32Z" ; + schema1:description """# BACPAGE\r +\r +This repository contains an easy-to-use pipeline for the assembly and analysis of bacterial genomes using ONT long-read or Illumina short-read technology. \r +Read the complete documentation and instructions for bacpage and each of its functions [here](https://cholgen.github.io/sequencing-resources/bacpage-command.html)\r +\r +# Introduction\r +Advances in sequencing technology during the COVID-19 pandemic has led to massive increases in the generation of sequencing data. 
Many bioinformatics tools have been developed to analyze this data, but very few tools can be utilized by individuals without prior bioinformatics training.\r +\r +This pipeline was designed to encapsulate pre-existing tools to automate analysis of whole genome sequencing of bacteria. \r +Installation is fast and straightfoward. \r +The pipeline is easy to setup and contains rationale defaults, but is highly modular and configurable by more advance users.\r +Bacpage has individual commands to generate consensus sequences, perform *de novo* assembly, construct phylogenetic tree, and generate quality control reports.\r +\r +# Features\r +We anticipate the pipeline will be able to perform the following functions:\r +- [x] Reference-based assembly of Illumina paired-end reads\r +- [x] *De novo* assembly of Illumina paired-end reads\r +- [ ] *De novo* assembly of ONT long reads\r +- [x] Run quality control checks\r +- [x] Variant calling using [bcftools](https://github.com/samtools/bcftools)\r +- [x] Maximum-likelihood phylogenetic inference of processed samples and background dataset using [iqtree](https://github.com/iqtree/iqtree2) \r +- [x] MLST profiling and virulence factor detection\r +- [x] Antimicrobial resistance genes detection\r +- [ ] Plasmid detection\r +\r +# Installation\r +1. Install `mamba` by running the following two command:\r +```commandline\r +curl -L -O "https://github.com/conda-forge/miniforge/releases/latest/download/Mambaforge-$(uname)-$(uname -m).sh"\r +bash Mambaforge-$(uname)-$(uname -m).sh\r +```\r +\r +2. Clone the bacpage repository:\r +```commandline\r +git clone https://github.com/CholGen/bacpage.git\r +```\r +\r +3. Switch to the development branch of the pipeline:\r +```commandline\r +cd bacpage/\r +git checkout -b split_into_command\r +```\r +\r +3. Install and activate the pipeline's conda environment:\r +```commandline\r +mamba env create -f environment.yaml\r +mamba activate bacpage\r +```\r +\r +4. 
Install the `bacpage` command:\r +```commandline\r +pip install .\r +```\r +\r +5. Test the installation:\r +```commandline\r +bacpage -h\r +bacpage version\r +```\r +These command should print the help and version of the program. Please create an issue if this is not the case.\r +\r +# Updating\r +\r +1. Navigate to the directory where you cloned the bacpage repository on the command line:\r +```commandline\r +cd bacpage/\r +```\r +2. Activate the bacpage conda environment:\r +```commandline\r +mamba activate bacpage\r +```\r +3. Pull the lastest changes from GitHub:\r +```commandline\r +git pull\r +```\r +4. Update the bacpage conda environemnt:\r +```commandline\r +mamba env update -f environment.yaml\r +```\r +5. Reinstall the `bacpage` command:\r +```commandline\r +pip install .\r +```\r +\r +# Usage\r +0. Activate the bacpage conda environment:\r +```commandline\r +mamba activate bacpage\r +```\r +1. Create a directory specifically for the batch of samples you would like to analyze (called a project directory).\r +```commandline\r +bacpage setup [your-project-directory-name]\r +```\r +2. Place paired sequencing reads in the `input/` directory of your project directory.\r +3. From the pipeline's directory, run the reference-based assembly pipeline on your samples using the following command:\r +```commandline\r +bacpage assemble [your-project-directory-name]\r +```\r +This will generate a consensus sequence in FASTA format for each of your samples and place them in \r +`/results/consensus_sequences/.masked.fasta`. 
An HTML report containing alignment \r +and quality metrics for your samples can be found at `/results/reports/qc_report.html`.\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/695?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Phylogeny reconstruction using bacpage" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/695?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/986?version=11" ; + schema1:isBasedOn "https://github.com/nf-core/fetchngs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fetchngs" ; + schema1:sdDatePublished "2024-08-05 10:23:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/986/ro_crate?version=11" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9188 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:53Z" ; + schema1:dateModified "2024-06-11T12:54:53Z" ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/986?version=13" ; + schema1:keywords "ddbj, download, ena, FASTQ, geo, sra, synapse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fetchngs" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/986?version=11" ; + schema1:version 11 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.826.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_md_setup/docker" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Docker Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:24:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/826/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 746 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-22T11:00:31Z" ; + schema1:dateModified "2024-05-22T13:38:14Z" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Docker Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_md_setup/docker/Dockerfile" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-08-05 10:22:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=9" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10551 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:10Z" ; + schema1:dateModified "2024-06-11T12:55:10Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + schema1:datePublished "2024-03-26T19:19:56.092517" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Purge-duplicates-one-haplotype-VGP6b/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . 
+ + a schema1:Dataset ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling" ; + schema1:mainEntity ; + schema1:name "COVID-19-PE-ARTIC-ILLUMINA (v0.3)" ; + schema1:sdDatePublished "2021-06-06 03:00:39 +0100" ; + schema1:softwareVersion "v0.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 66812 ; + schema1:name "COVID-19-PE-ARTIC-ILLUMINA" ; + schema1:programmingLanguage . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.120.6" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:31:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/120/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 52982 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-04T14:12:30Z" ; + schema1:dateModified "2024-05-14T10:08:33Z" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/120?version=5" ; + schema1:isPartOf , + , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_wf_md_setup/blob/main/biobb_wf_md_setup/notebooks/biobb_MDsetup_tutorial.ipynb" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# score-assemblies\r +\r +A Snakemake-wrapper for evaluating *de novo* bacterial genome assemblies, e.g. from Oxford Nanopore (ONT) or Illumina sequencing.\r +\r +The workflow includes the following programs:\r +* [pomoxis](https://github.com/nanoporetech/pomoxis) assess_assembly and assess_homopolymers\r +* dnadiff from the [mummer](https://mummer4.github.io/index.html) package\r +* [NucDiff](https://github.com/uio-cels/NucDiff/)\r +* [QUAST](http://quast.sourceforge.net/quast)\r +* [BUSCO](https://busco.ezlab.org/)\r +* [ideel](https://github.com/mw55309/ideel/), which uses [prodigal](https://github.com/hyattpd/Prodigal) and [diamond](https://github.com/bbuchfink/diamond)\r +* [bakta](https://github.com/oschwengers/bakta)\r +\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.786.1" ; + schema1:isBasedOn "https://github.com/pmenzel/score-assemblies" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for score-assemblies" ; + schema1:sdDatePublished "2024-08-05 10:25:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/786/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 23878 ; + schema1:creator ; + schema1:dateCreated "2024-03-06T10:19:58Z" ; + schema1:dateModified "2024-03-06T10:54:09Z" ; + schema1:description """# score-assemblies\r +\r +A Snakemake-wrapper for evaluating *de novo* bacterial genome assemblies, e.g. 
from Oxford Nanopore (ONT) or Illumina sequencing.\r +\r +The workflow includes the following programs:\r +* [pomoxis](https://github.com/nanoporetech/pomoxis) assess_assembly and assess_homopolymers\r +* dnadiff from the [mummer](https://mummer4.github.io/index.html) package\r +* [NucDiff](https://github.com/uio-cels/NucDiff/)\r +* [QUAST](http://quast.sourceforge.net/quast)\r +* [BUSCO](https://busco.ezlab.org/)\r +* [ideel](https://github.com/mw55309/ideel/), which uses [prodigal](https://github.com/hyattpd/Prodigal) and [diamond](https://github.com/bbuchfink/diamond)\r +* [bakta](https://github.com/oschwengers/bakta)\r +\r +\r +""" ; + schema1:keywords "genome_assembly, genome-annotation" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "score-assemblies" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/786?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.127.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (Cluster90)" ; + schema1:sdDatePublished "2024-08-05 10:30:57 
+0100" ; + schema1:url "https://workflowhub.eu/workflows/127/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 39064 ; + schema1:creator , + , + ; + schema1:dateCreated "2021-06-29T09:06:40Z" ; + schema1:dateModified "2022-09-15T11:15:21Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in 
Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/127?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (Cluster90)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_virtual-screening/0ab8d1d3410c67db6a5a25d3dde6f3e0303af08f/biobb_wf_virtual-screening/notebooks/clusterBindingSite/wf_vs_clusterBindingSite.ipynb" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """ Joint multi-omics dimensionality reduction approaches for CAKUT data using peptidome and proteome data\r + \r + **Brief description**\r + In (Cantini et al. 2020), Cantini et al. evaluated 9 representative joint dimensionality reduction (jDR) methods for multi-omics integration and analysis and . The methods are Regularized Generalized Canonical Correlation Analysis (RGCCA), Multiple co-inertia analysis (MCIA), Multi-Omics Factor Analysis (MOFA), Multi-Study Factor Analysis (MSFA), iCluster, Integrative NMF (intNMF), Joint and Individual Variation Explained (JIVE), tensorial Independent Component Analysis (tICA), and matrix-tri-factorization (scikit-fusion) (Tenenhaus, Tenenhaus, and Groenen 2017; Bady et al. 2004; Argelaguet et al. 2018; De Vito et al. 2019; Shen, Olshen, and Ladanyi 2009; Chalise and Fridley 2017; Lock et al. 2013; Teschendorff et al. 
2018; Žitnik and Zupan 2015).\r +\r +The authors provided their benchmarking procedure, multi-omics mix (momix), as Jupyter Notebook on GitHub (https://github.com/ComputationalSystemsBiology/momix-notebook) and project environment through Conda. In momix, the factorization methods are called from an R script, and parameters of the methods are also set in that script. We did not modify the parameters of the methods in the provided script. We set factor number to 2.\r +""" ; + schema1:identifier "https://workflowhub.eu/workflows/126?version=1" ; + schema1:isBasedOn "https://gitlab.cmbi.umcn.nl/bayjan/cakut_dre" ; + schema1:license "GPL-3.0" ; + schema1:name "Research Object Crate for EJP-RD WP13 case-study CAKUT momix analysis" ; + schema1:sdDatePublished "2024-08-05 10:31:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/126/ro_crate?version=1" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A Nanostring nCounter analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1003?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/nanostring" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nanostring" ; + schema1:sdDatePublished "2024-08-05 10:23:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1003/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9869 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:06Z" ; + schema1:dateModified "2024-06-11T12:55:06Z" ; + schema1:description "A Nanostring nCounter analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1003?version=4" ; + schema1:keywords "nanostring, nanostringnorm" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nanostring" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1003?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Non-functional workflow to get a global view of possibilities for plant virus classification." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/100?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for 0: View complete virus identification" ; + schema1:sdDatePublished "2024-08-05 10:33:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/100/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 46782 ; + schema1:dateCreated "2021-02-04T09:05:08Z" ; + schema1:dateModified "2023-02-13T14:06:45Z" ; + schema1:description "Non-functional workflow to get a global view of possibilities for plant virus classification." 
; + schema1:keywords "Virus, identification, exploration" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "0: View complete virus identification" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/100?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "MARS-seq v2 preprocessing pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/996?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/marsseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/marsseq" ; + schema1:sdDatePublished "2024-08-05 10:23:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/996/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9524 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:00Z" ; + schema1:dateModified "2024-06-11T12:55:00Z" ; + schema1:description "MARS-seq v2 preprocessing pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/996?version=4" ; + schema1:keywords "facs-sorting, MARS-seq, single-cell, single-cell-rna-seq, star-solo, transcriptional-dynamics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/marsseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/996?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/984?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/epitopeprediction" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/epitopeprediction" ; + schema1:sdDatePublished "2024-08-05 10:23:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/984/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9461 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:51Z" ; + schema1:dateModified "2024-06-11T12:54:51Z" ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/984?version=7" ; + schema1:keywords "epitope, epitope-prediction, mhc-binding-prediction" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/epitopeprediction" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/984?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=14" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-08-05 10:23:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=14" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17479 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:59Z" ; + schema1:dateModified "2024-06-11T12:54:59Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=14" ; + schema1:version 14 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1023?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/smrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/smrnaseq" ; + schema1:sdDatePublished "2024-08-05 10:23:12 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1023/ro_crate?version=9" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11444 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:19Z" ; + schema1:dateModified "2024-06-11T12:55:19Z" ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/1023?version=10" ; + schema1:keywords "small-rna, smrna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/smrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1023?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/989?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/hic" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hic" ; + schema1:sdDatePublished "2024-08-05 10:23:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/989/ro_crate?version=8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10893 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:55Z" ; + schema1:dateModified "2024-06-11T12:54:55Z" ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/989?version=7" ; + schema1:keywords "chromosome-conformation-capture, Hi-C" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hic" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/989?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. 
\r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +****\r +\r +Workflow information:\r +* Input = genome.fasta.\r +* Outputs = masked_genome.fasta and table of repeats found. \r +* Runs RepeatModeler with default settings, uses the output of this (repeat library) as input into RepeatMasker. \r +* Runs RepeatMasker with default settings except for: Skip masking of simple tandem repeats and low complexity regions. (-nolow) : default set to yes. Perform softmasking instead of hardmasking - set to yes. \r +* Workflow report displays an edited table of repeats found. Note: a known bug is that sometimes the workflow report text resets to default text. To restore, look for an earlier workflow version with correct workflow report text, and copy and paste report text into current version.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.875.2" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Repeat masking - TSI" ; + schema1:sdDatePublished "2024-08-05 10:22:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/875/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8659 ; + schema1:creator , + ; + schema1:dateCreated "2024-05-08T06:24:55Z" ; + schema1:dateModified "2024-05-09T04:01:22Z" ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. 
\r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +****\r +\r +Workflow information:\r +* Input = genome.fasta.\r +* Outputs = masked_genome.fasta and table of repeats found. \r +* Runs RepeatModeler with default settings, uses the output of this (repeat library) as input into RepeatMasker. \r +* Runs RepeatMasker with default settings except for: Skip masking of simple tandem repeats and low complexity regions. (-nolow) : default set to yes. Perform softmasking instead of hardmasking - set to yes. \r +* Workflow report displays an edited table of repeats found. Note: a known bug is that sometimes the workflow report text resets to default text. To restore, look for an earlier workflow version with correct workflow report text, and copy and paste report text into current version.\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/875?version=2" ; + schema1:isPartOf ; + schema1:keywords "TSI-annotation" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Repeat masking - TSI" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/875?version=2" ; + schema1:version 2 ; + ns1:input ; + ns1:output , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 403507 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/978?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/demultiplex" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/demultiplex" ; + schema1:sdDatePublished "2024-08-05 10:24:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/978/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8121 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:47Z" ; + schema1:dateModified "2024-06-11T12:54:47Z" ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/978?version=7" ; + schema1:keywords "bases2fastq, bcl2fastq, demultiplexing, elementbiosciences, illumina" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/demultiplex" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/978?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Rare disease researchers workflow is that they submit their raw data (fastq), run the mapping and variant calling RD-Connect pipeline and obtain unannotated gvcf files to further submit to the RD-Connect GPAP or analyse on their own.\r +\r +This demonstrator focuses on the variant calling pipeline. 
The raw genomic data is processed using the RD-Connect pipeline ([Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/27604516)) running on the standards (GA4GH) compliant, interoperable container orchestration platform.\r +\r +This demonstrator will be aligned with the current implementation study on [Development of Architecture for Software Containers at ELIXIR and its use by EXCELERATE use-case communities](docs/Appendix%201%20-%20Project%20Plan%202018-biocontainers%2020171117.pdf) \r +\r +For this implementation, different steps are required:\r +\r +1. Adapt the pipeline to CWL and dockerise elements \r +2. Align with IS efforts on software containers to package the different components (Nextflow) \r +3. Submit trio of Illumina NA12878 Platinum Genome or Exome to the GA4GH platform cloud (by Aspera or ftp server)\r +4. Run the RD-Connect pipeline on the container platform\r +5. Return corresponding gvcf files\r +6. OPTIONAL: annotate and update to RD-Connect playground instance\r +\r +N.B: The demonstrator might have some manual steps, which will not be in production. \r +\r +## RD-Connect pipeline\r +\r +Detailed information about the RD-Connect pipeline can be found in [Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/?term=27604516)\r +\r +![alt text](https://raw.githubusercontent.com/inab/Wetlab2Variations/eosc-life/docs/RD-Connect_pipeline.jpg)\r +\r +## The applications\r +\r +**1\\. Name of the application: Adaptor removal**\r +Function: remove sequencing adaptors \r +Container (readiness status, location, version): [cutadapt (v.1.18)](https://hub.docker.com/r/cnag/cutadapt) \r +Required resources in cores and RAM: current container size 169MB \r +Input data (amount, format, directory..): raw fastq \r +Output data: paired fastq without adaptors \r +\r +**2\\. Name of the application: Mapping and bam sorting**\r +Function: align data to reference genome \r +Container : [bwa-mem (v.0.7.17)](https://hub.docker.com/r/cnag/bwa) / [Sambamba (v. 
0.6.8 )](https://hub.docker.com/r/cnag/sambamba)(or samtools) \r +Resources :current container size 111MB / 32MB \r +Input data: paired fastq without adaptors \r +Output data: sorted bam \r +\r +**3\\. Name of the application: MarkDuplicates** \r +Function: Mark (and remove) duplicates \r +Container: [Picard (v.2.18.25)](https://hub.docker.com/r/cnag/picard)\r +Resources: current container size 261MB \r +Input data:sorted bam \r +Output data: Sorted bam with marked (or removed) duplicates \r +\r +**4\\. Name of the application: Base quality recalibration (BQSR)** \r +Function: Base quality recalibration \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data: Sorted bam with marked (or removed) duplicates \r +Output data: Sorted bam with marked duplicates & base quality recalculated \r +\r +**5\\. Name of the application: Variant calling** \r +Function: variant calling \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data:Sorted bam with marked duplicates & base quality recalculated \r +Output data: unannotated gvcf per sample \r +\r +**6\\. (OPTIONAL)Name of the application: Quality of the fastq** \r +Function: report on the sequencing quality \r +Container: [fastqc 0.11.8](https://hub.docker.com/r/cnag/fastqc)\r +Resources: current container size 173MB \r +Input data: raw fastq \r +Output data: QC report \r +\r +## Licensing\r +\r +GATK declares that archived packages are made available for free to academic researchers under a limited license for non-commercial use. If you need to use one of these packages for commercial use. 
https://software.broadinstitute.org/gatk/download/archive """ ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/106?version=1" ; + schema1:isBasedOn "https://github.com/inab/Wetlab2Variations/tree/eosc-life/nextflow" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for VariantCaller_GATK3.6" ; + schema1:sdDatePublished "2024-08-05 10:33:15 +0100" ; + schema1:url "https://workflowhub.eu/workflows/106/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 21588 ; + schema1:creator ; + schema1:dateCreated "2021-02-18T14:20:09Z" ; + schema1:dateModified "2021-04-19T15:39:56Z" ; + schema1:description """Rare disease researchers workflow is that they submit their raw data (fastq), run the mapping and variant calling RD-Connect pipeline and obtain unannotated gvcf files to further submit to the RD-Connect GPAP or analyse on their own.\r +\r +This demonstrator focuses on the variant calling pipeline. The raw genomic data is processed using the RD-Connect pipeline ([Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/27604516)) running on the standards (GA4GH) compliant, interoperable container orchestration platform.\r +\r +This demonstrator will be aligned with the current implementation study on [Development of Architecture for Software Containers at ELIXIR and its use by EXCELERATE use-case communities](docs/Appendix%201%20-%20Project%20Plan%202018-biocontainers%2020171117.pdf) \r +\r +For this implementation, different steps are required:\r +\r +1. Adapt the pipeline to CWL and dockerise elements \r +2. Align with IS efforts on software containers to package the different components (Nextflow) \r +3. Submit trio of Illumina NA12878 Platinum Genome or Exome to the GA4GH platform cloud (by Aspera or ftp server)\r +4. 
Run the RD-Connect pipeline on the container platform\r +5. Return corresponding gvcf files\r +6. OPTIONAL: annotate and update to RD-Connect playground instance\r +\r +N.B: The demonstrator might have some manual steps, which will not be in production. \r +\r +## RD-Connect pipeline\r +\r +Detailed information about the RD-Connect pipeline can be found in [Laurie et al., 2016](https://www.ncbi.nlm.nih.gov/pubmed/?term=27604516)\r +\r +![alt text](https://raw.githubusercontent.com/inab/Wetlab2Variations/eosc-life/docs/RD-Connect_pipeline.jpg)\r +\r +## The applications\r +\r +**1\\. Name of the application: Adaptor removal**\r +Function: remove sequencing adaptors \r +Container (readiness status, location, version): [cutadapt (v.1.18)](https://hub.docker.com/r/cnag/cutadapt) \r +Required resources in cores and RAM: current container size 169MB \r +Input data (amount, format, directory..): raw fastq \r +Output data: paired fastq without adaptors \r +\r +**2\\. Name of the application: Mapping and bam sorting**\r +Function: align data to reference genome \r +Container : [bwa-mem (v.0.7.17)](https://hub.docker.com/r/cnag/bwa) / [Sambamba (v. 0.6.8 )](https://hub.docker.com/r/cnag/sambamba)(or samtools) \r +Resources :current container size 111MB / 32MB \r +Input data: paired fastq without adaptors \r +Output data: sorted bam \r +\r +**3\\. Name of the application: MarkDuplicates** \r +Function: Mark (and remove) duplicates \r +Container: [Picard (v.2.18.25)](https://hub.docker.com/r/cnag/picard)\r +Resources: current container size 261MB \r +Input data:sorted bam \r +Output data: Sorted bam with marked (or removed) duplicates \r +\r +**4\\. 
Name of the application: Base quality recalibration (BQSR)** \r +Function: Base quality recalibration \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data: Sorted bam with marked (or removed) duplicates \r +Output data: Sorted bam with marked duplicates & base quality recalculated \r +\r +**5\\. Name of the application: Variant calling** \r +Function: variant calling \r +Container: [GATK (v.3.6-0)](https://hub.docker.com/r/cnag/gatk)\r +Resources: current container size 270MB \r +Input data:Sorted bam with marked duplicates & base quality recalculated \r +Output data: unannotated gvcf per sample \r +\r +**6\\. (OPTIONAL)Name of the application: Quality of the fastq** \r +Function: report on the sequencing quality \r +Container: [fastqc 0.11.8](https://hub.docker.com/r/cnag/fastqc)\r +Resources: current container size 173MB \r +Input data: raw fastq \r +Output data: QC report \r +\r +## Licensing\r +\r +GATK declares that archived packages are made available for free to academic researchers under a limited license for non-commercial use. If you need to use one of these packages for commercial use. https://software.broadinstitute.org/gatk/download/archive """ ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/106?version=2" ; + schema1:keywords "Nextflow, variant_calling" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "VariantCaller_GATK3.6" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/106?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 83411 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# ![sanger-tol/ensemblgenedownload](docs/images/sanger-tol-ensemblgenedownload_logo.png)\r +\r +[![GitHub Actions CI Status](https://github.com/sanger-tol/ensemblgenedownload/workflows/nf-core%20CI/badge.svg)](https://github.com/sanger-tol/ensemblgenedownload/actions?query=workflow%3A%22nf-core+CI%22)\r +\r +\r +\r +[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.7183206-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.7183206)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.04.0-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +\r +[![Get help on Slack](http://img.shields.io/badge/slack-SangerTreeofLife%20%23pipelines-4A154B?labelColor=000000&logo=slack)](https://SangerTreeofLife.slack.com/channels/pipelines)\r +[![Follow on Twitter](http://img.shields.io/badge/twitter-%40sangertol-1DA1F2?labelColor=000000&logo=twitter)](https://twitter.com/sangertol)\r +[![Watch on YouTube](http://img.shields.io/badge/youtube-tree--of--life-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/channel/UCFeDpvjU58SA9V0ycRXejhA)\r +\r +## Introduction\r +\r +**sanger-tol/ensemblgenedownload** is a pipeline that downloads gene annotations from Ensembl into the Tree of Life directory structure.\r +\r +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. 
It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +On release, automated continuous integration tests run the pipeline on a full-sized dataset on the GitHub CI infrastructure. This ensures that the pipeline runs in a third-party environment, and has sensible resource allocation defaults set to run on real-world datasets.\r +\r +## Pipeline summary\r +\r +## Overview\r +\r +The pipeline takes a CSV file that contains assembly accession number, Ensembl species names (as they may differ from Tree of Life ones !), output directories, and geneset versions.\r +Assembly accession numbers are optional. If missing, the pipeline assumes it can be retrieved from files named `ACCESSION` in the standard location on disk.\r +The pipeline downloads the Fasta files of the genes (cdna, cds, and protein sequences) as well as the GFF3 file.\r +All files are compressed with `bgzip`, and indexed with `samtools faidx` or `tabix`.\r +\r +Steps involved:\r +\r +- Download from Ensembl the GFF3 file, and the sequences of the genes in\r + Fasta format.\r +- Compress and index all Fasta files with `bgzip`, `samtools faidx`, and\r + `samtools dict`.\r +- Compress and index the GFF3 file with `bgzip` and `tabix`.\r +\r +## Quick Start\r +\r +1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=22.04.0`)\r +\r +2. 
Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) (you can follow [this tutorial](https://singularity-tutorial.github.io/01-installation/)), [`Podman`](https://podman.io/), [`Shifter`](https://nersc.gitlab.io/development/shifter/how-to-use/) or [`Charliecloud`](https://hpc.github.io/charliecloud/) for full pipeline reproducibility _(you can use [`Conda`](https://conda.io/miniconda.html) both to install Nextflow itself and also to manage software within pipelines. Please only use it within pipelines as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_.\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```bash\r + nextflow run sanger-tol/ensemblgenedownload -profile test,YOURPROFILE --outdir \r + ```\r +\r + Note that some form of configuration will be needed so that Nextflow knows how to fetch the required software. This is usually done in the form of a config profile (`YOURPROFILE` in the example command above). You can chain multiple config profiles in a comma-separated string.\r +\r + > - The pipeline comes with config profiles called `docker`, `singularity`, `podman`, `shifter`, `charliecloud` and `conda` which instruct the pipeline to use the named tool for software management. For example, `-profile test,docker`.\r + > - Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment.\r + > - If you are using `singularity`, please use the [`nf-core download`](https://nf-co.re/tools/#downloading-pipelines-for-offline-use) command to download images first, before running the pipeline. 
Setting the [`NXF_SINGULARITY_CACHEDIR` or `singularity.cacheDir`](https://www.nextflow.io/docs/latest/singularity.html?#singularity-docker-hub) Nextflow options enables you to store and re-use the images from a central location for future pipeline runs.\r + > - If you are using `conda`, it is highly recommended to use the [`NXF_CONDA_CACHEDIR` or `conda.cacheDir`](https://www.nextflow.io/docs/latest/conda.html) settings to store the environments in a central location for future pipeline runs.\r +\r +4. Start running your own analysis!\r +\r + ```console\r + nextflow run sanger-tol/ensemblgenedownload --input $PWD/assets/samplesheet.csv --outdir -profile \r + ```\r +\r +## Documentation\r +\r +The sanger-tol/ensemblgenedownload pipeline comes with documentation about the pipeline [usage](docs/usage.md) and [output](docs/output.md).\r +\r +## Credits\r +\r +sanger-tol/ensemblgenedownload was originally written by @muffato.\r +\r +## Contributions and Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +For further information or help, don't hesitate to get in touch on the [Slack `#pipelines` channel](https://sangertreeoflife.slack.com/channels/pipelines). 
Please [create an issue](https://github.com/sanger-tol/ensemblgenedownload/issues/new/choose) on GitHub if you are not on the Sanger slack channel.\r +\r +## Citations\r +\r +If you use sanger-tol/ensemblgenedownload for your analysis, please cite it using the following doi: [10.5281/zenodo.7183206](https://doi.org/10.5281/zenodo.7183206)\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +This pipeline uses code and infrastructure developed and maintained by the [nf-core](https://nf-co.re) community, reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/666?version=1" ; + schema1:isBasedOn "https://github.com/sanger-tol/ensemblgenedownload" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for sanger-tol/insdcdownload v1.0.1 - Hefty mûmakil" ; + schema1:sdDatePublished "2024-08-05 10:27:10 +0100" ; + schema1:url "https://workflowhub.eu/workflows/666/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1705 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-14T12:03:59Z" ; + schema1:dateModified "2023-11-14T12:03:59Z" ; + schema1:description """# ![sanger-tol/ensemblgenedownload](docs/images/sanger-tol-ensemblgenedownload_logo.png)\r +\r +[![GitHub Actions CI Status](https://github.com/sanger-tol/ensemblgenedownload/workflows/nf-core%20CI/badge.svg)](https://github.com/sanger-tol/ensemblgenedownload/actions?query=workflow%3A%22nf-core+CI%22)\r +\r +\r +\r +[![Cite with Zenodo](http://img.shields.io/badge/DOI-10.5281/zenodo.7183206-1073c8?labelColor=000000)](https://doi.org/10.5281/zenodo.7183206)\r +\r +[![Nextflow](https://img.shields.io/badge/nextflow%20DSL2-%E2%89%A522.04.0-23aa62.svg)](https://www.nextflow.io/)\r +[![run with conda](http://img.shields.io/badge/run%20with-conda-3EB049?labelColor=000000&logo=anaconda)](https://docs.conda.io/en/latest/)\r +[![run with docker](https://img.shields.io/badge/run%20with-docker-0db7ed?labelColor=000000&logo=docker)](https://www.docker.com/)\r +[![run with singularity](https://img.shields.io/badge/run%20with-singularity-1d355c.svg?labelColor=000000)](https://sylabs.io/docs/)\r +\r +[![Get help on Slack](http://img.shields.io/badge/slack-SangerTreeofLife%20%23pipelines-4A154B?labelColor=000000&logo=slack)](https://SangerTreeofLife.slack.com/channels/pipelines)\r +[![Follow on Twitter](http://img.shields.io/badge/twitter-%40sangertol-1DA1F2?labelColor=000000&logo=twitter)](https://twitter.com/sangertol)\r +[![Watch on YouTube](http://img.shields.io/badge/youtube-tree--of--life-FF0000?labelColor=000000&logo=youtube)](https://www.youtube.com/channel/UCFeDpvjU58SA9V0ycRXejhA)\r +\r +## Introduction\r +\r +**sanger-tol/ensemblgenedownload** is a pipeline that downloads gene annotations from Ensembl into the Tree of Life directory structure.\r +\r +The 
pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +On release, automated continuous integration tests run the pipeline on a full-sized dataset on the GitHub CI infrastructure. This ensures that the pipeline runs in a third-party environment, and has sensible resource allocation defaults set to run on real-world datasets.\r +\r +## Pipeline summary\r +\r +## Overview\r +\r +The pipeline takes a CSV file that contains assembly accession number, Ensembl species names (as they may differ from Tree of Life ones !), output directories, and geneset versions.\r +Assembly accession numbers are optional. If missing, the pipeline assumes it can be retrieved from files named `ACCESSION` in the standard location on disk.\r +The pipeline downloads the Fasta files of the genes (cdna, cds, and protein sequences) as well as the GFF3 file.\r +All files are compressed with `bgzip`, and indexed with `samtools faidx` or `tabix`.\r +\r +Steps involved:\r +\r +- Download from Ensembl the GFF3 file, and the sequences of the genes in\r + Fasta format.\r +- Compress and index all Fasta files with `bgzip`, `samtools faidx`, and\r + `samtools dict`.\r +- Compress and index the GFF3 file with `bgzip` and `tabix`.\r +\r +## Quick Start\r +\r +1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=22.04.0`)\r +\r +2. 
Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) (you can follow [this tutorial](https://singularity-tutorial.github.io/01-installation/)), [`Podman`](https://podman.io/), [`Shifter`](https://nersc.gitlab.io/development/shifter/how-to-use/) or [`Charliecloud`](https://hpc.github.io/charliecloud/) for full pipeline reproducibility _(you can use [`Conda`](https://conda.io/miniconda.html) both to install Nextflow itself and also to manage software within pipelines. Please only use it within pipelines as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_.\r +\r +3. Download the pipeline and test it on a minimal dataset with a single command:\r +\r + ```bash\r + nextflow run sanger-tol/ensemblgenedownload -profile test,YOURPROFILE --outdir \r + ```\r +\r + Note that some form of configuration will be needed so that Nextflow knows how to fetch the required software. This is usually done in the form of a config profile (`YOURPROFILE` in the example command above). You can chain multiple config profiles in a comma-separated string.\r +\r + > - The pipeline comes with config profiles called `docker`, `singularity`, `podman`, `shifter`, `charliecloud` and `conda` which instruct the pipeline to use the named tool for software management. For example, `-profile test,docker`.\r + > - Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment.\r + > - If you are using `singularity`, please use the [`nf-core download`](https://nf-co.re/tools/#downloading-pipelines-for-offline-use) command to download images first, before running the pipeline. 
Setting the [`NXF_SINGULARITY_CACHEDIR` or `singularity.cacheDir`](https://www.nextflow.io/docs/latest/singularity.html?#singularity-docker-hub) Nextflow options enables you to store and re-use the images from a central location for future pipeline runs.\r + > - If you are using `conda`, it is highly recommended to use the [`NXF_CONDA_CACHEDIR` or `conda.cacheDir`](https://www.nextflow.io/docs/latest/conda.html) settings to store the environments in a central location for future pipeline runs.\r +\r +4. Start running your own analysis!\r +\r + ```console\r + nextflow run sanger-tol/ensemblgenedownload --input $PWD/assets/samplesheet.csv --outdir -profile \r + ```\r +\r +## Documentation\r +\r +The sanger-tol/ensemblgenedownload pipeline comes with documentation about the pipeline [usage](docs/usage.md) and [output](docs/output.md).\r +\r +## Credits\r +\r +sanger-tol/ensemblgenedownload was originally written by @muffato.\r +\r +## Contributions and Support\r +\r +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md).\r +\r +For further information or help, don't hesitate to get in touch on the [Slack `#pipelines` channel](https://sangertreeoflife.slack.com/channels/pipelines). 
Please [create an issue](https://github.com/sanger-tol/ensemblgenedownload/issues/new/choose) on GitHub if you are not on the Sanger slack channel.\r +\r +## Citations\r +\r +If you use sanger-tol/ensemblgenedownload for your analysis, please cite it using the following doi: [10.5281/zenodo.7183206](https://doi.org/10.5281/zenodo.7183206)\r +\r +An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file.\r +\r +This pipeline uses code and infrastructure developed and maintained by the [nf-core](https://nf-co.re) community, reused here under the [MIT license](https://github.com/nf-core/tools/blob/master/LICENSE).\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "sanger-tol/insdcdownload v1.0.1 - Hefty mûmakil" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/666?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# BridgeDb tutorial: Gene HGNC name to Ensembl identifier\r +\r +This tutorial explains how to use the BridgeDb identifier mapping service to translate HGNC names to Ensembl identifiers. 
This step is part of the OpenRiskNet use case to link Adverse Outcome Pathways to [WikiPathways](https://wikipathways.org/).\r +\r +First we need to load the Python library to allow calls to the [BridgeDb REST webservice](http://bridgedb.prod.openrisknet.org/swagger/):\r +\r +\r +```python\r +import requests\r +```\r +\r +Let's assume we're interested in the gene with HGNC MECP2 (FIXME: look up a gene in AOPWiki), the API call to make mappings is given below as `callUrl`. Here, the `H` indicates that the query (`MECP2`) is an HGNC symbol:\r +\r +\r +```python\r +callUrl = 'http://bridgedb.prod.openrisknet.org/Human/xrefs/H/MECP2'\r +```\r +\r +The default call returns all identifiers, not just for Ensembl:\r +\r +\r +```python\r +response = requests.get(callUrl)\r +response.text\r +```\r +\r +\r +\r +\r + 'GO:0001964\\tGeneOntology\\nuc065cav.1\\tUCSC Genome Browser\\n312750\\tOMIM\\nGO:0042551\\tGeneOntology\\nuc065car.1\\tUCSC Genome Browser\\nA0A087X1U4\\tUniprot-TrEMBL\\n4204\\tWikiGenes\\nGO:0043524\\tGeneOntology\\nILMN_1702715\\tIllumina\\n34355_at\\tAffy\\nGO:0007268\\tGeneOntology\\nMECP2\\tHGNC\\nuc065caz.1\\tUCSC Genome Browser\\nA_33_P3339036\\tAgilent\\nGO:0006576\\tGeneOntology\\nuc065cbg.1\\tUCSC Genome Browser\\nGO:0006342\\tGeneOntology\\n300496\\tOMIM\\nGO:0035176\\tGeneOntology\\nuc065cbc.1\\tUCSC Genome Browser\\nGO:0033555\\tGeneOntology\\nGO:0045892\\tGeneOntology\\nA_23_P114361\\tAgilent\\nGO:0045893\\tGeneOntology\\nENSG00000169057\\tEnsembl\\nGO:0090063\\tGeneOntology\\nGO:0005515\\tGeneOntology\\nGO:0002087\\tGeneOntology\\nGO:0005634\\tGeneOntology\\nGO:0007416\\tGeneOntology\\nGO:0008104\\tGeneOntology\\nGO:0042826\\tGeneOntology\\nGO:0007420\\tGeneOntology\\nGO:0035067\\tGeneOntology\\n300005\\tOMIM\\nNP_001104262\\tRefSeq\\nA0A087WVW7\\tUniprot-TrEMBL\\nNP_004983\\tRefSeq\\nGO:0046470\\tGeneOntology\\nGO:0010385\\tGeneOntology\\n11722682_at\\tAffy\\nGO:0051965\\tGeneOntology\\nNM_001316337\\tRefSeq\\nuc065caw.1\\tUCSC Genome 
Browser\\nA0A0D9SFX7\\tUniprot-TrEMBL\\nA0A140VKC4\\tUniprot-TrEMBL\\nGO:0003723\\tGeneOntology\\nGO:0019233\\tGeneOntology\\nGO:0001666\\tGeneOntology\\nGO:0003729\\tGeneOntology\\nGO:0021591\\tGeneOntology\\nuc065cas.1\\tUCSC Genome Browser\\nGO:0019230\\tGeneOntology\\nGO:0003682\\tGeneOntology\\nGO:0001662\\tGeneOntology\\nuc065cbh.1\\tUCSC Genome Browser\\nX99687_at\\tAffy\\nGO:0008344\\tGeneOntology\\nGO:0009791\\tGeneOntology\\nuc065cbd.1\\tUCSC Genome Browser\\nGO:0019904\\tGeneOntology\\nGO:0030182\\tGeneOntology\\nGO:0035197\\tGeneOntology\\n8175998\\tAffy\\nGO:0016358\\tGeneOntology\\nNM_004992\\tRefSeq\\nGO:0003714\\tGeneOntology\\nGO:0005739\\tGeneOntology\\nGO:0005615\\tGeneOntology\\nGO:0005737\\tGeneOntology\\nuc004fjv.3\\tUCSC Genome Browser\\n202617_s_at\\tAffy\\nGO:0050905\\tGeneOntology\\nGO:0008327\\tGeneOntology\\nD3YJ43\\tUniprot-TrEMBL\\nGO:0003677\\tGeneOntology\\nGO:0006541\\tGeneOntology\\nGO:0040029\\tGeneOntology\\nA_33_P3317211\\tAgilent\\nNP_001303266\\tRefSeq\\n11722683_a_at\\tAffy\\nGO:0008211\\tGeneOntology\\nGO:0051151\\tGeneOntology\\nNM_001110792\\tRefSeq\\nX89430_at\\tAffy\\nGO:2000820\\tGeneOntology\\nuc065cat.1\\tUCSC Genome Browser\\nGO:0003700\\tGeneOntology\\nGO:0047485\\tGeneOntology\\n4204\\tEntrez Gene\\nGO:0009405\\tGeneOntology\\nA0A0D9SEX1\\tUniprot-TrEMBL\\nGO:0098794\\tGeneOntology\\n3C2I\\tPDB\\nHs.200716\\tUniGene\\nGO:0000792\\tGeneOntology\\nuc065cax.1\\tUCSC Genome Browser\\n300055\\tOMIM\\n5BT2\\tPDB\\nGO:0006020\\tGeneOntology\\nGO:0031175\\tGeneOntology\\nuc065cbe.1\\tUCSC Genome Browser\\nGO:0008284\\tGeneOntology\\nuc065cba.1\\tUCSC Genome Browser\\nGO:0060291\\tGeneOntology\\n202618_s_at\\tAffy\\nGO:0016573\\tGeneOntology\\n17115453\\tAffy\\nA0A1B0GTV0\\tUniprot-TrEMBL\\nuc065cbi.1\\tUCSC Genome Browser\\nGO:0048167\\tGeneOntology\\nGO:0007616\\tGeneOntology\\nGO:0016571\\tGeneOntology\\nuc004fjw.3\\tUCSC Genome 
Browser\\nGO:0007613\\tGeneOntology\\nGO:0007612\\tGeneOntology\\nGO:0021549\\tGeneOntology\\n11722684_a_at\\tAffy\\nGO:0001078\\tGeneOntology\\nX94628_rna1_s_at\\tAffy\\nGO:0007585\\tGeneOntology\\nGO:0010468\\tGeneOntology\\nGO:0031061\\tGeneOntology\\nA_24_P237486\\tAgilent\\nGO:0050884\\tGeneOntology\\nGO:0000930\\tGeneOntology\\nGO:0005829\\tGeneOntology\\nuc065cau.1\\tUCSC Genome Browser\\nH7BY72\\tUniprot-TrEMBL\\n202616_s_at\\tAffy\\nGO:0006355\\tGeneOntology\\nuc065cay.1\\tUCSC Genome Browser\\nGO:0010971\\tGeneOntology\\n300673\\tOMIM\\nGO:0008542\\tGeneOntology\\nGO:0060079\\tGeneOntology\\nuc065cbf.1\\tUCSC Genome Browser\\nGO:0006122\\tGeneOntology\\nuc065cbb.1\\tUCSC Genome Browser\\nGO:0007052\\tGeneOntology\\nC9JH89\\tUniprot-TrEMBL\\nB5MCB4\\tUniprot-TrEMBL\\nGO:0032048\\tGeneOntology\\nGO:0050432\\tGeneOntology\\nGO:0001976\\tGeneOntology\\nI6LM39\\tUniprot-TrEMBL\\nGO:0005813\\tGeneOntology\\nILMN_1682091\\tIllumina\\nP51608\\tUniprot-TrEMBL\\n1QK9\\tPDB\\nGO:0006349\\tGeneOntology\\nGO:1900114\\tGeneOntology\\nGO:0000122\\tGeneOntology\\nGO:0006351\\tGeneOntology\\nGO:0008134\\tGeneOntology\\nILMN_1824898\\tIllumina\\n300260\\tOMIM\\n0006510725\\tIllumina\\n'\r +\r +\r +\r +You can also see the results are returned as a TSV file, consisting of two columns, the identifier and the matching database.\r +\r +We will want to convert this reply into a Python dictionary (with the identifier as key, as one database may have multiple identifiers):\r +\r +\r +```python\r +lines = response.text.split("\\n")\r +mappings = {}\r +for line in lines:\r + if ('\\t' in line):\r + tuple = line.split('\\t')\r + identifier = tuple[0]\r + database = tuple[1]\r + if (database == "Ensembl"):\r + mappings[identifier] = database\r +\r +print(mappings)\r +```\r +\r + {'ENSG00000169057': 'Ensembl'}\r +\r +\r +Alternatively, we can restrict the return values from the BridgeDb webservice to just return Ensembl identifiers (system code `En`). 
For this, we add the `?dataSource=En` call parameter:\r +\r +\r +```python\r +callUrl = 'http://bridgedb-swagger.prod.openrisknet.org/Human/xrefs/H/MECP2?dataSource=En'\r +response = requests.get(callUrl)\r +response.text\r +```\r +\r +\r +\r +\r + 'ENSG00000169057\\tEnsembl\\n'\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/326?version=1" ; + schema1:isBasedOn "https://github.com/OpenRiskNet/notebooks.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for BridgeDb tutorial: Gene HGNC name to Ensembl identifier" ; + schema1:sdDatePublished "2024-08-05 10:32:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/326/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8122 ; + schema1:creator , + ; + schema1:dateCreated "2022-04-06T13:02:33Z" ; + schema1:dateModified "2022-04-06T13:02:33Z" ; + schema1:description """# BridgeDb tutorial: Gene HGNC name to Ensembl identifier\r +\r +This tutorial explains how to use the BridgeDb identifier mapping service to translate HGNC names to Ensembl identifiers. This step is part of the OpenRiskNet use case to link Adverse Outcome Pathways to [WikiPathways](https://wikipathways.org/).\r +\r +First we need to load the Python library to allow calls to the [BridgeDb REST webservice](http://bridgedb.prod.openrisknet.org/swagger/):\r +\r +\r +```python\r +import requests\r +```\r +\r +Let's assume we're interested in the gene with HGNC MECP2 (FIXME: look up a gene in AOPWiki), the API call to make mappings is given below as `callUrl`. 
Here, the `H` indicates that the query (`MECP2`) is an HGNC symbol:\r +\r +\r +```python\r +callUrl = 'http://bridgedb.prod.openrisknet.org/Human/xrefs/H/MECP2'\r +```\r +\r +The default call returns all identifiers, not just for Ensembl:\r +\r +\r +```python\r +response = requests.get(callUrl)\r +response.text\r +```\r +\r +\r +\r +\r + 'GO:0001964\\tGeneOntology\\nuc065cav.1\\tUCSC Genome Browser\\n312750\\tOMIM\\nGO:0042551\\tGeneOntology\\nuc065car.1\\tUCSC Genome Browser\\nA0A087X1U4\\tUniprot-TrEMBL\\n4204\\tWikiGenes\\nGO:0043524\\tGeneOntology\\nILMN_1702715\\tIllumina\\n34355_at\\tAffy\\nGO:0007268\\tGeneOntology\\nMECP2\\tHGNC\\nuc065caz.1\\tUCSC Genome Browser\\nA_33_P3339036\\tAgilent\\nGO:0006576\\tGeneOntology\\nuc065cbg.1\\tUCSC Genome Browser\\nGO:0006342\\tGeneOntology\\n300496\\tOMIM\\nGO:0035176\\tGeneOntology\\nuc065cbc.1\\tUCSC Genome Browser\\nGO:0033555\\tGeneOntology\\nGO:0045892\\tGeneOntology\\nA_23_P114361\\tAgilent\\nGO:0045893\\tGeneOntology\\nENSG00000169057\\tEnsembl\\nGO:0090063\\tGeneOntology\\nGO:0005515\\tGeneOntology\\nGO:0002087\\tGeneOntology\\nGO:0005634\\tGeneOntology\\nGO:0007416\\tGeneOntology\\nGO:0008104\\tGeneOntology\\nGO:0042826\\tGeneOntology\\nGO:0007420\\tGeneOntology\\nGO:0035067\\tGeneOntology\\n300005\\tOMIM\\nNP_001104262\\tRefSeq\\nA0A087WVW7\\tUniprot-TrEMBL\\nNP_004983\\tRefSeq\\nGO:0046470\\tGeneOntology\\nGO:0010385\\tGeneOntology\\n11722682_at\\tAffy\\nGO:0051965\\tGeneOntology\\nNM_001316337\\tRefSeq\\nuc065caw.1\\tUCSC Genome Browser\\nA0A0D9SFX7\\tUniprot-TrEMBL\\nA0A140VKC4\\tUniprot-TrEMBL\\nGO:0003723\\tGeneOntology\\nGO:0019233\\tGeneOntology\\nGO:0001666\\tGeneOntology\\nGO:0003729\\tGeneOntology\\nGO:0021591\\tGeneOntology\\nuc065cas.1\\tUCSC Genome Browser\\nGO:0019230\\tGeneOntology\\nGO:0003682\\tGeneOntology\\nGO:0001662\\tGeneOntology\\nuc065cbh.1\\tUCSC Genome Browser\\nX99687_at\\tAffy\\nGO:0008344\\tGeneOntology\\nGO:0009791\\tGeneOntology\\nuc065cbd.1\\tUCSC Genome 
Browser\\nGO:0019904\\tGeneOntology\\nGO:0030182\\tGeneOntology\\nGO:0035197\\tGeneOntology\\n8175998\\tAffy\\nGO:0016358\\tGeneOntology\\nNM_004992\\tRefSeq\\nGO:0003714\\tGeneOntology\\nGO:0005739\\tGeneOntology\\nGO:0005615\\tGeneOntology\\nGO:0005737\\tGeneOntology\\nuc004fjv.3\\tUCSC Genome Browser\\n202617_s_at\\tAffy\\nGO:0050905\\tGeneOntology\\nGO:0008327\\tGeneOntology\\nD3YJ43\\tUniprot-TrEMBL\\nGO:0003677\\tGeneOntology\\nGO:0006541\\tGeneOntology\\nGO:0040029\\tGeneOntology\\nA_33_P3317211\\tAgilent\\nNP_001303266\\tRefSeq\\n11722683_a_at\\tAffy\\nGO:0008211\\tGeneOntology\\nGO:0051151\\tGeneOntology\\nNM_001110792\\tRefSeq\\nX89430_at\\tAffy\\nGO:2000820\\tGeneOntology\\nuc065cat.1\\tUCSC Genome Browser\\nGO:0003700\\tGeneOntology\\nGO:0047485\\tGeneOntology\\n4204\\tEntrez Gene\\nGO:0009405\\tGeneOntology\\nA0A0D9SEX1\\tUniprot-TrEMBL\\nGO:0098794\\tGeneOntology\\n3C2I\\tPDB\\nHs.200716\\tUniGene\\nGO:0000792\\tGeneOntology\\nuc065cax.1\\tUCSC Genome Browser\\n300055\\tOMIM\\n5BT2\\tPDB\\nGO:0006020\\tGeneOntology\\nGO:0031175\\tGeneOntology\\nuc065cbe.1\\tUCSC Genome Browser\\nGO:0008284\\tGeneOntology\\nuc065cba.1\\tUCSC Genome Browser\\nGO:0060291\\tGeneOntology\\n202618_s_at\\tAffy\\nGO:0016573\\tGeneOntology\\n17115453\\tAffy\\nA0A1B0GTV0\\tUniprot-TrEMBL\\nuc065cbi.1\\tUCSC Genome Browser\\nGO:0048167\\tGeneOntology\\nGO:0007616\\tGeneOntology\\nGO:0016571\\tGeneOntology\\nuc004fjw.3\\tUCSC Genome Browser\\nGO:0007613\\tGeneOntology\\nGO:0007612\\tGeneOntology\\nGO:0021549\\tGeneOntology\\n11722684_a_at\\tAffy\\nGO:0001078\\tGeneOntology\\nX94628_rna1_s_at\\tAffy\\nGO:0007585\\tGeneOntology\\nGO:0010468\\tGeneOntology\\nGO:0031061\\tGeneOntology\\nA_24_P237486\\tAgilent\\nGO:0050884\\tGeneOntology\\nGO:0000930\\tGeneOntology\\nGO:0005829\\tGeneOntology\\nuc065cau.1\\tUCSC Genome Browser\\nH7BY72\\tUniprot-TrEMBL\\n202616_s_at\\tAffy\\nGO:0006355\\tGeneOntology\\nuc065cay.1\\tUCSC Genome 
Browser\\nGO:0010971\\tGeneOntology\\n300673\\tOMIM\\nGO:0008542\\tGeneOntology\\nGO:0060079\\tGeneOntology\\nuc065cbf.1\\tUCSC Genome Browser\\nGO:0006122\\tGeneOntology\\nuc065cbb.1\\tUCSC Genome Browser\\nGO:0007052\\tGeneOntology\\nC9JH89\\tUniprot-TrEMBL\\nB5MCB4\\tUniprot-TrEMBL\\nGO:0032048\\tGeneOntology\\nGO:0050432\\tGeneOntology\\nGO:0001976\\tGeneOntology\\nI6LM39\\tUniprot-TrEMBL\\nGO:0005813\\tGeneOntology\\nILMN_1682091\\tIllumina\\nP51608\\tUniprot-TrEMBL\\n1QK9\\tPDB\\nGO:0006349\\tGeneOntology\\nGO:1900114\\tGeneOntology\\nGO:0000122\\tGeneOntology\\nGO:0006351\\tGeneOntology\\nGO:0008134\\tGeneOntology\\nILMN_1824898\\tIllumina\\n300260\\tOMIM\\n0006510725\\tIllumina\\n'\r +\r +\r +\r +You can also see the results are returned as a TSV file, consisting of two columns, the identifier and the matching database.\r +\r +We will want to convert this reply into a Python dictionary (with the identifier as key, as one database may have multiple identifiers):\r +\r +\r +```python\r +lines = response.text.split("\\n")\r +mappings = {}\r +for line in lines:\r + if ('\\t' in line):\r + tuple = line.split('\\t')\r + identifier = tuple[0]\r + database = tuple[1]\r + if (database == "Ensembl"):\r + mappings[identifier] = database\r +\r +print(mappings)\r +```\r +\r + {'ENSG00000169057': 'Ensembl'}\r +\r +\r +Alternatively, we can restrict the return values from the BridgeDb webservice to just return Ensembl identifiers (system code `En`). 
For this, we add the `?dataSource=En` call parameter:\r +\r +\r +```python\r +callUrl = 'http://bridgedb-swagger.prod.openrisknet.org/Human/xrefs/H/MECP2?dataSource=En'\r +response = requests.get(callUrl)\r +response.text\r +```\r +\r +\r +\r +\r + 'ENSG00000169057\\tEnsembl\\n'\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/326?version=2" ; + schema1:keywords "Toxicology, jupyter" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "BridgeDb tutorial: Gene HGNC name to Ensembl identifier" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/326?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2023-09-07T08:42:48.614290" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/polish-with-long-reads" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "polish-with-long-reads/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.10" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=17" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=17" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9564 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:12Z" ; + schema1:dateModified "2024-06-11T12:55:12Z" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=17" ; + schema1:version 17 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-08-05 10:23:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9902 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:58Z" ; + schema1:dateModified "2024-06-11T12:54:58Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + ; + schema1:description "Implementation of the IMPaCT-Data quality control workflow for WES data in nf-core/sarek workflow. Sarek is an analysis pipeline to detect germline or somatic variants (pre-processing, variant calling and annotation) from WGS / targeted sequencing." ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.1030.1" ; + schema1:isBasedOn "https://github.com/EGA-archive/sarek-IMPaCT-data-QC.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for IMPaCT-Data quality control workflow implementation in nf-core/Sarek" ; + schema1:sdDatePublished "2024-08-05 10:22:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1030/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 19394 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-05T15:05:57Z" ; + schema1:dateModified "2024-06-12T13:08:47Z" ; + schema1:description "Implementation of the IMPaCT-Data quality control workflow for WES data in nf-core/sarek workflow. Sarek is an analysis pipeline to detect germline or somatic variants (pre-processing, variant calling and annotation) from WGS / targeted sequencing." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1030?version=2" ; + schema1:keywords "Bioinformatics, Nextflow, variant calling, wes, WGS, NGS, EGA-archive, quality control" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "IMPaCT-Data quality control workflow implementation in nf-core/Sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1030?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.120.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:31:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/120/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 53050 ; + schema1:creator , + ; + schema1:dateCreated "2021-07-01T14:51:52Z" ; + schema1:dateModified "2022-09-15T07:42:03Z" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/120?version=5" ; + schema1:isPartOf , + , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/120?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=24" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-08-05 10:23:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=24" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 21667 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:17Z" ; + schema1:dateModified "2024-06-11T12:55:17Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=24" ; + schema1:version 24 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-23T13:33:07.970641" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/gromacs-mmgbsa" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "gromacs-mmgbsa/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-08-05 10:22:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=10" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9999 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:02Z" ; + schema1:dateModified "2024-06-11T12:55:02Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=10" ; + schema1:version 10 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """Assembly polishing subworkflow: Racon polishing with short reads\r +\r +Inputs: short reads and assembly (usually pre-polished with other tools first, e.g. Racon + long reads; Medaka)\r +\r +Workflow steps: \r +* minimap2: short reads (R1 only) are mapped to the assembly => overlaps.paf. 
Minimap2 setting is for short reads.\r +* overlaps + short reads + assembly => Racon => polished assembly 1\r +* using polished assembly 1 as input; repeat minimap2 + racon => polished assembly 2\r +* Racon short-read polished assembly => Fasta statistics\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.228.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Racon polish with Illumina reads, x2" ; + schema1:sdDatePublished "2024-08-05 10:32:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/228/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13895 ; + schema1:creator ; + schema1:dateCreated "2021-11-08T05:50:40Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """Assembly polishing subworkflow: Racon polishing with short reads\r +\r +Inputs: short reads and assembly (usually pre-polished with other tools first, e.g. Racon + long reads; Medaka)\r +\r +Workflow steps: \r +* minimap2: short reads (R1 only) are mapped to the assembly => overlaps.paf. 
Minimap2 setting is for short reads.\r +* overlaps + short reads + assembly => Racon => polished assembly 1\r +* using polished assembly 1 as input; repeat minimap2 + racon => polished assembly 2\r +* Racon short-read polished assembly => Fasta statistics\r +\r +Infrastructure_deployment_metadata: Galaxy Australia (Galaxy)""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "Large-genome-assembly" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Racon polish with Illumina reads, x2" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/228?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 226509 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.127.5" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (Cluster90)" ; + schema1:sdDatePublished "2024-08-05 10:30:58 
+0100" ; + schema1:url "https://workflowhub.eu/workflows/127/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 39368 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-07-26T09:27:52Z" ; + schema1:dateModified "2023-07-26T09:28:39Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in 
Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/127?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (Cluster90)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_virtual-screening/blob/master/biobb_wf_virtual-screening/notebooks/clusterBindingSite/wf_vs_clusterBindingSite.ipynb" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-08-05 10:22:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=9" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8364 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:39Z" ; + schema1:dateModified "2024-06-11T12:54:39Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, related to the Galaxy training tutorial "[OBIS marine indicators](https://training.galaxyproject.org/training-material/topics/ecology/tutorials/obisindicators/tutorial.html)" .\r +\r +This workflow allows to compute and visualize marine biodiversity indicators from OBIS data.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/662?version=1" ; + schema1:isBasedOn "https://ecology.usegalaxy.eu/u/ylebras/w/workflow-constructed-from-history-imported-tuto-obis-asian-pacific" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Obis biodiversity indicator on Asian pacific" ; + schema1:sdDatePublished "2024-08-05 10:27:16 +0100" ; + schema1:url "https://workflowhub.eu/workflows/662/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6936 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-10T09:00:00Z" ; + schema1:dateModified "2023-11-10T09:00:00Z" ; + schema1:description """Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, related to the Galaxy training tutorial "[OBIS marine indicators](https://training.galaxyproject.org/training-material/topics/ecology/tutorials/obisindicators/tutorial.html)" .\r +\r +This workflow allows to compute and visualize marine biodiversity indicators from OBIS data.\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Obis biodiversity indicator on Asian pacific" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/662?version=1" ; + schema1:version 1 ; + ns1:input . + + a schema1:Dataset ; + schema1:datePublished "2024-02-14T16:08:01.432692" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Purge-duplicates-one-haplotype-VGP6b/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Alternative splicing analysis using RNA-seq." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1018?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/rnasplice" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnasplice" ; + schema1:sdDatePublished "2024-08-05 10:23:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1018/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13873 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:14Z" ; + schema1:dateModified "2024-06-11T12:55:14Z" ; + schema1:description "Alternative splicing analysis using RNA-seq." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1018?version=4" ; + schema1:keywords "alternative-splicing, rna, rna-seq, splicing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnasplice" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1018?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Basic processing of a QC-filtered Anndata Object. UMAP, clustering e.t.c " ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/468?version=3" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq_QCtoBasicProcessing" ; + schema1:sdDatePublished "2024-08-05 10:24:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/468/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 33510 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-05-30T05:52:35Z" ; + schema1:dateModified "2024-05-30T05:52:35Z" ; + schema1:description "Basic processing of a QC-filtered Anndata Object. 
UMAP, clustering e.t.c " ; + schema1:isBasedOn "https://workflowhub.eu/workflows/468?version=2" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "scRNAseq_QCtoBasicProcessing" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/468?version=3" ; + schema1:version 3 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2023-10-30T06:18:00.536806" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Purge-duplicate-contigs-VGP6/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicate-contigs-VGP6/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:version "0.1" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/772?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for How Usable Are Published Permeability Data?" ; + schema1:sdDatePublished "2024-08-05 10:25:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/772/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 437141 ; + schema1:creator ; + schema1:dateCreated "2024-03-01T08:58:59Z" ; + schema1:dateModified "2024-03-18T09:40:50Z" ; + schema1:description "" ; + schema1:image ; + schema1:keywords "Cheminformatics, Databases, Permeability, Knime" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "How Usable Are Published Permeability Data?" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/772?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 39634 . + + a schema1:Dataset ; + schema1:datePublished "2024-04-22T12:09:22.942162" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-only-VGP3/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:Dataset ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:mainEntity ; + schema1:name "main (v0.1)" ; + schema1:sdDatePublished "2021-07-23 10:18:19 +0100" ; + schema1:softwareVersion "v0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 14243 ; + schema1:name "main" ; + schema1:programmingLanguage . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-12-01T20:48:14.125372" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + , + ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-atac-cutandrun" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.4" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-pe" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.4" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """hutch-workflow executes rquest in one shot.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.471.1" ; + schema1:isBasedOn "https://github.com/HDRUK/rquest-omop-worker-workflows" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for hutch-workflow (x86)" ; + schema1:sdDatePublished "2024-08-05 10:27:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/471/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 9092 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 854 ; + schema1:creator ; + schema1:dateCreated "2023-05-15T16:15:53Z" ; + schema1:dateModified "2023-10-10T14:56:47Z" ; + schema1:description """hutch-workflow executes rquest in one shot.\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/471?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "hutch-workflow (x86)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/471?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + ; + ns1:output . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """The project allowed us to manage and build structured code scripts on the Jupyter Notebook, a simple web application which is user-friendly, flexible to use in the research community. The script is developed to address the specific needs of research between different platforms of dataset.\r +These stakeholders have developed their own platforms for the annotation and standardisation of both data and metadata produced within their respective field.\r +-The INFRAFRONTIER - European Mutant Mouse Archive (EMMA) comprises over 7200 mutant mouse lines that are extensively integrated and enriched with other public dataset.\r +-The EU-OpenScreen offers compound screening protocols containing several metadata and will contribute to the development of tools for linking to the chemical entity database.\r +-The IDR Image Data Resource is a public repository of reference image datasets from published scientific studies, where the community can submit, search and access high-quality bio-image data. 
\r +-The CIM-XNAT is an XNAT deployment of the Molecular Imaging Center at UniTo that offers a suite of tools for uploading preclinical images.\r +To address the challenges of integrating several EU-RI datasets with focus on preclinical and discovery research bioimaging, our aim is to develop cross researching queries through a web based interface to combine the resources of the RIs for integrating the information associated with data belonging to the involved RIs. Furthermore, the open-source tool provides users with free, open access to collections of datasets distributed over multiple sources that result from searches by specific keywords. \r +The script allows the cross research in different fields of research as: Species, Strain, Gene, Cell line, Disease model, Chemical Compound.\r +The novel aspects of this tool are mainly:\r +a) user friendly, e.g. the user has the flexibility to research among the dataset easily with a simple API, intuitive for researchers and biomedical users. \r +b) the possibility of making a research between different platforms and repositories, from a unique simple way. \r +c) the workflow project follows the FAIR principles in the treatment of data and datasets. \r +The access to Notebook Jupyter needs the installation of Anaconda, which consents to open the web application. \r +Inside the Jupyter, the script was built using Python. The query code is also easy to download and share in a .ipynb file.\r +A visual representation of the detailed results (dataset, metadata, information, query results) of the workflow can be printed immediately after the query run. 
\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/516?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Life Science cross-RI (Research Infrastructure) project" ; + schema1:sdDatePublished "2024-08-05 10:30:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/516/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 532334 ; + schema1:creator ; + schema1:dateCreated "2023-06-27T07:39:45Z" ; + schema1:dateModified "2023-06-27T09:25:04Z" ; + schema1:description """The project allowed us to manage and build structured code scripts on the Jupyter Notebook, a simple web application which is user-friendly, flexible to use in the research community. The script is developed to address the specific needs of research between different platforms of dataset.\r +These stakeholders have developed their own platforms for the annotation and standardisation of both data and metadata produced within their respective field.\r +-The INFRAFRONTIER - European Mutant Mouse Archive (EMMA) comprises over 7200 mutant mouse lines that are extensively integrated and enriched with other public dataset.\r +-The EU-OpenScreen offers compound screening protocols containing several metadata and will contribute to the development of tools for linking to the chemical entity database.\r +-The IDR Image Data Resource is a public repository of reference image datasets from published scientific studies, where the community can submit, search and access high-quality bio-image data. 
\r +-The CIM-XNAT is an XNAT deployment of the Molecular Imaging Center at UniTo that offers a suite of tools for uploading preclinical images.\r +To address the challenges of integrating several EU-RI datasets with focus on preclinical and discovery research bioimaging, our aim is to develop cross researching queries through a web based interface to combine the resources of the RIs for integrating the information associated with data belonging to the involved RIs. Furthermore, the open-source tool provides users with free, open access to collections of datasets distributed over multiple sources that result from searches by specific keywords. \r +The script allows the cross research in different fields of research as: Species, Strain, Gene, Cell line, Disease model, Chemical Compound.\r +The novel aspects of this tool are mainly:\r +a) user friendly, e.g. the user has the flexibility to research among the dataset easily with a simple API, intuitive for researchers and biomedical users. \r +b) the possibility of making a research between different platforms and repositories, from a unique simple way. \r +c) the workflow project follows the FAIR principles in the treatment of data and datasets. \r +The access to Notebook Jupyter needs the installation of Anaconda, which consents to open the web application. \r +Inside the Jupyter, the script was built using Python. The query code is also easy to download and share in a .ipynb file.\r +A visual representation of the detailed results (dataset, metadata, information, query results) of the workflow can be printed immediately after the query run. \r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Life Science cross-RI (Research Infrastructure) project" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/516?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for the analysis of CRISPR data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/975?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/crisprseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/crisprseq" ; + schema1:sdDatePublished "2024-08-05 10:22:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/975/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11160 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:46Z" ; + schema1:dateModified "2024-06-11T12:54:46Z" ; + schema1:description "Pipeline for the analysis of CRISPR data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/975?version=5" ; + schema1:keywords "crispr, crispr-analysis, crispr-cas, NGS" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/crisprseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/975?version=4" ; + schema1:version 4 . + + a schema1:MediaObject ; + schema1:about ; + schema1:author , + ; + schema1:encodingFormat "text/markdown" . + + a schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:name "Diagram of analytical pipeline" . + + a schema1:Dataset ; + schema1:creator , + , + , + , + , + , + ; + schema1:datePublished "2023-04-13" ; + schema1:description "Study protocol" ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.7551181" ; + schema1:name "BY-COVID - WP5 - Baseline Use Case: SARS-CoV-2 vaccine effectiveness assessment - Study protocol" ; + schema1:url "https://zenodo.org/record/7825979" ; + schema1:version "1.0.3" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "The workflow takes trimmed HiC forward and reverse reads, and Hap1/Hap2 assemblies to produce Hap1 and Hap2 scaffolded assemblies using YaHS. It also runs all the QC analyses (gfastats, BUSCO, Merqury and Pretext)." ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.608.1" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/galaxy" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ERGA HiC Hap1Hap2 Scaffolding+QC YaHS v2309 (WF4)" ; + schema1:sdDatePublished "2024-08-05 10:27:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/608/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 84357 ; + schema1:creator , + ; + schema1:dateCreated "2023-10-11T15:06:56Z" ; + schema1:dateModified "2024-03-13T09:55:11Z" ; + schema1:description "The workflow takes trimmed HiC forward and reverse reads, and Hap1/Hap2 assemblies to produce Hap1 and Hap2 scaffolded assemblies using YaHS. It also runs all the QC analyses (gfastats, BUSCO, Merqury and Pretext)." ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "ERGA, Assembly+QC, Hi-C" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ERGA HiC Hap1Hap2 Scaffolding+QC YaHS v2309 (WF4)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/4.Scaffolding/Galaxy-Workflow-ERGA_HiC_Hap1Hap2_Scaffolding_QC_YaHS_v2309_(WF4).ga" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 592516 ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/4.Scaffolding/pics/Scaf_yahs_h1h2_2309.png" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:description """Workflow for quality assessment of paired reads and classification using NGTax 2.0 and functional annotation using picrust2. \r +In addition files are exported to their respective subfolders for easier data management in a later stage.\r +Steps: \r + - FastQC (read quality control)\r + - NGTax 2.0\r + - Picrust 2\r + - Export module\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/154?version=1" ; + schema1:isBasedOn "https://git.wur.nl/unlock/cwl/-/raw/master/cwl/workflows/workflow_ngtax_picrust2.cwl" ; + schema1:license "CC0-1.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Quality assessment, amplicon classification and functional prediction" ; + schema1:sdDatePublished "2024-08-05 10:33:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/154/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 29547 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5820 ; + schema1:creator , + ; + schema1:dateCreated "2021-08-30T07:18:44Z" ; + schema1:dateModified "2021-08-30T07:18:44Z" ; + schema1:description """Workflow for quality assessment of paired reads and classification using NGTax 2.0 and functional annotation using picrust2. 
\r +In addition files are exported to their respective subfolders for easier data management in a later stage.\r +Steps: \r + - FastQC (read quality control)\r + - NGTax 2.0\r + - Picrust 2\r + - Export module\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/154?version=1" ; + schema1:keywords "Amplicon, Classification, CWL" ; + schema1:license "https://spdx.org/licenses/CC0-1.0" ; + schema1:name "Quality assessment, amplicon classification and functional prediction" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/154?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=27" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-08-05 10:24:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=27" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13557 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:51Z" ; + schema1:dateModified "2024-06-11T12:54:51Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=27" ; + schema1:version 27 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """[![Development](https://img.shields.io/badge/development-active-blue.svg)](https://img.shields.io/badge/development-active-blue.svg)\r +[![Reads2Map](https://circleci.com/gh/Cristianetaniguti/Reads2Map.svg?style=svg)](https://app.circleci.com/pipelines/github/Cristianetaniguti/Reads2Map)\r +\r +## Reads2Map \r +\r +Reads2Map presents a collection of [WDL workflows](https://openwdl.org/) to build linkage maps from sequencing reads. Each workflow release is described in the [Read2Map releases page](https://github.com/Cristianetaniguti/Reads2Map/releases). \r +\r +The main workflows are the `EmpiricalReads2Map.wdl` and the `SimulatedReads2Map.wdl`. The `EmpiricalReads2Map.wdl` is composed by the `EmpiricalSNPCalling.wdl` that performs the SNP calling, and the `EmpiricalMaps.wdl` that performs the genotype calling and map building in empirical reads. 
The `SimulatedReads2Map.wdl` simulates Illumina reads for RADseq, exome, or WGS data and performs the SNP and genotype calling and genetic map building.\r +\r +By now, [GATK](https://github.com/broadinstitute/gatk), [Freebayes](https://github.com/ekg/freebayes) are included for SNP calling; [updog](https://github.com/dcgerard/updog), [polyRAD](https://github.com/lvclark/polyRAD), [SuperMASSA](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0030906) for dosage calling; and [OneMap](https://github.com/augusto-garcia/onemap), and [GUSMap](https://github.com/tpbilton/GUSMap) for linkage map build.\r +\r +![math_meth2](https://user-images.githubusercontent.com/7572527/203172239-e4d2d857-84e2-48c5-bb88-01052a287004.png)\r +\r +## How to use\r +\r +Multiple systems are available to run WDL workflows such as Cromwell, miniWDL, and dxWDL. See further information in the [openwdl documentation](https://github.com/openwdl/wdl#execution-engines).\r +\r +To run a pipeline, first navigate to [Reads2Map releases page](https://github.com/Cristianetaniguti/Reads2Map/releases), search for the pipeline tag you which to run, and download the pipeline’s assets (the WDL workflow, the JSON, and the ZIP with accompanying dependencies).\r +\r +## Documentation\r +\r +Check the description of the inputs for the pipelines:\r +\r +* [EmpiricalReads2Map (EmpiricalSNPCalling and EmpiricalMaps)](https://cristianetaniguti.github.io/Tutorials/Reads2Map/EmpiricalReads.html)\r +\r +* [SimulatedReads2Map](https://cristianetaniguti.github.io/Tutorials/Reads2Map/simulatedreads.html)\r +\r +Check how to evaluate the workflows results in Reads2MapApp Shiny:\r +\r +* [Reads2MapApp](https://github.com/Cristianetaniguti/Reads2MapApp)\r +\r +Once you selected the best pipeline using a subset of your data, you can build a complete high-density linkage map:\r +\r +* [A Guide to Build High-Density Linkage 
Maps](https://cristianetaniguti.github.io/Tutorials/onemap/Quick_HighDens/High_density_maps.html)\r +\r +Check more information and examples of usage in:\r +\r +* [Taniguti, C. H., Taniguti, L. M., Amadeu, R. R., Mollinari, M., Da, G., Pereira, S., Riera-Lizarazu, O., Lau, J., Byrne, D., de Siqueira Gesteira, G., De, T., Oliveira, P., Ferreira, G. C., & Franco Garcia, A. A. Developing best practices for genotyping-by-sequencing analysis using linkage maps as benchmarks. BioRxiv. https://doi.org/10.1101/2022.11.24.517847](https://www.biorxiv.org/content/10.1101/2022.11.24.517847v2)\r +\r +## Third-party software and images\r +\r +- [BWA](https://github.com/lh3/bwa) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Used to align simulated reads to reference;\r +- [cutadapt](https://github.com/marcelm/cutadapt) in [cristaniguti/ pirs-ddrad-cutadapt:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/pirs-ddrad-cutadapt): Trim simulated reads;\r +- [ddRADseqTools](https://github.com/GGFHF/ddRADseqTools) in [cristaniguti/ pirs-ddrad-cutadapt:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/pirs-ddrad-cutadapt): Set of applications useful to in silico design and testing of double digest RADseq (ddRADseq) experiments;\r +- [Freebayes](https://github.com/ekg/freebayes) in [Cristaniguti/freebayes:0.0.1](): Variant call step;\r +- [GATK](https://github.com/broadinstitute/gatk) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Variant call step using Haplotype Caller, GenomicsDBImport and GenotypeGVCFs;\r +- [PedigreeSim](https://github.com/PBR/pedigreeSim?files=1) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Simulates progeny genotypes from parents genotypes for different 
types of populations;\r +- [picard](https://github.com/broadinstitute/picard) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Process alignment files;\r +- [pirs](https://github.com/galaxy001/pirs) in [cristaniguti/ pirs-ddrad-cutadapt:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/pirs-ddrad-cutadapt): To generate simulates paired-end reads from a reference genome;\r +- [samtools](https://github.com/samtools/samtools) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Process alignment files;\r +- [SimuSCoP](https://github.com/qasimyu/simuscop) in [cristaniguti/simuscopr:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/simuscopr): Exome and WGS Illumina reads simulations;\r +- [RADinitio](http://catchenlab.life.illinois.edu/radinitio/) in [ cristaniguti/radinitio:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/radinitio): RADseq Illumina reads simulation;\r +- [SuperMASSA](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0030906) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Efficient Exact Maximum a Posteriori Computation for Bayesian SNP Genotyping in Polyploids;\r +- [bcftools](https://github.com/samtools/bcftools) in [lifebitai/bcftools:1.10.2](https://hub.docker.com/r/lifebitai/bcftools): utilities for variant calling and manipulating VCFs and BCFs;\r +- [vcftools](http://vcftools.sourceforge.net/) in [cristaniguti/split_markers:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/split_markers): program package designed for working with VCF files.\r +- [MCHap](https://github.com/PlantandFoodResearch/MCHap) in [cristaniguti/mchap:0.7.0](https://hub.docker.com/repository/docker/cristaniguti/mchap): Polyploid 
micro-haplotype assembly using Markov chain Monte Carlo simulation.\r +\r +### R packages\r +\r +- [OneMap](https://github.com/augusto-garcia/onemap) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Is a software for constructing genetic maps in experimental crosses: full-sib, RILs, F2 and backcrosses;\r +- [Reads2MapTools](https://github.com/Cristianetaniguti/Reads2MapTools) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Support package to perform mapping populations simulations and genotyping for OneMap genetic map building\r +- [GUSMap](https://github.com/tpbilton/GUSMap): Genotyping Uncertainty with Sequencing data and linkage MAPping\r +- [updog](https://github.com/dcgerard/updog) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Flexible Genotyping of Polyploids using Next Generation Sequencing Data\r +- [polyRAD](https://github.com/lvclark/polyRAD) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Genotype Calling with Uncertainty from Sequencing Data in Polyploids\r +- [Reads2MapApp](https://github.com/Cristianetaniguti/Reads2MapApp) in [cristaniguti/reads2mapApp:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Shiny app to evaluate Reads2Map workflows results\r +- [simuscopR](https://github.com/Cristianetaniguti/simuscopR) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Wrap-up R package for SimusCop simulations.""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.410.1" ; + schema1:isBasedOn "https://github.com/Cristianetaniguti/Reads2Map" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for SimulatedReads2Map" ; + schema1:sdDatePublished "2024-08-05 10:31:35 +0100" ; + schema1:url 
"https://workflowhub.eu/workflows/410/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2579 ; + schema1:creator ; + schema1:dateCreated "2022-11-29T20:17:01Z" ; + schema1:dateModified "2023-01-16T14:04:54Z" ; + schema1:description """[![Development](https://img.shields.io/badge/development-active-blue.svg)](https://img.shields.io/badge/development-active-blue.svg)\r +[![Reads2Map](https://circleci.com/gh/Cristianetaniguti/Reads2Map.svg?style=svg)](https://app.circleci.com/pipelines/github/Cristianetaniguti/Reads2Map)\r +\r +## Reads2Map \r +\r +Reads2Map presents a collection of [WDL workflows](https://openwdl.org/) to build linkage maps from sequencing reads. Each workflow release is described in the [Read2Map releases page](https://github.com/Cristianetaniguti/Reads2Map/releases). \r +\r +The main workflows are the `EmpiricalReads2Map.wdl` and the `SimulatedReads2Map.wdl`. The `EmpiricalReads2Map.wdl` is composed by the `EmpiricalSNPCalling.wdl` that performs the SNP calling, and the `EmpiricalMaps.wdl` that performs the genotype calling and map building in empirical reads. 
The `SimulatedReads2Map.wdl` simulates Illumina reads for RADseq, exome, or WGS data and performs the SNP and genotype calling and genetic map building.\r +\r +By now, [GATK](https://github.com/broadinstitute/gatk), [Freebayes](https://github.com/ekg/freebayes) are included for SNP calling; [updog](https://github.com/dcgerard/updog), [polyRAD](https://github.com/lvclark/polyRAD), [SuperMASSA](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0030906) for dosage calling; and [OneMap](https://github.com/augusto-garcia/onemap), and [GUSMap](https://github.com/tpbilton/GUSMap) for linkage map build.\r +\r +![math_meth2](https://user-images.githubusercontent.com/7572527/203172239-e4d2d857-84e2-48c5-bb88-01052a287004.png)\r +\r +## How to use\r +\r +Multiple systems are available to run WDL workflows such as Cromwell, miniWDL, and dxWDL. See further information in the [openwdl documentation](https://github.com/openwdl/wdl#execution-engines).\r +\r +To run a pipeline, first navigate to [Reads2Map releases page](https://github.com/Cristianetaniguti/Reads2Map/releases), search for the pipeline tag you which to run, and download the pipeline’s assets (the WDL workflow, the JSON, and the ZIP with accompanying dependencies).\r +\r +## Documentation\r +\r +Check the description of the inputs for the pipelines:\r +\r +* [EmpiricalReads2Map (EmpiricalSNPCalling and EmpiricalMaps)](https://cristianetaniguti.github.io/Tutorials/Reads2Map/EmpiricalReads.html)\r +\r +* [SimulatedReads2Map](https://cristianetaniguti.github.io/Tutorials/Reads2Map/simulatedreads.html)\r +\r +Check how to evaluate the workflows results in Reads2MapApp Shiny:\r +\r +* [Reads2MapApp](https://github.com/Cristianetaniguti/Reads2MapApp)\r +\r +Once you selected the best pipeline using a subset of your data, you can build a complete high-density linkage map:\r +\r +* [A Guide to Build High-Density Linkage 
Maps](https://cristianetaniguti.github.io/Tutorials/onemap/Quick_HighDens/High_density_maps.html)\r +\r +Check more information and examples of usage in:\r +\r +* [Taniguti, C. H., Taniguti, L. M., Amadeu, R. R., Mollinari, M., Da, G., Pereira, S., Riera-Lizarazu, O., Lau, J., Byrne, D., de Siqueira Gesteira, G., De, T., Oliveira, P., Ferreira, G. C., & Franco Garcia, A. A. Developing best practices for genotyping-by-sequencing analysis using linkage maps as benchmarks. BioRxiv. https://doi.org/10.1101/2022.11.24.517847](https://www.biorxiv.org/content/10.1101/2022.11.24.517847v2)\r +\r +## Third-party software and images\r +\r +- [BWA](https://github.com/lh3/bwa) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Used to align simulated reads to reference;\r +- [cutadapt](https://github.com/marcelm/cutadapt) in [cristaniguti/ pirs-ddrad-cutadapt:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/pirs-ddrad-cutadapt): Trim simulated reads;\r +- [ddRADseqTools](https://github.com/GGFHF/ddRADseqTools) in [cristaniguti/ pirs-ddrad-cutadapt:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/pirs-ddrad-cutadapt): Set of applications useful to in silico design and testing of double digest RADseq (ddRADseq) experiments;\r +- [Freebayes](https://github.com/ekg/freebayes) in [Cristaniguti/freebayes:0.0.1](): Variant call step;\r +- [GATK](https://github.com/broadinstitute/gatk) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Variant call step using Haplotype Caller, GenomicsDBImport and GenotypeGVCFs;\r +- [PedigreeSim](https://github.com/PBR/pedigreeSim?files=1) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Simulates progeny genotypes from parents genotypes for different 
types of populations;\r +- [picard](https://github.com/broadinstitute/picard) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Process alignment files;\r +- [pirs](https://github.com/galaxy001/pirs) in [cristaniguti/ pirs-ddrad-cutadapt:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/pirs-ddrad-cutadapt): To generate simulates paired-end reads from a reference genome;\r +- [samtools](https://github.com/samtools/samtools) in [us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.5.7-2021-06-09_16-47-48Z](https://console.cloud.google.com/gcr/images/broad-gotc-prod/US/genomes-in-the-cloud): Process alignment files;\r +- [SimuSCoP](https://github.com/qasimyu/simuscop) in [cristaniguti/simuscopr:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/simuscopr): Exome and WGS Illumina reads simulations;\r +- [RADinitio](http://catchenlab.life.illinois.edu/radinitio/) in [ cristaniguti/radinitio:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/radinitio): RADseq Illumina reads simulation;\r +- [SuperMASSA](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0030906) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Efficient Exact Maximum a Posteriori Computation for Bayesian SNP Genotyping in Polyploids;\r +- [bcftools](https://github.com/samtools/bcftools) in [lifebitai/bcftools:1.10.2](https://hub.docker.com/r/lifebitai/bcftools): utilities for variant calling and manipulating VCFs and BCFs;\r +- [vcftools](http://vcftools.sourceforge.net/) in [cristaniguti/split_markers:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/split_markers): program package designed for working with VCF files.\r +- [MCHap](https://github.com/PlantandFoodResearch/MCHap) in [cristaniguti/mchap:0.7.0](https://hub.docker.com/repository/docker/cristaniguti/mchap): Polyploid 
micro-haplotype assembly using Markov chain Monte Carlo simulation.\r +\r +### R packages\r +\r +- [OneMap](https://github.com/augusto-garcia/onemap) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Is a software for constructing genetic maps in experimental crosses: full-sib, RILs, F2 and backcrosses;\r +- [Reads2MapTools](https://github.com/Cristianetaniguti/Reads2MapTools) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Support package to perform mapping populations simulations and genotyping for OneMap genetic map building\r +- [GUSMap](https://github.com/tpbilton/GUSMap): Genotyping Uncertainty with Sequencing data and linkage MAPping\r +- [updog](https://github.com/dcgerard/updog) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Flexible Genotyping of Polyploids using Next Generation Sequencing Data\r +- [polyRAD](https://github.com/lvclark/polyRAD) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Genotype Calling with Uncertainty from Sequencing Data in Polyploids\r +- [Reads2MapApp](https://github.com/Cristianetaniguti/Reads2MapApp) in [cristaniguti/reads2mapApp:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Shiny app to evaluate Reads2Map workflows results\r +- [simuscopR](https://github.com/Cristianetaniguti/simuscopR) in [cristaniguti/reads2map:0.0.1](https://hub.docker.com/repository/docker/cristaniguti/reads2map): Wrap-up R package for SimusCop simulations.""" ; + schema1:keywords "linkage_map, variant_calling, WDL, reads_simulation" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "SimulatedReads2Map" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/410?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-04-29T16:50:30.687834" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-only-VGP3/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """MGnify (http://www.ebi.ac.uk/metagenomics) provides a free to use platform for the assembly, analysis and archiving of microbiome data derived from sequencing microbial populations that are present in particular environments. Over the past 2 years, MGnify (formerly EBI Metagenomics) has more than doubled the number of publicly available analysed datasets held within the resource. Recently, an updated approach to data analysis has been unveiled (version 5.0), replacing the previous single pipeline with multiple analysis pipelines that are tailored according to the input data, and that are formally described using the Common Workflow Language, enabling greater provenance, reusability, and reproducibility. MGnify's new analysis pipelines offer additional approaches for taxonomic assertions based on ribosomal internal transcribed spacer regions (ITS1/2) and expanded protein functional annotations. Biochemical pathways and systems predictions have also been added for assembled contigs. MGnify's growing focus on the assembly of metagenomic data has also seen the number of datasets it has assembled and analysed increase six-fold. The non-redundant protein database constructed from the proteins encoded by these assemblies now exceeds 1 billion sequences. 
Meanwhile, a newly developed contig viewer provides fine-grained visualisation of the assembled contigs and their enriched annotations.\r +\r +Documentation: https://docs.mgnify.org/en/latest/analysis.html#amplicon-analysis-pipeline\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/361?version=1" ; + schema1:isBasedOn "https://github.com/EBI-Metagenomics/pipeline-v5.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for MGnify - amplicon analysis pipeline" ; + schema1:sdDatePublished "2024-08-05 10:32:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/361/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 37548 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4228 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2022-06-07T08:28:11Z" ; + schema1:dateModified "2023-01-16T14:01:07Z" ; + schema1:description """MGnify (http://www.ebi.ac.uk/metagenomics) provides a free to use platform for the assembly, analysis and archiving of microbiome data derived from sequencing microbial populations that are present in particular environments. Over the past 2 years, MGnify (formerly EBI Metagenomics) has more than doubled the number of publicly available analysed datasets held within the resource. Recently, an updated approach to data analysis has been unveiled (version 5.0), replacing the previous single pipeline with multiple analysis pipelines that are tailored according to the input data, and that are formally described using the Common Workflow Language, enabling greater provenance, reusability, and reproducibility. 
MGnify's new analysis pipelines offer additional approaches for taxonomic assertions based on ribosomal internal transcribed spacer regions (ITS1/2) and expanded protein functional annotations. Biochemical pathways and systems predictions have also been added for assembled contigs. MGnify's growing focus on the assembly of metagenomic data has also seen the number of datasets it has assembled and analysed increase six-fold. The non-redundant protein database constructed from the proteins encoded by these assemblies now exceeds 1 billion sequences. Meanwhile, a newly developed contig viewer provides fine-grained visualisation of the assembled contigs and their enriched annotations.\r +\r +Documentation: https://docs.mgnify.org/en/latest/analysis.html#amplicon-analysis-pipeline\r +""" ; + schema1:image ; + schema1:keywords "CWL, Metagenomics, rna, workflow" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "MGnify - amplicon analysis pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/361?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5800 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:11Z" ; + schema1:dateModified "2024-06-11T12:55:11Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=11" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-08-05 10:22:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=11" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10551 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:10Z" ; + schema1:dateModified "2024-06-11T12:55:10Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=11" ; + schema1:version 11 . + + a schema1:Dataset ; + schema1:datePublished "2024-04-22T14:55:25.506908" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Scaffolding-Bionano-VGP7" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Scaffolding-Bionano-VGP7/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache 
License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.132.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Amber Constant pH MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/132/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 54708 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T08:40:22Z" ; + schema1:dateModified "2023-04-14T08:41:23Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under 
the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/132?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Amber Constant pH MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_amber_md_setup/master/biobb_wf_amber_md_setup/notebooks/mdsetup_ph/biobb_amber_CpHMD_notebook.ipynb" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, related to the Galaxy training tutorial "[Sentinel 2 biodiversity](https://training.galaxyproject.org/training-material/topics/ecology/tutorials/species-distribution-modeling/tutorial.html)" .\r +\r +This workflow allows to analyze remote sensing sentinel 2 satellites data to compute spectral indices such as the NDVI and visualizing biodiversity indicators\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/657?version=1" ; + schema1:isBasedOn "https://ecology.usegalaxy.eu/u/ylebras/w/remote-sensing-sentinel-2-data-analysis-to-produce-biodiversity-metrics" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Remote sensing Sentinel 2 data analysis to produce biodiversity metrics" ; + schema1:sdDatePublished "2024-08-05 10:27:18 +0100" ; + schema1:url "https://workflowhub.eu/workflows/657/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 20815 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-11-09T12:52:32Z" ; + schema1:dateModified "2023-11-09T20:59:33Z" ; + schema1:description """Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, related to the Galaxy training tutorial "[Sentinel 2 biodiversity](https://training.galaxyproject.org/training-material/topics/ecology/tutorials/species-distribution-modeling/tutorial.html)" .\r +\r +This workflow allows to analyze remote sensing sentinel 2 satellites data to compute spectral indices such as the NDVI and visualizing biodiversity indicators\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Remote sensing Sentinel 2 data analysis to produce biodiversity metrics" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/657?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """![workflow](https://github.com/naturalis/barcode-constrained-phylogeny/actions/workflows/python-package-conda.yml/badge.svg)\r +[![License: Apache-2.0](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.10519081.svg)](https://doi.org/10.5281/zenodo.10519081)\r +\r +![Logo](https://github.com/naturalis/barcode-constrained-phylogeny/blob/main/doc/logo-small.png?raw=true)\r +\r +# Bactria: BarCode TRee Inference and Analysis\r +This repository contains code and data for building very large, topologically-constrained \r +barcode phylogenies through a divide-and-conquer strategy. Such trees are useful as \r +reference materials for curating barcode data by detecting rogue terminals (indicating\r +incorrect taxonomic annotation) and in the comparable calculation of alpha and beta \r +biodiversity metrics across metabarcoding assays. \r +\r +The input data for the approach we develop here currently comes from BOLD data dumps. \r +The international database [BOLD Systems](https://www.boldsystems.org/index.php) \r +contains DNA barcodes for hundreds of thousands of species, with multiple barcodes per \r +species. The data dumps we use here are TSV files whose columns conform to the nascent\r +BCDM (barcode data model) vocabulary. As such, other data sources that conform to this\r +vocabulary could in the future be used as well, such as [UNITE](https://unite.ut.ee/).\r +\r +Theoretically, such data could be filtered and aligned per DNA marker to make \r +phylogenetic trees. 
However, there are two limiting factors: building very large \r +phylogenies is computationally intensive, and barcodes are not considered ideal for \r +building big trees because they are short (providing insufficient signal to resolve large \r +trees) and because they tend to saturate across large patristic distances.\r +\r +![concept](https://github.com/naturalis/barcode-constrained-phylogeny/blob/main/doc/concept.png)\r +\r +Both problems can be mitigated by using the \r +[Open Tree of Life](https://tree.opentreeoflife.org/opentree/argus/opentree13.4@ott93302) \r +as a further source of phylogenetic signal. The BOLD data can be split into chunks that \r +correspond to Open Tree of Life clades. These chunks can be made into alignments and \r +subtrees. The OpenTOL can be used as a constraint in the algorithms to make these. The \r +chunks are then combined in a large synthesis by grafting them on a backbone made from \r +exemplar taxa from the subtrees. Here too, the OpenTOL is a source of phylogenetic \r +constraint.\r +\r +In this repository this concept is developed for both animal species and plant species.\r +\r +## Installation\r +\r +The pipeline and its dependencies are managed using conda. On a linux or osx system, you \r +can follow these steps to set up the `bactria` Conda environment using an `environment.yml` \r +file and a `requirements.txt` file:\r +\r +1. **Clone the Repository:** \r + Clone the repository containing the environment files to your local machine:\r + ```bash\r + git clone https://github.com/naturalis/barcode-constrained-phylogeny.git\r + cd barcode-constrained-phylogeny\r + ```\r +2. **Create the Conda Environment:**\r + Create the bactria Conda environment using the environment.yml file with the following \r + command:\r + ```bash\r + conda env create -f workflow/envs/environment.yml\r + ```\r + This command will create a new Conda environment named bactria with the packages \r + specified in the environment.yml file. 
This step is largely a placeholder because\r + most of the dependency management is handled at the level of individual pipeline\r + steps, which each have their own environment specification.\r +3. **Activate the Environment:**\r + After creating the environment, activate it using the conda activate command:\r + ```bash\r + conda activate bactria\r + ```\r +4. **Verify the Environment:**\r + Verify that the bactria environment was set up correctly and that all packages were \r + installed using the conda list command:\r + ```bash\r + conda list\r + ```\r + This command will list all packages installed in the active conda environment. You should \r + see all the packages specified in the environment.yml file and the requirements.txt file.\r +\r +## How to run\r +\r +The pipeline is implemented using snakemake, which is available within the conda \r +environment that results from the installation. Important before running the snakemake pipeline \r +is to change in [config/config.yaml](config/config.yaml) the number of threads available on your \r +computer. Which marker gene is used in the pipeline is also specified in the config.yaml (default \r +COI-5P). Prior to execution, the BOLD data package to use (we used the \r +[release of 30 December 2022](https://www.boldsystems.org/index.php/datapackage?id=BOLD_Public.30-Dec-2022)) \r +must be downloaded manually and stored in the [resources/](resources/) directory. If a BOLD release \r +from another date is used the file names in config.yaml need to be updated. 
\r +\r +How to run the entire pipeline:\r +\r +```bash \r +snakemake -j {number of threads} --use-conda\r +```\r +\r +Snakemake rules can be performed separately:\r +```bash \r +snakemake -R {Rule} -j {number of threads} --use-conda\r +```\r +\r +Enter the same number at {number of threads} as you filled in previously in src/config.yaml.\r +In {Rule} insert the rule to be performed.\r +\r +Here is an overview of all the rules in the Snakefile:\r +\r +![graphviz (1)](https://github.com/naturalis/barcode-constrained-phylogeny/blob/main/doc/dag.svg)\r +(zoomed view is available [here](https://raw.githubusercontent.com/naturalis/barcode-constrained-phylogeny/main/doc/dag.svg))\r +\r +## Repository layout\r +\r +Below is the top-level layout of the repository. This layout is in line with \r +[community standards](https://snakemake.readthedocs.io/en/stable/snakefiles/deployment.html) and must be adhered to.\r +All of these subfolders contains further explanatory READMEs to explain their contents in more detail.\r +\r +- [config](config/) - configuration files\r +- [doc](doc/) - documentation and background literature\r +- [logs](logs/) - where log files are written during pipeline runtime\r +- [resources](resources/) - external data resources (from BOLD and OpenTree) are downloaded here\r +- [results](results/) - intermediate and final results are generated here\r +- [workflow](workflow/) - script source code and driver snakefile \r +\r +## License\r +\r +© 2023 Naturalis Biodiversity Center\r +\r +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except \r +in compliance with the License. 
You may obtain a copy of the License at\r +\r +[http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)\r + \r +Unless required by applicable law or agreed to in writing, software distributed under the License \r +is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express \r +or implied. See the License for the specific language governing permissions and limitations under \r +the License.""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/706?version=1" ; + schema1:isBasedOn "https://github.com/naturalis/barcode-constrained-phylogeny.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Bactria: BarCode TRee Inference and Analysis" ; + schema1:sdDatePublished "2024-08-05 10:25:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/706/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 131932 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13366 ; + schema1:dateCreated "2024-01-24T10:38:28Z" ; + schema1:dateModified "2024-02-05T10:09:43Z" ; + schema1:description """![workflow](https://github.com/naturalis/barcode-constrained-phylogeny/actions/workflows/python-package-conda.yml/badge.svg)\r +[![License: Apache-2.0](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.10519081.svg)](https://doi.org/10.5281/zenodo.10519081)\r +\r +![Logo](https://github.com/naturalis/barcode-constrained-phylogeny/blob/main/doc/logo-small.png?raw=true)\r +\r +# Bactria: BarCode TRee Inference and Analysis\r +This repository contains code and data for building very large, topologically-constrained \r +barcode phylogenies through a divide-and-conquer strategy. 
Such trees are useful as \r +reference materials for curating barcode data by detecting rogue terminals (indicating\r +incorrect taxonomic annotation) and in the comparable calculation of alpha and beta \r +biodiversity metrics across metabarcoding assays. \r +\r +The input data for the approach we develop here currently comes from BOLD data dumps. \r +The international database [BOLD Systems](https://www.boldsystems.org/index.php) \r +contains DNA barcodes for hundreds of thousands of species, with multiple barcodes per \r +species. The data dumps we use here are TSV files whose columns conform to the nascent\r +BCDM (barcode data model) vocabulary. As such, other data sources that conform to this\r +vocabulary could in the future be used as well, such as [UNITE](https://unite.ut.ee/).\r +\r +Theoretically, such data could be filtered and aligned per DNA marker to make \r +phylogenetic trees. However, there are two limiting factors: building very large \r +phylogenies is computationally intensive, and barcodes are not considered ideal for \r +building big trees because they are short (providing insufficient signal to resolve large \r +trees) and because they tend to saturate across large patristic distances.\r +\r +![concept](https://github.com/naturalis/barcode-constrained-phylogeny/blob/main/doc/concept.png)\r +\r +Both problems can be mitigated by using the \r +[Open Tree of Life](https://tree.opentreeoflife.org/opentree/argus/opentree13.4@ott93302) \r +as a further source of phylogenetic signal. The BOLD data can be split into chunks that \r +correspond to Open Tree of Life clades. These chunks can be made into alignments and \r +subtrees. The OpenTOL can be used as a constraint in the algorithms to make these. The \r +chunks are then combined in a large synthesis by grafting them on a backbone made from \r +exemplar taxa from the subtrees. 
Here too, the OpenTOL is a source of phylogenetic \r +constraint.\r +\r +In this repository this concept is developed for both animal species and plant species.\r +\r +## Installation\r +\r +The pipeline and its dependencies are managed using conda. On a linux or osx system, you \r +can follow these steps to set up the `bactria` Conda environment using an `environment.yml` \r +file and a `requirements.txt` file:\r +\r +1. **Clone the Repository:** \r + Clone the repository containing the environment files to your local machine:\r + ```bash\r + git clone https://github.com/naturalis/barcode-constrained-phylogeny.git\r + cd barcode-constrained-phylogeny\r + ```\r +2. **Create the Conda Environment:**\r + Create the bactria Conda environment using the environment.yml file with the following \r + command:\r + ```bash\r + conda env create -f workflow/envs/environment.yml\r + ```\r + This command will create a new Conda environment named bactria with the packages \r + specified in the environment.yml file. This step is largely a placeholder because\r + most of the dependency management is handled at the level of individual pipeline\r + steps, which each have their own environment specification.\r +3. **Activate the Environment:**\r + After creating the environment, activate it using the conda activate command:\r + ```bash\r + conda activate bactria\r + ```\r +4. **Verify the Environment:**\r + Verify that the bactria environment was set up correctly and that all packages were \r + installed using the conda list command:\r + ```bash\r + conda list\r + ```\r + This command will list all packages installed in the active conda environment. You should \r + see all the packages specified in the environment.yml file and the requirements.txt file.\r +\r +## How to run\r +\r +The pipeline is implemented using snakemake, which is available within the conda \r +environment that results from the installation. 
Important before running the snakemake pipeline \r +is to change in [config/config.yaml](config/config.yaml) the number of threads available on your \r +computer. Which marker gene is used in the pipeline is also specified in the config.yaml (default \r +COI-5P). Prior to execution, the BOLD data package to use (we used the \r +[release of 30 December 2022](https://www.boldsystems.org/index.php/datapackage?id=BOLD_Public.30-Dec-2022)) \r +must be downloaded manually and stored in the [resources/](resources/) directory. If a BOLD release \r +from another date is used the file names in config.yaml need to be updated. \r +\r +How to run the entire pipeline:\r +\r +```bash \r +snakemake -j {number of threads} --use-conda\r +```\r +\r +Snakemake rules can be performed separately:\r +```bash \r +snakemake -R {Rule} -j {number of threads} --use-conda\r +```\r +\r +Enter the same number at {number of threads} as you filled in previously in src/config.yaml.\r +In {Rule} insert the rule to be performed.\r +\r +Here is an overview of all the rules in the Snakefile:\r +\r +![graphviz (1)](https://github.com/naturalis/barcode-constrained-phylogeny/blob/main/doc/dag.svg)\r +(zoomed view is available [here](https://raw.githubusercontent.com/naturalis/barcode-constrained-phylogeny/main/doc/dag.svg))\r +\r +## Repository layout\r +\r +Below is the top-level layout of the repository. 
This layout is in line with \r +[community standards](https://snakemake.readthedocs.io/en/stable/snakefiles/deployment.html) and must be adhered to.\r +All of these subfolders contains further explanatory READMEs to explain their contents in more detail.\r +\r +- [config](config/) - configuration files\r +- [doc](doc/) - documentation and background literature\r +- [logs](logs/) - where log files are written during pipeline runtime\r +- [resources](resources/) - external data resources (from BOLD and OpenTree) are downloaded here\r +- [results](results/) - intermediate and final results are generated here\r +- [workflow](workflow/) - script source code and driver snakefile \r +\r +## License\r +\r +© 2023 Naturalis Biodiversity Center\r +\r +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except \r +in compliance with the License. You may obtain a copy of the License at\r +\r +[http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)\r + \r +Unless required by applicable law or agreed to in writing, software distributed under the License \r +is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express \r +or implied. See the License for the specific language governing permissions and limitations under \r +the License.""" ; + schema1:image ; + schema1:keywords "Bioinformatics, Python, Snakemake, phylogenetics" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Bactria: BarCode TRee Inference and Analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/706?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/986?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/fetchngs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fetchngs" ; + schema1:sdDatePublished "2024-08-05 10:23:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/986/ro_crate?version=10" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7986 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:53Z" ; + schema1:dateModified "2024-06-11T12:54:53Z" ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/986?version=13" ; + schema1:keywords "ddbj, download, ena, FASTQ, geo, sra, synapse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fetchngs" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/986?version=10" ; + schema1:version 10 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=15" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-08-05 10:22:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=15" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11971 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:02Z" ; + schema1:dateModified "2024-06-11T12:55:02Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=15" ; + schema1:version 15 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=18" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=18" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9566 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:12Z" ; + schema1:dateModified "2024-06-11T12:55:12Z" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=18" ; + schema1:version 18 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """The input to this workflow is a data matrix of gene expression that was collected from a pediatric patient tumor patient from the KidsFirst Common Fund program [1]. The RNA-seq samples are the columns of the matrix, and the rows are the raw expression gene count for all human coding genes (Table 1). This data matrix is fed into TargetRanger [2] to screen for targets which are highly expressed in the tumor but lowly expressed across most healthy human tissues based on gene expression data collected from postmortem patients with RNA-seq by the GTEx Common Fund program [3]. Based on this analysis the gene IMP U3 small nucleolar ribonucleoprotein 3 (IMP3) was selected because it was the top candidate returned from the TargetRanger analysis (Tables 2-3). IMP3 is also commonly called insulin-like growth factor 2 mRNA-binding protein 3 (IGF2BP3). Next, we leverage unique knowledge from various other Common Fund programs to examine various functions and knowledge related to IMP3. First, we queried the LINCS L1000 data [4] from the LINCS program [5] converted into RNA-seq-like LINCS L1000 Signatures [6] using the SigCom LINCS API [7] to identify mimicker or reverser small molecules that maximally impact the expression of IMP3 in human cell lines (Fig. 1, Table 4). In addition, we also queried the LINCS L1000 data to identify single gene CRISPR knockouts that down-regulate the expression of IMP3 (Fig. 1, Table 5). 
These potential drug targets were filtered using the Common Fund IDG program's list of understudied proteins [8] to produce a set of additional targets (Table 6). Next, IMP3 was searched for knowledge provided by the with the Metabolomics Workbench MetGENE tool [9]. MetGENE aggregates knowledge about pathways, reactions, metabolites, and studies from the Metabolomics Workbench Common Fund supported resource [10]. The Metabolomics Workbench was searched to find associated metabolites linked to IMP3 [10]. Furthermore, we leveraged the Linked Data Hub API [11] to list knowledge about regulatory elements associated with IMP3 (Table 6). Finally, the GlyGen database [12] was queried to identify relevant sets of proteins that are the product of the IMP3 genes, as well as known post-translational modifications discovered on IMP3.\r +\r +1. Lonsdale, J. et al. The Genotype-Tissue Expression (GTEx) project. Nature Genetics vol. 45 580–585 (2013). doi:10.1038/ng.2653\r +2. Evangelista, J. E. et al. SigCom LINCS: data and metadata search engine for a million gene expression signatures. Nucleic Acids Research vol. 50 W697–W709 (2022). doi:10.1093/nar/gkac328\r +3. IDG Understudied Proteins, https://druggablegenome.net/AboutIDGProteinList\r +4. MetGENE, https://sc-cfdewebdev.sdsc.edu/MetGENE/metGene.php\r +5. The Metabolomics Workbench, https://www.metabolomicsworkbench.org/\r +6. Linked Data Hub, https://ldh.genome.network/cfde/ldh/\r +7. York, W. S. et al. GlyGen: Computational and Informatics Resources for Glycoscience. Glycobiology vol. 30 72–73 (2019). 
doi:10.1093/glycob/cwz080""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/814?version=1" ; + schema1:license "CC-BY-NC-SA-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Use Case 13: Novel Cell Surface Targets for Individual Cancer Patients Analyzed with Common Fund Datasets" ; + schema1:sdDatePublished "2024-08-05 10:24:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/814/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 32077 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8205 ; + schema1:dateCreated "2024-04-16T21:42:58Z" ; + schema1:dateModified "2024-04-23T15:54:49Z" ; + schema1:description """The input to this workflow is a data matrix of gene expression that was collected from a pediatric patient tumor patient from the KidsFirst Common Fund program [1]. The RNA-seq samples are the columns of the matrix, and the rows are the raw expression gene count for all human coding genes (Table 1). This data matrix is fed into TargetRanger [2] to screen for targets which are highly expressed in the tumor but lowly expressed across most healthy human tissues based on gene expression data collected from postmortem patients with RNA-seq by the GTEx Common Fund program [3]. Based on this analysis the gene IMP U3 small nucleolar ribonucleoprotein 3 (IMP3) was selected because it was the top candidate returned from the TargetRanger analysis (Tables 2-3). IMP3 is also commonly called insulin-like growth factor 2 mRNA-binding protein 3 (IGF2BP3). Next, we leverage unique knowledge from various other Common Fund programs to examine various functions and knowledge related to IMP3. 
First, we queried the LINCS L1000 data [4] from the LINCS program [5] converted into RNA-seq-like LINCS L1000 Signatures [6] using the SigCom LINCS API [7] to identify mimicker or reverser small molecules that maximally impact the expression of IMP3 in human cell lines (Fig. 1, Table 4). In addition, we also queried the LINCS L1000 data to identify single gene CRISPR knockouts that down-regulate the expression of IMP3 (Fig. 1, Table 5). These potential drug targets were filtered using the Common Fund IDG program's list of understudied proteins [8] to produce a set of additional targets (Table 6). Next, IMP3 was searched for knowledge provided by the Metabolomics Workbench MetGENE tool [9]. MetGENE aggregates knowledge about pathways, reactions, metabolites, and studies from the Metabolomics Workbench Common Fund supported resource [10]. The Metabolomics Workbench was searched to find associated metabolites linked to IMP3 [10]. Furthermore, we leveraged the Linked Data Hub API [11] to list knowledge about regulatory elements associated with IMP3 (Table 6). Finally, the GlyGen database [12] was queried to identify relevant sets of proteins that are the product of the IMP3 genes, as well as known post-translational modifications discovered on IMP3.\r +\r +1. Lonsdale, J. et al. The Genotype-Tissue Expression (GTEx) project. Nature Genetics vol. 45 580–585 (2013). doi:10.1038/ng.2653\r +2. Evangelista, J. E. et al. SigCom LINCS: data and metadata search engine for a million gene expression signatures. Nucleic Acids Research vol. 50 W697–W709 (2022). doi:10.1093/nar/gkac328\r +3. IDG Understudied Proteins, https://druggablegenome.net/AboutIDGProteinList\r +4. MetGENE, https://sc-cfdewebdev.sdsc.edu/MetGENE/metGene.php\r +5. The Metabolomics Workbench, https://www.metabolomicsworkbench.org/\r +6. Linked Data Hub, https://ldh.genome.network/cfde/ldh/\r +7. York, W. S. et al. GlyGen: Computational and Informatics Resources for Glycoscience. Glycobiology vol. 
30 72–73 (2019). doi:10.1093/glycob/cwz080""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-NC-SA-4.0" ; + schema1:name "Use Case 13: Novel Cell Surface Targets for Individual Cancer Patients Analyzed with Common Fund Datasets" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/814?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Genome assessment post assembly\r +\r +## General usage recommendations\r +\r +Please see the [Genome assessment post assembly](https://australianbiocommons.github.io/how-to-guides/genome_assembly/assembly_qc) guide.\r +\r +## See [change log](./change_log.md)\r +\r +## Attributions\r +\r +The workflow & the [doc_guidelines template](https://github.com/AustralianBioCommons/doc_guidelines) used are supported by the Australian BioCommons via Bioplatforms Australia funding, the Australian Research Data Commons (https://doi.org/10.47486/PL105) and the Queensland Government RICF programme. Bioplatforms Australia and the Australian Research Data Commons are enabled by the National Collaborative Research Infrastructure Strategy (NCRIS).\r +\r +\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.403.1" ; + schema1:isBasedOn "https://github.com/AustralianBioCommons/Genome-assessment-post-assembly.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genome assessment post assembly" ; + schema1:sdDatePublished "2024-08-05 10:22:13 +0100" ; + schema1:url "https://workflowhub.eu/workflows/403/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14172 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2022-11-07T07:10:28Z" ; + schema1:dateModified "2023-01-30T18:19:40Z" ; + schema1:description """# Genome assessment post assembly\r +\r +## General usage recommendations\r +\r +Please see the [Genome assessment post assembly](https://australianbiocommons.github.io/how-to-guides/genome_assembly/assembly_qc) guide.\r +\r +## See [change log](./change_log.md)\r +\r +## Attributions\r +\r +The workflow & the [doc_guidelines template](https://github.com/AustralianBioCommons/doc_guidelines) used are supported by the Australian BioCommons via Bioplatforms Australia funding, the Australian Research Data Commons (https://doi.org/10.47486/PL105) and the Queensland Government RICF programme. Bioplatforms Australia and the Australian Research Data Commons are enabled by the National Collaborative Research Infrastructure Strategy (NCRIS).\r +\r +\r +\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/403?version=3" ; + schema1:isPartOf , + ; + schema1:keywords "HiFi, hifiasm, QC, Quast, Meryl, Merqury, BUSCO" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Genome assessment post assembly" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/403?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1025?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/taxprofiler" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/taxprofiler" ; + schema1:sdDatePublished "2024-08-05 10:23:09 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1025/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15368 ; + schema1:creator , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:19Z" ; + schema1:dateModified "2024-06-11T12:55:19Z" ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1025?version=10" ; + schema1:keywords "Classification, illumina, long-reads, Metagenomics, microbiome, nanopore, pathogen, Profiling, shotgun, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/taxprofiler" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1025?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + schema1:datePublished "2022-11-28T16:36:23.118637" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.2" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/965?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/atacseq" ; + schema1:sdDatePublished "2024-08-05 10:24:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/965/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9720 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:42Z" ; + schema1:dateModified "2024-06-11T12:54:42Z" ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/965?version=8" ; + schema1:keywords "ATAC-seq, chromatin-accessibiity" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/atacseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/965?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. 
The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. 
With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.328.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_pmx_tutorial/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)" ; + schema1:sdDatePublished "2024-08-05 10:32:12 +0100" ; + schema1:url "https://workflowhub.eu/workflows/328/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8358 ; + schema1:creator , + ; + schema1:dateCreated "2022-04-07T10:32:20Z" ; + schema1:dateModified "2022-06-10T09:44:01Z" ; + schema1:description """# Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +**Based on the official [pmx tutorial](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate how to compute a **fast-growth mutation free energy** calculation, step by step, using the BioExcel **Building Blocks library (biobb)**. The particular example used is the **Staphylococcal nuclease** protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +The **non-equilibrium free energy calculation** protocol performs a **fast alchemical transition** in the direction **WT->Mut** and back **Mut->WT**. The two equilibrium trajectories needed for the tutorial, one for **Wild Type (WT)** and another for the **Mutated (Mut)** protein (Isoleucine 10 to Alanine -I10A-), have already been generated and are included in this example. We will name **WT as stateA** and **Mut as stateB**.\r +\r +![](https://raw.githubusercontent.com/bioexcel/biobb_wf_pmx_tutorial/master/biobb_wf_pmx_tutorial/notebooks/schema.png)\r +\r +The tutorial calculates the **free energy difference** in the folded state of a protein. Starting from **two 1ns-length independent equilibrium simulations** (WT and mutant), snapshots are selected to start **fast (50ps) transitions** driving the system in the **forward** (WT to mutant) and **reverse** (mutant to WT) directions, and the **work values** required to perform these transitions are collected. 
With these values, **Crooks Gaussian Intersection** (CGI), **Bennett Acceptance Ratio** (BAR) and **Jarzynski estimator** methods are used to calculate the **free energy difference** between the two states.\r +\r +*Please note that for the sake of disk space this tutorial is using 1ns-length equilibrium trajectories, whereas in the [original example](http://pmx.mpibpc.mpg.de/sardinia2018_tutorial1/eq.mdp) the equilibrium trajectories used were obtained from 10ns-length simulations.*\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/328?version=3" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Mutation Free Energy Calculations using BioExcel Building Blocks (biobb)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_pmx_tutorial/python/workflow.py" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.284.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_complex_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/284/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7128 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T08:38:51Z" ; + schema1:dateModified "2023-04-14T08:39:41Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/284?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_complex_md_setup/python/workflow.py" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """From the R1 and R2 fastq files of a single sample, make a scRNAseq counts matrix, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. Produces a processed AnnData \r +\r +Deprecated: use individual workflows instead for multiple samples""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/465?version=4" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq Single Sample Processing STARSolo" ; + schema1:sdDatePublished "2024-08-05 10:24:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/465/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 126954 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-12-12T03:15:52Z" ; + schema1:dateModified "2024-05-30T05:54:50Z" ; + schema1:description """From the R1 and R2 fastq files of a single sample, make a scRNAseq counts matrix, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. Produces a processed AnnData \r +\r +Deprecated: use individual workflows instead for multiple samples""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/465?version=3" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "scRNAseq Single Sample Processing STARSolo" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/465?version=4" ; + schema1:version 4 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1025?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/taxprofiler" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/taxprofiler" ; + schema1:sdDatePublished "2024-08-05 10:23:08 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1025/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15373 ; + schema1:creator , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:19Z" ; + schema1:dateModified "2024-06-11T12:55:19Z" ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1025?version=10" ; + schema1:keywords "Classification, illumina, long-reads, Metagenomics, microbiome, nanopore, pathogen, Profiling, shotgun, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/taxprofiler" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1025?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + schema1:datePublished "2024-04-22T12:13:28.281437" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.828.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_protein_complex_md_setup/docker" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Docker Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:24:52 +0100" ; + schema1:url 
"https://workflowhub.eu/workflows/828/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 786 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-22T11:10:41Z" ; + schema1:dateModified "2024-05-22T13:39:56Z" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Docker Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + 
schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_protein_complex_md_setup/docker/Dockerfile" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-03-06T13:04:13.877998" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + , + ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-atac-cutandrun" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.5" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-pe" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.5" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# ![nf-core/ampliseq](docs/images/nf-core-ampliseq_logo.png) + +[![Nextflow](https://img.shields.io/badge/nextflow-%E2%89%A519.10.0-brightgreen.svg)](https://www.nextflow.io/) +[![nf-core](https://img.shields.io/badge/nf--core-pipeline-brightgreen.svg)](https://nf-co.re/) +[![DOI](https://zenodo.org/badge/150448201.svg)](https://zenodo.org/badge/latestdoi/150448201) +[![Cite Preprint](https://img.shields.io/badge/Cite%20Us!-Cite%20Publication-important)](https://doi.org/10.3389/fmicb.2020.550420) + +[![GitHub Actions CI Status](https://github.com/nf-core/ampliseq/workflows/nf-core%20CI/badge.svg)](https://github.com/nf-core/ampliseq/actions) +[![GitHub Actions Linting Status](https://github.com/nf-core/ampliseq/workflows/nf-core%20linting/badge.svg)](https://github.com/nf-core/ampliseq/actions) +[![Nextflow](https://img.shields.io/badge/nextflow-%E2%89%A519.10.0-brightgreen.svg)](https://www.nextflow.io/) + +[![install with bioconda](https://img.shields.io/badge/install%20with-bioconda-brightgreen.svg)](https://bioconda.github.io/) +[![Docker](https://img.shields.io/docker/automated/nfcore/ampliseq.svg)](https://hub.docker.com/r/nfcore/ampliseq) +[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23ampliseq-4A154B?logo=slack)](https://nfcore.slack.com/channels/ampliseq) + +## Introduction + +**nfcore/ampliseq** is a bioinformatics analysis pipeline used for 16S rRNA amplicon sequencing data (currently supported is Illumina paired end data). + +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It comes with docker containers making installation trivial and results highly reproducible. + +## Quick Start + +1. Install [`nextflow`](https://nf-co.re/usage/installation) + +2. 
Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) or [`Podman`](https://podman.io/) for full pipeline reproducibility _(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_ + +3. Download the pipeline and test it on a minimal dataset with a single command: + + ```bash + nextflow run nf-core/ampliseq -profile test, + ``` + + > Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment. + +4. Start running your own analysis! + + ```bash + nextflow run nf-core/ampliseq -profile --input "data" --FW_primer GTGYCAGCMGCCGCGGTAA --RV_primer GGACTACNVGGGTWTCTAAT --metadata "data/Metadata.tsv" + ``` + +See [usage docs](https://nf-co.re/ampliseq/usage) for all of the available options when running the pipeline. + +## Documentation + +The nf-core/ampliseq pipeline comes with documentation about the pipeline: [usage](https://nf-co.re/ampliseq/usage) and [output](https://nf-co.re/ampliseq/output). 
+ +The workflow processes raw data from FastQ inputs ([FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)), trims primer sequences from the reads ([Cutadapt](https://journal.embnet.org/index.php/embnetjournal/article/view/200)), imports data into [QIIME2](https://www.nature.com/articles/s41587-019-0209-9), generates amplicon sequencing variants (ASV, [DADA2](https://www.nature.com/articles/nmeth.3869)), classifies features against the [SILVA](https://www.arb-silva.de/) [v132](https://www.arb-silva.de/documentation/release-132/) database, excludes unwanted taxa, produces absolute and relative feature/taxa count tables and plots, plots alpha rarefaction curves, computes alpha and beta diversity indices and plots thereof, and finally calls differentially abundant taxa ([ANCOM](https://www.ncbi.nlm.nih.gov/pubmed/26028277)). See the [output documentation](docs/output.md) for more details of the results. + +## Credits + +These scripts were originally written for use at the [Quantitative Biology Center (QBiC)](http://www.qbic.life) and [Microbial Ecology, Center for Applied Geosciences](http://www.uni-tuebingen.de/de/104325), part of Eberhard Karls Universität Tübingen (Germany) by Daniel Straub ([@d4straub](https://github.com/d4straub)) and Alexander Peltzer ([@apeltzer](https://github.com/apeltzer)). + +## Contributions and Support + +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md). + +For further information or help, don't hesitate to get in touch on the [Slack `#ampliseq` channel](https://nfcore.slack.com/channels/ampliseq) (you can join with [this invite](https://nf-co.re/join/slack)). 
+ +## Citation + +If you use `nf-core/ampliseq` for your analysis, please cite the `ampliseq` article as follows: +> Daniel Straub, Nia Blackwell, Adrian Langarica-Fuentes, Alexander Peltzer, Sven Nahnsen, Sara Kleindienst **Interpretations of Environmental Microbial Community Studies Are Biased by the Selected 16S rRNA (Gene) Amplicon Sequencing Pipeline** *Frontiers in Microbiology* 2020, 11:2652 [doi: 10.3389/fmicb.2020.550420](https://doi.org/10.3389/fmicb.2020.550420). + +You can cite the `nf-core/ampliseq` zenodo record for a specific version using the following [doi: 10.5281/zenodo.1493841](https://zenodo.org/badge/latestdoi/150448201) + +You can cite the `nf-core` publication as follows: + +> **The nf-core framework for community-curated bioinformatics pipelines.** +> +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen. +> +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x). +> ReadCube: [Full Access Link](https://rdcu.be/b1GjZ) +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-08-05 10:22:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5747 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:38Z" ; + schema1:dateModified "2024-06-11T12:54:38Z" ; + schema1:description """# ![nf-core/ampliseq](docs/images/nf-core-ampliseq_logo.png) + +[![Nextflow](https://img.shields.io/badge/nextflow-%E2%89%A519.10.0-brightgreen.svg)](https://www.nextflow.io/) +[![nf-core](https://img.shields.io/badge/nf--core-pipeline-brightgreen.svg)](https://nf-co.re/) +[![DOI](https://zenodo.org/badge/150448201.svg)](https://zenodo.org/badge/latestdoi/150448201) +[![Cite Preprint](https://img.shields.io/badge/Cite%20Us!-Cite%20Publication-important)](https://doi.org/10.3389/fmicb.2020.550420) + +[![GitHub Actions CI Status](https://github.com/nf-core/ampliseq/workflows/nf-core%20CI/badge.svg)](https://github.com/nf-core/ampliseq/actions) +[![GitHub Actions Linting Status](https://github.com/nf-core/ampliseq/workflows/nf-core%20linting/badge.svg)](https://github.com/nf-core/ampliseq/actions) +[![Nextflow](https://img.shields.io/badge/nextflow-%E2%89%A519.10.0-brightgreen.svg)](https://www.nextflow.io/) + +[![install with bioconda](https://img.shields.io/badge/install%20with-bioconda-brightgreen.svg)](https://bioconda.github.io/) +[![Docker](https://img.shields.io/docker/automated/nfcore/ampliseq.svg)](https://hub.docker.com/r/nfcore/ampliseq) +[![Get help on Slack](http://img.shields.io/badge/slack-nf--core%20%23ampliseq-4A154B?logo=slack)](https://nfcore.slack.com/channels/ampliseq) + +## Introduction + +**nfcore/ampliseq** is a bioinformatics analysis pipeline used for 16S rRNA amplicon sequencing data (currently supported is Illumina paired end data). + +The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. 
It comes with docker containers making installation trivial and results highly reproducible. + +## Quick Start + +1. Install [`nextflow`](https://nf-co.re/usage/installation) + +2. Install any of [`Docker`](https://docs.docker.com/engine/installation/), [`Singularity`](https://www.sylabs.io/guides/3.0/user-guide/) or [`Podman`](https://podman.io/) for full pipeline reproducibility _(please only use [`Conda`](https://conda.io/miniconda.html) as a last resort; see [docs](https://nf-co.re/usage/configuration#basic-configuration-profiles))_ + +3. Download the pipeline and test it on a minimal dataset with a single command: + + ```bash + nextflow run nf-core/ampliseq -profile test, + ``` + + > Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment. + +4. Start running your own analysis! + + ```bash + nextflow run nf-core/ampliseq -profile --input "data" --FW_primer GTGYCAGCMGCCGCGGTAA --RV_primer GGACTACNVGGGTWTCTAAT --metadata "data/Metadata.tsv" + ``` + +See [usage docs](https://nf-co.re/ampliseq/usage) for all of the available options when running the pipeline. + +## Documentation + +The nf-core/ampliseq pipeline comes with documentation about the pipeline: [usage](https://nf-co.re/ampliseq/usage) and [output](https://nf-co.re/ampliseq/output). 
+ +The workflow processes raw data from FastQ inputs ([FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)), trims primer sequences from the reads ([Cutadapt](https://journal.embnet.org/index.php/embnetjournal/article/view/200)), imports data into [QIIME2](https://www.nature.com/articles/s41587-019-0209-9), generates amplicon sequencing variants (ASV, [DADA2](https://www.nature.com/articles/nmeth.3869)), classifies features against the [SILVA](https://www.arb-silva.de/) [v132](https://www.arb-silva.de/documentation/release-132/) database, excludes unwanted taxa, produces absolute and relative feature/taxa count tables and plots, plots alpha rarefaction curves, computes alpha and beta diversity indices and plots thereof, and finally calls differentially abundant taxa ([ANCOM](https://www.ncbi.nlm.nih.gov/pubmed/26028277)). See the [output documentation](docs/output.md) for more details of the results. + +## Credits + +These scripts were originally written for use at the [Quantitative Biology Center (QBiC)](http://www.qbic.life) and [Microbial Ecology, Center for Applied Geosciences](http://www.uni-tuebingen.de/de/104325), part of Eberhard Karls Universität Tübingen (Germany) by Daniel Straub ([@d4straub](https://github.com/d4straub)) and Alexander Peltzer ([@apeltzer](https://github.com/apeltzer)). + +## Contributions and Support + +If you would like to contribute to this pipeline, please see the [contributing guidelines](.github/CONTRIBUTING.md). + +For further information or help, don't hesitate to get in touch on the [Slack `#ampliseq` channel](https://nfcore.slack.com/channels/ampliseq) (you can join with [this invite](https://nf-co.re/join/slack)). 
+ +## Citation + +If you use `nf-core/ampliseq` for your analysis, please cite the `ampliseq` article as follows: +> Daniel Straub, Nia Blackwell, Adrian Langarica-Fuentes, Alexander Peltzer, Sven Nahnsen, Sara Kleindienst **Interpretations of Environmental Microbial Community Studies Are Biased by the Selected 16S rRNA (Gene) Amplicon Sequencing Pipeline** *Frontiers in Microbiology* 2020, 11:2652 [doi: 10.3389/fmicb.2020.550420](https://doi.org/10.3389/fmicb.2020.550420). + +You can cite the `nf-core/ampliseq` zenodo record for a specific version using the following [doi: 10.5281/zenodo.1493841](https://zenodo.org/badge/latestdoi/150448201) + +You can cite the `nf-core` publication as follows: + +> **The nf-core framework for community-curated bioinformatics pipelines.** +> +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen. +> +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x). +> ReadCube: [Full Access Link](https://rdcu.be/b1GjZ) +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=5" ; + schema1:version 5 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 182054 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """![CoVigator logo](images/CoVigator_logo_txt_nobg.png "CoVigator logo")\r +\r +# CoVigator pipeline: variant detection pipeline for Sars-CoV-2\r +\r +[![DOI](https://zenodo.org/badge/374669617.svg)](https://zenodo.org/badge/latestdoi/374669617)\r +[![Run tests](https://github.com/TRON-Bioinformatics/covigator-ngs-pipeline/actions/workflows/automated_tests.yml/badge.svg?branch=master)](https://github.com/TRON-Bioinformatics/covigator-ngs-pipeline/actions/workflows/automated_tests.yml)\r +[![Powered by NumFOCUS](https://img.shields.io/badge/powered%20by-Nextflow-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://www.nextflow.io/)\r +[![License](https://img.shields.io/badge/license-MIT-green)](https://opensource.org/licenses/MIT)\r +\r +\r +\r +The Covigator pipeline processes SARS-CoV-2 FASTQ or FASTA files into annotated and normalized analysis ready VCF files.\r +It also classifies samples into lineages using pangolin.\r +The pipeline is implemented in the Nextflow framework (Di Tommaso, 2017), it is a stand-alone pipeline that can be\r +used independently of the CoVigator dashboard and knowledge base.\r +\r +Although it is configured by default for SARS-CoV-2 it can be employed for the analysis of other microbial organisms \r +if the required references are provided.\r +\r +The result of the pipeline is one or more annotated VCFs with the list of SNVs and indels ready for analysis.\r +\r +The results from the CoVigator pipeline populate our CoVigator dashboard [https://covigator.tron-mainz.de](https://covigator.tron-mainz.de) \r +\r +**Table of Contents**\r +\r +1. [Two pipelines in one](#id1)\r +2. [Implementation](#id2)\r +3. [How to run](#id3)\r +4. [Understanding the output](#id4)\r +6. [Annotation resources](#id5)\r +7. [Future work](#id6)\r +8. 
[Bibliography](#id7)\r +\r +\r +## Two pipelines in one\r +\r +In CoVigator we analyse samples from two different formats, FASTQ files (e.g.: as provided by the European Nucleotide \r +Archive) and FASTA files containing a consensus assembly. While from the first we get the raw reads, \r +from the second we obtain already assembled genomes. Each of these formats has to be \r +analysed differently. Also, the output data that we can obtain from each of these is different.\r +\r +![CoVigator pipeline](images/pipeline.drawio.png)\r +\r +### Pipeline for FASTQ files\r +\r +When FASTQ files are provided the pipeline includes the following steps:\r +- **Trimming**. `fastp` is used to trim reads with default values. This step also includes QC filtering.\r +- **Alignment**. `BWA mem 2` is used for the alignment of single or paired end samples.\r +- **BAM preprocessing**. BAM files are prepared and duplicate reads are marked using GATK and Sambamba tools.\r +- **Primer trimming**. When a BED with primers is provided, these are trimmed from the reads using iVar. This is applicable to the results from all variant callers.\r +- **Coverage analysis**. `samtools coverage` and `samtools depth` are used to compute the horizontal and vertical \r + coverage respectively.\r +- **Variant calling**. Four different variant callers are employed: BCFtools, LoFreq, iVar and GATK. \r + Subsequent processing of resulting VCF files is independent for each caller.\r +- **Variant normalization**. `bcftools norm` is employed to left align indels, trim variant calls and remove variant duplicates.\r +- **Technical annotation**. `VAFator` is employed to add VAF and coverage annotations from the reads pileup.\r +- **Phasing**. Clonal mutations (ie: VAF >= 0.8) occurring in the same amino acid are merged for its correct functional annotation.\r +- **Biological annotation**. 
`SnpEff` is employed to annotate the variant consequences of variants and\r + `bcftools annotate` is employed to add additional SARS-CoV-2 annotations.\r +- **Lineage determination**. `pangolin` is used for this purpose, this runs over the results from each of the variant callers separately.\r +\r +Both single end and paired end FASTQ files are supported.\r +\r +### Pipeline for FASTA files\r +\r +When a FASTA file is provided with a single assembly sequence the pipeline includes the following steps:\r +- **Variant calling**. A Smith-Waterman global alignment is performed against the reference sequence to call SNVs and \r + indels. Indels longer than 50 bp and at the beginning or end of the assembly sequence are excluded. Any mutation where\r + either reference or assembly contain an N is excluded.\r +- **Variant normalization**. Same as described above.\r +- **Phasing**. mutations occurring in the same amino acid are merged for its correct annotation.\r +- **Biological annotation**. Same as described above.\r +- **Lineage determination**. `pangolin` is used for this purpose.\r +\r +The FASTA file is expected to contain a single assembly sequence. \r +Bear in mind that only clonal variants can be called on the assembly.\r +\r +### Pipeline for VCF files\r +\r +When a VCF file is provided the pipeline includes the following steps:\r +- **Variant normalization**. Same as described above.\r +- **Technical annotation**. Same as described above (optional if BAM is provided)\r +- **Phasing**. mutations occurring in the same amino acid are merged for its correct annotation.\r +- **Biological annotation**. Same as described above\r +- **Lineage determination**. 
`pangolin` is used for this purpose.\r +\r +## Implementation\r +\r +The pipeline is implemented as a Nextflow workflow with its DSL2 syntax.\r +The dependencies are managed through a conda environment to ensure version traceability and reproducibility.\r +The references for SARS-CoV-2 are embedded in the pipeline.\r +The pipeline is based on a number of third-party tools, plus a custom implementation based on biopython (Cock, 2009) \r +for the alignment and subsequent variant calling over a FASTA file.\r +\r +All code is open sourced in GitHub [https://github.com/TRON-Bioinformatics/covigator-ngs-pipeline](https://github.com/TRON-Bioinformatics/covigator-ngs-pipeline)\r +and made available under the MIT license. We welcome any contribution. \r +If you have troubles using the CoVigator pipeline or you find an issue, we will be thankful if you would report a ticket \r +in GitHub.\r +\r +The alignment, BAM preprocessing and variant normalization pipelines are based on the implementations in additional \r +Nextflow pipelines within the TronFlow initiative [https://tronflow-docs.readthedocs.io/](https://tronflow-docs.readthedocs.io/). \r +\r +\r +### Variant annotations\r +\r +The variants derived from a FASTQ file are annotated on the `FILTER` column using the VAFator \r +(https://github.com/TRON-Bioinformatics/vafator) variant allele frequency \r +(VAF) into `LOW_FREQUENCY`, `SUBCLONAL`, `LOW_QUALITY_CLONAL` and finally `PASS` variants correspond to clonal variants. \r +By default, variants with a VAF < 2 % are considered `LOW_FREQUENCY`, variants with a VAF >= 2 % and < 50 % are \r +considered `SUBCLONAL` and variants with a VAF >= 50 % and < 80 % are considered `LOW_QUALITY_CLONAL`. 
\r +This thresholds can be changed with the parameters `--low_frequency_variant_threshold`,\r +`--subclonal_variant_threshold` and `--low_quality_clonal_variant_threshold` respectively.\r +\r +VAFator technical annotations:\r +\r +- `INFO/vafator_af`: variant allele frequency of the mutation \r +- `INFO/vafator_ac`: number of reads supporting the mutation \r +- `INFO/vafator_dp`: total number of reads at the position, in the case of indels this represents the number of reads in the previous position\r +\r +SnpEff provides the functional annotations. And all mutations are additionally annotated with the following SARS-CoV-2 specific annotations:\r +- ConsHMM conservation scores as reported in (Kwon, 2021)\r +- Pfam domains as reported in Ensemble annotations.\r +\r +Biological annotations: \r +\r +- `INFO/ANN` are the SnpEff consequence annotations (eg: overlapping gene, effect of the mutation). \r +This are described in detail here [http://pcingola.github.io/SnpEff/se_inputoutput/](http://pcingola.github.io/SnpEff/se_inputoutput/) \r +- `INFO/CONS_HMM_SARS_COV_2` is the ConsHMM conservation score in SARS-CoV-2\r +- `INFO/CONS_HMM_SARBECOVIRUS` is the ConsHMM conservation score among Sarbecovirus\r +- `INFO/CONS_HMM_VERTEBRATE_COV` is the ConsHMM conservation score among vertebrate Corona virus\r +- `INFO/PFAM_NAME` is the Interpro name for the overlapping Pfam domains\r +- `INFO/PFAM_DESCRIPTION` is the Interpro description for the overlapping Pfam domains\r +- `INFO/problematic` contains the filter provided in DeMaio et al. (2020) for problematic mutations\r +\r +According to DeMaio et al. 
(2020), mutations at the beginning (ie: POS <= 50) and end (ie: POS >= 29,804) of the \r +genome are filtered out\r +\r +This is an example of biological annotations of a missense mutation in the spike protein on the N-terminal subunit 1 domain.\r +```\r +ANN=A|missense_variant|MODERATE|S|gene-GU280_gp02|transcript|TRANSCRIPT_gene-GU280_gp02|protein_coding|1/1|c.118G>A|\r +p.D40N|118/3822|118/3822|40/1273||;CONS_HMM_SARS_COV_2=0.57215;CONS_HMM_SARBECOVIRUS=0.57215;CONS_HMM_VERTEBRATE_COV=0;\r +PFAM_NAME=bCoV_S1_N;PFAM_DESCRIPTION=Betacoronavirus-like spike glycoprotein S1, N-terminal\r +```\r +\r +\r +### Phasing limitations\r +\r +The phasing implementation is applicable only to clonal mutations. It assumes all clonal mutations are in phase and \r +hence it merges those occurring in the same amino acid.\r +In order to phase intrahost mutations we would need to implement a read-backed phasing approach such as in WhatsHap \r +or GATK's ReadBackedPhasing. Unfortunately these tools do not support the scenario of a haploid organism with an\r +undefined number of subclones.\r +For this reason, phasing is implemented with custom Python code at `bin/phasing.py`.\r +\r +### Primers trimming\r +\r +With some library preparation protocols such as ARTIC it is recommended to trim the primers from the reads.\r +We have observed that if primers are not trimmed spurious mutations are being called specially SNVs with lower frequencies and long deletions.\r +Also the variant allele frequencies of clonal mutations are underestimated.\r +\r +The BED files containing the primers for each ARTIC version can be found at https://github.com/artic-network/artic-ncov2019/tree/master/primer_schemes/nCoV-2019.\r +\r +If the adequate BED file is provided to the CoVigator pipeline with `--primers` the primers will be trimmed with iVar. 
\r +This affects the output of every variant caller, not only iVar.\r +\r +### Reference data\r +\r +The default SARS-CoV-2 reference files correspond to Sars_cov_2.ASM985889v3 and were downloaded from Ensembl servers.\r +No additional parameter needs to be provided to use the default SARS-CoV-2 reference genome.\r +\r +#### Using a custom reference genome\r +\r +These references can be customised to use a different SARS-CoV-2 reference or to analyse a different virus.\r +Two files need to be provided:\r +- Use a custom reference genome by providing the parameter `--reference your.fasta`.\r +- Gene annotation file in GFFv3 format `--gff your.gff`. This is only required to run iVar\r +\r +Additionally, the FASTA needs bwa indexes, .fai index and a .dict index.\r +These indexes can be generated with the following two commands:\r +```\r +bwa index reference.fasta\r +samtools faidx reference.fasta\r +gatk CreateSequenceDictionary --REFERENCE your.fasta\r +```\r +\r +**NOTE**: beware that for Nextflow to find these indices the reference needs to be passed as an absolute path.\r +\r +The SARS-CoV-2 specific annotations will be skipped when using a custom genome.\r +\r +In order to have SnpEff functional annotations available you will also need to provide three parameters:\r +- `--snpeff_organism`: organism to annotate with SnpEff (ie: as registered in SnpEff)\r +- `--snpeff_data`: path to the SnpEff data folder\r +- `--snpeff_config`: path to the SnpEff config file\r +\r +### Intrahost mutations\r +\r +Some mutations may be observed in a subset of the virus sample, this may arise through intrahost virus evolution or\r +co-infection. Intrahost mutations can only be detected when analysing the raw reads (ie: the FASTQs) \r +as in the assembly (ie: the FASTA file) a single virus consensus sequence is represented. \r +BCFtools and GATK do not normally capture intrahost mutations; on the other hand LoFreq and iVar both capture\r +mutations that deviate from a clonal-like VAF. 
\r +Nevertheless, mutations with lower variant allele frequency (VAF) are challenging to distinguish from sequencing and\r +analytical errors. \r +\r +Mutations are annotated on the `FILTER` column using the VAF into three categories: \r +- `LOW_FREQUENCY`: subset of intrahost mutations with lowest frequencies, potentially enriched with false positive calls (VAF < 2 %).\r +- `SUBCLONAL`: subset of intrahost mutations with higher frequencies (2 % <= VAF < 50 %).\r +- `LOW_QUALITY_CLONAL`: subset of clonal mutations with lower frequencies (50 % <= VAF < 80 %).\r +- `PASS` clonal mutations (VAF >= 80 %)\r +\r +Other low quality mutations are removed from the output.\r +\r +The VAF thresholds can be changed with the parameters `--low_frequency_variant_threshold`,\r +`--subclonal_variant_threshold` and `--low_quality_clonal_variant_threshold`.\r +\r +## How to run\r +\r +### Requirements\r +\r +- Nextflow >= 19.10.0\r +- Java >= 8\r +- Conda >=4.9\r +\r +### Testing\r +\r +To run the workflow on a test assembly dataset run:\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline -profile conda,test_fasta\r +```\r +\r +Find the output in the folder `covigator_test_fasta`.\r +\r +To run the workflow on a test raw reads dataset run:\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline -profile conda,test_fastq\r +```\r +\r +Find the output in the folder `covigator_test_fastq`.\r +\r +The above commands are useful to create the conda environments beforehand.\r +\r +**NOTE**: pangolin is the most time-consuming step of the whole pipeline. 
To make it faster, locate the conda \r +environment that Nextflow created with pangolin (eg: `find $YOUR_NEXTFOW_CONDA_ENVS_FOLDER -name pangolin`) and run\r +`pangolin --decompress-model`.\r +\r +### Running\r +\r +For paired end reads:\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline \\\r +[-r v0.10.0] \\\r +[-profile conda] \\\r +--fastq1 \\\r +--fastq2 \\\r +--name example_run \\\r +--output \\\r +[--reference /Sars_cov_2.ASM985889v3.fa] \\\r +[--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +\r +For single end reads:\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline \\\r +[-r v0.10.0] \\\r +[-profile conda] \\\r +--fastq1 \\\r +--name example_run \\\r +--output \\\r +[--reference /Sars_cov_2.ASM985889v3.fa] \\\r +[--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +\r +For assembly:\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline \\\r +[-r v0.10.0] \\\r +[-profile conda] \\\r +--fasta \\\r +--name example_run \\\r +--output \\\r +[--reference /Sars_cov_2.ASM985889v3.fa] \\\r +[--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +\r +For VCF:\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline \\\r +[-r v0.10.0] \\\r +[-profile conda] \\\r +--vcf \\\r +--name example_run \\\r +--output \\\r +[--reference /Sars_cov_2.ASM985889v3.fa] \\\r +[--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +\r +As an optional input when processing directly VCF files you can provide BAM files to annotate VAFs:\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline \\\r +[-r v0.10.0] \\\r +[-profile conda] \\\r +--vcf \\\r +--bam \\\r +--bai \\\r +--name example_run \\\r +--output \\\r +[--reference /Sars_cov_2.ASM985889v3.fa] \\\r +[--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +\r +For batch processing of reads use `--input_fastqs_list` and `--name`.\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline [-profile conda] --input_fastqs_list --library --output [--reference /Sars_cov_2.ASM985889v3.fa] [--gff 
/Sars_cov_2.ASM985889v3.gff3]\r +```\r +where the TSV file contains two or three columns tab-separated columns **without header**. Columns: sample name, path to FASTQ 1 and optionally path to FASTQ 2. \r +\r +| Sample | FASTQ 1 | FASTQ 2 (optional column) |\r +|-----------|-------------------------------|-------------------------------|\r +| sample1 | /path/to/sample1_fastq1.fastq | /path/to/sample1_fastq2.fastq |\r +| sample2 | /path/to/sample2_fastq1.fastq | /path/to/sample2_fastq2.fastq |\r +| ... | ... | ... |\r +\r +\r +For batch processing of assemblies use `--input_fastas_list`.\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline [-profile conda] --input_fastas_list --library --output [--reference /Sars_cov_2.ASM985889v3.fa] [--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +where the TSV file contains two columns tab-separated columns **without header**. Columns: sample name and path to FASTA.\r +\r +| Sample | FASTA | \r +|-----------|------------------------|\r +| sample1 | /path/to/sample1.fasta |\r +| sample2 | /path/to/sample2.fasta |\r +| ... | ... |\r +\r +For batch processing of VCFs use `--input_vcfs_list`.\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline [-profile conda] --input_vcfs_list --output [--reference /Sars_cov_2.ASM985889v3.fa] [--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +where the TSV file contains two columns tab-separated columns **without header**. Columns: sample name and path to VCF.\r +\r +| Sample | FASTA |\r +|-----------|------------------------|\r +| sample1 | /path/to/sample1.vcf |\r +| sample2 | /path/to/sample2.vcf |\r +| ... | ... 
|\r +\r +Optionally, provide BAM files for batch processing of VCFs using `--input_bams_list`.\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline [-profile conda] \\\r + --input_vcfs_list \\\r + --input_bams_list \\\r + --output \\\r + [--reference /Sars_cov_2.ASM985889v3.fa] \\\r + [--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +where the BAMs TSV file contains three columns tab-separated columns **without header**. Columns: sample name, \r +path to BAM and path to BAI.\r +\r +| Sample | BAM | BAI |\r +|-----------|----------------------|----------------------|\r +| sample1 | /path/to/sample1.bam | /path/to/sample1.bai |\r +| sample2 | /path/to/sample2.bam | /path/to/sample2.bai |\r +| ... | ... | ... |\r +\r +\r +\r +### Getting help\r +\r +You can always contact us directly or create a GitHub issue, otherwise see all available options using `--help`:\r +```\r +$ nextflow run tron-bioinformatics/covigator-ngs-pipeline -profile conda --help\r +\r +Usage:\r + nextflow run tron-bioinformatics/covigator-ngs-pipeline -profile conda --help\r +\r +Input:\r + * --fastq1: the first input FASTQ file (not compatible with --fasta, nor --vcf)\r + * --fasta: the FASTA file containing the assembly sequence (not compatible with --fastq1, nor --vcf)\r + * --vcf: the VCF file containing mutations to analyze (not compatible with --fastq1, nor --fasta)\r + * --bam: the BAM file containing reads to annotate VAFs on a VCF (not compatible with --fastq1, nor --fasta)\r + * --bai: the BAI index for a BAM file (not compatible with --fastq1, nor --fasta)\r + * --name: the sample name, output files will be named after this name\r + * --output: the folder where to publish output\r + * --input_fastqs_list: alternative to --name and --fastq1 for batch processing\r + * --library: required only when using --input_fastqs\r + * --input_fastas_list: alternative to --name and --fasta for batch processing\r + * --input_vcfs_list: alternative to --name and --vcf for batch processing\r + 
* --input_bams_list: alternative to --name, --vcf, --bam and --bai for batch processing\r +\r +Optional input only required to use a custom reference:\r + * --reference: the reference genome FASTA file, *.fai, *.dict and bwa indexes are required.\r + * --gff: the GFFv3 gene annotations file (required to run iVar and to phase mutations from all variant callers) \r + * --snpeff_data: path to the SnpEff data folder, it will be useful to use the pipeline on other virus than SARS-CoV-2\r + * --snpeff_config: path to the SnpEff config file, it will be useful to use the pipeline on other virus than SARS-CoV-2\r + * --snpeff_organism: organism to annotate with SnpEff, it will be useful to use the pipeline on other virus than SARS-CoV-2\r +\r +Optional input:\r + * --fastq2: the second input FASTQ file\r + * --primers: a BED file containing the primers used during library preparation. If provided primers are trimmed from the reads.\r + * --min_base_quality: minimum base call quality to take a base into account for variant calling (default: 20)\r + * --min_mapping_quality: minimum mapping quality to take a read into account for variant calling (default: 20)\r + * --vafator_min_base_quality: minimum base call quality to take a base into account for VAF annotation (default: 0)\r + * --vafator_min_mapping_quality: minimum mapping quality to take a read into account for VAF annotation (default: 0)\r + * --low_frequency_variant_threshold: VAF threshold to mark a variant as low frequency (default: 0.02)\r + * --subclonal_variant_threshold: VAF superior threshold to mark a variant as subclonal (default: 0.5)\r + * --lq_clonal_variant_threshold: VAF superior threshold to mark a variant as loq quality clonal (default: 0.8)\r + * --memory: the ammount of memory used by each job (default: 3g)\r + * --cpus: the number of CPUs used by each job (default: 1)\r + * --skip_lofreq: skips calling variants with LoFreq\r + * --skip_gatk: skips calling variants with GATK\r + * --skip_bcftools: 
skips calling variants with BCFTools\r + * --skip_ivar: skips calling variants with iVar\r + * --skip_pangolin: skips lineage determination with pangolin\r + * --match_score: global alignment match score, only applicable for assemblies (default: 2)\r + * --mismatch_score: global alignment mismatch score, only applicable for assemblies (default: -1)\r + * --open_gap_score: global alignment open gap score, only applicable for assemblies (default: -3)\r + * --extend_gap_score: global alignment extend gap score, only applicable for assemblies (default: -0.1)\r + * --skip_sarscov2_annotations: skip some of the SARS-CoV-2 specific annotations (default: false)\r + * --keep_intermediate: keep intermediate files (ie: BAM files and intermediate VCF files)\r + * --args_bcftools_mpileup: additional arguments for bcftools mpileup command (eg: --args_bcftools_mpileup='--ignore-overlaps')\r + * --args_bcftools_call: additional arguments for bcftools call command (eg: --args_bcftools_call='--something')\r + * --args_lofreq: additional arguments for lofreq command (eg: --args_lofreq='--something')\r + * --args_gatk: additional arguments for gatk command (eg: --args_gatk='--something')\r + * --args_ivar_samtools: additional arguments for ivar samtools mpileup command (eg: --args_ivar_samtools='--ignore-overlaps')\r + * --args_ivar: additional arguments for ivar command (eg: --args_ivar='--something')\r +\r +Output:\r + * Output a VCF file for each of BCFtools, GATK, LoFreq and iVar when FASTQ files are\r + provided or a single VCF obtained from a global alignment when a FASTA file is provided.\r + * A pangolin results file for each of the VCF files.\r + * Only when FASTQs are provided:\r + * FASTP statistics\r + * Depth and breadth of coverage analysis results\r + \r +```\r +\r +## Understanding the output\r +\r +Although the VCFs are normalized for both pipelines, the FASTQ pipeline runs four variant callers, while the FASTA\r +pipeline runs a single variant caller. 
Also, there are several metrics in the FASTQ pipeline that are not present\r +in the output of the FASTA pipeline. Here we will describe these outputs.\r +\r +### FASTQ pipeline output\r +\r +Find in the table below a description of each of the expected files and a link to a sample file for the FASTQ pipeline.\r +The VCF files will be described in more detail later.\r +\r +| Name | Description | Sample file |\r +|---------------------------------|----------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------|\r +| $NAME.fastp_stats.json | Output metrics of the fastp trimming process in JSON format | [ERR4145453.fastp_stats.json](_static/covigator_pipeline_sample_output_reads/ERR4145453.fastp_stats.json) |\r +| $NAME.fastp_stats.html | Output metrics of the fastp trimming process in HTML format | [ERR4145453.fastp_stats.html](_static/covigator_pipeline_sample_output_reads/ERR4145453.fastp_stats.html) |\r +| $NAME.deduplication_metrics.txt | Deduplication metrics | [ERR4145453.deduplication_metrics.txt](_static/covigator_pipeline_sample_output_reads/ERR4145453.deduplication_metrics.txt) |\r +| $NAME.coverage.tsv | Coverage metrics (eg: mean depth, % horizontal coverage) | [ERR4145453.coverage.tsv](_static/covigator_pipeline_sample_output_reads/ERR4145453.coverage.tsv) |\r +| $NAME.depth.tsv | Depth of coverage per position | [ERR4145453.depth.tsv](_static/covigator_pipeline_sample_output_reads/ERR4145453.depth.tsv) |\r +| $NAME.bcftools.vcf.gz | Bgzipped, tabix-indexed and annotated output VCF from BCFtools | [ERR4145453.bcftools.normalized.annotated.vcf.gz](_static/covigator_pipeline_sample_output_reads/ERR4145453.bcftools.normalized.annotated.vcf.gz) |\r +| $NAME.gatk.vcf.gz | Bgzipped, tabix-indexed and annotated output VCF from GATK | 
[ERR4145453.gatk.normalized.annotated.vcf.gz](_static/covigator_pipeline_sample_output_reads/ERR4145453.gatk.normalized.annotated.vcf.gz) |\r +| $NAME.lofreq.vcf.gz | Bgzipped, tabix-indexed and annotated output VCF from LoFreq | [ERR4145453.lofreq.normalized.annotated.vcf.gz](_static/covigator_pipeline_sample_output_reads/ERR4145453.lofreq.normalized.annotated.vcf.gz) |\r +| $NAME.ivar.vcf.gz | Bgzipped, tabix-indexed and annotated output VCF from iVar | [ERR4145453.ivar.tsv](_static/covigator_pipeline_sample_output_reads/ERR4145453.ivar.tsv) |\r +| $NAME.lofreq.pangolin.csv | Pangolin CSV output file derived from LoFreq mutations | [ERR4145453.lofreq.pangolin.csv](_static/covigator_pipeline_sample_output_reads/ERR4145453.lofreq.pangolin.csv) |\r +\r +\r +### FASTA pipeline output\r +\r +The FASTA pipeline returns a single VCF file. The VCF files will be described in more detail later.\r +\r +| Name | Description | Sample file |\r +|-----------------------------|--------------------------------------------------------------|------------------------------------------------------------------------------------------------------|\r +| $NAME.assembly.vcf.gz | Bgzipped, tabix-indexed and annotated output VCF | [ERR4145453.assembly.normalized.annotated.vcf.gz](_static/covigator_pipeline_sample_output_assembly/hCoV-19_NTXX.assembly.normalized.annotated.vcf.gz) |\r +\r +\r +## Annotations resources\r +\r +SARS-CoV-2 ASM985889v3 references were downloaded from Ensembl on 6th of October 2020:\r +- ftp://ftp.ensemblgenomes.org/pub/viruses/fasta/sars_cov_2/dna/Sars_cov_2.ASM985889v3.dna.toplevel.fa.gz\r +- ftp://ftp.ensemblgenomes.org/pub/viruses/gff3/sars_cov_2/Sars_cov_2.ASM985889v3.101.gff3.gz\r +\r +ConsHMM mutation depletion scores downloaded on 1st of July 2021:\r +- https://github.com/ernstlab/ConsHMM_CoV/blob/master/wuhCor1.mutDepletionConsHMM.bed\r +- https://github.com/ernstlab/ConsHMM_CoV/blob/master/wuhCor1.mutDepletionSarbecovirusConsHMM.bed\r +- 
https://github.com/ernstlab/ConsHMM_CoV/blob/master/wuhCor1.mutDepletionVertebrateCoVConsHMM.bed\r +\r +Gene annotations including Pfam domains downloaded from Ensembl on 25th of February 2021 from:\r +- ftp://ftp.ensemblgenomes.org/pub/viruses/json/sars_cov_2/sars_cov_2.json\r +\r +\r +## Future work\r +\r +- Primer trimming on an arbitrary sequencing library.\r +- Pipeline for Oxford Nanopore technology.\r +- Variant calls from assemblies contain an abnormally high number of deletions of size greater than 3 bp. This\r +is a technical artifact that would need to be avoided.\r +\r +## Bibliography\r +\r +- Di Tommaso, P., Chatzou, M., Floden, E. W., Barja, P. P., Palumbo, E., & Notredame, C. (2017). Nextflow enables reproducible computational workflows. Nature Biotechnology, 35(4), 316–319. https://doi.org/10.1038/nbt.3820\r +- Vasimuddin Md, Sanchit Misra, Heng Li, Srinivas Aluru. Efficient Architecture-Aware Acceleration of BWA-MEM for Multicore Systems. IEEE Parallel and Distributed Processing Symposium (IPDPS), 2019.\r +- Adrian Tan, Gonçalo R. Abecasis and Hyun Min Kang. Unified Representation of Genetic Variants. Bioinformatics (2015) 31(13): 2202-2204](http://bioinformatics.oxfordjournals.org/content/31/13/2202) and uses bcftools [Li, H. (2011). A statistical framework for SNP calling, mutation discovery, association mapping and population genetical parameter estimation from sequencing data. Bioinformatics (Oxford, England), 27(21), 2987–2993. 10.1093/bioinformatics/btr509\r +- Danecek P, Bonfield JK, Liddle J, Marshall J, Ohan V, Pollard MO, Whitwham A, Keane T, McCarthy SA, Davies RM, Li H. Twelve years of SAMtools and BCFtools. Gigascience. 2021 Feb 16;10(2):giab008. doi: 10.1093/gigascience/giab008. PMID: 33590861; PMCID: PMC7931819.\r +- Van der Auwera GA, Carneiro M, Hartl C, Poplin R, del Angel G, Levy-Moonshine A, Jordan T, Shakir K, Roazen D, Thibault J, Banks E, Garimella K, Altshuler D, Gabriel S, DePristo M. (2013). 
From FastQ Data to High-Confidence Variant Calls: The Genome Analysis Toolkit Best Practices Pipeline. Curr Protoc Bioinformatics, 43:11.10.1-11.10.33. DOI: 10.1002/0471250953.bi1110s43.\r +- Martin, M., Patterson, M., Garg, S., O Fischer, S., Pisanti, N., Klau, G., Schöenhuth, A., & Marschall, T. (2016). WhatsHap: fast and accurate read-based phasing. BioRxiv, 085050. https://doi.org/10.1101/085050\r +- Danecek, P., & McCarthy, S. A. (2017). BCFtools/csq: haplotype-aware variant consequences. Bioinformatics, 33(13), 2037–2039. https://doi.org/10.1093/bioinformatics/btx100\r +- Wilm, A., Aw, P. P. K., Bertrand, D., Yeo, G. H. T., Ong, S. H., Wong, C. H., Khor, C. C., Petric, R., Hibberd, M. L., & Nagarajan, N. (2012). LoFreq: A sequence-quality aware, ultra-sensitive variant caller for uncovering cell-population heterogeneity from high-throughput sequencing datasets. Nucleic Acids Research, 40(22), 11189–11201. https://doi.org/10.1093/nar/gks918\r +- Grubaugh, N. D., Gangavarapu, K., Quick, J., Matteson, N. L., De Jesus, J. G., Main, B. J., Tan, A. L., Paul, L. M., Brackney, D. E., Grewal, S., Gurfield, N., Van Rompay, K. K. A., Isern, S., Michael, S. F., Coffey, L. L., Loman, N. J., & Andersen, K. G. (2019). An amplicon-based sequencing framework for accurately measuring intrahost virus diversity using PrimalSeq and iVar. Genome Biology, 20(1), 8. https://doi.org/10.1186/s13059-018-1618-7\r +- Shifu Chen, Yanqing Zhou, Yaru Chen, Jia Gu; fastp: an ultra-fast all-in-one FASTQ preprocessor, Bioinformatics, Volume 34, Issue 17, 1 September 2018, Pages i884–i890, https://doi.org/10.1093/bioinformatics/bty560\r +- Kwon, S. Bin, & Ernst, J. (2021). Single-nucleotide conservation state annotation of the SARS-CoV-2 genome. Communications Biology, 4(1), 1–11. https://doi.org/10.1038/s42003-021-02231-w\r +- Cock, P. J., Antao, T., Chang, J. T., Chapman, B. A., Cox, C. J., Dalke, A., et al. (2009). 
Biopython: freely available Python tools for computational molecular biology and bioinformatics. Bioinformatics, 25(11), 1422–1423.\r +- Artem Tarasov, Albert J. Vilella, Edwin Cuppen, Isaac J. Nijman, Pjotr Prins, Sambamba: fast processing of NGS alignment formats, Bioinformatics, Volume 31, Issue 12, 15 June 2015, Pages 2032–2034, https://doi.org/10.1093/bioinformatics/btv098\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/417?version=1" ; + schema1:isBasedOn "https://github.com/TRON-Bioinformatics/covigator-ngs-pipeline" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CoVigator pipeline: variant detection pipeline for Sars-CoV-2 (and other viruses...)" ; + schema1:sdDatePublished "2024-08-05 10:31:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/417/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 240617 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11134 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-01-17T15:06:13Z" ; + schema1:dateModified "2023-01-17T15:06:13Z" ; + schema1:description """![CoVigator logo](images/CoVigator_logo_txt_nobg.png "CoVigator logo")\r +\r +# CoVigator pipeline: variant detection pipeline for Sars-CoV-2\r +\r +[![DOI](https://zenodo.org/badge/374669617.svg)](https://zenodo.org/badge/latestdoi/374669617)\r +[![Run tests](https://github.com/TRON-Bioinformatics/covigator-ngs-pipeline/actions/workflows/automated_tests.yml/badge.svg?branch=master)](https://github.com/TRON-Bioinformatics/covigator-ngs-pipeline/actions/workflows/automated_tests.yml)\r +[![Powered by NumFOCUS](https://img.shields.io/badge/powered%20by-Nextflow-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://www.nextflow.io/)\r 
+[![License](https://img.shields.io/badge/license-MIT-green)](https://opensource.org/licenses/MIT)\r +\r +\r +\r +The Covigator pipeline processes SARS-CoV-2 FASTQ or FASTA files into annotated and normalized analysis ready VCF files.\r +It also classifies samples into lineages using pangolin.\r +The pipeline is implemented in the Nextflow framework (Di Tommaso, 2017), it is a stand-alone pipeline that can be\r +used independently of the CoVigator dashboard and knowledge base.\r +\r +Although it is configured by default for SARS-CoV-2 it can be employed for the analysis of other microbial organisms \r +if the required references are provided.\r +\r +The result of the pipeline is one or more annotated VCFs with the list of SNVs and indels ready for analysis.\r +\r +The results from the CoVigator pipeline populate our CoVigator dashboard [https://covigator.tron-mainz.de](https://covigator.tron-mainz.de) \r +\r +**Table of Contents**\r +\r +1. [Two pipelines in one](#id1)\r +2. [Implementation](#id2)\r +3. [How to run](#id3)\r +4. [Understanding the output](#id4)\r +6. [Annotation resources](#id5)\r +7. [Future work](#id6)\r +8. [Bibliography](#id7)\r +\r +\r +## Two pipelines in one\r +\r +In CoVigator we analyse samples from two different formats, FASTQ files (e.g.: as provided by the European Nucleotide \r +Archive) and FASTA files containing a consensus assembly. While from the first we get the raw reads, \r +from the second we obtain already assembled genomes. Each of these formats has to be \r +analysed differently. Also, the output data that we can obtain from each of these is different.\r +\r +![CoVigator pipeline](images/pipeline.drawio.png)\r +\r +### Pipeline for FASTQ files\r +\r +When FASTQ files are provided the pipeline includes the following steps:\r +- **Trimming**. `fastp` is used to trim reads with default values. This step also includes QC filtering.\r +- **Alignment**. 
`BWA mem 2` is used for the alignment of single or paired end samples.\r +- **BAM preprocessing**. BAM files are prepared and duplicate reads are marked using GATK and Sambamba tools.\r +- **Primer trimming**. When a BED with primers is provided, these are trimmed from the reads using iVar. This is applicable to the results from all variant callers.\r +- **Coverage analysis**. `samtools coverage` and `samtools depth` are used to compute the horizontal and vertical \r + coverage respectively.\r +- **Variant calling**. Four different variant callers are employed: BCFtools, LoFreq, iVar and GATK. \r + Subsequent processing of resulting VCF files is independent for each caller.\r +- **Variant normalization**. `bcftools norm` is employed to left align indels, trim variant calls and remove variant duplicates.\r +- **Technical annotation**. `VAFator` is employed to add VAF and coverage annotations from the reads pileup.\r +- **Phasing**. Clonal mutations (ie: VAF >= 0.8) occurring in the same amino acid are merged for its correct functional annotation.\r +- **Biological annotation**. `SnpEff` is employed to annotate the variant consequences of variants and\r + `bcftools annotate` is employed to add additional SARS-CoV-2 annotations.\r +- **Lineage determination**. `pangolin` is used for this purpose, this runs over the results from each of the variant callers separately.\r +\r +Both single end and paired end FASTQ files are supported.\r +\r +### Pipeline for FASTA files\r +\r +When a FASTA file is provided with a single assembly sequence the pipeline includes the following steps:\r +- **Variant calling**. A Smith-Waterman global alignment is performed against the reference sequence to call SNVs and \r + indels. Indels longer than 50 bp and at the beginning or end of the assembly sequence are excluded. Any mutation where\r + either reference or assembly contain an N is excluded.\r +- **Variant normalization**. Same as described above.\r +- **Phasing**. 
mutations occurring in the same amino acid are merged for its correct annotation.\r +- **Biological annotation**. Same as described above.\r +- **Lineage determination**. `pangolin` is used for this purpose.\r +\r +The FASTA file is expected to contain a single assembly sequence. \r +Bear in mind that only clonal variants can be called on the assembly.\r +\r +### Pipeline for VCF files\r +\r +When a VCF file is provided the pipeline includes the following steps:\r +- **Variant normalization**. Same as described above.\r +- **Technical annotation**. Same as described above (optional if BAM is provided)\r +- **Phasing**. mutations occurring in the same amino acid are merged for its correct annotation.\r +- **Biological annotation**. Same as described above\r +- **Lineage determination**. `pangolin` is used for this purpose.\r +\r +## Implementation\r +\r +The pipeline is implemented as a Nextflow workflow with its DSL2 syntax.\r +The dependencies are managed through a conda environment to ensure version traceability and reproducibility.\r +The references for SARS-CoV-2 are embedded in the pipeline.\r +The pipeline is based on a number of third-party tools, plus a custom implementation based on biopython (Cock, 2009) \r +for the alignment and subsequent variant calling over a FASTA file.\r +\r +All code is open sourced in GitHub [https://github.com/TRON-Bioinformatics/covigator-ngs-pipeline](https://github.com/TRON-Bioinformatics/covigator-ngs-pipeline)\r +and made available under the MIT license. We welcome any contribution. \r +If you have troubles using the CoVigator pipeline or you find an issue, we will be thankful if you would report a ticket \r +in GitHub.\r +\r +The alignment, BAM preprocessing and variant normalization pipelines are based on the implementations in additional \r +Nextflow pipelines within the TronFlow initiative [https://tronflow-docs.readthedocs.io/](https://tronflow-docs.readthedocs.io/). 
\r +\r +\r +### Variant annotations\r +\r +The variants derived from a FASTQ file are annotated on the `FILTER` column using the VAFator \r +(https://github.com/TRON-Bioinformatics/vafator) variant allele frequency \r +(VAF) into `LOW_FREQUENCY`, `SUBCLONAL`, `LOW_QUALITY_CLONAL` and finally `PASS` variants correspond to clonal variants. \r +By default, variants with a VAF < 2 % are considered `LOW_FREQUENCY`, variants with a VAF >= 2 % and < 50 % are \r +considered `SUBCLONAL` and variants with a VAF >= 50 % and < 80 % are considered `LOW_QUALITY_CLONAL`. \r +These thresholds can be changed with the parameters `--low_frequency_variant_threshold`,\r +`--subclonal_variant_threshold` and `--low_quality_clonal_variant_threshold` respectively.\r +\r +VAFator technical annotations:\r +\r +- `INFO/vafator_af`: variant allele frequency of the mutation \r +- `INFO/vafator_ac`: number of reads supporting the mutation \r +- `INFO/vafator_dp`: total number of reads at the position, in the case of indels this represents the number of reads in the previous position\r +\r +SnpEff provides the functional annotations. And all mutations are additionally annotated with the following SARS-CoV-2 specific annotations:\r +- ConsHMM conservation scores as reported in (Kwon, 2021)\r +- Pfam domains as reported in Ensembl annotations.\r +\r +Biological annotations: \r +\r +- `INFO/ANN` are the SnpEff consequence annotations (eg: overlapping gene, effect of the mutation). 
\r +These are described in detail here [http://pcingola.github.io/SnpEff/se_inputoutput/](http://pcingola.github.io/SnpEff/se_inputoutput/) \r +- `INFO/CONS_HMM_SARS_COV_2` is the ConsHMM conservation score in SARS-CoV-2\r +- `INFO/CONS_HMM_SARBECOVIRUS` is the ConsHMM conservation score among Sarbecovirus\r +- `INFO/CONS_HMM_VERTEBRATE_COV` is the ConsHMM conservation score among vertebrate Corona virus\r +- `INFO/PFAM_NAME` is the Interpro name for the overlapping Pfam domains\r +- `INFO/PFAM_DESCRIPTION` is the Interpro description for the overlapping Pfam domains\r +- `INFO/problematic` contains the filter provided in DeMaio et al. (2020) for problematic mutations\r +\r +According to DeMaio et al. (2020), mutations at the beginning (ie: POS <= 50) and end (ie: POS >= 29,804) of the \r +genome are filtered out\r +\r +This is an example of biological annotations of a missense mutation in the spike protein on the N-terminal subunit 1 domain.\r +```\r +ANN=A|missense_variant|MODERATE|S|gene-GU280_gp02|transcript|TRANSCRIPT_gene-GU280_gp02|protein_coding|1/1|c.118G>A|\r +p.D40N|118/3822|118/3822|40/1273||;CONS_HMM_SARS_COV_2=0.57215;CONS_HMM_SARBECOVIRUS=0.57215;CONS_HMM_VERTEBRATE_COV=0;\r +PFAM_NAME=bCoV_S1_N;PFAM_DESCRIPTION=Betacoronavirus-like spike glycoprotein S1, N-terminal\r +```\r +\r +\r +### Phasing limitations\r +\r +The phasing implementation is applicable only to clonal mutations. It assumes all clonal mutations are in phase and \r +hence it merges those occurring in the same amino acid.\r +In order to phase intrahost mutations we would need to implement a read-backed phasing approach such as in WhatsHap \r +or GATK's ReadBackedPhasing. 
Unfortunately these tools do not support the scenario of a haploid organism with an\r +undefined number of subclones.\r +For this reason, phasing is implemented with custom Python code at `bin/phasing.py`.\r +\r +### Primers trimming\r +\r +With some library preparation protocols such as ARTIC it is recommended to trim the primers from the reads.\r +We have observed that if primers are not trimmed spurious mutations are being called, especially SNVs with lower frequencies and long deletions.\r +Also the variant allele frequencies of clonal mutations are underestimated.\r +\r +The BED files containing the primers for each ARTIC version can be found at https://github.com/artic-network/artic-ncov2019/tree/master/primer_schemes/nCoV-2019.\r +\r +If the adequate BED file is provided to the CoVigator pipeline with `--primers` the primers will be trimmed with iVar. \r +This affects the output of every variant caller, not only iVar.\r +\r +### Reference data\r +\r +The default SARS-CoV-2 reference files correspond to Sars_cov_2.ASM985889v3 and were downloaded from Ensembl servers.\r +No additional parameter needs to be provided to use the default SARS-CoV-2 reference genome.\r +\r +#### Using a custom reference genome\r +\r +These references can be customised to use a different SARS-CoV-2 reference or to analyse a different virus.\r +Two files need to be provided:\r +- Use a custom reference genome by providing the parameter `--reference your.fasta`.\r +- Gene annotation file in GFFv3 format `--gff your.gff`. 
This is only required to run iVar\r +\r +Additionally, the FASTA needs bwa indexes, .fai index and a .dict index.\r +These indexes can be generated with the following two commands:\r +```\r +bwa index reference.fasta\r +samtools faidx reference.fasta\r +gatk CreateSequenceDictionary --REFERENCE your.fasta\r +```\r +\r +**NOTE**: beware that for Nextflow to find these indices the reference needs to be passed as an absolute path.\r +\r +The SARS-CoV-2 specific annotations will be skipped when using a custom genome.\r +\r +In order to have SnpEff functional annotations available you will also need to provide three parameters:\r +- `--snpeff_organism`: organism to annotate with SnpEff (ie: as registered in SnpEff)\r +- `--snpeff_data`: path to the SnpEff data folder\r +- `--snpeff_config`: path to the SnpEff config file\r +\r +### Intrahost mutations\r +\r +Some mutations may be observed in a subset of the virus sample, this may arise through intrahost virus evolution or\r +co-infection. Intrahost mutations can only be detected when analysing the raw reads (ie: the FASTQs) \r +as in the assembly (ie: the FASTA file) a single virus consensus sequence is represented. \r +BCFtools and GATK do not normally capture intrahost mutations; on the other hand LoFreq and iVar both capture\r +mutations that deviate from a clonal-like VAF. \r +Nevertheless, mutations with lower variant allele frequency (VAF) are challenging to distinguish from sequencing and\r +analytical errors. 
\r +\r +Mutations are annotated on the `FILTER` column using the VAF into three categories: \r +- `LOW_FREQUENCY`: subset of intrahost mutations with lowest frequencies, potentially enriched with false positive calls (VAF < 2 %).\r +- `SUBCLONAL`: subset of intrahost mutations with higher frequencies (2 % <= VAF < 50 %).\r +- `LOW_QUALITY_CLONAL`: subset of clonal mutations with lower frequencies (50 % <= VAF < 80 %).\r +- `PASS` clonal mutations (VAF >= 80 %)\r +\r +Other low quality mutations are removed from the output.\r +\r +The VAF thresholds can be changed with the parameters `--low_frequency_variant_threshold`,\r +`--subclonal_variant_threshold` and `--low_quality_clonal_variant_threshold`.\r +\r +## How to run\r +\r +### Requirements\r +\r +- Nextflow >= 19.10.0\r +- Java >= 8\r +- Conda >=4.9\r +\r +### Testing\r +\r +To run the workflow on a test assembly dataset run:\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline -profile conda,test_fasta\r +```\r +\r +Find the output in the folder `covigator_test_fasta`.\r +\r +To run the workflow on a test raw reads dataset run:\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline -profile conda,test_fastq\r +```\r +\r +Find the output in the folder `covigator_test_fastq`.\r +\r +The above commands are useful to create the conda environments beforehand.\r +\r +**NOTE**: pangolin is the most time-consuming step of the whole pipeline. 
To make it faster, locate the conda \r +environment that Nextflow created with pangolin (eg: `find $YOUR_NEXTFOW_CONDA_ENVS_FOLDER -name pangolin`) and run\r +`pangolin --decompress-model`.\r +\r +### Running\r +\r +For paired end reads:\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline \\\r +[-r v0.10.0] \\\r +[-profile conda] \\\r +--fastq1 \\\r +--fastq2 \\\r +--name example_run \\\r +--output \\\r +[--reference /Sars_cov_2.ASM985889v3.fa] \\\r +[--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +\r +For single end reads:\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline \\\r +[-r v0.10.0] \\\r +[-profile conda] \\\r +--fastq1 \\\r +--name example_run \\\r +--output \\\r +[--reference /Sars_cov_2.ASM985889v3.fa] \\\r +[--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +\r +For assembly:\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline \\\r +[-r v0.10.0] \\\r +[-profile conda] \\\r +--fasta \\\r +--name example_run \\\r +--output \\\r +[--reference /Sars_cov_2.ASM985889v3.fa] \\\r +[--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +\r +For VCF:\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline \\\r +[-r v0.10.0] \\\r +[-profile conda] \\\r +--vcf \\\r +--name example_run \\\r +--output \\\r +[--reference /Sars_cov_2.ASM985889v3.fa] \\\r +[--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +\r +As an optional input when processing directly VCF files you can provide BAM files to annotate VAFs:\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline \\\r +[-r v0.10.0] \\\r +[-profile conda] \\\r +--vcf \\\r +--bam \\\r +--bai \\\r +--name example_run \\\r +--output \\\r +[--reference /Sars_cov_2.ASM985889v3.fa] \\\r +[--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +\r +For batch processing of reads use `--input_fastqs_list` and `--name`.\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline [-profile conda] --input_fastqs_list --library --output [--reference /Sars_cov_2.ASM985889v3.fa] [--gff 
/Sars_cov_2.ASM985889v3.gff3]\r +```\r +where the TSV file contains two or three tab-separated columns **without header**. Columns: sample name, path to FASTQ 1 and optionally path to FASTQ 2. \r +\r +| Sample | FASTQ 1 | FASTQ 2 (optional column) |\r +|-----------|-------------------------------|-------------------------------|\r +| sample1 | /path/to/sample1_fastq1.fastq | /path/to/sample1_fastq2.fastq |\r +| sample2 | /path/to/sample2_fastq1.fastq | /path/to/sample2_fastq2.fastq |\r +| ... | ... | ... |\r +\r +\r +For batch processing of assemblies use `--input_fastas_list`.\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline [-profile conda] --input_fastas_list --library --output [--reference /Sars_cov_2.ASM985889v3.fa] [--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +where the TSV file contains two tab-separated columns **without header**. Columns: sample name and path to FASTA.\r +\r +| Sample | FASTA | \r +|-----------|------------------------|\r +| sample1 | /path/to/sample1.fasta |\r +| sample2 | /path/to/sample2.fasta |\r +| ... | ... |\r +\r +For batch processing of VCFs use `--input_vcfs_list`.\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline [-profile conda] --input_vcfs_list --output [--reference /Sars_cov_2.ASM985889v3.fa] [--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +where the TSV file contains two tab-separated columns **without header**. Columns: sample name and path to VCF.\r +\r +| Sample | VCF |\r +|-----------|------------------------|\r +| sample1 | /path/to/sample1.vcf |\r +| sample2 | /path/to/sample2.vcf |\r +| ... | ... 
|\r +\r +Optionally, provide BAM files for batch processing of VCFs using `--input_bams_list`.\r +```\r +nextflow run tron-bioinformatics/covigator-ngs-pipeline [-profile conda] \\\r + --input_vcfs_list \\\r + --input_bams_list \\\r + --output \\\r + [--reference /Sars_cov_2.ASM985889v3.fa] \\\r + [--gff /Sars_cov_2.ASM985889v3.gff3]\r +```\r +where the BAMs TSV file contains three columns tab-separated columns **without header**. Columns: sample name, \r +path to BAM and path to BAI.\r +\r +| Sample | BAM | BAI |\r +|-----------|----------------------|----------------------|\r +| sample1 | /path/to/sample1.bam | /path/to/sample1.bai |\r +| sample2 | /path/to/sample2.bam | /path/to/sample2.bai |\r +| ... | ... | ... |\r +\r +\r +\r +### Getting help\r +\r +You can always contact us directly or create a GitHub issue, otherwise see all available options using `--help`:\r +```\r +$ nextflow run tron-bioinformatics/covigator-ngs-pipeline -profile conda --help\r +\r +Usage:\r + nextflow run tron-bioinformatics/covigator-ngs-pipeline -profile conda --help\r +\r +Input:\r + * --fastq1: the first input FASTQ file (not compatible with --fasta, nor --vcf)\r + * --fasta: the FASTA file containing the assembly sequence (not compatible with --fastq1, nor --vcf)\r + * --vcf: the VCF file containing mutations to analyze (not compatible with --fastq1, nor --fasta)\r + * --bam: the BAM file containing reads to annotate VAFs on a VCF (not compatible with --fastq1, nor --fasta)\r + * --bai: the BAI index for a BAM file (not compatible with --fastq1, nor --fasta)\r + * --name: the sample name, output files will be named after this name\r + * --output: the folder where to publish output\r + * --input_fastqs_list: alternative to --name and --fastq1 for batch processing\r + * --library: required only when using --input_fastqs\r + * --input_fastas_list: alternative to --name and --fasta for batch processing\r + * --input_vcfs_list: alternative to --name and --vcf for batch processing\r + 
* --input_bams_list: alternative to --name, --vcf, --bam and --bai for batch processing\r +\r +Optional input only required to use a custom reference:\r + * --reference: the reference genome FASTA file, *.fai, *.dict and bwa indexes are required.\r + * --gff: the GFFv3 gene annotations file (required to run iVar and to phase mutations from all variant callers) \r + * --snpeff_data: path to the SnpEff data folder, it will be useful to use the pipeline on other virus than SARS-CoV-2\r + * --snpeff_config: path to the SnpEff config file, it will be useful to use the pipeline on other virus than SARS-CoV-2\r + * --snpeff_organism: organism to annotate with SnpEff, it will be useful to use the pipeline on other virus than SARS-CoV-2\r +\r +Optional input:\r + * --fastq2: the second input FASTQ file\r + * --primers: a BED file containing the primers used during library preparation. If provided primers are trimmed from the reads.\r + * --min_base_quality: minimum base call quality to take a base into account for variant calling (default: 20)\r + * --min_mapping_quality: minimum mapping quality to take a read into account for variant calling (default: 20)\r + * --vafator_min_base_quality: minimum base call quality to take a base into account for VAF annotation (default: 0)\r + * --vafator_min_mapping_quality: minimum mapping quality to take a read into account for VAF annotation (default: 0)\r + * --low_frequency_variant_threshold: VAF threshold to mark a variant as low frequency (default: 0.02)\r + * --subclonal_variant_threshold: VAF superior threshold to mark a variant as subclonal (default: 0.5)\r + * --lq_clonal_variant_threshold: VAF superior threshold to mark a variant as loq quality clonal (default: 0.8)\r + * --memory: the ammount of memory used by each job (default: 3g)\r + * --cpus: the number of CPUs used by each job (default: 1)\r + * --skip_lofreq: skips calling variants with LoFreq\r + * --skip_gatk: skips calling variants with GATK\r + * --skip_bcftools: 
skips calling variants with BCFTools\r + * --skip_ivar: skips calling variants with iVar\r + * --skip_pangolin: skips lineage determination with pangolin\r + * --match_score: global alignment match score, only applicable for assemblies (default: 2)\r + * --mismatch_score: global alignment mismatch score, only applicable for assemblies (default: -1)\r + * --open_gap_score: global alignment open gap score, only applicable for assemblies (default: -3)\r + * --extend_gap_score: global alignment extend gap score, only applicable for assemblies (default: -0.1)\r + * --skip_sarscov2_annotations: skip some of the SARS-CoV-2 specific annotations (default: false)\r + * --keep_intermediate: keep intermediate files (ie: BAM files and intermediate VCF files)\r + * --args_bcftools_mpileup: additional arguments for bcftools mpileup command (eg: --args_bcftools_mpileup='--ignore-overlaps')\r + * --args_bcftools_call: additional arguments for bcftools call command (eg: --args_bcftools_call='--something')\r + * --args_lofreq: additional arguments for lofreq command (eg: --args_lofreq='--something')\r + * --args_gatk: additional arguments for gatk command (eg: --args_gatk='--something')\r + * --args_ivar_samtools: additional arguments for ivar samtools mpileup command (eg: --args_ivar_samtools='--ignore-overlaps')\r + * --args_ivar: additional arguments for ivar command (eg: --args_ivar='--something')\r +\r +Output:\r + * Output a VCF file for each of BCFtools, GATK, LoFreq and iVar when FASTQ files are\r + provided or a single VCF obtained from a global alignment when a FASTA file is provided.\r + * A pangolin results file for each of the VCF files.\r + * Only when FASTQs are provided:\r + * FASTP statistics\r + * Depth and breadth of coverage analysis results\r + \r +```\r +\r +## Understanding the output\r +\r +Although the VCFs are normalized for both pipelines, the FASTQ pipeline runs four variant callers, while the FASTA\r +pipeline runs a single variant caller. 
Also, there are several metrics in the FASTQ pipeline that are not present\r +in the output of the FASTA pipeline. Here we will describe these outputs.\r +\r +### FASTQ pipeline output\r +\r +Find in the table below a description of each of the expected files and a link to a sample file for the FASTQ pipeline.\r +The VCF files will be described in more detail later.\r +\r +| Name | Description | Sample file |\r +|---------------------------------|----------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------|\r +| $NAME.fastp_stats.json | Output metrics of the fastp trimming process in JSON format | [ERR4145453.fastp_stats.json](_static/covigator_pipeline_sample_output_reads/ERR4145453.fastp_stats.json) |\r +| $NAME.fastp_stats.html | Output metrics of the fastp trimming process in HTML format | [ERR4145453.fastp_stats.html](_static/covigator_pipeline_sample_output_reads/ERR4145453.fastp_stats.html) |\r +| $NAME.deduplication_metrics.txt | Deduplication metrics | [ERR4145453.deduplication_metrics.txt](_static/covigator_pipeline_sample_output_reads/ERR4145453.deduplication_metrics.txt) |\r +| $NAME.coverage.tsv | Coverage metrics (eg: mean depth, % horizontal coverage) | [ERR4145453.coverage.tsv](_static/covigator_pipeline_sample_output_reads/ERR4145453.coverage.tsv) |\r +| $NAME.depth.tsv | Depth of coverage per position | [ERR4145453.depth.tsv](_static/covigator_pipeline_sample_output_reads/ERR4145453.depth.tsv) |\r +| $NAME.bcftools.vcf.gz | Bgzipped, tabix-indexed and annotated output VCF from BCFtools | [ERR4145453.bcftools.normalized.annotated.vcf.gz](_static/covigator_pipeline_sample_output_reads/ERR4145453.bcftools.normalized.annotated.vcf.gz) |\r +| $NAME.gatk.vcf.gz | Bgzipped, tabix-indexed and annotated output VCF from GATK | 
| $NAME.ivar.vcf.gz | Bgzipped, tabix-indexed and annotated output VCF from iVar |
https://github.com/ernstlab/ConsHMM_CoV/blob/master/wuhCor1.mutDepletionVertebrateCoVConsHMM.bed\r +\r +Gene annotations including Pfam domains downloaded from Ensembl on 25th of February 2021 from:\r +- ftp://ftp.ensemblgenomes.org/pub/viruses/json/sars_cov_2/sars_cov_2.json\r +\r +\r +## Future work\r +\r +- Primer trimming on an arbitrary sequencing library.\r +- Pipeline for Oxford Nanopore technology.\r +- Variant calls from assemblies contain an abnormally high number of deletions of size greater than 3 bp. This\r +is a technical artifact that would need to be avoided.\r +\r +## Bibliography\r +\r +- Di Tommaso, P., Chatzou, M., Floden, E. W., Barja, P. P., Palumbo, E., & Notredame, C. (2017). Nextflow enables reproducible computational workflows. Nature Biotechnology, 35(4), 316–319. https://doi.org/10.1038/nbt.3820\r +- Vasimuddin Md, Sanchit Misra, Heng Li, Srinivas Aluru. Efficient Architecture-Aware Acceleration of BWA-MEM for Multicore Systems. IEEE Parallel and Distributed Processing Symposium (IPDPS), 2019.\r +- Adrian Tan, Gonçalo R. Abecasis and Hyun Min Kang. Unified Representation of Genetic Variants. Bioinformatics (2015) 31(13): 2202-2204](http://bioinformatics.oxfordjournals.org/content/31/13/2202) and uses bcftools [Li, H. (2011). A statistical framework for SNP calling, mutation discovery, association mapping and population genetical parameter estimation from sequencing data. Bioinformatics (Oxford, England), 27(21), 2987–2993. 10.1093/bioinformatics/btr509\r +- Danecek P, Bonfield JK, Liddle J, Marshall J, Ohan V, Pollard MO, Whitwham A, Keane T, McCarthy SA, Davies RM, Li H. Twelve years of SAMtools and BCFtools. Gigascience. 2021 Feb 16;10(2):giab008. doi: 10.1093/gigascience/giab008. PMID: 33590861; PMCID: PMC7931819.\r +- Van der Auwera GA, Carneiro M, Hartl C, Poplin R, del Angel G, Levy-Moonshine A, Jordan T, Shakir K, Roazen D, Thibault J, Banks E, Garimella K, Altshuler D, Gabriel S, DePristo M. (2013). 
From FastQ Data to High-Confidence Variant Calls: The Genome Analysis Toolkit Best Practices Pipeline. Curr Protoc Bioinformatics, 43:11.10.1-11.10.33. DOI: 10.1002/0471250953.bi1110s43.\r +- Martin, M., Patterson, M., Garg, S., O Fischer, S., Pisanti, N., Klau, G., Schöenhuth, A., & Marschall, T. (2016). WhatsHap: fast and accurate read-based phasing. BioRxiv, 085050. https://doi.org/10.1101/085050\r +- Danecek, P., & McCarthy, S. A. (2017). BCFtools/csq: haplotype-aware variant consequences. Bioinformatics, 33(13), 2037–2039. https://doi.org/10.1093/bioinformatics/btx100\r +- Wilm, A., Aw, P. P. K., Bertrand, D., Yeo, G. H. T., Ong, S. H., Wong, C. H., Khor, C. C., Petric, R., Hibberd, M. L., & Nagarajan, N. (2012). LoFreq: A sequence-quality aware, ultra-sensitive variant caller for uncovering cell-population heterogeneity from high-throughput sequencing datasets. Nucleic Acids Research, 40(22), 11189–11201. https://doi.org/10.1093/nar/gks918\r +- Grubaugh, N. D., Gangavarapu, K., Quick, J., Matteson, N. L., De Jesus, J. G., Main, B. J., Tan, A. L., Paul, L. M., Brackney, D. E., Grewal, S., Gurfield, N., Van Rompay, K. K. A., Isern, S., Michael, S. F., Coffey, L. L., Loman, N. J., & Andersen, K. G. (2019). An amplicon-based sequencing framework for accurately measuring intrahost virus diversity using PrimalSeq and iVar. Genome Biology, 20(1), 8. https://doi.org/10.1186/s13059-018-1618-7\r +- Shifu Chen, Yanqing Zhou, Yaru Chen, Jia Gu; fastp: an ultra-fast all-in-one FASTQ preprocessor, Bioinformatics, Volume 34, Issue 17, 1 September 2018, Pages i884–i890, https://doi.org/10.1093/bioinformatics/bty560\r +- Kwon, S. Bin, & Ernst, J. (2021). Single-nucleotide conservation state annotation of the SARS-CoV-2 genome. Communications Biology, 4(1), 1–11. https://doi.org/10.1038/s42003-021-02231-w\r +- Cock, P. J., Antao, T., Chang, J. T., Chapman, B. A., Cox, C. J., Dalke, A., et al. (2009). 
Biopython: freely available Python tools for computational molecular biology and bioinformatics. Bioinformatics, 25(11), 1422–1423.\r +- Artem Tarasov, Albert J. Vilella, Edwin Cuppen, Isaac J. Nijman, Pjotr Prins, Sambamba: fast processing of NGS alignment formats, Bioinformatics, Volume 31, Issue 12, 15 June 2015, Pages 2032–2034, https://doi.org/10.1093/bioinformatics/btv098\r +""" ; + schema1:image ; + schema1:keywords "Bioinformatics, SARS-CoV-2, covid-19, variant calling, Nextflow" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "CoVigator pipeline: variant detection pipeline for Sars-CoV-2 (and other viruses...)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/417?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Take an anndata file, and perform basic QC with scanpy. Produces a filtered AnnData object." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/467?version=2" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq_CellQC" ; + schema1:sdDatePublished "2024-08-05 10:24:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/467/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 21389 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-06-21T06:48:26Z" ; + schema1:dateModified "2023-06-22T06:28:07Z" ; + schema1:description "Take an anndata file, and perform basic QC with scanpy. Produces a filtered AnnData object." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/467?version=3" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "scRNAseq_CellQC" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/467?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + ; + ns1:output , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2023-01-13T14:08:15.844414" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "rnaseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.3" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """\r +\r +![Logo](https://cbg-ethz.github.io/V-pipe/img/logo.svg)\r +\r +[![bio.tools](https://img.shields.io/badge/bio-tools-blue.svg)](https://bio.tools/V-Pipe)\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥7.11.0-blue.svg)](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe)\r +[![Deploy Docker image](https://github.com/cbg-ethz/V-pipe/actions/workflows/deploy-docker.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe)\r +[![Tests](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml)\r +[![Mega-Linter](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml)\r +[![License: Apache-2.0](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\r +\r +V-pipe is a workflow designed for the analysis of 
next generation sequencing (NGS) data from viral pathogens. It produces a number of results in a curated format (e.g., consensus sequences, SNV calls, local/global haplotypes).\r +V-pipe is written using the Snakemake workflow management system.\r +\r +## Usage\r +\r +Different ways of initializing V-pipe are presented below. We strongly encourage you to deploy it [using the quick install script](#using-quick-install-script), as this is our preferred method.\r +\r +To configure V-pipe refer to the documentation present in [config/README.md](config/README.md).\r +\r +V-pipe expects the input samples to be organized in a [two-level](config/README.md#samples) directory hierarchy,\r +and the sequencing reads must be provided in a sub-folder named `raw_data`. Further details can be found on the [website](https://cbg-ethz.github.io/V-pipe/usage/).\r +Check the utils subdirectory for [mass-importers tools](utils/README.md#samples-mass-importers) that can assist you in generating this hierarchy.\r +\r +We provide [virus-specific base configuration files](config/README.md#virus-base-config) which contain handy defaults for, e.g., HIV and SARS-CoV-2. 
Set the virus in the general section of the configuration file:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +```\r +\r +Also see [snakemake's documentation](https://snakemake.readthedocs.io/en/stable/executing/cli.html) to learn more about the command-line options available when executing the workflow.\r +\r +Tutorials introducing usage of V-pipe are available in the [docs/](docs/README.md) subdirectory.\r +\r +### Using quick install script\r +\r +To deploy V-pipe, use the [installation script](utils/README.md#quick-installer) with the following parameters:\r +\r +```bash\r +curl -O 'https://raw.githubusercontent.com/cbg-ethz/V-pipe/master/utils/quick_install.sh'\r +./quick_install.sh -w work\r +```\r +\r +This script will download and install miniconda, checkout the V-pipe git repository (use `-b` to specify which branch/tag) and setup a work directory (specified with `-w`) with an executable script that will execute the workflow:\r +\r +```bash\r +cd work\r +# edit config.yaml and provide samples/ directory\r +./vpipe --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Docker\r +\r +Note: the [docker image](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe) is only setup with components to run the workflow for HIV and SARS-CoV-2 virus base configurations.\r +Using V-pipe with other viruses or configurations might require internet connectivity for additional software components.\r +\r +Create `config.yaml` or `vpipe.config` and then populate the `samples/` directory.\r +For example, the following config file could be used:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +\r +output:\r + snv: true\r + local: true\r + global: false\r + visualization: true\r + QA: true\r +```\r +\r +Then execute:\r +\r +```bash\r +docker run --rm -it -v $PWD:/work ghcr.io/cbg-ethz/v-pipe:master --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Snakedeploy\r +\r +First install [mamba](https://github.com/conda-forge/miniforge#mambaforge), 
then create and activate an environment with Snakemake and Snakedeploy:\r +\r +```bash\r +mamba create -c conda-forge -c bioconda --name snakemake snakemake snakedeploy\r +conda activate snakemake\r +```\r +\r +Snakemake's [official workflow installer Snakedeploy](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe) can now be used:\r +\r +```bash\r +snakedeploy deploy-workflow https://github.com/cbg-ethz/V-pipe --tag master .\r +# edit config/config.yaml and provide samples/ directory\r +snakemake --use-conda --jobs 4 --printshellcmds --dry-run\r +```\r +\r +## Dependencies\r +\r +- **[Conda](https://conda.io/docs/index.html)**\r +\r + Conda is a cross-platform package management system and an environment manager application. Snakemake uses mamba as a package manager.\r +\r +- **[Snakemake](https://snakemake.readthedocs.io/)**\r +\r + Snakemake is the central workflow and dependency manager of V-pipe. It determines the order in which individual tools are invoked and checks that programs do not exit unexpectedly.\r +\r +- **[VICUNA](https://www.broadinstitute.org/viral-genomics/vicuna)**\r +\r + VICUNA is a _de novo_ assembly software designed for populations with high mutation rates. It is used to build an initial reference for mapping reads with ngshmmalign aligner when a `references/cohort_consensus.fasta` file is not provided. Further details can be found in the [wiki](https://github.com/cbg-ethz/V-pipe/wiki/getting-started#input-files) pages.\r +\r +### Computational tools\r +\r +Other dependencies are managed by using isolated conda environments per rule, and below we list some of the computational tools integrated in V-pipe:\r +\r +- **[FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)**\r +\r + FastQC gives an overview of the raw sequencing data. 
 Additionally, it can be used as an alternative aligner to ngshmmalign.
 LoFreq (version 2) is an SNV and indel caller for next-generation sequencing data, and can be used as an alternative engine for SNV calling.
doi:[10.1093/bioinformatics/btab015](https://doi.org/10.1093/bioinformatics/btab015).\r +\r +## Contributions\r +\r +- [Ivan Topolsky\\* ![orcid]](https://orcid.org/0000-0002-7561-0810), [![github]](https://github.com/dryak)\r +- [Pelin Icer Baykal ![orcid]](https://orcid.org/0000-0002-9542-5292), [![github]](https://github.com/picerbaykal)\r +- [Kim Philipp Jablonski ![orcid]](https://orcid.org/0000-0002-4166-4343), [![github]](https://github.com/kpj)\r +- [Lara Fuhrmann ![orcid]](https://orcid.org/0000-0001-6405-0654), [![github]](https://github.com/LaraFuhrmann)\r +- [Uwe Schmitt ![orcid]](https://orcid.org/0000-0002-4658-0616), [![github]](https://github.com/uweschmitt)\r +- [Michal Okoniewski ![orcid]](https://orcid.org/0000-0003-4722-4506), [![github]](https://github.com/michalogit)\r +- [Monica Dragan ![orcid]](https://orcid.org/0000-0002-7719-5892), [![github]](https://github.com/monicadragan)\r +- [Susana Posada Céspedes ![orcid]](https://orcid.org/0000-0002-7459-8186), [![github]](https://github.com/sposadac)\r +- [David Seifert ![orcid]](https://orcid.org/0000-0003-4739-5110), [![github]](https://github.com/SoapZA)\r +- Tobias Marschall\r +- [Niko Beerenwinkel\\*\\* ![orcid]](https://orcid.org/0000-0002-0573-6119)\r +\r +\\* software maintainer ;\r +\\** group leader\r +\r +[github]: https://cbg-ethz.github.io/V-pipe/img/mark-github.svg\r +[orcid]: https://cbg-ethz.github.io/V-pipe/img/ORCIDiD_iconvector.svg\r +\r +## Contact\r +\r +We encourage users to use the [issue tracker](https://github.com/cbg-ethz/V-pipe/issues). 
For further enquiries, you can also contact the V-pipe Dev Team .\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/301?version=4" ; + schema1:isBasedOn "https://github.com/cbg-ethz/V-pipe.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for V-pipe (main multi-virus version)" ; + schema1:sdDatePublished "2024-08-05 10:23:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/301/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1582 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-02T09:55:11Z" ; + schema1:dateModified "2023-11-02T09:55:11Z" ; + schema1:description """\r +\r +![Logo](https://cbg-ethz.github.io/V-pipe/img/logo.svg)\r +\r +[![bio.tools](https://img.shields.io/badge/bio-tools-blue.svg)](https://bio.tools/V-Pipe)\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥7.11.0-blue.svg)](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe)\r +[![Deploy Docker image](https://github.com/cbg-ethz/V-pipe/actions/workflows/deploy-docker.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe)\r +[![Tests](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/run_regression_tests.yaml)\r +[![Mega-Linter](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml/badge.svg)](https://github.com/cbg-ethz/V-pipe/actions/workflows/mega-linter.yml)\r +[![License: Apache-2.0](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)\r +\r +V-pipe is a workflow designed for the analysis of next generation sequencing (NGS) data from viral pathogens. 
It produces a number of results in a curated format (e.g., consensus sequences, SNV calls, local/global haplotypes).\r +V-pipe is written using the Snakemake workflow management system.\r +\r +## Usage\r +\r +Different ways of initializing V-pipe are presented below. We strongly encourage you to deploy it [using the quick install script](#using-quick-install-script), as this is our preferred method.\r +\r +To configure V-pipe refer to the documentation present in [config/README.md](config/README.md).\r +\r +V-pipe expects the input samples to be organized in a [two-level](config/README.md#samples) directory hierarchy,\r +and the sequencing reads must be provided in a sub-folder named `raw_data`. Further details can be found on the [website](https://cbg-ethz.github.io/V-pipe/usage/).\r +Check the utils subdirectory for [mass-importers tools](utils/README.md#samples-mass-importers) that can assist you in generating this hierarchy.\r +\r +We provide [virus-specific base configuration files](config/README.md#virus-base-config) which contain handy defaults for, e.g., HIV and SARS-CoV-2. 
Set the virus in the general section of the configuration file:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +```\r +\r +Also see [snakemake's documentation](https://snakemake.readthedocs.io/en/stable/executing/cli.html) to learn more about the command-line options available when executing the workflow.\r +\r +Tutorials introducing usage of V-pipe are available in the [docs/](docs/README.md) subdirectory.\r +\r +### Using quick install script\r +\r +To deploy V-pipe, use the [installation script](utils/README.md#quick-installer) with the following parameters:\r +\r +```bash\r +curl -O 'https://raw.githubusercontent.com/cbg-ethz/V-pipe/master/utils/quick_install.sh'\r +./quick_install.sh -w work\r +```\r +\r +This script will download and install miniconda, checkout the V-pipe git repository (use `-b` to specify which branch/tag) and setup a work directory (specified with `-w`) with an executable script that will execute the workflow:\r +\r +```bash\r +cd work\r +# edit config.yaml and provide samples/ directory\r +./vpipe --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Docker\r +\r +Note: the [docker image](https://github.com/cbg-ethz/V-pipe/pkgs/container/v-pipe) is only setup with components to run the workflow for HIV and SARS-CoV-2 virus base configurations.\r +Using V-pipe with other viruses or configurations might require internet connectivity for additional software components.\r +\r +Create `config.yaml` or `vpipe.config` and then populate the `samples/` directory.\r +For example, the following config file could be used:\r +\r +```yaml\r +general:\r + virus_base_config: hiv\r +\r +output:\r + snv: true\r + local: true\r + global: false\r + visualization: true\r + QA: true\r +```\r +\r +Then execute:\r +\r +```bash\r +docker run --rm -it -v $PWD:/work ghcr.io/cbg-ethz/v-pipe:master --jobs 4 --printshellcmds --dry-run\r +```\r +\r +### Using Snakedeploy\r +\r +First install [mamba](https://github.com/conda-forge/miniforge#mambaforge), 
then create and activate an environment with Snakemake and Snakedeploy:\r +\r +```bash\r +mamba create -c conda-forge -c bioconda --name snakemake snakemake snakedeploy\r +conda activate snakemake\r +```\r +\r +Snakemake's [official workflow installer Snakedeploy](https://snakemake.github.io/snakemake-workflow-catalog/?usage=cbg-ethz/V-pipe) can now be used:\r +\r +```bash\r +snakedeploy deploy-workflow https://github.com/cbg-ethz/V-pipe --tag master .\r +# edit config/config.yaml and provide samples/ directory\r +snakemake --use-conda --jobs 4 --printshellcmds --dry-run\r +```\r +\r +## Dependencies\r +\r +- **[Conda](https://conda.io/docs/index.html)**\r +\r + Conda is a cross-platform package management system and an environment manager application. Snakemake uses mamba as a package manager.\r +\r +- **[Snakemake](https://snakemake.readthedocs.io/)**\r +\r + Snakemake is the central workflow and dependency manager of V-pipe. It determines the order in which individual tools are invoked and checks that programs do not exit unexpectedly.\r +\r +- **[VICUNA](https://www.broadinstitute.org/viral-genomics/vicuna)**\r +\r + VICUNA is a _de novo_ assembly software designed for populations with high mutation rates. It is used to build an initial reference for mapping reads with ngshmmalign aligner when a `references/cohort_consensus.fasta` file is not provided. Further details can be found in the [wiki](https://github.com/cbg-ethz/V-pipe/wiki/getting-started#input-files) pages.\r +\r +### Computational tools\r +\r +Other dependencies are managed by using isolated conda environments per rule, and below we list some of the computational tools integrated in V-pipe:\r +\r +- **[FastQC](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)**\r +\r + FastQC gives an overview of the raw sequencing data. 
 Additionally, it can be used as an alternative aligner to ngshmmalign.
 LoFreq (version 2) is an SNV and indel caller for next-generation sequencing data, and can be used as an alternative engine for SNV calling.
doi:[10.1093/bioinformatics/btab015](https://doi.org/10.1093/bioinformatics/btab015).\r +\r +## Contributions\r +\r +- [Ivan Topolsky\\* ![orcid]](https://orcid.org/0000-0002-7561-0810), [![github]](https://github.com/dryak)\r +- [Pelin Icer Baykal ![orcid]](https://orcid.org/0000-0002-9542-5292), [![github]](https://github.com/picerbaykal)\r +- [Kim Philipp Jablonski ![orcid]](https://orcid.org/0000-0002-4166-4343), [![github]](https://github.com/kpj)\r +- [Lara Fuhrmann ![orcid]](https://orcid.org/0000-0001-6405-0654), [![github]](https://github.com/LaraFuhrmann)\r +- [Uwe Schmitt ![orcid]](https://orcid.org/0000-0002-4658-0616), [![github]](https://github.com/uweschmitt)\r +- [Michal Okoniewski ![orcid]](https://orcid.org/0000-0003-4722-4506), [![github]](https://github.com/michalogit)\r +- [Monica Dragan ![orcid]](https://orcid.org/0000-0002-7719-5892), [![github]](https://github.com/monicadragan)\r +- [Susana Posada Céspedes ![orcid]](https://orcid.org/0000-0002-7459-8186), [![github]](https://github.com/sposadac)\r +- [David Seifert ![orcid]](https://orcid.org/0000-0003-4739-5110), [![github]](https://github.com/SoapZA)\r +- Tobias Marschall\r +- [Niko Beerenwinkel\\*\\* ![orcid]](https://orcid.org/0000-0002-0573-6119)\r +\r +\\* software maintainer ;\r +\\** group leader\r +\r +[github]: https://cbg-ethz.github.io/V-pipe/img/mark-github.svg\r +[orcid]: https://cbg-ethz.github.io/V-pipe/img/ORCIDiD_iconvector.svg\r +\r +## Contact\r +\r +We encourage users to use the [issue tracker](https://github.com/cbg-ethz/V-pipe/issues). 
For further enquiries, you can also contact the V-pipe Dev Team .\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/301?version=4" ; + schema1:keywords "Alignment, Assembly, covid-19, Genomics, INDELs, rna, SNPs, variant_calling, workflow" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "V-pipe (main multi-virus version)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/301?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7214 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:11Z" ; + schema1:dateModified "2024-06-11T12:55:11Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=8" ; + schema1:version 8 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.196.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook ABC MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/196/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 101184 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T08:42:07Z" ; + schema1:dateModified "2023-04-14T08:43:04Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/196?version=4" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook ABC MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_wf_amber_md_setup/blob/master/biobb_wf_amber_md_setup/notebooks/abcsetup/biobb_amber_ABC_setup.ipynb" ; + schema1:version 3 . + + a schema1:Dataset ; + schema1:datePublished "2023-09-26T14:26:12.939323" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/average-bigwig-between-replicates" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "average-bigwig-between-replicates/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.12" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-08-05 10:22:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3742 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:00Z" ; + schema1:dateModified "2024-06-11T12:55:00Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-08-05 10:22:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=10" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8767 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:39Z" ; + schema1:dateModified "2024-06-11T12:54:39Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=10" ; + schema1:version 10 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=22" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-08-05 10:23:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=22" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 19626 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:16Z" ; + schema1:dateModified "2024-06-11T12:55:16Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=22" ; + schema1:version 22 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.56.7" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_protein-complex_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:31:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/56/ro_crate?version=7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 68528 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-04T14:23:59Z" ; + schema1:dateModified "2024-05-14T10:10:00Z" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/56?version=6" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + 
schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_protein-complex_md_setup/blob/main/biobb_wf_protein-complex_md_setup/notebooks/biobb_Protein-Complex_MDsetup_tutorial.ipynb" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=18" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-08-05 10:23:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=18" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17866 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:59Z" ; + schema1:dateModified "2024-06-11T12:54:59Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=18" ; + schema1:version 18 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Macromolecular Coarse-Grained Flexibility (FlexServ) tutorial using BioExcel Building Blocks (biobb)\r +\r +This tutorial aims to illustrate the process of generating protein conformational ensembles from 3D structures and analysing its molecular flexibility, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.551.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_flexserv" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Macromolecular Coarse-Grained Flexibility tutorial" ; + schema1:sdDatePublished "2024-08-05 10:29:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/551/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 109981 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-04T15:24:02Z" ; + schema1:dateModified "2024-05-14T10:14:52Z" ; + schema1:description """# Macromolecular Coarse-Grained Flexibility (FlexServ) tutorial using BioExcel Building Blocks (biobb)\r +\r +This tutorial aims to illustrate the process of generating protein conformational ensembles from 3D structures and analysing its molecular flexibility, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/551?version=1" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Macromolecular Coarse-Grained Flexibility tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_flexserv/blob/main/biobb_wf_flexserv/notebooks/biobb_wf_flexserv.ipynb" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/986?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/fetchngs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fetchngs" ; + schema1:sdDatePublished "2024-08-05 10:23:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/986/ro_crate?version=7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7284 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:53Z" ; + schema1:dateModified "2024-06-11T12:54:53Z" ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/986?version=13" ; + schema1:keywords "ddbj, download, ena, FASTQ, geo, sra, synapse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fetchngs" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/986?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/383?version=2" ; + schema1:isBasedOn "https://gitlab.bsc.es/lrodrig1/structuralvariants_poc/-/tree/1.1.3/structuralvariants/nextflow" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNV_pipeline" ; + schema1:sdDatePublished "2024-08-05 10:31:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/383/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4406 ; + schema1:creator , + , + ; + schema1:dateCreated "2022-09-16T14:31:19Z" ; + schema1:dateModified "2022-09-16T14:31:19Z" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/383?version=2" ; + schema1:keywords "CODEX2, TransBioNet, ExomeDepth, variant calling, cancer, manta, GRIDS, structural variants" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CNV_pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/383?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). 
\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.281.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_protein_complex_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/281/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8792 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T08:07:07Z" ; + schema1:dateModified "2023-04-14T08:08:08Z" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/281?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher 
; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_protein_complex_md_setup/python/workflow.py" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Alternative splicing analysis using RNA-seq." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1018?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/rnasplice" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnasplice" ; + schema1:sdDatePublished "2024-08-05 10:23:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1018/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13851 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:13Z" ; + schema1:dateModified "2024-06-11T12:55:13Z" ; + schema1:description "Alternative splicing analysis using RNA-seq." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1018?version=4" ; + schema1:keywords "alternative-splicing, rna, rna-seq, splicing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnasplice" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1018?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Alignment, assembly and annotation of RNASEQ reads as well as annotation of generated transcripts." 
; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/39?version=1" ; + schema1:isBasedOn "https://usegalaxy.eu/u/ambarishk/w/covid-19-stringtie-assembly-and-annotation" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for StringTie assembly and annotation" ; + schema1:sdDatePublished "2024-08-05 10:33:30 +0100" ; + schema1:url "https://workflowhub.eu/workflows/39/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 41223 ; + schema1:dateCreated "2020-06-18T23:13:25Z" ; + schema1:dateModified "2023-02-13T14:06:46Z" ; + schema1:description "Alignment, assembly and annotation of RNASEQ reads as well as annotation of generated transcripts." ; + schema1:image ; + schema1:keywords "Alignment, Assembly, Annotation, RNASEQ, StringTie, covid-19" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "StringTie assembly and annotation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/39?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 201563 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "This project is about the automated quantification of wound healing in high-throughput microscopy scratch assays." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/782?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Wound Healing Scrath Assay Image Analysis" ; + schema1:sdDatePublished "2024-08-05 10:25:15 +0100" ; + schema1:url "https://workflowhub.eu/workflows/782/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3367 ; + schema1:creator ; + schema1:dateCreated "2024-03-06T09:42:37Z" ; + schema1:dateModified "2024-03-06T09:56:18Z" ; + schema1:description "This project is about the automated quantification of wound healing in high-throughput microscopy scratch assays." ; + schema1:keywords "imageJ, Bioimage" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Wound Healing Scrath Assay Image Analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/782?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.129.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (Fpocket)" ; + schema1:sdDatePublished "2024-08-05 10:30:54 
+0100" ; + schema1:url "https://workflowhub.eu/workflows/129/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 43078 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-04-14T08:21:56Z" ; + schema1:dateModified "2023-04-14T08:23:50Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in 
Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/129?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (Fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_virtual-screening/blob/master/biobb_wf_virtual-screening/notebooks/fpocket/wf_vs_fpocket.ipynb" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=20" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-08-05 10:23:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=20" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9565 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:04Z" ; + schema1:dateModified "2024-06-11T12:55:04Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=20" ; + schema1:version 20 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-08-05 10:23:16 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5698 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:14Z" ; + schema1:dateModified "2024-06-11T12:55:14Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/978?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/demultiplex" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/demultiplex" ; + schema1:sdDatePublished "2024-08-05 10:24:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/978/ro_crate?version=7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10118 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:47Z" ; + schema1:dateModified "2024-06-11T12:54:47Z" ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/978?version=7" ; + schema1:keywords "bases2fastq, bcl2fastq, demultiplexing, elementbiosciences, illumina" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/demultiplex" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/978?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Google DeepVariant variant caller as a Nextflow pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/977?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/deepvariant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/deepvariant" ; + schema1:sdDatePublished "2024-08-05 10:24:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/977/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4292 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:47Z" ; + schema1:dateModified "2024-06-11T12:54:47Z" ; + schema1:description "Google DeepVariant variant caller as a Nextflow pipeline" ; + schema1:keywords "deep-variant, DNA, google, variant-calling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/deepvariant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/977?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/amber-protein-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r 
+Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.297.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Amber Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/297/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 83494 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-25T10:10:43Z" ; + schema1:dateModified "2022-11-23T13:28:16Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. 
Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/amber-protein-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/297?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Amber Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_md_setup/galaxy/biobb_wf_amber_md_setup.ga" ; + schema1:version 1 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-12-05T16:19:14.974403" ; + schema1:hasPart , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/baredsc" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + ; + schema1:name "baredsc/baredSC-1d-logNorm" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "baredsc/baredSC-2d-logNorm" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/baredsc" ; + schema1:version "0.2" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=14" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-08-05 10:23:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=14" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5996 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:04Z" ; + schema1:dateModified "2024-06-11T12:55:04Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=14" ; + schema1:version 14 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1023?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/smrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/smrnaseq" ; + schema1:sdDatePublished "2024-08-05 10:23:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1023/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9333 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:18Z" ; + schema1:dateModified "2024-06-11T12:55:18Z" ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/1023?version=10" ; + schema1:keywords "small-rna, smrna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/smrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1023?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/278?version=10" ; + schema1:isBasedOn "https://gitlab.bsc.es/lrodrig1/structuralvariants_poc/-/tree/1.1.3/structuralvariants/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNV_pipeline" ; + schema1:sdDatePublished "2024-08-05 10:32:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/278/ro_crate?version=10" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9694 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9435 ; + schema1:creator , + ; + schema1:dateCreated "2022-09-09T10:27:12Z" ; + schema1:dateModified "2022-09-09T10:40:03Z" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/278?version=10" ; + schema1:keywords "cancer, CODEX2, ExomeDepth, manta, TransBioNet, variant calling, GRIDSS, structural variants" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CNV_pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/278?version=10" ; + schema1:version 10 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 72914 . + + a schema1:Dataset ; + schema1:datePublished "2023-01-13T10:44:42.687400" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.3" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=15" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-08-05 10:23:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=15" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17571 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:16Z" ; + schema1:dateModified "2024-06-11T12:55:16Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=15" ; + schema1:version 15 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "cccccc" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/456?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Formula" ; + schema1:sdDatePublished "2024-08-05 10:31:04 +0100" ; + schema1:url "https://workflowhub.eu/workflows/456/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6618 ; + schema1:dateCreated "2023-04-14T01:48:47Z" ; + schema1:dateModified "2023-04-14T01:50:33Z" ; + schema1:description "cccccc" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Formula" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/456?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=20" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=20" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10486 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:13Z" ; + schema1:dateModified "2024-06-11T12:55:13Z" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=20" ; + schema1:version 20 . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-07-24T13:58:15.637918" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# CWL-assembly\r +[![Codacy Badge](https://api.codacy.com/project/badge/Grade/684724bbc0134960ab41748f4a4b732f)](https://www.codacy.com/app/mb1069/CWL-assembly?utm_source=github.com&utm_medium=referral&utm_content=EBI-Metagenomics/CWL-assembly&utm_campaign=Badge_Grade)\r +[![Build Status](https://travis-ci.org/EBI-Metagenomics/CWL-assembly.svg?branch=develop)](https://travis-ci.org/EBI-Metagenomics/CWL-assembly)\r +\r +## Description\r +\r +This repository contains two workflows for metagenome and metatranscriptome assembly of short read data. MetaSPAdes is used as default for paired-end data, and MEGAHIT for single-end data and co-assemblies. MEGAHIT can be specified as the default assembler in the yaml file if preferred. Steps include:\r +\r + * _QC_: removal of short reads, low quality regions, adapters and host decontamination\r + * _Assembly_: with metaSPADES or MEGAHIT\r + * _Post-assembly_: Host and PhiX decontamination, contig length filter (500bp), stats generation\r +\r +## Requirements - How to install\r +\r +This pipeline requires a conda environment with cwltool, blastn, and metaspades. If created with `requirements.yml`, the environment will be called `cwl_assembly`. 
\r +\r +```\r +conda env create -f requirements.yml\r +conda activate cwl_assembly\r +pip install cwltool==3.1.20230601100705\r +```\r +\r +## Databases\r +\r +You will need to pre-download fasta files for host decontamination and generate the following databases accordingly:\r + * bwa index\r + * blast index\r + \r +Specify the locations in the yaml file when running the pipeline.\r +\r +## Main pipeline executables\r +\r + * `src/workflows/metagenome_pipeline.cwl`\r + * `src/workflows/metatranscriptome_pipeline.cwl`\r +\r +## Example command\r +\r +```cwltool --singularity --outdir ${OUTDIR} ${CWL} ${YML}```\r +\r +`$CWL` is going to be one of the executables mentioned above\r +`$YML` should be a config yaml file including entries among what follows. \r +You can find a yml template in the `examples` folder.\r +\r +## Example output directory structure\r +```\r +Root directory\r + ├── megahit\r + │ └── 001 -------------------------------- Assembly root directory\r + │ ├── assembly_stats.json ------------ Human-readable assembly stats file\r + │ ├── coverage.tab ------------------- Coverage file\r + │ ├── log ---------------------------- CwlToil+megahit output log\r + | ├── options.json ------------------- Megahit input options\r + │ ├── SRR6257420.fasta.gz ------------ Archived and trimmed assembly\r + │ └── SRR6257420.fasta.gz.md5 -------- MD5 hash of above archive\r + ├── metaspades\r + │ └── 001 -------------------------------- Assembly root directory\r + │ ├── assembly_graph.fastg ----------- Assembly graph\r + │ ├── assembly_stats.json ------------ Human-readable assembly stats file\r + │ ├── coverage.tab ------------------- Coverage file\r + | ├── params.txt --------------------- Metaspades input options\r + │ ├── spades.log --------------------- Metaspades output log\r + │ ├── SRR6257420.fasta.gz ------------ Archived and trimmed assembly\r + │ └── SRR6257420.fasta.gz.md5 -------- MD5 hash of above archive\r + │ \r + └── raw 
------------------------------------ Raw data directory\r + ├── SRR6257420.fastq.qc_stats.tsv ------ Stats for cleaned fastq\r + ├── SRR6257420_fastp_clean_1.fastq.gz -- Cleaned paired-end file_1\r + └── SRR6257420_fastp_clean_2.fastq.gz -- Cleaned paired-end file_2\r +```\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/474?version=2" ; + schema1:isBasedOn "https://github.com/EBI-Metagenomics/CWL-assembly.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Metagenome and metatranscriptome assembly in CWL" ; + schema1:sdDatePublished "2024-08-05 10:30:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/474/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6441 ; + schema1:dateCreated "2023-06-21T10:41:38Z" ; + schema1:dateModified "2023-06-21T10:41:38Z" ; + schema1:description """# CWL-assembly\r +[![Codacy Badge](https://api.codacy.com/project/badge/Grade/684724bbc0134960ab41748f4a4b732f)](https://www.codacy.com/app/mb1069/CWL-assembly?utm_source=github.com&utm_medium=referral&utm_content=EBI-Metagenomics/CWL-assembly&utm_campaign=Badge_Grade)\r +[![Build Status](https://travis-ci.org/EBI-Metagenomics/CWL-assembly.svg?branch=develop)](https://travis-ci.org/EBI-Metagenomics/CWL-assembly)\r +\r +## Description\r +\r +This repository contains two workflows for metagenome and metatranscriptome assembly of short read data. MetaSPAdes is used as default for paired-end data, and MEGAHIT for single-end data and co-assemblies. MEGAHIT can be specified as the default assembler in the yaml file if preferred. 
Steps include:\r +\r + * _QC_: removal of short reads, low quality regions, adapters and host decontamination\r + * _Assembly_: with metaSPADES or MEGAHIT\r + * _Post-assembly_: Host and PhiX decontamination, contig length filter (500bp), stats generation\r +\r +## Requirements - How to install\r +\r +This pipeline requires a conda environment with cwltool, blastn, and metaspades. If created with `requirements.yml`, the environment will be called `cwl_assembly`. \r +\r +```\r +conda env create -f requirements.yml\r +conda activate cwl_assembly\r +pip install cwltool==3.1.20230601100705\r +```\r +\r +## Databases\r +\r +You will need to pre-download fasta files for host decontamination and generate the following databases accordingly:\r + * bwa index\r + * blast index\r + \r +Specify the locations in the yaml file when running the pipeline.\r +\r +## Main pipeline executables\r +\r + * `src/workflows/metagenome_pipeline.cwl`\r + * `src/workflows/metatranscriptome_pipeline.cwl`\r +\r +## Example command\r +\r +```cwltool --singularity --outdir ${OUTDIR} ${CWL} ${YML}```\r +\r +`$CWL` is going to be one of the executables mentioned above\r +`$YML` should be a config yaml file including entries among what follows. 
\r +You can find a yml template in the `examples` folder.\r +\r +## Example output directory structure\r +```\r +Root directory\r + ├── megahit\r + │ └── 001 -------------------------------- Assembly root directory\r + │ ├── assembly_stats.json ------------ Human-readable assembly stats file\r + │ ├── coverage.tab ------------------- Coverage file\r + │ ├── log ---------------------------- CwlToil+megahit output log\r + | ├── options.json ------------------- Megahit input options\r + │ ├── SRR6257420.fasta.gz ------------ Archived and trimmed assembly\r + │ └── SRR6257420.fasta.gz.md5 -------- MD5 hash of above archive\r + ├── metaspades\r + │ └── 001 -------------------------------- Assembly root directory\r + │ ├── assembly_graph.fastg ----------- Assembly graph\r + │ ├── assembly_stats.json ------------ Human-readable assembly stats file\r + │ ├── coverage.tab ------------------- Coverage file\r + | ├── params.txt --------------------- Metaspades input options\r + │ ├── spades.log --------------------- Metaspades output log\r + │ ├── SRR6257420.fasta.gz ------------ Archived and trimmed assembly\r + │ └── SRR6257420.fasta.gz.md5 -------- MD5 hash of above archive\r + │ \r + └── raw ------------------------------------ Raw data directory\r + ├── SRR6257420.fastq.qc_stats.tsv ------ Stats for cleaned fastq\r + ├── SRR6257420_fastp_clean_1.fastq.gz -- Cleaned paired-end file_1\r + └── SRR6257420_fastp_clean_2.fastq.gz -- Cleaned paired-end file_2\r +```\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/474?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Metagenome and metatranscriptome assembly in CWL" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/474?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description "Genome assembly: Flye-based WF for highly repetitive genomes [Schmid et al. NAR 2018]" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/51?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ONT -- Assembly-Flye-AhrensLab" ; + schema1:sdDatePublished "2024-08-05 10:33:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/51/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 18132 ; + schema1:creator ; + schema1:dateCreated "2020-08-05T13:01:18Z" ; + schema1:dateModified "2023-02-13T14:06:46Z" ; + schema1:description "Genome assembly: Flye-based WF for highly repetitive genomes [Schmid et al. NAR 2018]" ; + schema1:keywords "name:ONT, ONT" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ONT -- Assembly-Flye-AhrensLab" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/51?version=1" ; + schema1:version 1 ; + ns1:input <#ont____assembly_flye_ahrenslab-inputs-ftp://biftp.informatik.uni-freiburg.de/pub/T0/Ahrens/SRR6982805.fastq>, + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """Workflow for Creating a large disease network from various datasets and databases for IBM, and applying the active subnetwork identification method MOGAMUN.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/681?version=1" ; + schema1:isBasedOn "https://github.com/jdwijnbergen/IBM_ASI_workflow.git" ; + schema1:license "notspecified" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Inclusion Body Myositis Active Subnetwork Identification Workflow" ; + schema1:sdDatePublished "2024-08-05 10:25:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/681/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 27757 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5790 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-27T12:52:50Z" ; + schema1:dateModified "2023-11-27T15:48:40Z" ; + schema1:description """Workflow for Creating a large disease network from various datasets and databases for IBM, and applying the active subnetwork identification method MOGAMUN.\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/681?version=6" ; + schema1:keywords "Bioinformatics, CWL, Genomics, Transcriptomics, Protein-Protein Interaction" ; + schema1:license "https://choosealicense.com/no-permission/" ; + schema1:name "Inclusion Body Myositis Active Subnetwork Identification Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/681?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """[![Snakemake](https://img.shields.io/badge/snakemake-≥7.0.0-brightgreen.svg?style=flat)](https://snakemake.readthedocs.io)\r +\r +\r +# About SnakeMAGs\r +SnakeMAGs is a workflow to reconstruct prokaryotic genomes from metagenomes. The main purpose of SnakeMAGs is to process Illumina data from raw reads to metagenome-assembled genomes (MAGs).\r +SnakeMAGs is efficient, easy to handle and flexible to different projects. The workflow is CeCILL licensed, implemented in Snakemake (run on multiple cores) and available for Linux.\r +SnakeMAGs performed eight main steps:\r +- Quality filtering of the reads\r +- Adapter trimming\r +- Filtering of the host sequences (optional)\r +- Assembly\r +- Binning\r +- Evaluation of the quality of the bins\r +- Classification of the MAGs\r +- Estimation of the relative abundance of the MAGs\r +\r +\r +![scheme of workflow](SnakeMAGs_schema.jpg?raw=true)\r +\r +# How to use SnakeMAGs\r +## Install conda\r +The easiest way to install and run SnakeMAGs is to use [conda](https://www.anaconda.com/products/distribution). 
These package managers will help you to easily install [Snakemake](https://snakemake.readthedocs.io/en/stable/getting_started/installation.html).\r +\r +## Install and activate Snakemake environment\r +Note: The workflow was developed with Snakemake 7.0.0\r +```\r +conda activate\r +\r +# First, set up your channel priorities\r +conda config --add channels defaults\r +conda config --add channels bioconda\r +conda config --add channels conda-forge\r +\r +# Then, create a new environment for the Snakemake version you require\r +conda create -n snakemake_7.0.0 snakemake=7.0.0\r +\r +# And activate it\r +conda activate snakemake_7.0.0\r +```\r +\r +Alternatively, you can also install Snakemake via mamba:\r +```\r +# If you do not have mamba yet on your machine, you can install it with:\r +conda install -n base -c conda-forge mamba\r +\r +# Then you can install Snakemake\r +conda activate base\r +mamba create -c conda-forge -c bioconda -n snakemake snakemake\r +\r +# And activate it\r +conda activate snakemake\r +\r +```\r +\r +## SnakeMAGs executable\r +The easiest way to procure SnakeMAGs and its related files is to clone the repository using git:\r +```\r +git clone https://github.com/Nachida08/SnakeMAGs.git\r +```\r +Alternatively, you can download the relevant files:\r +```\r +wget https://github.com/Nachida08/SnakeMAGs/blob/main/SnakeMAGs.smk https://github.com/Nachida08/SnakeMAGs/blob/main/config.yaml\r +```\r +\r +## SnakeMAGs input files\r +- Illumina paired-end reads in FASTQ.\r +- Adapter sequence file ([adapter.fa](https://github.com/Nachida08/SnakeMAGs/blob/main/adapters.fa)).\r +- Host genome sequences in FASTA (if host_genome: "yes"), in case you work with host-associated metagenomes (e.g. human gut metagenome).\r +\r +## Download Genome Taxonomy Database (GTDB)\r +GTDB-Tk requires ~66G+ of external data (GTDB) that need to be downloaded and unarchived. 
Because this database is voluminous, we let you decide where you want to store it.\r
+SnakeMAGs does not download GTDB automatically; you have to do it:\r
+\r
+```\r
+#Download the latest release (tested with release207)\r
+#Note: SnakeMAGs uses GTDBtk v2.1.0 and therefore requires release 207 as minimum version. See https://ecogenomics.github.io/GTDBTk/installing/index.html#installing for details.\r
+wget https://data.gtdb.ecogenomic.org/releases/latest/auxillary_files/gtdbtk_v2_data.tar.gz\r
+#Decompress\r
+tar -xzvf *tar.gz\r
+#This will create a folder called release207_v2\r
+```\r
+All you have to do now is to indicate the path to the database folder (in our example, the folder is called release207_v2) in the config file, Classification section.\r
+\r
+## Download the GUNC database (required if gunc: "yes")\r
+GUNC accepts either a progenomes or GTDB based reference database. Both can be downloaded using the ```gunc download_db``` command. For our study we used the default proGenome-derived GUNC database. It requires fewer resources with similar performance.\r
+\r
+```\r
+conda activate\r
+# Install and activate GUNC environment\r
+conda create --prefix /path/to/gunc_env\r
+conda install -c bioconda metabat2 --prefix /path/to/gunc_env\r
+source activate /path/to/gunc_env\r
+\r
+#Download the proGenome-derived GUNC database (tested with gunc_db_progenomes2.1)\r
+#Note: SnakeMAGs uses GUNC v1.0.5\r
+gunc download_db -db progenomes /path/to/GUNC_DB\r
+```\r
+All you have to do now is to indicate the path to the GUNC database file in the config file, Bins quality section.\r
+\r
+## Edit config file\r
+You need to edit the config.yaml file. 
In particular, you need to set the correct paths: for the working directory, to specify where are your fastq files, where you want to place the conda environments (that will be created using the provided .yaml files available in [SnakeMAGs_conda_env directory](https://github.com/Nachida08/SnakeMAGs/tree/main/SnakeMAGs_conda_env)), where are the adapters, where is GTDB and optionally where is the GUNC database and where is your host genome reference.\r +\r +Lastly, you need to allocate the proper computational resources (threads, memory) for each of the main steps. These can be optimized according to your hardware.\r +\r +\r +\r +Here is an example of a config file:\r +\r +```\r +#####################################################################################################\r +##### _____ ___ _ _ _ ______ __ __ _______ _____ #####\r +##### / ___| | \\ | | /\\ | | / / | ____| | \\ / | /\\ / _____| / ___| #####\r +##### | (___ | |\\ \\ | | / \\ | |/ / | |____ | \\/ | / \\ | | __ | (___ #####\r +##### \\___ \\ | | \\ \\| | / /\\ \\ | |\\ \\ | ____| | |\\ /| | / /\\ \\ | | |_ | \\___ \\ #####\r +##### ____) | | | \\ | / /__\\ \\ | | \\ \\ | |____ | | \\/ | | / /__\\ \\ | |____|| ____) | #####\r +##### |_____/ |_| \\__| /_/ \\_\\ |_| \\_\\ |______| |_| |_| /_/ \\_\\ \\______/ |_____/ #####\r +##### #####\r +#####################################################################################################\r +\r +############################\r +### Execution parameters ###\r +############################\r +\r +working_dir: /path/to/working/directory/ #The main directory for the project\r +raw_fastq: /path/to/raw_fastq/ #The directory that contains all the fastq files of all the samples (eg. sample1_R1.fastq & sample1_R2.fastq, sample2_R1.fastq & sample2_R2.fastq...)\r +suffix_1: "_R1.fastq" #Main type of suffix for forward reads file (eg. 
_1.fastq or _R1.fastq or _r1.fastq or _1.fq or _R1.fq or _r1.fq )\r +suffix_2: "_R2.fastq" #Main type of suffix for reverse reads file (eg. _2.fastq or _R2.fastq or _r2.fastq or _2.fq or _R2.fq or _r2.fq )\r +\r +###########################\r +### Conda environnemnts ###\r +###########################\r +\r +conda_env: "/path/to/SnakeMAGs_conda_env/" #Path to the provided SnakeMAGs_conda_env directory which contains the yaml file for each conda environment\r +\r +#########################\r +### Quality filtering ###\r +#########################\r +email: name.surname@your-univ.com #Your e-mail address\r +threads_filter: 10 #The number of threads to run this process. To be adjusted according to your hardware\r +resources_filter: 150 #Memory according to tools need (in GB)\r +\r +########################\r +### Adapter trimming ###\r +########################\r +adapters: /path/to/working/directory/adapters.fa #A fasta file contanning a set of various Illumina adaptors (this file is provided and is also available on github)\r +trim_params: "2:40:15" #For further details, see the Trimmomatic documentation\r +threads_trim: 10 #The number of threads to run this process. To be adjusted according to your hardware\r +resources_trim: 150 #Memory according to tools need (in GB)\r +\r +######################\r +### Host filtering ###\r +######################\r +host_genome: "yes" #yes or no. An optional step for host-associated samples (eg. termite, human, plant...)\r +threads_bowtie2: 50 #The number of threads to run this process. To be adjusted according to your hardware\r +host_genomes_directory: /path/to/working/host_genomes/ #the directory where the host genome is stored\r +host_genomes: /path/to/working/host_genomes/host_genomes.fa #A fasta file containing the DNA sequences of the host genome(s)\r +threads_samtools: 50 #The number of threads to run this process. 
To be adjusted according to your hardware\r +resources_host_filtering: 150 #Memory according to tools need (in GB)\r +\r +################\r +### Assembly ###\r +################\r +threads_megahit: 50 #The number of threads to run this process. To be adjusted according to your hardware\r +min_contig_len: 1000 #Minimum length (in bp) of the assembled contigs\r +k_list: "21,31,41,51,61,71,81,91,99,109,119" #Kmer size (for further details, see the megahit documentation)\r +resources_megahit: 250 #Memory according to tools need (in GB)\r +\r +###############\r +### Binning ###\r +###############\r +threads_bwa: 50 #The number of threads to run this process. To be adjusted according to your hardware\r +resources_bwa: 150 #Memory according to tools need (in GB)\r +threads_samtools: 50 #The number of threads to run this process. To be adjusted according to your hardware\r +resources_samtools: 150 #Memory according to tools need (in GB)\r +seed: 19860615 #Seed number for reproducible results\r +threads_metabat: 50 #The number of threads to run this process. To be adjusted according to your hardware\r +minContig: 2500 #Minimum length (in bp) of the contigs\r +resources_binning: 250 #Memory according to tools need (in GB)\r +\r +####################\r +### Bins quality ###\r +####################\r +#checkM\r +threads_checkm: 50 #The number of threads to run this process. To be adjusted according to your hardware\r +resources_checkm: 250 #Memory according to tools need (in GB)\r +#bins_quality_filtering\r +completion: 50 #The minimum completion rate of bins\r +contamination: 10 #The maximum contamination rate of bins\r +parks_quality_score: "yes" #yes or no. If yes bins are filtered according to the Parks quality score (completion-5*contamination >= 50)\r +#GUNC\r +gunc: "yes" #yes or no. An optional step to detect and discard chimeric and contaminated genomes using the GUNC tool\r +threads_gunc: 50 #The number of threads to run this process. 
To be adjusted according to your hardware\r +resources_gunc: 250 #Memory according to tools need (in GB)\r +GUNC_db: /path/to/GUNC_DB/gunc_db_progenomes2.1.dmnd #Path to the downloaded GUNC database (see the readme file)\r +\r +######################\r +### Classification ###\r +######################\r +GTDB_data_ref: /path/to/downloaded/GTDB #Path to uncompressed GTDB-Tk reference data (GTDB)\r +threads_gtdb: 10 #The number of threads to run this process. To be adjusted according to your hardware\r +resources_gtdb: 250 #Memory according to tools need (in GB)\r +\r +##################\r +### Abundances ###\r +##################\r +threads_coverM: 10 #The number of threads to run this process. To be adjusted according to your hardware\r +resources_coverM: 150 #Memory according to tools need (in GB)\r +```\r +# Run SnakeMAGs\r +If you are using a workstation with Ubuntu (tested on Ubuntu 22.04):\r +```{bash}\r +snakemake --cores 30 --snakefile SnakeMAGs.smk --use-conda --conda-prefix /path/to/SnakeMAGs_conda_env/ --configfile /path/to/config.yaml --keep-going --latency-wait 180\r +```\r +\r +If you are working on a cluster with Slurm (tested with version 18.08.7):\r +```{bash}\r +snakemake --snakefile SnakeMAGs.smk --cluster 'sbatch -p --mem -c -o "cluster_logs/{wildcards}.{rule}.{jobid}.out" -e "cluster_logs/{wildcards}.{rule}.{jobid}.err" ' --jobs --use-conda --conda-frontend conda --conda-prefix /path/to/SnakeMAGs_conda_env/ --jobname "{rule}.{wildcards}.{jobid}" --latency-wait 180 --configfile /path/to/config.yaml --keep-going\r +```\r +\r +If you are working on a cluster with SGE (tested with version 8.1.9):\r +```{bash}\r +snakemake --snakefile SnakeMAGs.smk --cluster "qsub -cwd -V -q -pe thread {threads} -e cluster_logs/{rule}.e{jobid} -o cluster_logs/{rule}.o{jobid}" --jobs --use-conda --conda-frontend conda --conda-prefix /path/to/SnakeMAGs_conda_env/ --jobname "{rule}.{wildcards}.{jobid}" --latency-wait 180 --configfile /path/to/config.yaml --keep-going\r 
+```\r
+\r
+\r
+# Test\r
+We provide you a small data set in the [test](https://github.com/Nachida08/SnakeMAGs/tree/main/test) directory which will allow you to validate your installation and take your first steps with SnakeMAGs. This data set is a subset from [ZymoBiomics Mock Community](https://www.zymoresearch.com/blogs/blog/zymobiomics-microbial-standards-optimize-your-microbiomics-workflow) (250K reads) used in this tutorial [metagenomics_tutorial](https://github.com/pjtorres/metagenomics_tutorial).\r
+\r
+1. Before getting started make sure you have cloned the SnakeMAGs repository or you have downloaded all the necessary files (SnakeMAGs.smk, config.yaml, chr19.fa.gz, insub732_2_R1.fastq.gz, insub732_2_R2.fastq.gz). See the [SnakeMAGs executable](#snakemags-executable) section.\r
+2. Unzip the fastq files and the host sequences file.\r
+```\r
+gunzip fastqs/insub732_2_R1.fastq.gz fastqs/insub732_2_R2.fastq.gz host_genomes/chr19.fa.gz\r
+```\r
+3. For better organisation put all the read files in the same directory (eg. fastqs) and the host sequences file in a separate directory (eg. host_genomes)\r
+4. Edit the config file (see [Edit config file](#edit-config-file) section)\r
+5. Run the test (see [Run SnakeMAGs](#run-snakemags) section)\r
+\r
+Note: the analysis of these files took 1159.32 seconds to complete on an Ubuntu 22.04 LTS with an Intel(R) Xeon(R) Silver 4210 CPU @ 2.20GHz x 40 processor, 96GB of RAM.\r
+\r
+# Genome reference for host reads filtering\r
+For host-associated samples, one can remove host sequences from the metagenomic reads by mapping these reads against a reference genome. In the case of termite gut metagenomes, we are providing [here](https://zenodo.org/record/6908287#.YuAdFXZBx8M) the relevant files (fasta and index files) from termite genomes.\r
+\r
+Upon request, we can help you to generate these files for your own reference genome and make them available to the community.\r
+\r
+NB. 
These steps of mapping generate voluminous files such as .bam and .sam. Depending on your disk space, you might want to delete these files after use.\r +\r +\r +# Use case\r +During the test phase of the development of SnakeMAGs, we used this workflow to process 10 publicly available termite gut metagenomes generated by Illumina sequencing, to ultimately reconstruct prokaryotic MAGs. These metagenomes were retrieved from the NCBI database using the following accession numbers: SRR10402454; SRR14739927; SRR8296321; SRR8296327; SRR8296329; SRR8296337; SRR8296343; DRR097505; SRR7466794; SRR7466795. They come from five different studies: Waidele et al, 2019; Tokuda et al, 2018; Romero Victorica et al, 2020; Moreira et al, 2021; and Calusinska et al, 2020.\r +\r +## Download the Illumina pair-end reads\r +We use fasterq-dump tool to extract data in FASTQ-format from SRA-accessions. It is a commandline-tool which offers a faster solution for downloading those large files.\r +\r +```\r +# Install and activate sra-tools environment\r +## Note: For this study we used sra-tools 2.11.0\r +\r +conda activate\r +conda install -c bioconda sra-tools\r +conda activate sra-tools\r +\r +# Download fastqs in a single directory\r +mkdir raw_fastq\r +cd raw_fastq\r +fasterq-dump --threads --skip-technical --split-3\r +```\r +\r +## Download Genome reference for host reads filtering\r +```\r +mkdir host_genomes\r +cd host_genomes\r +wget https://zenodo.org/record/6908287/files/termite_genomes.fasta.gz\r +gunzip termite_genomes.fasta.gz\r +```\r +\r +## Edit the config file\r +See [Edit config file](#edit-config-file) section.\r +\r +## Run SnakeMAGs\r +```\r +conda activate snakemake_7.0.0\r +mkdir cluster_logs\r +snakemake --snakefile SnakeMAGs.smk --cluster 'sbatch -p --mem -c -o "cluster_logs/{wildcards}.{rule}.{jobid}.out" -e "cluster_logs/{wildcards}.{rule}.{jobid}.err" ' --jobs --use-conda --conda-frontend conda --conda-prefix /path/to/SnakeMAGs_conda_env/ --jobname 
"{rule}.{wildcards}.{jobid}" --latency-wait 180 --configfile /path/to/config.yaml --keep-going\r +```\r +\r +## Study results\r +The MAGs reconstructed from each metagenome and their taxonomic classification are available in this [repository](https://doi.org/10.5281/zenodo.7661004).\r +\r +# Citations\r +\r +If you use SnakeMAGs, please cite:\r +> Tadrent N, Dedeine F and Hervé V. SnakeMAGs: a simple, efficient, flexible and scalable workflow to reconstruct prokaryotic genomes from metagenomes [version 2; peer review: 2 approved]. F1000Research 2023, 11:1522 (https://doi.org/10.12688/f1000research.128091.2)\r +\r +\r +Please also cite the dependencies:\r +- [Snakemake](https://doi.org/10.12688/f1000research.29032.2) : Mölder, F., Jablonski, K. P., Letcher, B., Hall, M. B., Tomkins-tinch, C. H., Sochat, V., Forster, J., Lee, S., Twardziok, S. O., Kanitz, A., Wilm, A., Holtgrewe, M., Rahmann, S., Nahnsen, S., & Köster, J. (2021) Sustainable data analysis with Snakemake [version 2; peer review: 2 approved]. *F1000Research* 2021, 10:33.\r +- [illumina-utils](https://doi.org/10.1371/journal.pone.0066643) : Murat Eren, A., Vineis, J. H., Morrison, H. G., & Sogin, M. L. (2013). A Filtering Method to Generate High Quality Short Reads Using Illumina Paired-End Technology. *PloS ONE*, 8(6), e66643.\r +- [Trimmomatic](https://doi.org/10.1093/bioinformatics/btu170) : Bolger, A. M., Lohse, M., & Usadel, B. (2014). Genome analysis Trimmomatic: a flexible trimmer for Illumina sequence data. *Bioinformatics*, 30(15), 2114-2120.\r +- [Bowtie2](https://doi.org/10.1038/nmeth.1923) : Langmead, B., & Salzberg, S. L. (2012). Fast gapped-read alignment with Bowtie 2. *Nature Methods*, 9(4), 357–359.\r +- [SAMtools](https://doi.org/10.1093/bioinformatics/btp352) : Li, H., Handsaker, B., Wysoker, A., Fennell, T., Ruan, J., Homer, N., Marth, G., Abecasis, G., & Durbin, R. (2009). The Sequence Alignment/Map format and SAMtools. 
*Bioinformatics*, 25(16), 2078–2079.\r +- [BEDtools](https://doi.org/10.1093/bioinformatics/btq033) : Quinlan, A. R., & Hall, I. M. (2010). BEDTools: A flexible suite of utilities for comparing genomic features. *Bioinformatics*, 26(6), 841–842.\r +- [MEGAHIT](https://doi.org/10.1093/bioinformatics/btv033) : Li, D., Liu, C. M., Luo, R., Sadakane, K., & Lam, T. W. (2015). MEGAHIT: An ultra-fast single-node solution for large and complex metagenomics assembly via succinct de Bruijn graph. *Bioinformatics*, 31(10), 1674–1676.\r +- [bwa](https://doi.org/10.1093/bioinformatics/btp324) : Li, H., & Durbin, R. (2009). Fast and accurate short read alignment with Burrows-Wheeler transform. *Bioinformatics*, 25(14), 1754–1760.\r +- [MetaBAT2](https://doi.org/10.7717/peerj.7359) : Kang, D. D., Li, F., Kirton, E., Thomas, A., Egan, R., An, H., & Wang, Z. (2019). MetaBAT 2: An adaptive binning algorithm for robust and efficient genome reconstruction from metagenome assemblies. *PeerJ*, 2019(7), 1–13.\r +- [CheckM](https://doi.org/10.1101/gr.186072.114) : Parks, D. H., Imelfort, M., Skennerton, C. T., Hugenholtz, P., & Tyson, G. W. (2015). CheckM: Assessing the quality of microbial genomes recovered from isolates, single cells, and metagenomes. *Genome Research*, 25(7), 1043–1055.\r +- [GTDB-Tk](https://doi.org/10.1093/BIOINFORMATICS/BTAC672) : Chaumeil, P.-A., Mussig, A. J., Hugenholtz, P., Parks, D. H. (2022). GTDB-Tk v2: memory friendly classification with the genome taxonomy database. *Bioinformatics*.\r +- [CoverM](https://github.com/wwood/CoverM)\r +- [Waidele et al, 2019](https://doi.org/10.1101/526038) : Waidele, L., Korb, J., Voolstra, C. R., Dedeine, F., & Staubach, F. (2019). Ecological specificity of the metagenome in a set of lower termite species supports contribution of the microbiome to adaptation of the host. 
*Animal Microbiome*, 1(1), 1–13.\r +- [Tokuda et al, 2018](https://doi.org/10.1073/pnas.1810550115) : Tokuda, G., Mikaelyan, A., Fukui, C., Matsuura, Y., Watanabe, H., Fujishima, M., & Brune, A. (2018). Fiber-associated spirochetes are major agents of hemicellulose degradation in the hindgut of wood-feeding higher termites. *Proceedings of the National Academy of Sciences of the United States of America*, 115(51), E11996–E12004.\r +- [Romero Victorica et al, 2020](https://doi.org/10.1038/s41598-020-60850-5) : Romero Victorica, M., Soria, M. A., Batista-García, R. A., Ceja-Navarro, J. A., Vikram, S., Ortiz, M., Ontañon, O., Ghio, S., Martínez-Ávila, L., Quintero García, O. J., Etcheverry, C., Campos, E., Cowan, D., Arneodo, J., & Talia, P. M. (2020). Neotropical termite microbiomes as sources of novel plant cell wall degrading enzymes. *Scientific Reports*, 10(1), 1–14.\r +- [Moreira et al, 2021](https://doi.org/10.3389/fevo.2021.632590) : Moreira, E. A., Persinoti, G. F., Menezes, L. R., Paixão, D. A. A., Alvarez, T. M., Cairo, J. P. L. F., Squina, F. M., Costa-Leonardo, A. M., Rodrigues, A., Sillam-Dussès, D., & Arab, A. (2021). Complementary contribution of Fungi and Bacteria to lignocellulose digestion in the food stored by a neotropical higher termite. *Frontiers in Ecology and Evolution*, 9(April), 1–12.\r +- [Calusinska et al, 2020](https://doi.org/10.1038/s42003-020-1004-3) : Calusinska, M., Marynowska, M., Bertucci, M., Untereiner, B., Klimek, D., Goux, X., Sillam-Dussès, D., Gawron, P., Halder, R., Wilmes, P., Ferrer, P., Gerin, P., Roisin, Y., & Delfosse, P. (2020). Integrative omics analysis of the termite gut system adaptation to Miscanthus diet identifies lignocellulose degradation enzymes. *Communications Biology*, 3(1), 1–12.\r +- [Orakov et al, 2021](https://doi.org/10.1186/s13059-021-02393-0) : Orakov, A., Fullam, A., Coelho, L. P., Khedkar, S., Szklarczyk, D., Mende, D. R., Schmidt, T. S. B., & Bork, P. (2021). 
GUNC: detection of chimerism and contamination in prokaryotic genomes. *Genome Biology*, 22(1).\r +- [Parks et al, 2015](https://doi.org/10.1101/gr.186072.114) : Parks, D. H., Imelfort, M., Skennerton, C. T., Hugenholtz, P., & Tyson, G. W. (2015). CheckM: Assessing the quality of microbial genomes recovered from isolates, single cells, and metagenomes. *Genome Research*, 25(7), 1043–1055.\r +# License\r +This project is licensed under the CeCILL License - see the [LICENSE](https://github.com/Nachida08/SnakeMAGs/blob/main/LICENCE) file for details.\r +\r +Developed by Nachida Tadrent at the Insect Biology Research Institute ([IRBI](https://irbi.univ-tours.fr/)), under the supervision of Franck Dedeine and Vincent Hervé.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/554?version=1" ; + schema1:isBasedOn "https://github.com/Nachida08/SnakeMAGs.git" ; + schema1:license "CECILL-2.1" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for SnakeMAGs: a simple, efficient, flexible and scalable workflow to reconstruct prokaryotic genomes from metagenomes" ; + schema1:sdDatePublished "2024-08-05 10:29:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/554/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17575 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-08-02T11:41:06Z" ; + schema1:dateModified "2023-08-02T11:41:06Z" ; + schema1:description """[![Snakemake](https://img.shields.io/badge/snakemake-≥7.0.0-brightgreen.svg?style=flat)](https://snakemake.readthedocs.io)\r +\r +\r +# About SnakeMAGs\r +SnakeMAGs is a workflow to reconstruct prokaryotic genomes from metagenomes. The main purpose of SnakeMAGs is to process Illumina data from raw reads to metagenome-assembled genomes (MAGs).\r +SnakeMAGs is efficient, easy to handle and flexible to different projects. 
The workflow is CeCILL licensed, implemented in Snakemake (run on multiple cores) and available for Linux.\r +SnakeMAGs performed eight main steps:\r +- Quality filtering of the reads\r +- Adapter trimming\r +- Filtering of the host sequences (optional)\r +- Assembly\r +- Binning\r +- Evaluation of the quality of the bins\r +- Classification of the MAGs\r +- Estimation of the relative abundance of the MAGs\r +\r +\r +![scheme of workflow](SnakeMAGs_schema.jpg?raw=true)\r +\r +# How to use SnakeMAGs\r +## Install conda\r +The easiest way to install and run SnakeMAGs is to use [conda](https://www.anaconda.com/products/distribution). These package managers will help you to easily install [Snakemake](https://snakemake.readthedocs.io/en/stable/getting_started/installation.html).\r +\r +## Install and activate Snakemake environment\r +Note: The workflow was developed with Snakemake 7.0.0\r +```\r +conda activate\r +\r +# First, set up your channel priorities\r +conda config --add channels defaults\r +conda config --add channels bioconda\r +conda config --add channels conda-forge\r +\r +# Then, create a new environment for the Snakemake version you require\r +conda create -n snakemake_7.0.0 snakemake=7.0.0\r +\r +# And activate it\r +conda activate snakemake_7.0.0\r +```\r +\r +Alternatively, you can also install Snakemake via mamba:\r +```\r +# If you do not have mamba yet on your machine, you can install it with:\r +conda install -n base -c conda-forge mamba\r +\r +# Then you can install Snakemake\r +conda activate base\r +mamba create -c conda-forge -c bioconda -n snakemake snakemake\r +\r +# And activate it\r +conda activate snakemake\r +\r +```\r +\r +## SnakeMAGs executable\r +The easiest way to procure SnakeMAGs and its related files is to clone the repository using git:\r +```\r +git clone https://github.com/Nachida08/SnakeMAGs.git\r +```\r +Alternatively, you can download the relevant files:\r +```\r +wget 
https://github.com/Nachida08/SnakeMAGs/blob/main/SnakeMAGs.smk https://github.com/Nachida08/SnakeMAGs/blob/main/config.yaml\r +```\r +\r +## SnakeMAGs input files\r +- Illumina paired-end reads in FASTQ.\r +- Adapter sequence file ([adapter.fa](https://github.com/Nachida08/SnakeMAGs/blob/main/adapters.fa)).\r +- Host genome sequences in FASTA (if host_genome: "yes"), in case you work with host-associated metagenomes (e.g. human gut metagenome).\r +\r +## Download Genome Taxonomy Database (GTDB)\r +GTDB-Tk requires ~66G+ of external data (GTDB) that need to be downloaded and unarchived. Because this database is voluminous, we let you decide where you want to store it.\r +SnakeMAGs do not download automatically GTDB, you have to do it:\r +\r +```\r +#Download the latest release (tested with release207)\r +#Note: SnakeMAGs uses GTDBtk v2.1.0 and therefore require release 207 as minimum version. See https://ecogenomics.github.io/GTDBTk/installing/index.html#installing for details.\r +wget https://data.gtdb.ecogenomic.org/releases/latest/auxillary_files/gtdbtk_v2_data.tar.gz\r +#Decompress\r +tar -xzvf *tar.gz\r +#This will create a folder called release207_v2\r +```\r +All you have to do now is to indicate the path to the database folder (in our example, the folder is called release207_v2) in the config file, Classification section.\r +\r +## Download the GUNC database (required if gunc: "yes")\r +GUNC accepts either a progenomes or GTDB based reference database. Both can be downloaded using the ```gunc download_db``` command. For our study we used the default proGenome-derived GUNC database. 
It requires less resources with similar performance.\r +\r +```\r +conda activate\r +# Install and activate GUNC environment\r +conda create --prefix /path/to/gunc_env\r +conda install -c bioconda metabat2 --prefix /path/to/gunc_env\r +source activate /path/to/gunc_env\r +\r +#Download the proGenome-derived GUNC database (tested with gunc_db_progenomes2.1)\r +#Note: SnakeMAGs uses GUNC v1.0.5\r +gunc download_db -db progenomes /path/to/GUNC_DB\r +```\r +All you have to do now is to indicate the path to the GUNC database file in the config file, Bins quality section.\r +\r +## Edit config file\r +You need to edit the config.yaml file. In particular, you need to set the correct paths: for the working directory, to specify where are your fastq files, where you want to place the conda environments (that will be created using the provided .yaml files available in [SnakeMAGs_conda_env directory](https://github.com/Nachida08/SnakeMAGs/tree/main/SnakeMAGs_conda_env)), where are the adapters, where is GTDB and optionally where is the GUNC database and where is your host genome reference.\r +\r +Lastly, you need to allocate the proper computational resources (threads, memory) for each of the main steps. 
These can be optimized according to your hardware.\r +\r +\r +\r +Here is an example of a config file:\r +\r +```\r +#####################################################################################################\r +##### _____ ___ _ _ _ ______ __ __ _______ _____ #####\r +##### / ___| | \\ | | /\\ | | / / | ____| | \\ / | /\\ / _____| / ___| #####\r +##### | (___ | |\\ \\ | | / \\ | |/ / | |____ | \\/ | / \\ | | __ | (___ #####\r +##### \\___ \\ | | \\ \\| | / /\\ \\ | |\\ \\ | ____| | |\\ /| | / /\\ \\ | | |_ | \\___ \\ #####\r +##### ____) | | | \\ | / /__\\ \\ | | \\ \\ | |____ | | \\/ | | / /__\\ \\ | |____|| ____) | #####\r +##### |_____/ |_| \\__| /_/ \\_\\ |_| \\_\\ |______| |_| |_| /_/ \\_\\ \\______/ |_____/ #####\r +##### #####\r +#####################################################################################################\r +\r +############################\r +### Execution parameters ###\r +############################\r +\r +working_dir: /path/to/working/directory/ #The main directory for the project\r +raw_fastq: /path/to/raw_fastq/ #The directory that contains all the fastq files of all the samples (eg. sample1_R1.fastq & sample1_R2.fastq, sample2_R1.fastq & sample2_R2.fastq...)\r +suffix_1: "_R1.fastq" #Main type of suffix for forward reads file (eg. _1.fastq or _R1.fastq or _r1.fastq or _1.fq or _R1.fq or _r1.fq )\r +suffix_2: "_R2.fastq" #Main type of suffix for reverse reads file (eg. _2.fastq or _R2.fastq or _r2.fastq or _2.fq or _R2.fq or _r2.fq )\r +\r +###########################\r +### Conda environnemnts ###\r +###########################\r +\r +conda_env: "/path/to/SnakeMAGs_conda_env/" #Path to the provided SnakeMAGs_conda_env directory which contains the yaml file for each conda environment\r +\r +#########################\r +### Quality filtering ###\r +#########################\r +email: name.surname@your-univ.com #Your e-mail address\r +threads_filter: 10 #The number of threads to run this process. 
To be adjusted according to your hardware\r +resources_filter: 150 #Memory according to tools need (in GB)\r +\r +########################\r +### Adapter trimming ###\r +########################\r +adapters: /path/to/working/directory/adapters.fa #A fasta file contanning a set of various Illumina adaptors (this file is provided and is also available on github)\r +trim_params: "2:40:15" #For further details, see the Trimmomatic documentation\r +threads_trim: 10 #The number of threads to run this process. To be adjusted according to your hardware\r +resources_trim: 150 #Memory according to tools need (in GB)\r +\r +######################\r +### Host filtering ###\r +######################\r +host_genome: "yes" #yes or no. An optional step for host-associated samples (eg. termite, human, plant...)\r +threads_bowtie2: 50 #The number of threads to run this process. To be adjusted according to your hardware\r +host_genomes_directory: /path/to/working/host_genomes/ #the directory where the host genome is stored\r +host_genomes: /path/to/working/host_genomes/host_genomes.fa #A fasta file containing the DNA sequences of the host genome(s)\r +threads_samtools: 50 #The number of threads to run this process. To be adjusted according to your hardware\r +resources_host_filtering: 150 #Memory according to tools need (in GB)\r +\r +################\r +### Assembly ###\r +################\r +threads_megahit: 50 #The number of threads to run this process. To be adjusted according to your hardware\r +min_contig_len: 1000 #Minimum length (in bp) of the assembled contigs\r +k_list: "21,31,41,51,61,71,81,91,99,109,119" #Kmer size (for further details, see the megahit documentation)\r +resources_megahit: 250 #Memory according to tools need (in GB)\r +\r +###############\r +### Binning ###\r +###############\r +threads_bwa: 50 #The number of threads to run this process. 
To be adjusted according to your hardware\r +resources_bwa: 150 #Memory according to tools need (in GB)\r +threads_samtools: 50 #The number of threads to run this process. To be adjusted according to your hardware\r +resources_samtools: 150 #Memory according to tools need (in GB)\r +seed: 19860615 #Seed number for reproducible results\r +threads_metabat: 50 #The number of threads to run this process. To be adjusted according to your hardware\r +minContig: 2500 #Minimum length (in bp) of the contigs\r +resources_binning: 250 #Memory according to tools need (in GB)\r +\r +####################\r +### Bins quality ###\r +####################\r +#checkM\r +threads_checkm: 50 #The number of threads to run this process. To be adjusted according to your hardware\r +resources_checkm: 250 #Memory according to tools need (in GB)\r +#bins_quality_filtering\r +completion: 50 #The minimum completion rate of bins\r +contamination: 10 #The maximum contamination rate of bins\r +parks_quality_score: "yes" #yes or no. If yes bins are filtered according to the Parks quality score (completion-5*contamination >= 50)\r +#GUNC\r +gunc: "yes" #yes or no. An optional step to detect and discard chimeric and contaminated genomes using the GUNC tool\r +threads_gunc: 50 #The number of threads to run this process. To be adjusted according to your hardware\r +resources_gunc: 250 #Memory according to tools need (in GB)\r +GUNC_db: /path/to/GUNC_DB/gunc_db_progenomes2.1.dmnd #Path to the downloaded GUNC database (see the readme file)\r +\r +######################\r +### Classification ###\r +######################\r +GTDB_data_ref: /path/to/downloaded/GTDB #Path to uncompressed GTDB-Tk reference data (GTDB)\r +threads_gtdb: 10 #The number of threads to run this process. 
To be adjusted according to your hardware\r +resources_gtdb: 250 #Memory according to tools need (in GB)\r +\r +##################\r +### Abundances ###\r +##################\r +threads_coverM: 10 #The number of threads to run this process. To be adjusted according to your hardware\r +resources_coverM: 150 #Memory according to tools need (in GB)\r +```\r +# Run SnakeMAGs\r +If you are using a workstation with Ubuntu (tested on Ubuntu 22.04):\r +```{bash}\r +snakemake --cores 30 --snakefile SnakeMAGs.smk --use-conda --conda-prefix /path/to/SnakeMAGs_conda_env/ --configfile /path/to/config.yaml --keep-going --latency-wait 180\r +```\r +\r +If you are working on a cluster with Slurm (tested with version 18.08.7):\r +```{bash}\r +snakemake --snakefile SnakeMAGs.smk --cluster 'sbatch -p --mem -c -o "cluster_logs/{wildcards}.{rule}.{jobid}.out" -e "cluster_logs/{wildcards}.{rule}.{jobid}.err" ' --jobs --use-conda --conda-frontend conda --conda-prefix /path/to/SnakeMAGs_conda_env/ --jobname "{rule}.{wildcards}.{jobid}" --latency-wait 180 --configfile /path/to/config.yaml --keep-going\r +```\r +\r +If you are working on a cluster with SGE (tested with version 8.1.9):\r +```{bash}\r +snakemake --snakefile SnakeMAGs.smk --cluster "qsub -cwd -V -q -pe thread {threads} -e cluster_logs/{rule}.e{jobid} -o cluster_logs/{rule}.o{jobid}" --jobs --use-conda --conda-frontend conda --conda-prefix /path/to/SnakeMAGs_conda_env/ --jobname "{rule}.{wildcards}.{jobid}" --latency-wait 180 --configfile /path/to/config.yaml --keep-going\r +```\r +\r +\r +# Test\r +We provide you a small data set in the [test](https://github.com/Nachida08/SnakeMAGs/tree/main/test) directory which will allow you to validate your installation and take your first steps with SnakeMAGs. 
This data set is a subset from [ZymoBiomics Mock Community](https://www.zymoresearch.com/blogs/blog/zymobiomics-microbial-standards-optimize-your-microbiomics-workflow) (250K reads) used in this tutorial [metagenomics_tutorial](https://github.com/pjtorres/metagenomics_tutorial).\r +\r +1. Before getting started make sure you have cloned the SnakeMAGs repository or you have downloaded all the necessary files (SnakeMAGs.smk, config.yaml, chr19.fa.gz, insub732_2_R1.fastq.gz, insub732_2_R2.fastq.gz). See the [SnakeMAGs executable](#snakemags-executable) section.\r +2. Unzip the fastq files and the host sequences file.\r +```\r +gunzip fastqs/insub732_2_R1.fastq.gz fastqs/insub732_2_R2.fastq.gz host_genomes/chr19.fa.gz\r +```\r +3. For better organisation put all the read files in the same directory (eg. fastqs) and the host sequences file in a separate directory (eg. host_genomes)\r +4. Edit the config file (see [Edit config file](#edit-config-file) section)\r +5. Run the test (see [Run SnakeMAGs](#run-snakemags) section)\r +\r +Note: the analysis of these files took 1159.32 seconds to complete on an Ubuntu 22.04 LTS with an Intel(R) Xeon(R) Silver 4210 CPU @ 2.20GHz x 40 processor, 96GB of RAM.\r +\r +# Genome reference for host reads filtering\r +For host-associated samples, one can remove host sequences from the metagenomic reads by mapping these reads against a reference genome. In the case of termite gut metagenomes, we are providing [here](https://zenodo.org/record/6908287#.YuAdFXZBx8M) the relevant files (fasta and index files) from termite genomes.\r +\r +Upon request, we can help you to generate these files for your own reference genome and make them available to the community.\r +\r +NB. These steps of mapping generate voluminous files such as .bam and .sam. 
Depending on your disk space, you might want to delete these files after use.\r +\r +\r +# Use case\r +During the test phase of the development of SnakeMAGs, we used this workflow to process 10 publicly available termite gut metagenomes generated by Illumina sequencing, to ultimately reconstruct prokaryotic MAGs. These metagenomes were retrieved from the NCBI database using the following accession numbers: SRR10402454; SRR14739927; SRR8296321; SRR8296327; SRR8296329; SRR8296337; SRR8296343; DRR097505; SRR7466794; SRR7466795. They come from five different studies: Waidele et al, 2019; Tokuda et al, 2018; Romero Victorica et al, 2020; Moreira et al, 2021; and Calusinska et al, 2020.\r +\r +## Download the Illumina pair-end reads\r +We use fasterq-dump tool to extract data in FASTQ-format from SRA-accessions. It is a commandline-tool which offers a faster solution for downloading those large files.\r +\r +```\r +# Install and activate sra-tools environment\r +## Note: For this study we used sra-tools 2.11.0\r +\r +conda activate\r +conda install -c bioconda sra-tools\r +conda activate sra-tools\r +\r +# Download fastqs in a single directory\r +mkdir raw_fastq\r +cd raw_fastq\r +fasterq-dump --threads --skip-technical --split-3\r +```\r +\r +## Download Genome reference for host reads filtering\r +```\r +mkdir host_genomes\r +cd host_genomes\r +wget https://zenodo.org/record/6908287/files/termite_genomes.fasta.gz\r +gunzip termite_genomes.fasta.gz\r +```\r +\r +## Edit the config file\r +See [Edit config file](#edit-config-file) section.\r +\r +## Run SnakeMAGs\r +```\r +conda activate snakemake_7.0.0\r +mkdir cluster_logs\r +snakemake --snakefile SnakeMAGs.smk --cluster 'sbatch -p --mem -c -o "cluster_logs/{wildcards}.{rule}.{jobid}.out" -e "cluster_logs/{wildcards}.{rule}.{jobid}.err" ' --jobs --use-conda --conda-frontend conda --conda-prefix /path/to/SnakeMAGs_conda_env/ --jobname "{rule}.{wildcards}.{jobid}" --latency-wait 180 --configfile /path/to/config.yaml 
--keep-going\r +```\r +\r +## Study results\r +The MAGs reconstructed from each metagenome and their taxonomic classification are available in this [repository](https://doi.org/10.5281/zenodo.7661004).\r +\r +# Citations\r +\r +If you use SnakeMAGs, please cite:\r +> Tadrent N, Dedeine F and Hervé V. SnakeMAGs: a simple, efficient, flexible and scalable workflow to reconstruct prokaryotic genomes from metagenomes [version 2; peer review: 2 approved]. F1000Research 2023, 11:1522 (https://doi.org/10.12688/f1000research.128091.2)\r +\r +\r +Please also cite the dependencies:\r +- [Snakemake](https://doi.org/10.12688/f1000research.29032.2) : Mölder, F., Jablonski, K. P., Letcher, B., Hall, M. B., Tomkins-tinch, C. H., Sochat, V., Forster, J., Lee, S., Twardziok, S. O., Kanitz, A., Wilm, A., Holtgrewe, M., Rahmann, S., Nahnsen, S., & Köster, J. (2021) Sustainable data analysis with Snakemake [version 2; peer review: 2 approved]. *F1000Research* 2021, 10:33.\r +- [illumina-utils](https://doi.org/10.1371/journal.pone.0066643) : Murat Eren, A., Vineis, J. H., Morrison, H. G., & Sogin, M. L. (2013). A Filtering Method to Generate High Quality Short Reads Using Illumina Paired-End Technology. *PloS ONE*, 8(6), e66643.\r +- [Trimmomatic](https://doi.org/10.1093/bioinformatics/btu170) : Bolger, A. M., Lohse, M., & Usadel, B. (2014). Genome analysis Trimmomatic: a flexible trimmer for Illumina sequence data. *Bioinformatics*, 30(15), 2114-2120.\r +- [Bowtie2](https://doi.org/10.1038/nmeth.1923) : Langmead, B., & Salzberg, S. L. (2012). Fast gapped-read alignment with Bowtie 2. *Nature Methods*, 9(4), 357–359.\r +- [SAMtools](https://doi.org/10.1093/bioinformatics/btp352) : Li, H., Handsaker, B., Wysoker, A., Fennell, T., Ruan, J., Homer, N., Marth, G., Abecasis, G., & Durbin, R. (2009). The Sequence Alignment/Map format and SAMtools. *Bioinformatics*, 25(16), 2078–2079.\r +- [BEDtools](https://doi.org/10.1093/bioinformatics/btq033) : Quinlan, A. R., & Hall, I. M. (2010). 
BEDTools: A flexible suite of utilities for comparing genomic features. *Bioinformatics*, 26(6), 841–842.\r +- [MEGAHIT](https://doi.org/10.1093/bioinformatics/btv033) : Li, D., Liu, C. M., Luo, R., Sadakane, K., & Lam, T. W. (2015). MEGAHIT: An ultra-fast single-node solution for large and complex metagenomics assembly via succinct de Bruijn graph. *Bioinformatics*, 31(10), 1674–1676.\r +- [bwa](https://doi.org/10.1093/bioinformatics/btp324) : Li, H., & Durbin, R. (2009). Fast and accurate short read alignment with Burrows-Wheeler transform. *Bioinformatics*, 25(14), 1754–1760.\r +- [MetaBAT2](https://doi.org/10.7717/peerj.7359) : Kang, D. D., Li, F., Kirton, E., Thomas, A., Egan, R., An, H., & Wang, Z. (2019). MetaBAT 2: An adaptive binning algorithm for robust and efficient genome reconstruction from metagenome assemblies. *PeerJ*, 2019(7), 1–13.\r +- [CheckM](https://doi.org/10.1101/gr.186072.114) : Parks, D. H., Imelfort, M., Skennerton, C. T., Hugenholtz, P., & Tyson, G. W. (2015). CheckM: Assessing the quality of microbial genomes recovered from isolates, single cells, and metagenomes. *Genome Research*, 25(7), 1043–1055.\r +- [GTDB-Tk](https://doi.org/10.1093/BIOINFORMATICS/BTAC672) : Chaumeil, P.-A., Mussig, A. J., Hugenholtz, P., Parks, D. H. (2022). GTDB-Tk v2: memory friendly classification with the genome taxonomy database. *Bioinformatics*.\r +- [CoverM](https://github.com/wwood/CoverM)\r +- [Waidele et al, 2019](https://doi.org/10.1101/526038) : Waidele, L., Korb, J., Voolstra, C. R., Dedeine, F., & Staubach, F. (2019). Ecological specificity of the metagenome in a set of lower termite species supports contribution of the microbiome to adaptation of the host. *Animal Microbiome*, 1(1), 1–13.\r +- [Tokuda et al, 2018](https://doi.org/10.1073/pnas.1810550115) : Tokuda, G., Mikaelyan, A., Fukui, C., Matsuura, Y., Watanabe, H., Fujishima, M., & Brune, A. (2018). 
Fiber-associated spirochetes are major agents of hemicellulose degradation in the hindgut of wood-feeding higher termites. *Proceedings of the National Academy of Sciences of the United States of America*, 115(51), E11996–E12004.\r +- [Romero Victorica et al, 2020](https://doi.org/10.1038/s41598-020-60850-5) : Romero Victorica, M., Soria, M. A., Batista-García, R. A., Ceja-Navarro, J. A., Vikram, S., Ortiz, M., Ontañon, O., Ghio, S., Martínez-Ávila, L., Quintero García, O. J., Etcheverry, C., Campos, E., Cowan, D., Arneodo, J., & Talia, P. M. (2020). Neotropical termite microbiomes as sources of novel plant cell wall degrading enzymes. *Scientific Reports*, 10(1), 1–14.\r +- [Moreira et al, 2021](https://doi.org/10.3389/fevo.2021.632590) : Moreira, E. A., Persinoti, G. F., Menezes, L. R., Paixão, D. A. A., Alvarez, T. M., Cairo, J. P. L. F., Squina, F. M., Costa-Leonardo, A. M., Rodrigues, A., Sillam-Dussès, D., & Arab, A. (2021). Complementary contribution of Fungi and Bacteria to lignocellulose digestion in the food stored by a neotropical higher termite. *Frontiers in Ecology and Evolution*, 9(April), 1–12.\r +- [Calusinska et al, 2020](https://doi.org/10.1038/s42003-020-1004-3) : Calusinska, M., Marynowska, M., Bertucci, M., Untereiner, B., Klimek, D., Goux, X., Sillam-Dussès, D., Gawron, P., Halder, R., Wilmes, P., Ferrer, P., Gerin, P., Roisin, Y., & Delfosse, P. (2020). Integrative omics analysis of the termite gut system adaptation to Miscanthus diet identifies lignocellulose degradation enzymes. *Communications Biology*, 3(1), 1–12.\r +- [Orakov et al, 2021](https://doi.org/10.1186/s13059-021-02393-0) : Orakov, A., Fullam, A., Coelho, L. P., Khedkar, S., Szklarczyk, D., Mende, D. R., Schmidt, T. S. B., & Bork, P. (2021). GUNC: detection of chimerism and contamination in prokaryotic genomes. *Genome Biology*, 22(1).\r +- [Parks et al, 2015](https://doi.org/10.1101/gr.186072.114) : Parks, D. H., Imelfort, M., Skennerton, C. T., Hugenholtz, P., & Tyson, G. W. 
(2015). CheckM: Assessing the quality of microbial genomes recovered from isolates, single cells, and metagenomes. *Genome Research*, 25(7), 1043–1055.\r +# License\r +This project is licensed under the CeCILL License - see the [LICENSE](https://github.com/Nachida08/SnakeMAGs/blob/main/LICENCE) file for details.\r +\r +Developed by Nachida Tadrent at the Insect Biology Research Institute ([IRBI](https://irbi.univ-tours.fr/)), under the supervision of Franck Dedeine and Vincent Hervé.\r +""" ; + schema1:image ; + schema1:keywords "Bioinformatics, Metagenomics, binning, MAG" ; + schema1:license "https://spdx.org/licenses/CECILL-2.1" ; + schema1:name "SnakeMAGs: a simple, efficient, flexible and scalable workflow to reconstruct prokaryotic genomes from metagenomes" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/554?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 102488 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.292.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Amber Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-08-05 10:32:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/292/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1722 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-17T14:45:57Z" ; + schema1:dateModified "2022-03-23T10:04:51Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/292?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Amber Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/292?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """## Learning Objectives\r +- How to access genomic resource via its Python API\r +- How to access image resource via its Python API\r +- Relate image data to genomic data\r +\r +## Diabetes related genes expressed in pancreas\r +\r +This notebook looks at the question **Which diabetes related genes are expressed in the pancreas?** Tissue and disease can be modified.\r +\r +Steps:\r +\r +- Query [humanmine.org](https://www.humanmine.org/humanmine), an integrated database of Homo sapiens genomic data using the intermine API to find the genes.\r +- Using the list of found genes, search in the [Image Data Resource (IDR)](https://idr.openmicroscopy.org/) for images linked to the genes, tissue and disease.\r +- Analyse the images found.\r +\r +## Launch\r +This notebook uses the [environment.yml](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/environment.yml) file.\r +\r +See [Setup](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/setup.md).\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.494.1" ; + schema1:isBasedOn "https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/PublicResources.ipynb" ; + schema1:license "BSD-2-Clause" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Use Public Resources to answer a biological question" ; + schema1:sdDatePublished "2024-08-05 10:30:15 +0100" ; + schema1:url "https://workflowhub.eu/workflows/494/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 1135634 ; + schema1:url "https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/includes/HumanMineIDR.png" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2884598 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-01T10:14:31Z" ; + schema1:dateModified "2023-06-01T10:53:01Z" ; + schema1:description """## Learning Objectives\r +- How to access genomic resource via its Python API\r +- How to access image resource via its Python API\r +- Relate image data to genomic data\r +\r +## Diabetes related genes expressed in pancreas\r +\r +This notebook looks at the question **Which diabetes related genes are expressed in the pancreas?** Tissue and disease can be modified.\r +\r +Steps:\r +\r +- Query [humanmine.org](https://www.humanmine.org/humanmine), an integrated database of Homo sapiens genomic data using the intermine API to find the genes.\r +- Using the list of found genes, search in the [Image Data Resource (IDR)](https://idr.openmicroscopy.org/) for images linked to the genes, tissue and disease.\r +- Analyse the images found.\r +\r +## Launch\r +This notebook uses the [environment.yml](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/environment.yml) file.\r +\r +See [Setup](https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/setup.md).\r +""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "imaging, Python" ; + schema1:license "https://spdx.org/licenses/BSD-2-Clause" ; + schema1:name "Use Public Resources to answer a biological question" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ome/EMBL-EBI-imaging-course-05-2023/blob/main/Day_4/PublicResources.ipynb" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.196.5" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook ABC MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/196/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 99544 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-04T15:34:43Z" ; + schema1:dateModified "2024-05-14T10:17:33Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/196?version=4" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook ABC MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_wf_amber_md_setup/blob/main/biobb_wf_amber_md_setup/notebooks/abcsetup/biobb_amber_ABC_setup.ipynb" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """A variation of the Cancer variant annotation (hg38 VEP-based) workflow at https://doi.org/10.48546/workflowhub.workflow.607.1.\r +\r +Like that other workflow it takes a list of tumor/normal sample pair variants in VCF format (see the other workflow for details about the expected format) and\r +\r +1. annotates them using the ENSEMBL Variant Effect Predictor and custom annotation data\r +2. turns the annotated VCF into a MAF file for import into cBioPortal\r +3. generates human-readable variant- and gene-centric reports\r +\r +In addition, this workflow exports the resulting MAF dataset to a WebDAV-enabled remote folder for subsequent import into cBioPortal.\r +WebDAV access details can be configured in the Galaxy user preferences.""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.629.1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Cancer variant annotation (hg38 VEP-based) with MAF export" ; + schema1:sdDatePublished "2024-08-05 10:27:30 +0100" ; + schema1:url "https://workflowhub.eu/workflows/629/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 178377 ; + schema1:creator ; + schema1:dateCreated "2023-10-27T15:07:03Z" ; + schema1:dateModified "2023-10-27T15:07:32Z" ; + schema1:description """A variation of the Cancer variant annotation (hg38 VEP-based) workflow at https://doi.org/10.48546/workflowhub.workflow.607.1.\r +\r +Like that other workflow it takes a list of tumor/normal sample pair variants in VCF format (see the other workflow for details about the expected format) and\r +\r +1. annotates them using the ENSEMBL Variant Effect Predictor and custom annotation data\r +2. turns the annotated VCF into a MAF file for import into cBioPortal\r +3. generates human-readable variant- and gene-centric reports\r +\r +In addition, this worklfow exports the resulting MAF dataset to a WebDAV-enabled remote folder for subsequent import into cBioPortal.\r +WebDAV access details can be configured in the Galaxy user preferences.""" ; + schema1:keywords "EOSC4Cancer" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Cancer variant annotation (hg38 VEP-based) with MAF export" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://usegalaxy.eu/api/workflows/ce1712139b4a4273/download?format=json-download" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 18709 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "We present an R script that describes the workflow for analysing honey bee (Apis mellifera) wing shape. It is based on a large dataset of wing images and landmark coordinates available at Zenodo: https://doi.org/10.5281/zenodo.7244070. The dataset can be used as a reference for the identification of unknown samples. As unknown samples, we used data from Nawrocka et al. (2018), available at Zenodo: https://doi.org/10.5281/zenodo.7567336. Among others, the script can be used to identify the geographic origin of unknown samples and therefore assist in the monitoring and conservation of honey bee biodiversity in Europe." ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.422.1" ; + schema1:license "CC0-1.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Apis-wings-EU: A workflow for morphometric identification of honey bees from Europe" ; + schema1:sdDatePublished "2024-08-05 10:31:16 +0100" ; + schema1:url "https://workflowhub.eu/workflows/422/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 165859 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2784682 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2023-01-25T07:16:25Z" ; + schema1:dateModified "2023-02-28T13:04:32Z" ; + schema1:description "We present an R script that describes the workflow for analysing honey bee (Apis mellifera) wing shape. It is based on a large dataset of wing images and landmark coordinates available at Zenodo: https://doi.org/10.5281/zenodo.7244070. The dataset can be used as a reference for the identification of unknown samples. As unknown samples, we used data from Nawrocka et al. 
(2018), available at Zenodo: https://doi.org/10.5281/zenodo.7567336. Among others, the script can be used to identify the geographic origin of unknown samples and therefore assist in the monitoring and conservation of honey bee biodiversity in Europe." ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC0-1.0" ; + schema1:name "Apis-wings-EU: A workflow for morphometric identification of honey bees from Europe" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/422?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/amber-protein-ligand-complex-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 
2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.298.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_complex_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/298/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 96594 ; + schema1:creator , + ; + schema1:dateCreated "2023-05-03T13:52:05Z" ; + schema1:dateModified "2023-05-03T13:53:54Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. 
Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/amber-protein-ligand-complex-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/298?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Amber Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_complex_md_setup/galaxy/biobb_wf_amber_complex_md_setup.ga" ; + schema1:version 3 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , 
+ , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# ChIP-Seq pipeline\r +Here we provide the tools to perform paired end or single read ChIP-Seq analysis including raw data quality control, read mapping, peak calling, differential binding analysis and functional annotation. As input files you may use either zipped fastq-files (.fastq.gz) or mapped read data (.bam files). In case of paired end reads, corresponding fastq files should be named using *.R1.fastq.gz* and *.R2.fastq.gz* suffixes.\r +\r +\r +## Pipeline Workflow\r +All analysis steps are illustrated in the pipeline [flowchart](https://www.draw.io/?lightbox=1&highlight=0000ff&edit=_blank&layers=1&nav=1&title=NGSpipe2go_ChIPseq_pipeline.html#R7R1Zc6M489e4KvNgF4fPx0kyzmSPTCbJfLOzLykMsq0NBgLYOX79p5NTYLC5kkyyNRuEAKnV6m712VPPNs8Xruas%2F7YNYPYUyXjuqec9RZGHitLD%2F0nGC22ZDGe0YeVCg3UKG27hK2CNEmvdQgN4sY6%2BbZs%2BdOKNum1ZQPdjbZrr2k%2FxbkvbjH%2FV0VYg1XCra2a69Sc0%2FDVrlcez8MZXAFdr9umpMqE3Fpr%2BsHLtrcW%2BZ9kWoHc2Gn8Nm6O31gz7KdKkfumpZ65t%2B%2FSvzfMZMDFYOcToc%2FOMu8GQXWD5RR44%2F%2B%2FFO%2F%2Fxv8eH1%2FXZ5Hopg0tf6geD8184LICBQMMubddf2yvb0swvYespmS%2FAr5XQ1X%2FbjcP7rzQHtYRP%2FWXbqOFcxt2A77%2Bwlde2vo2a1v7GZHfBM%2FT%2FwS8cjNjVr8id82f2LXLxwi8s332JPIQvf0XvhY%2BRK%2F6c52uuz0Yyxtf21tXBNXDhBvjAvXU0HVqr4FEj0hU9uQJ%2BRlfPd%2B2HAH%2FQ%2Bp6ml4itGv0kbdrpV99vf27%2F2J6%2F%2Fnt3eTf%2F5e3%2B7asM8egHczoO2QLiVYt8gqHABbDRQN0X1MEFpubDXRzhNbZvVkG%2FEIHQHwyHxPj0Vfpjub6a%2BuZufbO4%2Bno7v3%2F92ucbeqeZW%2FapnjI20QxODbjDXzThyiI3xo9bjPqnLtlWwSX6a8X%2BTx5buMkWNDDyLt6awN8IVpnaApinwRY9s03bJZ3UOfkRLtrStjh2yHjNDc1bE2SXY6iPr5bQNKMvlfBv8FJ%2Bh9AE9XTlagZEiJBo1u0N1BkGrUzN8zg2cXIhkc%2F6aPFsDLj%2BTMrDrB1wffCciwns7nSsDpQRfYqRbbR806DtKUIIJ2xZ1xEaOJSGg9HxaCTEavlD49GXMf6tA4%2FgBjGyz55DeahcDSIpUzmGRfJISmHQUJqlMWjM0eoY%2FHn96Z%2FfSf%2F8%2B8f85ou7kY27i5fLvno0V%2Bs8D6uQ1xRkNeqwalZDHv3sutpLpINjQ8v3Im%2B%2Bxg0hug2nihDd5hn91Uluf%2FQHHUGIbcFUCiHg
%2FGasX%2Fxzvr4Bfz3qy18vd9%2FOXvvy6L1jYIyctYKOajfRkZ2FstAx1X%2BYj76jmZTX%2F2j0zVuDLO6b4KzJBsz5YrjPeTK%2B0fcI9n5GHeSh8yxg2GdK73OUSdPXpXh31Z9NzuZsfXndvwWP6Ilr6AATIjaLhnaaOwahlJExAQEoqWxz5AyOg0OfbCnoawv0NNrBfOrocA%2BW0IJEDEXog%2F%2FZM8dqFsRzNKtIm4alCxcsY19a%2Bz5WIXzG%2B0WZr6CPRLmBazoDC9EWZQ43i6WN6Az68%2BriFs9VWdnooo%2FX1rQXeJXADis75hwQiBTMMWp4GDPm%2Bho66K8BvztAEqK9e0nPJrsjh6AmQBHRPLObG8azTqOwaWuGhxU0GOBYHWMbWxMvnqRZBkZxz0OcCWom5VIaOc9XtIsr2IDPQN%2F6ZLiRDYhQFb8WWiuyNU888An9jzQ6GnQBmReZ3YlDbhnAw6ek4rPKPgJVck5IcDI1fUyQBQdNmZ8dKj9mDgVsLgECdHxy8J%2FoeOWggxaePBXCuLAz5Q2BfnAoBFcuny0MQzkFMkUAMd5WTvJJiR79hHpgEn%2BBvVx6mIomFqEC8aNd6Zn%2F%2FSsmSe%2BTnmOycyhKN3B%2Bk5WiEnPVAnPG5vvhAffb4j%2BsZECEGOtOBDKkgDwu7GdMHYlGFdPHhe0awO2jZixrYBBxxCfUU0JYPgruRDXxn4lsooIh%2Fg16OJphBO9W8thPK6IEXT5v4D8LtFjxm3kig4AfcVkuDfFscCZZFdhEByXpaLG9QPsd3pJSvQKoR3op2aD3F7bxkmpMcS7f4C2X17n8LeyYfpS3wOi7ECJt8PwhAZmFBIPI22ChL%2BDG9Ihz5xD9TkXz8LSNk5qD8N11zerq%2BsddhZOynK2ftz6Nz63iRWPza33dMBV1Dp0Fe5gNXrKXkRlVMXDcKCIRhIASMheVZfOlDCXC%2FphYEpE4sFAGdc38zCwNPhYjTrndwQRL%2FBkb9VqaRL%2BOlfi9mEBRhcycUK3LM4EAKJKZuVBQgm2jywjnLiGuKdPf4lpxcY0fMt6MuOYIRYcNGj60qHQgOaGERtv7ZLfge%2BPIPYT0fp%2FtoM9EPrDoqXufLu46PASnDuuF1XBOsm2d6nXQVCkpwDenYqHyNKIq64zmKlCADHaa62XqrbK6pUVQJnXnLECG2iq1LvWsQoJROa69gwbYpwoScavo5E4D3dIVejAGpBhABN%2Blj%2FWxNRYuoR7%2FunRiO%2F7gU9FhiIC5lwEq%2BQywDKt7WkMfYHcY%2FOyTS9yAqmd%2Fw6FI%2FyFgf7PyjgmHcz%2BRsaRRLRL%2FYGfVSMP4KtagRhI6t8lyaiHenVzSlCtbzNx7uPCjFrbujjsi%2FQSyxVzz%2FO9nhKoXETwet4h%2B%2BniEuo3WzDb5acjVnsiwiX0CDxbx5oce07cneP56uxggmpHD7xmv36DBYbYxD%2BwcuBvW76Mbj%2FqANgfsey9roOuUyRpE9CzBLph26C9yda6OM5gE8ZJiL5YqPz6NuOmAn5%2FG0xTlk8dymvSNy3smFWEgQmclfqbLIFPAXNhPrVOo%2BijCsCBF4F5lrft7xK1YQ3b6rcofQ4gie9y0f6MIs%2BfNOoki6rQJFMkXdj4IilS88mlvLDlxIpETRlEmVtGnwjU%2FGoVG1aLQXOS02KoaT4qh0eQgcVndKy5X6KZYmChVjppFfR6Eiyz%2F9o0%2BcpWFYG3quFJskdXjF7nSsK44j5h0X0lfeHerHRE5xnF%2BobCDT6YXcrI%2FO2%2FXyl%2F4mb6bWHkYUu5jOfW5yRelTB1B0KTz32yYj6CJ%2Fspo1gCC5p%2FEfyNoPQLS5F1g6LhiRYAQQ4f5ioC3gaFyHEOVfYy9AyhauXjXEooqx6BoYVX5wn7yIVAK68p5S8R3
8QZoRg8bdx0HmyX2WJaz4hBo04lHXJGwVdWhgSzYLf5T1vN7dPIH2%2BFD1XxohWeQKq%2BfH0awpE79fMXuTDMljsBjtgNj%2BniBKbImfXyu6UkQ%2FbbfM4GEdDD%2FBOkA15dvW5%2B6AeIcJqhDuIUi%2B0d61PM2Vnu%2BLhSN8WYLpgFp1A2NXQnHXcR54XhnvGgMPo%2FJn2sbaGL8%2BwrMHcBv7TXhyjAZTmOor0rjNOrz7XEk6hfE80nH8FxKoboLvK2JOWIXkf0EIRmaGzA%2BEcaiPYTo3qxv0zlcLoEb%2BjDRoZjQ8xsfyjX9tmZZPGNJx3Z3DTt5OBZkTBHtZB75Xf1OnnZ%2BJ5v2qqPbGI2sz7mUbpsmkhZIJCXlWEDT1z3uq%2FcBkHmsFkTmKtjSn7uLe%2Bfs7od%2FtQHj6832yr%2B%2Fajd3SxNOW4efV%2Fnpf%2B95ddaaOSJ33J2hT2mJ2gUOQpuOkqi4C3nN7uI48SI%2BpAYQmXtraL3cswwG97QdHR3vcU92mqWNg5uNkR596n5m%2BGLSsbrLMx6QPujWYA1MB7je4EY8dXHHvBDOdhzi8RohmhTsBbxjXOx7GEtBEJVvCweltTMhxLPh8oWwMh%2FzcH3r%2BXQ%2B3M0%2BTAfhI%2FTy1ohA4CaI3TQtADBHKTyxd39gncgFJQNZqiAt4Fyo%2FT7eobt97XdJaaE25ffw7bpiX1qoJ9snBVXMeIo4axF%2BEEMHP8sJ3FjbYESwFh7dOvEQ49TXaXIV0UeZmFOl8jjTsZtOBc%2BjtPJ42JRzd2TLJFJcAvLTS6a4RHfUsTpTjV4kcWVNvuAqv47Ss5EoTWU9umdxyMrwvZ9%2BEvSs%2B6mYh6M3SyedtWYhkYcoBIsSyu9nWOJCI4C6R6kfpY70X0wcoWVQk5y%2BRoKUjoAPPT%2FonaCVXhat3EMoj4l8Cc1rEQCUp5Kj31QS%2B64LLHQNUkmhHMj1nd2UAw90b5y0JgiOCxK46h11jiVw4NkHLOFTEdr2hXan0Xf41cBakaMhWlg0wN2KiITaagOsiJDYDaJGpT464fK0bNw8LYtRpvKErQJalvQ2GEuTNC1r0NtAHLfzFqL%2FqnPJrijeh5OivTSL56Zv2XNroiY9sY7K4lzcE0vbnGEtE6l%2FU5BK6og6%2BiQBle1uEBnwiDlsAVdPEBNIH8l8D02ELqOx62zs5QnepHmCV4P0Neb5UFuiWD8t9eHp9PHu35%2FX9zfWn456ubv%2FXVyhFKGapgmVEKqV06mjNK0812s3JewDBewmA4hE657rhdW2a3EiIGi4z7U40X%2FEMKZuhoaP8vcR96LCTA1Res2FHkleT20%2BO%2BjhHB2vzE%2BpGYVEfPjl%2Bdq0W4L8coR%2FRYL8mPz0qAkrKuCTn3oE%2FGHL7sRila7SJr%2F8rdLNyRPxFlW6C70wzbs%2Bu0FvO7V93wQW0LFj5pkNlkuoQ8DSEzRA8Rblsw%2FR9fnoqtfhSGBwb91A9e5T8785ajYq6hXYPWoGHQQIqoQtStQc08ZvpSnO%2BcPNkLJwtKUp2kjqNU7RaiBJI255aYkkiYs9vvtae5XKPoVjXqcNUYuCGolO2%2FzegkaicEqTjmok5HIpTZrSSGw03Sse6syCs3TNNIkTRSOMiwyxvBT%2BPmKM1anAz6vtGGNulupMRETays3tPF0MiFjA1c%2BYMYpFazGVnhcLPexuqHEF2J700h6NBTbuuuK3FpOtcWFf%2FeOATf%2F7zcyf6Rfj%2Frs%2FIFbJlKeFmXJbwljusA8iX7VXC7kBJBxaxzbsC2DZsRJeOby5fbpGw55Ov%2F28u%2FxC%2BDPaFc%2FE%2FL6KzSMZKMXvSjjdN46WQnx%2FSyHQdBAOmQC0cTmLaBQ42h36A%2FZCoKWh8mOI2q4Iwjz%2FTVsnoycx
QXgO%2B4qhHFSio2MVN%2BLC00SYpUJw4Jfl2mjNSEBrEoCrt75GQO0%2Bbn0NYaR2q7EKcb1LwNxL6V2CRGQNcPqZwMtWqOjgWv%2BOsPpZdjakztdxDXh9JASZoFL%2BoeajlVClIInnbEL%2FO0FIbgBaVpxFAidDuXlo7YMFffdTeL2EFklFE%2BTvoaVPNIJuosoo1CNDQ0%2B9eNAjgAaON4gsSpEqlAWLcPbKVsQs%2Bf4PMrBeqVKclVbWrEn4mfFSMLkhz8IS9XWFPKdlndZO028gbbcsFw7W6xSTDcbdij6QVx7OT5CiPRkaPlt2UR9Iqlz1wmyDJ6%2FQcUhitpbPlOisy2IlbWuHz47kLEyVlY4GKXNNhTB1J4cGLd0sD27kAYHxYPXa7XEqB4zzTSuAlTjLkkVHT1GFzFlNpIznUO%2BwZQMnFY6ldukQJQtTTxRWkb1p9E3aLxBYBCKXwH4xqk3iOt6A0cHg4ialsJlcUAjjeeG645BGcnXdsDxgRW36DCCYyVP1MWHvaL8%2FGPaT1QsTauFRV2Hsz8sozrON4RmUNvnPovB%2Fuyb%2FybBdk%2F9X6Y%2Fl%2Bmrqm7v1zeLq6%2B38%2FvUrD%2FAS8MUUamHaK%2BQtSZXSUMRC%2FgIrEuUeoC59XYqFLDJZCGa38UWKe0FbaPF7cZdp1sSxgTFo9TTJjTbQMAjFFGFFnIpWwV24byyXjtQUXgxF0pFSHi%2BKIUG6VvgC7%2B5eJMNsT1GfP%2FdoemmTnA9IPjrbwYcHzfyUWq3DdmUV0E0WKRTsuqEAusO6oHuMVXqcFD0jSlgOzbCRwFV0LImlEoskTgwXp3UR80C5sox%2BrgrkmsRdj7nabS9yVSAXCpGrSTNkLnZ%2FXCukECxv0D7F1ONzamjJ2Y0fzRKVC4wPa2jp7MBatABVwGFmk8PEF3WYTRuP4jBt1mOB1AYwtxMatE46yX0AvVgSOQuLP%2BOakDO7xEigXb%2F0ETT1qMt4qHgny7n1MEZJDNeCFNvv9vQ5ies25aIUprbjZ1o3f06jXYleClBHRwNBjOR%2FEjlqEL%2BM97xkcSlWFmiSFOGSVWBPEbrTv%2FuEwAdqnoXAKqp4bsr6v0%2Fv3AFn%2BeuojsJawtXW5U7PXbSPxQuDoFOLlfxqzcVCKJBwO%2B42oJehK3jSnCfsxGEoKv4hmlKz3vtNA9JxgbZBpwUvE4qCHnkgbMcFIQ48kddBqqFmCHP1Y9xARKrdZII6cTcPzN3x%2Fkg1GGBJSdqT7T6YNislbGkrIn%2FumcP%2BdSvR4ALKtLwS3z2pboAIENrWJKUv2AgYLD1z68a0QDjtOy18Y0dlc8nb%2FxFSWgM8Ax3JEW5snXEEjRlbwELzwp7E1CNYdDeM0wnKzQYEIniQVmqKhfCcELPsAwAYJ0PAeEgSgtbK%2BzTYM7zE0TJXasu3o3bA1TXhNjQapuN8hH5D8kgpLQUVMZkKoagIzpx1qdzz5MkPrHKfC%2Bsw5eesfpO5OJo8phTODVp5CpaDMnEkk1dPWMadrEwcqWTX41ECC%2BvJxMGqtxKz61%2B43mzx5KDOC1kPjxROJqVqaU2AQB3KGrlWVOnVlrZjAz1satEsYG89IieTWZGTRODRowh9erKR7c259IzkOIOaSIJQjAZdesR0MM2PWqWD0qAUJUQXQXq6JHWU26OOohgOIfh5cvSOZKjiBfXePzYobaNDrttpV9ChYwnLKkSHhOiktowNeTJW88iQO%2BxCR5mta76c4sRGeIL7eG2IMfSK50U%2F789Kwr44cx5ORoNR%2FOgxkwaiWMk0f1YntWUIfIenkrr89vNKULzVBIGjSaymTmcSBBpwucRFEwsfR%2FADOIERzjTTixZcDOPj95w%2FqigzG2qM%2BQTywgmyEarZo0d19QmqTu%2BdDFIYqTMRyRSeacqTTMGZZh%2
FCdtZdsmZzSYD7xPYRwXfixoZpoXcfbAH%2FWeD3ndErz3jy0Tw7b%2BEGmppLyNYDCLiMR0GFjQ8LomdhCUTwaGi8A8aQMGcq8x9aYt3MSWCjoAFXGPoZ5FKyLfMFK%2FilNLWNLEqF%2FpiBAeEaDZ7iJ8uG0gsSrLCkKonp5YxPZJWoa8xndDhk4DhjW4%2FlcGlktL1SjqX5tWJzeVBnU5EocWahKoqAWYgtNNJkUD4G4je%2FqIRfEEc5dH8fx8js95tn5NClvXT%2BjJB65stEAEwoFmU7uB13QALsgE79z2877%2BcdGv%2FJn9%2F6P%2B%2F2WoDroLUXCNmdXpAVE%2B9Zlm0yxiE7RlXza6O%2FFaqqDAtmy5g0aFRot15Yh8tZCKFVNKtiU6XLC6qq5KPXuHOqqpo0VUVXPTcZTcc0Vcq4XCkLhVVuqltThejwwn7yIVC2FnzcgsIaKxdsEDlHb9%2BgAw7s4wQ7lDW7QDNqUFflJcBITiJPa5WNXM1qrfJKbBaoRFeDdkoYDde2vV0%2Bnmx%2BINY4KUgkeWGgrvDG4%2BWfD80biy57R3mjOixXePpI3lgMJysoMde9nGD7zPaJQpgtEKbKw7QOw9BkmpvRHukt2b8hO%2BPN5nzrFHd6DGQ2Y%2BuYJOTTqE9gE9gXUVf0r7tBn89NVJaNQs3KaOhlUOf0unPymiq3W%2FpczMqlNuW1dhh3jGoeTiNnRbl452r6nmqb72eFqZAoxpzYmcJEqHup0THe1pQKoY895pYpz16jt%2BdZnajpK1KCytzjoC3KMTqectQocCndIiYVy0dpgSaRRXE8m8Rfwaqi06dyJKMJjzLjTEtNZCthNdOTLyodsCJNhQPOHFeiv9qQvu1vrC0rQSy97WajuRhXFen7WS%2FIE12%2Feo2SSaLdO4BQUkf9N0cpkzLWeKp0T8YaZVeOoAb6dPJKl4y0XF6lmO05QZkjUCfYfhrY8CPC75z8CKlcNMXSOEn14v7NCVn7yxj%2FpmRtlltm5WoGBKF3H2uOCvIrU%2FO8pFAvVYM%2Bs6RmQ1SIWFRNRpUqSOIkRpZ9yRJLZDolT23NZIsZhr5rT%2Fh0SYqGCHMJOabtR9OMmCnbduRlgAXxc8uChx3dPT%2BebyrvDZhAhpWCf5zdYgLKy4NKCM%2BeaHaqoKQwCQBkFYUJyWVj0LHlmpSyzNkveSOBFvoSCT2n701PpfQrNV0HnkchY8KFq7kU1LgqxjOF%2BwL%2Fc312g%2F%2B0fd8EFtCJJ5cNlkuow3hu2ryPYZHDcqG%2B3pBn%2BDoyyEaHcnlNpoc20Qq79RV6u%2B7ankfG5VK5hZYPZd9ILYqz1iw%2F8KMrigzEfc0IH5K4Iw3NHvA%2FYOGPGlBDNGQTzo2WGcFHFIQCpuYUXZ1YIdQYtJZA87cuUcMgFHDhYsu63c17UR8X09ZhMDzug5dAxD2jwL13EDylxlDA37wUWqLGNF3Yw0OOzyDGJIUj8ofFtDpRLlEFM%2BCnrCDZpCDhgixgBsqs%2FFkd69Vs24%2FKq2i%2B679tA4uiX%2F4P). Specify the desired analysis details for your data in the *essential.vars.groovy* file (see below) and run the pipeline *chipseq.pipeline.groovy* as described [here](https://gitlab.rlp.net/imbforge/NGSpipe2go/-/blob/master/README.md). A markdown file *ChIPreport.Rmd* will be generated in the output reports folder after running the pipeline. 
Subsequently, the *ChIPreport.Rmd* file can be converted to a final html report using the *knitr* R-package.\r +\r +\r +### The pipelines includes\r +- raw data quality control with FastQC, BamQC and MultiQC\r +- mapping reads or read pairs to the reference genome using bowtie2 (default) or bowtie1\r +- filter out multimapping reads from bowtie2 output with samtools (optional)\r +- identify and remove duplicate reads with Picard MarkDuplicates (optional) \r +- generation of bigWig tracks for visualisation of alignment with deeptools bamCoverage. For single end design, reads are extended to the average fragment size\r +- characterization of insert size using Picard CollectInsertSizeMetrics (for paired end libraries only)\r +- characterize library complexity by PCR Bottleneck Coefficient using the GenomicAlignments R-package (for single read libraries only) \r +- characterize phantom peaks by cross correlation analysis using the spp R-package (for single read libraries only)\r +- peak calling of IP samples vs. corresponding input controls using MACS2\r +- peak annotation using the ChIPseeker R-package (optional)\r +- differential binding analysis using the diffbind R-package (optional). For this, input peak files must be given in *NGSpipe2go/tools/diffbind/targets_diffbind.txt* and contrasts of interest in *NGSpipe2go/tools/diffbind/contrasts_diffbind.txt* (see below)\r +\r +\r +### Pipeline-specific parameter settings\r +- targets.txt: tab-separated txt-file giving information about the analysed samples. The following columns are required: \r + - IP: bam file name of IP sample\r + - IPname: IP sample name to be used in plots and tables \r + - INPUT: bam file name of corresponding input control sample\r + - INPUTname: input sample name to be used in plots and tables \r + - group: variable for sample grouping (e.g. 
by condition)\r +\r +- essential.vars.groovy: essential parameter describing the experiment including: \r + - ESSENTIAL_PROJECT: your project folder name\r + - ESSENTIAL_BOWTIE_REF: full path to bowtie2 indexed reference genome (bowtie1 indexed reference genome if bowtie1 is selected as mapper)\r + - ESSENTIAL_BOWTIE_GENOME: full path to the reference genome FASTA file\r + - ESSENTIAL_BSGENOME: Bioconductor genome sequence annotation package\r + - ESSENTIAL_TXDB: Bioconductor transcript-related annotation package\r + - ESSENTIAL_ANNODB: Bioconductor genome annotation package\r + - ESSENTIAL_BLACKLIST: files with problematic 'blacklist regions' to be excluded from analysis (optional)\r + - ESSENTIAL_PAIRED: either paired end ("yes") or single read ("no") design\r + - ESSENTIAL_READLEN: read length of library\r + - ESSENTIAL_FRAGLEN: mean length of library inserts and also minimum peak size called by MACS2\r + - ESSENTIAL_THREADS: number of threads for parallel tasks\r + - ESSENTIAL_USE_BOWTIE1: if true use bowtie1 for read mapping, otherwise bowtie2 by default\r +\r +- additional (more specialized) parameter can be given in the var.groovy-files of the individual pipeline modules \r +\r +If differential binding analysis is selected it is required additionally:\r +\r +- contrasts_diffbind.txt: indicate intended group comparisions for differential binding analysis, e.g. *KOvsWT=(KO-WT)* if targets.txt contains the groups *KO* and *WT*. Give 1 contrast per line. 
\r +- targets_diffbind.txt: \r + - SampleID: IP sample name (as IPname in targets.txt)\r + - Condition: variable for sample grouping (as group in targets.txt)\r + - Replicate: number of replicate\r + - bamReads: bam file name of IP sample (as IP in targets.txt but with path relative to project directory)\r + - ControlID: input sample name (as INPUTname in targets.txt)\r + - bamControl: bam file name of corresponding input control sample (as INPUT in targets.txt but with path relative to project directory)\r + - Peaks: peak file name opbatined from peak caller (path relative to project directory)\r + - PeakCaller: name of peak caller (e.g. macs)\r +\r +## Programs required\r +- Bedtools\r +- Bowtie2\r +- deepTools\r +- encodeChIPqc (provided by another project from imbforge)\r +- FastQC\r +- MACS2\r +- MultiQC\r +- Picard\r +- R with packages ChIPSeeker, diffbind, GenomicAlignments, spp and genome annotation packages\r +- Samtools\r +- UCSC utilities\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/59?version=1" ; + schema1:isBasedOn "https://gitlab.rlp.net/imbforge/NGSpipe2go/-/tree/master/pipelines/ChIPseq" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ChIP-seq" ; + schema1:sdDatePublished "2024-08-05 10:32:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/59/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2757 ; + schema1:creator ; + schema1:dateCreated "2020-10-07T07:41:21Z" ; + schema1:dateModified "2023-01-16T13:44:52Z" ; + schema1:description """# ChIP-Seq pipeline\r +Here we provide the tools to perform paired end or single read ChIP-Seq analysis including raw data quality control, read mapping, peak calling, differential binding analysis and functional annotation. 
As input files you may use either zipped fastq-files (.fastq.gz) or mapped read data (.bam files). In case of paired end reads, corresponding fastq files should be named using *.R1.fastq.gz* and *.R2.fastq.gz* suffixes.\r +\r +\r +## Pipeline Workflow\r +All analysis steps are illustrated in the pipeline [flowchart](https://www.draw.io/?lightbox=1&highlight=0000ff&edit=_blank&layers=1&nav=1&title=NGSpipe2go_ChIPseq_pipeline.html#R7R1Zc6M489e4KvNgF4fPx0kyzmSPTCbJfLOzLykMsq0NBgLYOX79p5NTYLC5kkyyNRuEAKnV6m712VPPNs8Xruas%2F7YNYPYUyXjuqec9RZGHitLD%2F0nGC22ZDGe0YeVCg3UKG27hK2CNEmvdQgN4sY6%2BbZs%2BdOKNum1ZQPdjbZrr2k%2FxbkvbjH%2FV0VYg1XCra2a69Sc0%2FDVrlcez8MZXAFdr9umpMqE3Fpr%2BsHLtrcW%2BZ9kWoHc2Gn8Nm6O31gz7KdKkfumpZ65t%2B%2FSvzfMZMDFYOcToc%2FOMu8GQXWD5RR44%2F%2B%2FFO%2F%2Fxv8eH1%2FXZ5Hopg0tf6geD8184LICBQMMubddf2yvb0swvYespmS%2FAr5XQ1X%2FbjcP7rzQHtYRP%2FWXbqOFcxt2A77%2Bwlde2vo2a1v7GZHfBM%2FT%2FwS8cjNjVr8id82f2LXLxwi8s332JPIQvf0XvhY%2BRK%2F6c52uuz0Yyxtf21tXBNXDhBvjAvXU0HVqr4FEj0hU9uQJ%2BRlfPd%2B2HAH%2FQ%2Bp6ml4itGv0kbdrpV99vf27%2F2J6%2F%2Fnt3eTf%2F5e3%2B7asM8egHczoO2QLiVYt8gqHABbDRQN0X1MEFpubDXRzhNbZvVkG%2FEIHQHwyHxPj0Vfpjub6a%2BuZufbO4%2Bno7v3%2F92ucbeqeZW%2FapnjI20QxODbjDXzThyiI3xo9bjPqnLtlWwSX6a8X%2BTx5buMkWNDDyLt6awN8IVpnaApinwRY9s03bJZ3UOfkRLtrStjh2yHjNDc1bE2SXY6iPr5bQNKMvlfBv8FJ%2Bh9AE9XTlagZEiJBo1u0N1BkGrUzN8zg2cXIhkc%2F6aPFsDLj%2BTMrDrB1wffCciwns7nSsDpQRfYqRbbR806DtKUIIJ2xZ1xEaOJSGg9HxaCTEavlD49GXMf6tA4%2FgBjGyz55DeahcDSIpUzmGRfJISmHQUJqlMWjM0eoY%2FHn96Z%2FfSf%2F8%2B8f85ou7kY27i5fLvno0V%2Bs8D6uQ1xRkNeqwalZDHv3sutpLpINjQ8v3Im%2B%2Bxg0hug2nihDd5hn91Uluf%2FQHHUGIbcFUCiHg%2FGasX%2Fxzvr4Bfz3qy18vd9%2FOXvvy6L1jYIyctYKOajfRkZ2FstAx1X%2BYj76jmZTX%2F2j0zVuDLO6b4KzJBsz5YrjPeTK%2B0fcI9n5GHeSh8yxg2GdK73OUSdPXpXh31Z9NzuZsfXndvwWP6Ilr6AATIjaLhnaaOwahlJExAQEoqWxz5AyOg0OfbCnoawv0NNrBfOrocA%2BW0IJEDEXog%2F%2FZM8dqFsRzNKtIm4alCxcsY19a%2Bz5WIXzG%2B0WZr6CPRLmBazoDC9EWZQ43i6WN6Az68%2BriFs9VWdnooo%2FX1rQXeJXADis75hwQiBTMMWp4GDPm%2Bho66K8BvztAEqK9e0nPJrsjh6AmQBHRPLObG8azTqOwaWuGhxU0GOBYHWMb
WxMvnqRZBkZxz0OcCWom5VIaOc9XtIsr2IDPQN%2F6ZLiRDYhQFb8WWiuyNU888An9jzQ6GnQBmReZ3YlDbhnAw6ek4rPKPgJVck5IcDI1fUyQBQdNmZ8dKj9mDgVsLgECdHxy8J%2FoeOWggxaePBXCuLAz5Q2BfnAoBFcuny0MQzkFMkUAMd5WTvJJiR79hHpgEn%2BBvVx6mIomFqEC8aNd6Zn%2F%2FSsmSe%2BTnmOycyhKN3B%2Bk5WiEnPVAnPG5vvhAffb4j%2BsZECEGOtOBDKkgDwu7GdMHYlGFdPHhe0awO2jZixrYBBxxCfUU0JYPgruRDXxn4lsooIh%2Fg16OJphBO9W8thPK6IEXT5v4D8LtFjxm3kig4AfcVkuDfFscCZZFdhEByXpaLG9QPsd3pJSvQKoR3op2aD3F7bxkmpMcS7f4C2X17n8LeyYfpS3wOi7ECJt8PwhAZmFBIPI22ChL%2BDG9Ihz5xD9TkXz8LSNk5qD8N11zerq%2BsddhZOynK2ftz6Nz63iRWPza33dMBV1Dp0Fe5gNXrKXkRlVMXDcKCIRhIASMheVZfOlDCXC%2FphYEpE4sFAGdc38zCwNPhYjTrndwQRL%2FBkb9VqaRL%2BOlfi9mEBRhcycUK3LM4EAKJKZuVBQgm2jywjnLiGuKdPf4lpxcY0fMt6MuOYIRYcNGj60qHQgOaGERtv7ZLfge%2BPIPYT0fp%2FtoM9EPrDoqXufLu46PASnDuuF1XBOsm2d6nXQVCkpwDenYqHyNKIq64zmKlCADHaa62XqrbK6pUVQJnXnLECG2iq1LvWsQoJROa69gwbYpwoScavo5E4D3dIVejAGpBhABN%2Blj%2FWxNRYuoR7%2FunRiO%2F7gU9FhiIC5lwEq%2BQywDKt7WkMfYHcY%2FOyTS9yAqmd%2Fw6FI%2FyFgf7PyjgmHcz%2BRsaRRLRL%2FYGfVSMP4KtagRhI6t8lyaiHenVzSlCtbzNx7uPCjFrbujjsi%2FQSyxVzz%2FO9nhKoXETwet4h%2B%2BniEuo3WzDb5acjVnsiwiX0CDxbx5oce07cneP56uxggmpHD7xmv36DBYbYxD%2BwcuBvW76Mbj%2FqANgfsey9roOuUyRpE9CzBLph26C9yda6OM5gE8ZJiL5YqPz6NuOmAn5%2FG0xTlk8dymvSNy3smFWEgQmclfqbLIFPAXNhPrVOo%2BijCsCBF4F5lrft7xK1YQ3b6rcofQ4gie9y0f6MIs%2BfNOoki6rQJFMkXdj4IilS88mlvLDlxIpETRlEmVtGnwjU%2FGoVG1aLQXOS02KoaT4qh0eQgcVndKy5X6KZYmChVjppFfR6Eiyz%2F9o0%2BcpWFYG3quFJskdXjF7nSsK44j5h0X0lfeHerHRE5xnF%2BobCDT6YXcrI%2FO2%2FXyl%2F4mb6bWHkYUu5jOfW5yRelTB1B0KTz32yYj6CJ%2Fspo1gCC5p%2FEfyNoPQLS5F1g6LhiRYAQQ4f5ioC3gaFyHEOVfYy9AyhauXjXEooqx6BoYVX5wn7yIVAK68p5S8R38QZoRg8bdx0HmyX2WJaz4hBo04lHXJGwVdWhgSzYLf5T1vN7dPIH2%2BFD1XxohWeQKq%2BfH0awpE79fMXuTDMljsBjtgNj%2BniBKbImfXyu6UkQ%2FbbfM4GEdDD%2FBOkA15dvW5%2B6AeIcJqhDuIUi%2B0d61PM2Vnu%2BLhSN8WYLpgFp1A2NXQnHXcR54XhnvGgMPo%2FJn2sbaGL8%2BwrMHcBv7TXhyjAZTmOor0rjNOrz7XEk6hfE80nH8FxKoboLvK2JOWIXkf0EIRmaGzA%2BEcaiPYTo3qxv0zlcLoEb%2BjDRoZjQ8xsfyjX9tmZZPGNJx3Z3DTt5OBZkTBHtZB75Xf1OnnZ%2BJ5v2qqPbGI2sz7mUbpsmkhZIJCXlWEDT1z3uq%2FcB
kHmsFkTmKtjSn7uLe%2Bfs7od%2FtQHj6832yr%2B%2Fajd3SxNOW4efV%2Fnpf%2B95ddaaOSJ33J2hT2mJ2gUOQpuOkqi4C3nN7uI48SI%2BpAYQmXtraL3cswwG97QdHR3vcU92mqWNg5uNkR596n5m%2BGLSsbrLMx6QPujWYA1MB7je4EY8dXHHvBDOdhzi8RohmhTsBbxjXOx7GEtBEJVvCweltTMhxLPh8oWwMh%2FzcH3r%2BXQ%2B3M0%2BTAfhI%2FTy1ohA4CaI3TQtADBHKTyxd39gncgFJQNZqiAt4Fyo%2FT7eobt97XdJaaE25ffw7bpiX1qoJ9snBVXMeIo4axF%2BEEMHP8sJ3FjbYESwFh7dOvEQ49TXaXIV0UeZmFOl8jjTsZtOBc%2BjtPJ42JRzd2TLJFJcAvLTS6a4RHfUsTpTjV4kcWVNvuAqv47Ss5EoTWU9umdxyMrwvZ9%2BEvSs%2B6mYh6M3SyedtWYhkYcoBIsSyu9nWOJCI4C6R6kfpY70X0wcoWVQk5y%2BRoKUjoAPPT%2FonaCVXhat3EMoj4l8Cc1rEQCUp5Kj31QS%2B64LLHQNUkmhHMj1nd2UAw90b5y0JgiOCxK46h11jiVw4NkHLOFTEdr2hXan0Xf41cBakaMhWlg0wN2KiITaagOsiJDYDaJGpT464fK0bNw8LYtRpvKErQJalvQ2GEuTNC1r0NtAHLfzFqL%2FqnPJrijeh5OivTSL56Zv2XNroiY9sY7K4lzcE0vbnGEtE6l%2FU5BK6og6%2BiQBle1uEBnwiDlsAVdPEBNIH8l8D02ELqOx62zs5QnepHmCV4P0Neb5UFuiWD8t9eHp9PHu35%2FX9zfWn456ubv%2FXVyhFKGapgmVEKqV06mjNK0812s3JewDBewmA4hE657rhdW2a3EiIGi4z7U40X%2FEMKZuhoaP8vcR96LCTA1Res2FHkleT20%2BO%2BjhHB2vzE%2BpGYVEfPjl%2Bdq0W4L8coR%2FRYL8mPz0qAkrKuCTn3oE%2FGHL7sRila7SJr%2F8rdLNyRPxFlW6C70wzbs%2Bu0FvO7V93wQW0LFj5pkNlkuoQ8DSEzRA8Rblsw%2FR9fnoqtfhSGBwb91A9e5T8785ajYq6hXYPWoGHQQIqoQtStQc08ZvpSnO%2BcPNkLJwtKUp2kjqNU7RaiBJI255aYkkiYs9vvtae5XKPoVjXqcNUYuCGolO2%2FzegkaicEqTjmok5HIpTZrSSGw03Sse6syCs3TNNIkTRSOMiwyxvBT%2BPmKM1anAz6vtGGNulupMRETays3tPF0MiFjA1c%2BYMYpFazGVnhcLPexuqHEF2J700h6NBTbuuuK3FpOtcWFf%2FeOATf%2F7zcyf6Rfj%2Frs%2FIFbJlKeFmXJbwljusA8iX7VXC7kBJBxaxzbsC2DZsRJeOby5fbpGw55Ov%2F28u%2FxC%2BDPaFc%2FE%2FL6KzSMZKMXvSjjdN46WQnx%2FSyHQdBAOmQC0cTmLaBQ42h36A%2FZCoKWh8mOI2q4Iwjz%2FTVsnoycxQXgO%2B4qhHFSio2MVN%2BLC00SYpUJw4Jfl2mjNSEBrEoCrt75GQO0%2Bbn0NYaR2q7EKcb1LwNxL6V2CRGQNcPqZwMtWqOjgWv%2BOsPpZdjakztdxDXh9JASZoFL%2BoeajlVClIInnbEL%2FO0FIbgBaVpxFAidDuXlo7YMFffdTeL2EFklFE%2BTvoaVPNIJuosoo1CNDQ0%2B9eNAjgAaON4gsSpEqlAWLcPbKVsQs%2Bf4PMrBeqVKclVbWrEn4mfFSMLkhz8IS9XWFPKdlndZO028gbbcsFw7W6xSTDcbdij6QVx7OT5CiPRkaPlt2UR9Iqlz1wmyDJ6%2FQcUhitpbPlOisy2IlbWuHz47kLEyVlY4GKXNNhTB1J4cGLd0sD27kAYHxYPXa7XEq
B4zzTSuAlTjLkkVHT1GFzFlNpIznUO%2BwZQMnFY6ldukQJQtTTxRWkb1p9E3aLxBYBCKXwH4xqk3iOt6A0cHg4ialsJlcUAjjeeG645BGcnXdsDxgRW36DCCYyVP1MWHvaL8%2FGPaT1QsTauFRV2Hsz8sozrON4RmUNvnPovB%2Fuyb%2FybBdk%2F9X6Y%2Fl%2Bmrqm7v1zeLq6%2B38%2FvUrD%2FAS8MUUamHaK%2BQtSZXSUMRC%2FgIrEuUeoC59XYqFLDJZCGa38UWKe0FbaPF7cZdp1sSxgTFo9TTJjTbQMAjFFGFFnIpWwV24byyXjtQUXgxF0pFSHi%2BKIUG6VvgC7%2B5eJMNsT1GfP%2FdoemmTnA9IPjrbwYcHzfyUWq3DdmUV0E0WKRTsuqEAusO6oHuMVXqcFD0jSlgOzbCRwFV0LImlEoskTgwXp3UR80C5sox%2BrgrkmsRdj7nabS9yVSAXCpGrSTNkLnZ%2FXCukECxv0D7F1ONzamjJ2Y0fzRKVC4wPa2jp7MBatABVwGFmk8PEF3WYTRuP4jBt1mOB1AYwtxMatE46yX0AvVgSOQuLP%2BOakDO7xEigXb%2F0ETT1qMt4qHgny7n1MEZJDNeCFNvv9vQ5ies25aIUprbjZ1o3f06jXYleClBHRwNBjOR%2FEjlqEL%2BM97xkcSlWFmiSFOGSVWBPEbrTv%2FuEwAdqnoXAKqp4bsr6v0%2Fv3AFn%2BeuojsJawtXW5U7PXbSPxQuDoFOLlfxqzcVCKJBwO%2B42oJehK3jSnCfsxGEoKv4hmlKz3vtNA9JxgbZBpwUvE4qCHnkgbMcFIQ48kddBqqFmCHP1Y9xARKrdZII6cTcPzN3x%2Fkg1GGBJSdqT7T6YNislbGkrIn%2FumcP%2BdSvR4ALKtLwS3z2pboAIENrWJKUv2AgYLD1z68a0QDjtOy18Y0dlc8nb%2FxFSWgM8Ax3JEW5snXEEjRlbwELzwp7E1CNYdDeM0wnKzQYEIniQVmqKhfCcELPsAwAYJ0PAeEgSgtbK%2BzTYM7zE0TJXasu3o3bA1TXhNjQapuN8hH5D8kgpLQUVMZkKoagIzpx1qdzz5MkPrHKfC%2Bsw5eesfpO5OJo8phTODVp5CpaDMnEkk1dPWMadrEwcqWTX41ECC%2BvJxMGqtxKz61%2B43mzx5KDOC1kPjxROJqVqaU2AQB3KGrlWVOnVlrZjAz1satEsYG89IieTWZGTRODRowh9erKR7c259IzkOIOaSIJQjAZdesR0MM2PWqWD0qAUJUQXQXq6JHWU26OOohgOIfh5cvSOZKjiBfXePzYobaNDrttpV9ChYwnLKkSHhOiktowNeTJW88iQO%2BxCR5mta76c4sRGeIL7eG2IMfSK50U%2F789Kwr44cx5ORoNR%2FOgxkwaiWMk0f1YntWUIfIenkrr89vNKULzVBIGjSaymTmcSBBpwucRFEwsfR%2FADOIERzjTTixZcDOPj95w%2FqigzG2qM%2BQTywgmyEarZo0d19QmqTu%2BdDFIYqTMRyRSeacqTTMGZZh%2FCdtZdsmZzSYD7xPYRwXfixoZpoXcfbAH%2FWeD3ndErz3jy0Tw7b%2BEGmppLyNYDCLiMR0GFjQ8LomdhCUTwaGi8A8aQMGcq8x9aYt3MSWCjoAFXGPoZ5FKyLfMFK%2FilNLWNLEqF%2FpiBAeEaDZ7iJ8uG0gsSrLCkKonp5YxPZJWoa8xndDhk4DhjW4%2FlcGlktL1SjqX5tWJzeVBnU5EocWahKoqAWYgtNNJkUD4G4je%2FqIRfEEc5dH8fx8js95tn5NClvXT%2BjJB65stEAEwoFmU7uB13QALsgE79z2877%2BcdGv%2FJn9%2F6P%2B%2F2WoDroLUXCNmdXpAVE%2B9Zlm0yxiE7RlXza6O%2FFaqqDAtmy5g0aFRot15Yh8tZCKFVNKtiU6
XLC6qq5KPXuHOqqpo0VUVXPTcZTcc0Vcq4XCkLhVVuqltThejwwn7yIVC2FnzcgsIaKxdsEDlHb9%2BgAw7s4wQ7lDW7QDNqUFflJcBITiJPa5WNXM1qrfJKbBaoRFeDdkoYDde2vV0%2Bnmx%2BINY4KUgkeWGgrvDG4%2BWfD80biy57R3mjOixXePpI3lgMJysoMde9nGD7zPaJQpgtEKbKw7QOw9BkmpvRHukt2b8hO%2BPN5nzrFHd6DGQ2Y%2BuYJOTTqE9gE9gXUVf0r7tBn89NVJaNQs3KaOhlUOf0unPymiq3W%2FpczMqlNuW1dhh3jGoeTiNnRbl452r6nmqb72eFqZAoxpzYmcJEqHup0THe1pQKoY895pYpz16jt%2BdZnajpK1KCytzjoC3KMTqectQocCndIiYVy0dpgSaRRXE8m8Rfwaqi06dyJKMJjzLjTEtNZCthNdOTLyodsCJNhQPOHFeiv9qQvu1vrC0rQSy97WajuRhXFen7WS%2FIE12%2Feo2SSaLdO4BQUkf9N0cpkzLWeKp0T8YaZVeOoAb6dPJKl4y0XF6lmO05QZkjUCfYfhrY8CPC75z8CKlcNMXSOEn14v7NCVn7yxj%2FpmRtlltm5WoGBKF3H2uOCvIrU%2FO8pFAvVYM%2Bs6RmQ1SIWFRNRpUqSOIkRpZ9yRJLZDolT23NZIsZhr5rT%2Fh0SYqGCHMJOabtR9OMmCnbduRlgAXxc8uChx3dPT%2BebyrvDZhAhpWCf5zdYgLKy4NKCM%2BeaHaqoKQwCQBkFYUJyWVj0LHlmpSyzNkveSOBFvoSCT2n701PpfQrNV0HnkchY8KFq7kU1LgqxjOF%2BwL%2Fc312g%2F%2B0fd8EFtCJJ5cNlkuow3hu2ryPYZHDcqG%2B3pBn%2BDoyyEaHcnlNpoc20Qq79RV6u%2B7ankfG5VK5hZYPZd9ILYqz1iw%2F8KMrigzEfc0IH5K4Iw3NHvA%2FYOGPGlBDNGQTzo2WGcFHFIQCpuYUXZ1YIdQYtJZA87cuUcMgFHDhYsu63c17UR8X09ZhMDzug5dAxD2jwL13EDylxlDA37wUWqLGNF3Yw0OOzyDGJIUj8ofFtDpRLlEFM%2BCnrCDZpCDhgixgBsqs%2FFkd69Vs24%2FKq2i%2B679tA4uiX%2F4P). Specify the desired analysis details for your data in the *essential.vars.groovy* file (see below) and run the pipeline *chipseq.pipeline.groovy* as described [here](https://gitlab.rlp.net/imbforge/NGSpipe2go/-/blob/master/README.md). A markdown file *ChIPreport.Rmd* will be generated in the output reports folder after running the pipeline. 
Subsequently, the *ChIPreport.Rmd* file can be converted to a final html report using the *knitr* R-package.\r +\r +\r +### The pipelines includes\r +- raw data quality control with FastQC, BamQC and MultiQC\r +- mapping reads or read pairs to the reference genome using bowtie2 (default) or bowtie1\r +- filter out multimapping reads from bowtie2 output with samtools (optional)\r +- identify and remove duplicate reads with Picard MarkDuplicates (optional) \r +- generation of bigWig tracks for visualisation of alignment with deeptools bamCoverage. For single end design, reads are extended to the average fragment size\r +- characterization of insert size using Picard CollectInsertSizeMetrics (for paired end libraries only)\r +- characterize library complexity by PCR Bottleneck Coefficient using the GenomicAlignments R-package (for single read libraries only) \r +- characterize phantom peaks by cross correlation analysis using the spp R-package (for single read libraries only)\r +- peak calling of IP samples vs. corresponding input controls using MACS2\r +- peak annotation using the ChIPseeker R-package (optional)\r +- differential binding analysis using the diffbind R-package (optional). For this, input peak files must be given in *NGSpipe2go/tools/diffbind/targets_diffbind.txt* and contrasts of interest in *NGSpipe2go/tools/diffbind/contrasts_diffbind.txt* (see below)\r +\r +\r +### Pipeline-specific parameter settings\r +- targets.txt: tab-separated txt-file giving information about the analysed samples. The following columns are required: \r + - IP: bam file name of IP sample\r + - IPname: IP sample name to be used in plots and tables \r + - INPUT: bam file name of corresponding input control sample\r + - INPUTname: input sample name to be used in plots and tables \r + - group: variable for sample grouping (e.g. 
by condition)\r +\r +- essential.vars.groovy: essential parameter describing the experiment including: \r + - ESSENTIAL_PROJECT: your project folder name\r + - ESSENTIAL_BOWTIE_REF: full path to bowtie2 indexed reference genome (bowtie1 indexed reference genome if bowtie1 is selected as mapper)\r + - ESSENTIAL_BOWTIE_GENOME: full path to the reference genome FASTA file\r + - ESSENTIAL_BSGENOME: Bioconductor genome sequence annotation package\r + - ESSENTIAL_TXDB: Bioconductor transcript-related annotation package\r + - ESSENTIAL_ANNODB: Bioconductor genome annotation package\r + - ESSENTIAL_BLACKLIST: files with problematic 'blacklist regions' to be excluded from analysis (optional)\r + - ESSENTIAL_PAIRED: either paired end ("yes") or single read ("no") design\r + - ESSENTIAL_READLEN: read length of library\r + - ESSENTIAL_FRAGLEN: mean length of library inserts and also minimum peak size called by MACS2\r + - ESSENTIAL_THREADS: number of threads for parallel tasks\r + - ESSENTIAL_USE_BOWTIE1: if true use bowtie1 for read mapping, otherwise bowtie2 by default\r +\r +- additional (more specialized) parameter can be given in the var.groovy-files of the individual pipeline modules \r +\r +If differential binding analysis is selected it is required additionally:\r +\r +- contrasts_diffbind.txt: indicate intended group comparisions for differential binding analysis, e.g. *KOvsWT=(KO-WT)* if targets.txt contains the groups *KO* and *WT*. Give 1 contrast per line. 
\r +- targets_diffbind.txt: \r + - SampleID: IP sample name (as IPname in targets.txt)\r + - Condition: variable for sample grouping (as group in targets.txt)\r + - Replicate: number of replicate\r + - bamReads: bam file name of IP sample (as IP in targets.txt but with path relative to project directory)\r + - ControlID: input sample name (as INPUTname in targets.txt)\r + - bamControl: bam file name of corresponding input control sample (as INPUT in targets.txt but with path relative to project directory)\r + - Peaks: peak file name opbatined from peak caller (path relative to project directory)\r + - PeakCaller: name of peak caller (e.g. macs)\r +\r +## Programs required\r +- Bedtools\r +- Bowtie2\r +- deepTools\r +- encodeChIPqc (provided by another project from imbforge)\r +- FastQC\r +- MACS2\r +- MultiQC\r +- Picard\r +- R with packages ChIPSeeker, diffbind, GenomicAlignments, spp and genome annotation packages\r +- Samtools\r +- UCSC utilities\r +""" ; + schema1:keywords "ChIP-seq, bpipe, groovy" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "ChIP-seq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/59?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.196.4" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook ABC MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/196/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 101184 ; + schema1:creator , + ; + schema1:dateCreated "2023-07-26T09:39:03Z" ; + schema1:dateModified "2023-07-26T09:39:41Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/196?version=4" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook ABC MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_wf_amber_md_setup/blob/master/biobb_wf_amber_md_setup/notebooks/abcsetup/biobb_amber_ABC_setup.ipynb" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Pre-processing of mass spectrometry-based metabolomics data with quantification and identification based on MS1 and MS2 data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/57?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/metaboigniter" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/metaboigniter" ; + schema1:sdDatePublished "2024-08-05 10:24:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/57/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5180 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:00Z" ; + schema1:dateModified "2024-06-11T12:55:00Z" ; + schema1:description "Pre-processing of mass spectrometry-based metabolomics data with quantification and identification based on MS1 and MS2 data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/57?version=3" ; + schema1:keywords "Metabolomics, identification, quantification, mass-spectrometry, ms1, ms2" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/metaboigniter" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/57?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Analysis tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This workflow computes a set of Quality Control (QC) analyses on top of an uploaded MD trajectory. QC analyses include positional divergence (RMSd), change of shape (Radius of Gyration), identification of flexible regions (atomic/residue fluctuations), and identification of different molecular conformations (trajectory clustering).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.287.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_protein_md_analysis/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein MD Analysis tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/287/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5395 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T08:58:27Z" ; + schema1:dateModified "2023-04-14T08:59:26Z" ; + schema1:description """# Protein MD Analysis tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This workflow computes a set of Quality Control (QC) analyses on top of an uploaded MD trajectory. QC analyses include positional divergence (RMSd), change of shape (Radius of Gyration), identification of flexible regions (atomic/residue fluctuations), and identification of different molecular conformations (trajectory clustering).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/287?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein MD Analysis tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_protein_md_analysis/python/workflow.py" ; + schema1:version 3 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Functional annotation of protein sequences" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/755?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Functional protein annotation using EggNOG-mapper and InterProScan" ; + schema1:sdDatePublished "2024-08-05 10:25:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/755/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8415 ; + schema1:creator ; + schema1:dateCreated "2024-02-15T11:44:05Z" ; + schema1:dateModified "2024-02-15T11:44:05Z" ; + schema1:description "Functional annotation of protein sequences" ; + schema1:isPartOf ; + schema1:keywords "genome-annotation" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Functional protein annotation using EggNOG-mapper and InterProScan" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/755?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "The pangenome graph construction pipeline renders a collection of sequences into a pangenome graph. Its goal is to build a graph that is locally directed and acyclic while preserving large-scale variation. 
Maintaining local linearity is important for interpretation, visualization, mapping, comparative genomics, and reuse of pangenome graphs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1007?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/pangenome" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/pangenome" ; + schema1:sdDatePublished "2024-08-05 10:23:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1007/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11068 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:06Z" ; + schema1:dateModified "2024-06-11T12:55:06Z" ; + schema1:description "The pangenome graph construction pipeline renders a collection of sequences into a pangenome graph. Its goal is to build a graph that is locally directed and acyclic while preserving large-scale variation. Maintaining local linearity is important for interpretation, visualization, mapping, comparative genomics, and reuse of pangenome graphs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1007?version=3" ; + schema1:keywords "pangenome" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/pangenome" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1007?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """A CWL-based pipeline for calling small germline variants, namely SNPs and small INDELs, by processing data from Whole-genome Sequencing (WGS) or Targeted Sequencing (e.g., Whole-exome sequencing; WES) experiments. 
\r +\r +On the respective GitHub folder are available:\r +\r +- The CWL wrappers and subworkflows for the workflow\r +- A pre-configured YAML template, based on validation analysis of publicly available HTS data\r +\r +Briefly, the workflow performs the following steps:\r +\r +1. Quality control of Illumina reads (FastQC)\r +2. Trimming of the reads (e.g., removal of adapter and/or low quality sequences) (Trim galore)\r +3. Mapping to reference genome (BWA-MEM)\r +4. Convertion of mapped reads from SAM (Sequence Alignment Map) to BAM (Binary Alignment Map) format (samtools)\r +5. Sorting mapped reads based on read names (samtools)\r +6. Adding information regarding paired end reads (e.g., CIGAR field information) (samtools)\r +7. Re-sorting mapped reads based on chromosomal coordinates (samtools)\r +8. Adding basic Read-Group information regarding sample name, platform unit, platform (e.g., ILLUMINA), library and identifier (picard AddOrReplaceReadGroups)\r +9. Marking PCR and/or optical duplicate reads (picard MarkDuplicates)\r +10. Collection of summary statistics (samtools) \r +11. Creation of indexes for coordinate-sorted BAM files to enable fast random access (samtools)\r +12. Splitting the reference genome into a predefined number of intervals for parallel processing (GATK SplitIntervals)\r +\r +At this point the application of multi-sample workflow follows, during which multiple samples are concatenated into a single, unified VCF (Variant Calling Format) file, which contains the variant information for all samples:\r +\r +13. Application of Base Quality Score Recalibration (BQSR) (GATK BaseRecalibrator and ApplyBQSR tools)\r +14. Variant calling in gVCF (genomic VCF) mode (-ERC GVCF) (GATK HaplotypeCaller) \r +15. Merging of all genomic interval-split gVCF files for each sample (GATK MergeVCFs)\r +16. Generation of the unified VCF file (GATK CombineGVCFs and GenotypeGVCFs tools)\r +17. 
Separate annotation for SNP and INDEL variants, using the Variant Quality Score Recalibration (VQSR) method (GATK VariantRecalibrator and ApplyVQSR tools)\r +18. Variant filtering based on the information added during VQSR and/or custom filters (bcftools)\r +19. Normalization of INDELs (split multiallelic sites) (bcftools)\r +20. Annotation of the final dataset of filtered variants with genomic, population-related and/or clinical information (ANNOVAR)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.526.1" ; + schema1:isBasedOn "https://github.com/BiodataAnalysisGroup/CWL_HTS_pipelines/tree/main/Germline_Variant_calling/multi-sample_analysis/with_BQSR_VQSR" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL-based (multi-sample) workflow for germline variant calling" ; + schema1:sdDatePublished "2024-08-05 10:30:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/526/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 34531 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-07-05T09:44:42Z" ; + schema1:dateModified "2023-07-05T09:45:12Z" ; + schema1:description """A CWL-based pipeline for calling small germline variants, namely SNPs and small INDELs, by processing data from Whole-genome Sequencing (WGS) or Targeted Sequencing (e.g., Whole-exome sequencing; WES) experiments. \r +\r +On the respective GitHub folder are available:\r +\r +- The CWL wrappers and subworkflows for the workflow\r +- A pre-configured YAML template, based on validation analysis of publicly available HTS data\r +\r +Briefly, the workflow performs the following steps:\r +\r +1. Quality control of Illumina reads (FastQC)\r +2. Trimming of the reads (e.g., removal of adapter and/or low quality sequences) (Trim galore)\r +3. 
Mapping to reference genome (BWA-MEM)\r +4. Convertion of mapped reads from SAM (Sequence Alignment Map) to BAM (Binary Alignment Map) format (samtools)\r +5. Sorting mapped reads based on read names (samtools)\r +6. Adding information regarding paired end reads (e.g., CIGAR field information) (samtools)\r +7. Re-sorting mapped reads based on chromosomal coordinates (samtools)\r +8. Adding basic Read-Group information regarding sample name, platform unit, platform (e.g., ILLUMINA), library and identifier (picard AddOrReplaceReadGroups)\r +9. Marking PCR and/or optical duplicate reads (picard MarkDuplicates)\r +10. Collection of summary statistics (samtools) \r +11. Creation of indexes for coordinate-sorted BAM files to enable fast random access (samtools)\r +12. Splitting the reference genome into a predefined number of intervals for parallel processing (GATK SplitIntervals)\r +\r +At this point the application of multi-sample workflow follows, during which multiple samples are concatenated into a single, unified VCF (Variant Calling Format) file, which contains the variant information for all samples:\r +\r +13. Application of Base Quality Score Recalibration (BQSR) (GATK BaseRecalibrator and ApplyBQSR tools)\r +14. Variant calling in gVCF (genomic VCF) mode (-ERC GVCF) (GATK HaplotypeCaller) \r +15. Merging of all genomic interval-split gVCF files for each sample (GATK MergeVCFs)\r +16. Generation of the unified VCF file (GATK CombineGVCFs and GenotypeGVCFs tools)\r +17. Separate annotation for SNP and INDEL variants, using the Variant Quality Score Recalibration (VQSR) method (GATK VariantRecalibrator and ApplyVQSR tools)\r +18. Variant filtering based on the information added during VQSR and/or custom filters (bcftools)\r +19. Normalization of INDELs (split multiallelic sites) (bcftools)\r +20. 
Annotation of the final dataset of filtered variants with genomic, population-related and/or clinical information (ANNOVAR)\r +""" ; + schema1:image ; + schema1:keywords "CWL, workflow, Germline, variant calling, Genomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "CWL-based (multi-sample) workflow for germline variant calling" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/526?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 237748 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=21" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-08-05 10:23:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=21" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 18600 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-12T03:03:06Z" ; + schema1:dateModified "2024-06-12T03:03:06Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=21" ; + schema1:version 21 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Alternative splicing analysis using RNA-seq." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1018?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/rnasplice" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnasplice" ; + schema1:sdDatePublished "2024-08-05 10:23:21 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1018/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14104 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:14Z" ; + schema1:dateModified "2024-06-11T12:55:14Z" ; + schema1:description "Alternative splicing analysis using RNA-seq." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/1018?version=4" ; + schema1:keywords "alternative-splicing, rna, rna-seq, splicing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnasplice" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1018?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=16" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-08-05 10:23:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=16" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12436 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:50Z" ; + schema1:dateModified "2024-06-11T12:54:50Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=16" ; + schema1:version 16 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "For integrative analysis of CAKUT multi-omics data DIABLO method of the mixOmics package (version 6.10.9. Singh et. al. 2019) was used with sPLS-DA (sparse Partial Least Squares Discriminant Analysis Discriminant Analysis) and PLS-DA classification." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/330?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for EJP-RD WP13 case-study: CAKUT peptidome and miRNome data analysis using the DIABLO and PLS-DA methods from the mixOmics R package" ; + schema1:sdDatePublished "2024-08-05 10:31:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/330/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 999 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2022-04-20T16:43:51Z" ; + schema1:dateModified "2023-01-16T13:59:42Z" ; + schema1:description "For integrative analysis of CAKUT multi-omics data DIABLO method of the mixOmics package (version 6.10.9. Singh et. al. 2019) was used with sPLS-DA (sparse Partial Least Squares Discriminant Analysis Discriminant Analysis) and PLS-DA classification." ; + schema1:keywords "rare diseases" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "EJP-RD WP13 case-study: CAKUT peptidome and miRNome data analysis using the DIABLO and PLS-DA methods from the mixOmics R package" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/330?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-04-09T10:22:36.835089" ; + schema1:hasPart , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/baredsc" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + ; + schema1:name "baredsc/baredSC-1d-logNorm" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "baredsc/baredSC-2d-logNorm" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/baredsc" ; + schema1:version "0.4" . + + a schema1:Dataset ; + schema1:datePublished "2023-11-16T14:32:18.212018" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Purge-duplicate-contigs-VGP6/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicate-contigs-VGP6/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:version "0.3.2" . + + a schema1:Dataset ; + schema1:datePublished "2022-01-27T14:48:01+00:00" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/crs4/fair-crcc-send-data" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "" . + + a schema1:ImageObject, + schema1:MediaObject ; + schema1:name "Workflow diagram" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "BioTranslator performs sequentially pathway analysis and gene prioritization: A specific operator is executed for each task to translate the input gene set into semantic terms and pinpoint the pivotal-role genes on the derived semantic network. The output consists of the set of statistically significant semantic terms and the associated hub genes (the gene signature), prioritized according to their involvement in the underlying semantic topology." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/189?version=3" ; + schema1:isBasedOn "http://www.biotranslator.gr:8080/u/thodk/w/biotranslator-workflow" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for BioTranslator Workflow" ; + schema1:sdDatePublished "2024-08-05 10:33:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/189/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3960 ; + schema1:dateCreated "2021-09-24T15:16:59Z" ; + schema1:dateModified "2023-01-16T13:52:49Z" ; + schema1:description "BioTranslator performs sequentially pathway analysis and gene prioritization: A specific operator is executed for each task to translate the input gene set into semantic terms and pinpoint the pivotal-role genes on the derived semantic network. The output consists of the set of statistically significant semantic terms and the associated hub genes (the gene signature), prioritized according to their involvement in the underlying semantic topology." 
; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/189?version=2" ; + schema1:keywords "Semantic Network Analysis, Gene Prioritization, Pathway Analysis, Biomedical Ontologies, Semantic Interpretation" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "BioTranslator Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/189?version=3" ; + schema1:version 3 ; + ns1:input ; + ns1:output , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 44291 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Proteogenomics database creation workflow using pypgatk framework. " ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1008?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/pgdb" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/pgdb" ; + schema1:sdDatePublished "2024-08-05 10:23:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1008/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7816 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:07Z" ; + schema1:dateModified "2024-06-11T12:55:07Z" ; + schema1:description "Proteogenomics database creation workflow using pypgatk framework. " ; + schema1:keywords "cosmic, gnomad, protein-databases, proteogenomics, Proteomics, pypgatk" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/pgdb" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1008?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "call and score variants from WGS/WES of rare disease patients" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1014?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/raredisease" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/raredisease" ; + schema1:sdDatePublished "2024-08-05 10:23:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1014/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12171 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:08Z" ; + schema1:dateModified "2024-06-11T12:55:08Z" ; + schema1:description "call and score variants from WGS/WES of rare disease patients" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1014?version=5" ; + schema1:keywords "diagnostics, rare-disease, snv, structural-variants, variant-annotation, variant-calling, wes, WGS" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/raredisease" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1014?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description "Performs scaffolding using Bionano Data. Part of VGP assembly pipeline." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/322?version=1" ; + schema1:isBasedOn "https://github.com/Delphine-L/iwc/tree/VGP/workflows/VGP-assembly-v2" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for VGP hybrid scaffolding with Bionano optical maps" ; + schema1:sdDatePublished "2024-08-05 10:32:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/322/ro_crate?version=1" . 
+ + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 4265 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 26876 ; + schema1:creator ; + schema1:dateCreated "2022-04-05T22:21:34Z" ; + schema1:dateModified "2023-01-16T13:59:34Z" ; + schema1:description "Performs scaffolding using Bionano Data. Part of VGP assembly pipeline." ; + schema1:isPartOf ; + schema1:keywords "vgp, Assembly, Galaxy" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "VGP hybrid scaffolding with Bionano optical maps" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/322?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Correlation between Phenotypic and In Silico Detection of Antimicrobial Resistance in Salmonella enterica in Canada Using Staramr. \r +\r +Doi: [10.3390/microorganisms10020292](https://doi.org/10.3390/microorganisms10020292)\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/470?version=1" ; + schema1:isBasedOn "https://usegalaxy.eu/u/dennisd/w/103390microorganisms10020292" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Workflow 4: Staramr" ; + schema1:sdDatePublished "2024-08-05 10:27:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/470/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 23190 ; + schema1:dateCreated "2023-05-11T08:29:41Z" ; + schema1:dateModified "2023-10-30T16:54:19Z" ; + schema1:description """Correlation between Phenotypic and In Silico Detection of Antimicrobial Resistance in Salmonella enterica in Canada Using Staramr. \r +\r +Doi: [10.3390/microorganisms10020292](https://doi.org/10.3390/microorganisms10020292)\r +""" ; + schema1:keywords "AMR, AMR-detection, 10.3390/microorganisms10020292, Bioinformatics, antimicrobial resistance" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Workflow 4: Staramr" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/470?version=1" ; + schema1:version 1 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Performs phylogenetic placement with EPA-NG" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1009?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/phyloplace" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/phyloplace" ; + schema1:sdDatePublished "2024-08-05 10:23:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1009/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8100 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:07Z" ; + schema1:dateModified "2024-06-11T12:55:07Z" ; + schema1:description "Performs phylogenetic placement with EPA-NG" ; + schema1:keywords "evolution, evolutionary-tree, phylogenetic-placement, phylogenetics, sequence-classification, taxonomy-assignment" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/phyloplace" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1009?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=21" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=21" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10412 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:13Z" ; + schema1:dateModified "2024-06-11T12:55:13Z" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=21" ; + schema1:version 21 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """## Description\r +\r +The workflow takes an input file with Cancer Driver Genes predictions (i.e. the results provided by a participant), computes a set of metrics, and compares them against the data currently stored in OpenEBench within the TCGA community. Two assessment metrics are provided for that predictions. Also, some plots (which are optional) that allow to visualize the performance of the tool are generated. The workflow consists in three standard steps, defined by OpenEBench. The tools needed to run these steps are containerised in three Docker images, whose recipes are available in the [TCGA_benchmarking_dockers](https://github.com/inab/TCGA_benchmarking_dockers ) repository and the images are stored in the [INB GitLab container registry](https://gitlab.bsc.es/inb/elixir/openebench/workflows/tcga_benchmarking_dockers/container_registry) . Separated instances are spawned from these images for each step:\r +1. **Validation**: the input file format is checked and, if required, the content of the file is validated (e.g check whether the submitted gene IDs exist)\r +2. **Metrics Generation**: the predictions are compared with the 'Gold Standards' provided by the community, which results in two performance metrics - precision (Positive Predictive Value) and recall(True Positive Rate).\r +3. **Consolidation**: the benchmark itself is performed by merging the tool metrics with the rest of TCGA data. 
The results are provided in JSON format and SVG format (scatter plot).\r +\r +![OpenEBench benchmarking workflow](https://raw.githubusercontent.com/inab/TCGA_benchmarking_workflow/1.0.8/workflow_schema.jpg)\r +\r +## Data\r +\r +* [TCGA_sample_data](./TCGA_sample_data) folder contains all the reference data required by the steps. It is derived from the manuscript:\r +[Comprehensive Characterization of Cancer Driver Genes and Mutations](https://www.cell.com/cell/fulltext/S0092-8674%2818%2930237-X?code=cell-site), Bailey et al, 2018, Cell [![doi:10.1016/j.cell.2018.02.060](https://img.shields.io/badge/doi-10.1016%2Fj.cell.2018.02.060-green.svg)](https://doi.org/10.1016/j.cell.2018.02.060) \r +* [TCGA_sample_out](./TCGA_sample_out) folder contains an example output for a worklow run, with two cancer types / challenges selected (ACC, BRCA). Results obtained from the default execution should be similar to those ones available in this directory. Results found in [TCGA_sample_out/results](./TCGA_sample_out/results) can be visualized in the browser using [`benchmarking_workflows_results_visualizer` javascript library](https://github.com/inab/benchmarking_workflows_results_visualizer).\r +\r +## Requirements\r +This workflow depends on three tools that have to be installed before you can run it:\r +* [Git](https://git-scm.com/downloads): Used to download the workflow from GitHub.\r +* [Docker](https://docs.docker.com/get-docker/): The Docker Engine is used under the hood to execute the containerised steps of the benchmarking workflow.\r +* [Nextflow](https://www.nextflow.io/): Is the technology used to write and execute the benchmarking workflow. Note that it depends on Bash (>=3.2) and Java (>=8 , <=17). 
We provide the script [run_local_nextflow.bash](run_local_nextflow.bash) that automates their installation for local testing.\r +\r +Check that these tools are available in your environment:\r +```\r +# Git\r +> which git\r +/usr/bin/git\r +> git --version\r +git version 2.26.2\r +\r +# Docker\r +> which docker\r +/usr/bin/docker\r +> docker --version\r +Docker version 20.10.9-ce, build 79ea9d308018\r +\r +# Nextflow\r +> which nextflow\r +/home/myuser/bin/nextflow\r +> nextflow -version\r +\r + N E X T F L O W\r + version 21.04.1 build 5556\r + created 14-05-2021 15:20 UTC (17:20 CEST)\r + cite doi:10.1038/nbt.3820\r + http://nextflow.io\r +```\r +In the case of docker, apart from being installed the daemon has to be running. On Linux distributions that use `Systemd` for service management, which includes the most popular ones as of 2021 (Ubuntu, Debian, CentOs, Red Hat, OpenSuse), the `systemctl` command can be used to check its status and manage it:\r +\r +```\r +# Check status of docker daemon\r +> sudo systemctl status docker\r +● docker.service - Docker Application Container Engine\r + Loaded: loaded (/usr/lib/systemd/system/docker.service; disabled; vendor preset: disabled)\r + Active: inactive (dead)\r + Docs: http://docs.docker.com\r +\r +# Start docker daemon\r +> sudo systemctl start docker\r +```\r +\r +### Download workflow\r +Simply clone the repository and check out the latest tag (currently `1.0.8`):\r +\r +```\r +# Clone repository\r +> git clone https://github.com/inab/TCGA_benchmarking_dockers.git\r +\r +# Move to new directory\r +cd TCGA_benchmarking_workflow/\r +\r +# Checkout version 1.0.8\r +> git checkout 1.0.8 -b 1.0.8\r +```\r +\r +## Usage\r +The workflow can be run workflow in two different ways:\r +* Standard: `nextflow run main.nf -profile docker`\r +* Using the bash script that installs Java and Nextflow:`./run_local_nextflow.bash run main.nf -profile docker`.\r +\r +Arguments specifications:\r +```\r +Usage:\r +Run the pipeline with 
default parameters:\r +nextflow run main.nf -profile docker\r +\r +Run with user parameters:\r +nextflow run main.nf -profile docker --predictionsFile {driver.genes.file} --public_ref_dir {validation.reference.file} --participant_name {tool.name} --metrics_ref_dir {gold.standards.dir} --cancer_types {analyzed.cancer.types} --assess_dir {benchmark.data.dir} --results_dir {output.dir}\r +\r +Mandatory arguments:\r + --input List of cancer genes prediction\r + --community_id Name or OEB permanent ID for the benchmarking community\r + --public_ref_dir Directory with list of cancer genes used to validate the predictions\r + --participant_id Name of the tool used for prediction\r + --goldstandard_dir Dir that contains metrics reference datasets for all cancer types\r + --challenges_ids List of types of cancer selected by the user, separated by spaces\r + --assess_dir Dir where the data for the benchmark are stored\r +\r +Other options:\r + --validation_result The output directory where the results from validation step will be saved\r + --augmented_assess_dir Dir where the augmented data for the benchmark are stored\r + --assessment_results The output directory where the results from the computed metrics step will be saved\r + --outdir The output directory where the consolidation of the benchmark will be saved\r + --statsdir The output directory with nextflow statistics\r + --data_model_export_dir The output dir where json file with benchmarking data model contents will be saved\r + --otherdir The output directory where custom results will be saved (no directory inside)\r +Flags:\r + --help Display this message\r +```\r +\r +Default input parameters and Docker images to use for each step can be specified in the [config](./nextflow.config) file.\r +\r +**NOTE: In order to make your workflow compatible with the [OpenEBench VRE Nextflow Executor](https://github.com/inab/vre-process_nextflow-executor), please make sure to use the same parameter names in your workflow.**\r 
+""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/244?version=4" ; + schema1:isBasedOn "https://github.com/inab/TCGA_benchmarking_workflow/tree/1.0.8" ; + schema1:license "LGPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for OpenEBench TCGA Cancer Driver Genes benchmarking workflow" ; + schema1:sdDatePublished "2024-08-05 10:32:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/244/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6421 ; + schema1:creator , + , + ; + schema1:dateCreated "2021-11-29T15:21:23Z" ; + schema1:dateModified "2023-01-16T13:55:10Z" ; + schema1:description """## Description\r +\r +The workflow takes an input file with Cancer Driver Genes predictions (i.e. the results provided by a participant), computes a set of metrics, and compares them against the data currently stored in OpenEBench within the TCGA community. Two assessment metrics are provided for that predictions. Also, some plots (which are optional) that allow to visualize the performance of the tool are generated. The workflow consists in three standard steps, defined by OpenEBench. The tools needed to run these steps are containerised in three Docker images, whose recipes are available in the [TCGA_benchmarking_dockers](https://github.com/inab/TCGA_benchmarking_dockers ) repository and the images are stored in the [INB GitLab container registry](https://gitlab.bsc.es/inb/elixir/openebench/workflows/tcga_benchmarking_dockers/container_registry) . Separated instances are spawned from these images for each step:\r +1. **Validation**: the input file format is checked and, if required, the content of the file is validated (e.g check whether the submitted gene IDs exist)\r +2. 
**Metrics Generation**: the predictions are compared with the 'Gold Standards' provided by the community, which results in two performance metrics - precision (Positive Predictive Value) and recall(True Positive Rate).\r +3. **Consolidation**: the benchmark itself is performed by merging the tool metrics with the rest of TCGA data. The results are provided in JSON format and SVG format (scatter plot).\r +\r +![OpenEBench benchmarking workflow](https://raw.githubusercontent.com/inab/TCGA_benchmarking_workflow/1.0.8/workflow_schema.jpg)\r +\r +## Data\r +\r +* [TCGA_sample_data](./TCGA_sample_data) folder contains all the reference data required by the steps. It is derived from the manuscript:\r +[Comprehensive Characterization of Cancer Driver Genes and Mutations](https://www.cell.com/cell/fulltext/S0092-8674%2818%2930237-X?code=cell-site), Bailey et al, 2018, Cell [![doi:10.1016/j.cell.2018.02.060](https://img.shields.io/badge/doi-10.1016%2Fj.cell.2018.02.060-green.svg)](https://doi.org/10.1016/j.cell.2018.02.060) \r +* [TCGA_sample_out](./TCGA_sample_out) folder contains an example output for a worklow run, with two cancer types / challenges selected (ACC, BRCA). Results obtained from the default execution should be similar to those ones available in this directory. Results found in [TCGA_sample_out/results](./TCGA_sample_out/results) can be visualized in the browser using [`benchmarking_workflows_results_visualizer` javascript library](https://github.com/inab/benchmarking_workflows_results_visualizer).\r +\r +## Requirements\r +This workflow depends on three tools that have to be installed before you can run it:\r +* [Git](https://git-scm.com/downloads): Used to download the workflow from GitHub.\r +* [Docker](https://docs.docker.com/get-docker/): The Docker Engine is used under the hood to execute the containerised steps of the benchmarking workflow.\r +* [Nextflow](https://www.nextflow.io/): Is the technology used to write and execute the benchmarking workflow. 
Note that it depends on Bash (>=3.2) and Java (>=8 , <=17). We provide the script [run_local_nextflow.bash](run_local_nextflow.bash) that automates their installation for local testing.\r +\r +Check that these tools are available in your environment:\r +```\r +# Git\r +> which git\r +/usr/bin/git\r +> git --version\r +git version 2.26.2\r +\r +# Docker\r +> which docker\r +/usr/bin/docker\r +> docker --version\r +Docker version 20.10.9-ce, build 79ea9d308018\r +\r +# Nextflow\r +> which nextflow\r +/home/myuser/bin/nextflow\r +> nextflow -version\r +\r + N E X T F L O W\r + version 21.04.1 build 5556\r + created 14-05-2021 15:20 UTC (17:20 CEST)\r + cite doi:10.1038/nbt.3820\r + http://nextflow.io\r +```\r +In the case of docker, apart from being installed the daemon has to be running. On Linux distributions that use `Systemd` for service management, which includes the most popular ones as of 2021 (Ubuntu, Debian, CentOs, Red Hat, OpenSuse), the `systemctl` command can be used to check its status and manage it:\r +\r +```\r +# Check status of docker daemon\r +> sudo systemctl status docker\r +● docker.service - Docker Application Container Engine\r + Loaded: loaded (/usr/lib/systemd/system/docker.service; disabled; vendor preset: disabled)\r + Active: inactive (dead)\r + Docs: http://docs.docker.com\r +\r +# Start docker daemon\r +> sudo systemctl start docker\r +```\r +\r +### Download workflow\r +Simply clone the repository and check out the latest tag (currently `1.0.8`):\r +\r +```\r +# Clone repository\r +> git clone https://github.com/inab/TCGA_benchmarking_dockers.git\r +\r +# Move to new directory\r +cd TCGA_benchmarking_workflow/\r +\r +# Checkout version 1.0.8\r +> git checkout 1.0.8 -b 1.0.8\r +```\r +\r +## Usage\r +The workflow can be run workflow in two different ways:\r +* Standard: `nextflow run main.nf -profile docker`\r +* Using the bash script that installs Java and Nextflow:`./run_local_nextflow.bash run main.nf -profile docker`.\r +\r 
+Arguments specifications:\r +```\r +Usage:\r +Run the pipeline with default parameters:\r +nextflow run main.nf -profile docker\r +\r +Run with user parameters:\r +nextflow run main.nf -profile docker --predictionsFile {driver.genes.file} --public_ref_dir {validation.reference.file} --participant_name {tool.name} --metrics_ref_dir {gold.standards.dir} --cancer_types {analyzed.cancer.types} --assess_dir {benchmark.data.dir} --results_dir {output.dir}\r +\r +Mandatory arguments:\r + --input List of cancer genes prediction\r + --community_id Name or OEB permanent ID for the benchmarking community\r + --public_ref_dir Directory with list of cancer genes used to validate the predictions\r + --participant_id Name of the tool used for prediction\r + --goldstandard_dir Dir that contains metrics reference datasets for all cancer types\r + --challenges_ids List of types of cancer selected by the user, separated by spaces\r + --assess_dir Dir where the data for the benchmark are stored\r +\r +Other options:\r + --validation_result The output directory where the results from validation step will be saved\r + --augmented_assess_dir Dir where the augmented data for the benchmark are stored\r + --assessment_results The output directory where the results from the computed metrics step will be saved\r + --outdir The output directory where the consolidation of the benchmark will be saved\r + --statsdir The output directory with nextflow statistics\r + --data_model_export_dir The output dir where json file with benchmarking data model contents will be saved\r + --otherdir The output directory where custom results will be saved (no directory inside)\r +Flags:\r + --help Display this message\r +```\r +\r +Default input parameters and Docker images to use for each step can be specified in the [config](./nextflow.config) file.\r +\r +**NOTE: In order to make your workflow compatible with the [OpenEBench VRE Nextflow Executor](https://github.com/inab/vre-process_nextflow-executor), 
please make sure to use the same parameter names in your workflow.**\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/244?version=3" ; + schema1:keywords "tcga, openebench, benchmarking" ; + schema1:license "https://spdx.org/licenses/LGPL-3.0" ; + schema1:name "OpenEBench TCGA Cancer Driver Genes benchmarking workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/244?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-protein-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.277.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/277/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 59528 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-14T10:15:55Z" ; + schema1:dateModified "2022-11-22T09:41:33Z" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. 
Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-protein-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/277?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_md_setup/galaxy/biobb_wf_md_setup.ga" ; + schema1:version 1 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-08-05 10:22:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8250 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:39Z" ; + schema1:dateModified "2024-06-11T12:54:39Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + schema1:datePublished "2024-03-06T19:27:22.822246" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Purge-duplicates-one-haplotype-VGP6b/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=20" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-08-05 10:24:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=20" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13448 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:50Z" ; + schema1:dateModified "2024-06-11T12:54:50Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=20" ; + schema1:version 20 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A pipeline for ortholog fetching and analysis" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1041?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/reportho" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/reportho" ; + schema1:sdDatePublished "2024-08-05 10:22:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1041/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11016 ; + schema1:creator ; + schema1:dateCreated "2024-06-12T03:03:07Z" ; + schema1:dateModified "2024-06-12T03:03:07Z" ; + schema1:description "A pipeline for ortholog fetching and analysis" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1041?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/reportho" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1041?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). 
\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.56.5" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_protein-complex_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:31:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/56/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 156554 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T08:04:53Z" ; + schema1:dateModified "2023-04-14T08:06:01Z" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/56?version=6" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + 
schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_protein-complex_md_setup/master/biobb_wf_protein-complex_md_setup/notebooks/biobb_Protein-Complex_MDsetup_tutorial.ipynb" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """A porting of the Trinity RNA assembly pipeline, https://trinityrnaseq.github.io, that uses Nextflow to handle the underlying sub-tasks.\r +This enables additional capabilities to better use HPC resources, such as packing of tasks to fill up nodes and use of node-local disks to improve I/O.\r +By design, the pipeline separates the workflow logic (main file) and the cluster-specific configuration (config files), improving portability.\r +\r +Based on a pipeline by Sydney Informatics Hub: https://github.com/Sydney-Informatics-Hub/SIH-Raijin-Trinity""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/114?version=2" ; + schema1:isBasedOn "https://github.com/marcodelapierre/trinity-nf" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Trinity RNA Assembly" ; + schema1:sdDatePublished "2024-08-05 10:33:18 +0100" ; + schema1:url "https://workflowhub.eu/workflows/114/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8372 ; + schema1:creator ; + schema1:dateCreated "2021-03-26T12:58:59Z" ; + schema1:dateModified "2023-01-16T13:49:39Z" ; + schema1:description """A porting of the Trinity RNA assembly pipeline, https://trinityrnaseq.github.io, that uses Nextflow to handle the underlying sub-tasks.\r +This enables additional capabilities to better use HPC resources, such as packing of tasks to fill up nodes and use of node-local disks to improve I/O.\r +By design, the pipeline separates the workflow logic (main file) and the cluster-specific configuration (config files), improving portability.\r +\r +Based on a pipeline by Sydney Informatics Hub: https://github.com/Sydney-Informatics-Hub/SIH-Raijin-Trinity""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/114?version=1" ; + schema1:isPartOf ; + schema1:keywords "Assembly, Transcriptomics, RNASEQ, Nextflow" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Trinity RNA Assembly" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/114?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=26" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=26" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12883 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:13Z" ; + schema1:dateModified "2024-06-11T12:55:13Z" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=26" ; + schema1:version 26 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1023?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/smrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/smrnaseq" ; + schema1:sdDatePublished "2024-08-05 10:23:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1023/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9386 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:18Z" ; + schema1:dateModified "2024-06-11T12:55:18Z" ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/1023?version=10" ; + schema1:keywords "small-rna, smrna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/smrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1023?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + schema1:datePublished "2024-08-02T09:12:59.305793" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-only-VGP3/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """With this galaxy pipeline you can use Salmonella sp. next generation sequencing results to predict bacterial AMR phenotypes and compare the results against gold standard Salmonella sp. phenotypes obtained from food.\r +\r +This pipeline is based on the work of the National Food Agency of Canada. \r +Doi: [10.3389/fmicb.2020.00549](https://doi.org/10.3389/fmicb.2020.00549)""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/407?version=1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Workflow 3: AMR - SeqSero2/SISTR" ; + schema1:sdDatePublished "2024-08-05 10:24:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/407/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 127987 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 37578 ; + schema1:dateCreated "2022-11-24T13:42:42Z" ; + schema1:dateModified "2024-05-07T09:45:19Z" ; + schema1:description """With this galaxy pipeline you can use Salmonella sp. next generation sequencing results to predict bacterial AMR phenotypes and compare the results against gold standard Salmonella sp. phenotypes obtained from food.\r +\r +This pipeline is based on the work of the National Food Agency of Canada. \r +Doi: [10.3389/fmicb.2020.00549](https://doi.org/10.3389/fmicb.2020.00549)""" ; + schema1:image ; + schema1:keywords "Bioinformatics, antimicrobial resistance" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Workflow 3: AMR - SeqSero2/SISTR" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/407?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for analysis of Molecular Pixelation assays" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1010?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/pixelator" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/pixelator" ; + schema1:sdDatePublished "2024-08-05 10:23:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1010/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10317 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:07Z" ; + schema1:dateModified "2024-06-11T12:55:07Z" ; + schema1:description "Pipeline for analysis of Molecular Pixelation assays" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1010?version=7" ; + schema1:keywords "molecular-pixelation, pixelator, pixelgen-technologies, proteins, single-cell, single-cell-omics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/pixelator" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1010?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and modern ancient DNA pipeline in Nextflow and with cloud support." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-08-05 10:23:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3623 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:49Z" ; + schema1:dateModified "2024-06-11T12:54:49Z" ; + schema1:description "A fully reproducible and modern ancient DNA pipeline in Nextflow and with cloud support." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1023?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/smrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/smrnaseq" ; + schema1:sdDatePublished "2024-08-05 10:23:12 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1023/ro_crate?version=10" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12414 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:19Z" ; + schema1:dateModified "2024-06-11T12:55:19Z" ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1023?version=10" ; + schema1:keywords "small-rna, smrna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/smrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1023?version=10" ; + schema1:version 10 . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-07-23T20:34:06.568221" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-Trio-phasing-VGP5/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . + + a schema1:Dataset ; + schema1:datePublished "2024-07-10T17:38:53.055554" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/bacterial_genome_annotation" ; + schema1:license "GPL-3.0-or-later" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "bacterial_genome_annotation/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/998?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/methylseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/methylseq" ; + schema1:sdDatePublished "2024-08-05 10:22:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/998/ro_crate?version=6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5432 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:01Z" ; + schema1:dateModified "2024-06-11T12:55:01Z" ; + schema1:description "Methylation (Bisulfite-Sequencing) Best Practice analysis pipeline, part of the nf-core community." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/998?version=14" ; + schema1:keywords "bisulfite-sequencing, dna-methylation, em-seq, epigenome, Epigenomics, methyl-seq, pbat, rrbs" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/methylseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/998?version=6" ; + schema1:version 6 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Summary\r +\r +This tutorial aims to illustrate the process of setting up a simulation system containing a protein in complex with a ligand, step by step, using the BioExcel Building Blocks library (biobb). The particular example used is the T4 lysozyme L99A/M102Q protein (PDB code 3HTB), in complex with the 2-propylphenol small molecule (3-letter Code JZ4).\r +\r +Workflow engine is a jupyter notebook. It can be run in binder, following the link given, or locally. Auxiliar libraries used are: nb\\_conda_kernels, nglview, ipywidgets, os, plotly, and simpletraj. Environment can be setup using the included environment.yml file.\r +\r +\r +# Parameters\r +\r +## Inputs\r +\r +Parameters needed to configure the workflow:\r +\r +* **pdbCode**: PDB code of the protein-ligand complex structure (e.g. 3HTB)\r +\r +* **ligandCode**: Small molecule 3-letter code for the ligand structure (e.g. 
JZ4)\r +\r +* **mol\\_charge**: Charge of the small molecule, needed to add hydrogen atoms.\r +\r +## Outputs\r +\r +Output files generated (named according to the input parameters given above):\r +\r +* **output\\_md\\_gro**: final structure of the MD setup protocol\r +\r +* **output\\_md\\_trr**: final trajectory of the MD setup protocol\r +\r +* **output\\_md\\_cpt**: final checkpoint file\r +\r +* **output\\_gppmd\\_tpr**: final tpr file\r +\r +* **output\\_genion\\_top\\_zip**: final topology of the MD system""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.56.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_protein-complex_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb) (jupyter notebook)" ; + schema1:sdDatePublished "2024-08-05 10:31:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/56/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 68187 ; + schema1:creator , + ; + schema1:dateCreated "2021-05-06T15:40:04Z" ; + schema1:dateModified "2021-05-13T08:15:05Z" ; + schema1:description """# Summary\r +\r +This tutorial aims to illustrate the process of setting up a simulation system containing a protein in complex with a ligand, step by step, using the BioExcel Building Blocks library (biobb). The particular example used is the T4 lysozyme L99A/M102Q protein (PDB code 3HTB), in complex with the 2-propylphenol small molecule (3-letter Code JZ4).\r +\r +Workflow engine is a jupyter notebook. It can be run in binder, following the link given, or locally. Auxiliar libraries used are: nb\\_conda_kernels, nglview, ipywidgets, os, plotly, and simpletraj. 
Environment can be setup using the included environment.yml file.\r +\r +\r +# Parameters\r +\r +## Inputs\r +\r +Parameters needed to configure the workflow:\r +\r +* **pdbCode**: PDB code of the protein-ligand complex structure (e.g. 3HTB)\r +\r +* **ligandCode**: Small molecule 3-letter code for the ligand structure (e.g. JZ4)\r +\r +* **mol\\_charge**: Charge of the small molecule, needed to add hydrogen atoms.\r +\r +## Outputs\r +\r +Output files generated (named according to the input parameters given above):\r +\r +* **output\\_md\\_gro**: final structure of the MD setup protocol\r +\r +* **output\\_md\\_trr**: final trajectory of the MD setup protocol\r +\r +* **output\\_md\\_cpt**: final checkpoint file\r +\r +* **output\\_gppmd\\_tpr**: final tpr file\r +\r +* **output\\_genion\\_top\\_zip**: final topology of the MD system""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/56?version=6" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb) (jupyter notebook)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/56?version=2" ; + schema1:version 2 . + + a schema1:MediaObject ; + schema1:about ; + schema1:author , + ; + schema1:encodingFormat "text/markdown" . + + a schema1:Dataset ; + schema1:creator , + , + , + , + , + , + ; + schema1:datePublished "2023-04-13" ; + schema1:description "Study protocol" ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.7551181" ; + schema1:name "BY-COVID - WP5 - Baseline Use Case: SARS-CoV-2 vaccine effectiveness assessment - Study protocol" ; + schema1:url "https://zenodo.org/record/7825979" ; + schema1:version "1.0.3" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.130.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Amber Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:25:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/130/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 66118 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T08:29:40Z" ; + schema1:dateModified "2023-04-14T08:30:44Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/130?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Amber Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://raw.githubusercontent.com/bioexcel/biobb_wf_amber_md_setup/master/biobb_wf_amber_md_setup/notebooks/mdsetup/biobb_amber_setup_notebook.ipynb" ; + schema1:version 3 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 22763 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + schema1:datePublished "2024-07-15T07:44:45.515058" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-sr/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# A workflow for marine Genomic Observatories data analysis\r +## An EOSC-Life project\r +\r +The workflows developed in the framework of this project are based on `pipeline-v5` of the MGnify resource. 
\r +This branch is a child of the [`pipeline_5.1`](https://github.com/EBI-Metagenomics/pipeline-v5) branch \r +that contains a part of the CWL descriptions of the MGnify pipeline version 5.1.\r +\r +The following comes from the initial repo and describes how to get the databases required.\r +\r +---\r +![logo](https://raw.githubusercontent.com/hariszaf/metaGOflow-use-case/gh-pages/assets/img/metaGOflow_logo_italics.png)\r +\r +\r +## An EOSC-Life project\r +\r +The workflows developed in the framework of this project are based on `pipeline-v5` of the MGnify resource.\r +\r +> This branch is a child of the [`pipeline_5.1`](https://github.com/hariszaf/pipeline-v5/tree/pipeline_5.1) branch\r +> that contains all CWL descriptions of the MGnify pipeline version 5.1.\r +\r +## Dependencies\r +\r +To run metaGOflow you need to make sure you have the following set on your computing environmnet first:\r +\r +- python3 [v 3.8+]\r +- [Docker](https://www.docker.com) [v 19.+] or [Singularity](https://apptainer.org) [v 3.7.+]/[Apptainer](https://apptainer.org) [v 1.+]\r +- [cwltool](https://github.com/common-workflow-language/cwltool) [v 3.+]\r +- [rdflib](https://rdflib.readthedocs.io/en/stable/) [v 6.+]\r +- [rdflib-jsonld](https://pypi.org/project/rdflib-jsonld/) [v 0.6.2]\r +- [ro-crate-py](https://github.com/ResearchObject/ro-crate-py) [v 0.7.0]\r +- [pyyaml](https://pypi.org/project/PyYAML/) [v 6.0]\r +- [Node.js](https://nodejs.org/) [v 10.24.0+]\r +- Available storage ~235GB for databases\r +\r +### Storage while running\r +\r +Depending on the analysis you are about to run, disk requirements vary.\r +Indicatively, you may have a look at the metaGOflow publication for computing resources used in various cases.\r +\r +## Installation\r +\r +### Get the EOSC-Life marine GOs workflow\r +\r +```bash\r +git clone https://github.com/emo-bon/MetaGOflow\r +cd MetaGOflow\r +```\r +\r +### Download necessary databases (~235GB)\r +\r +You can download databases for the EOSC-Life 
GOs workflow by running the\r +`download_dbs.sh` script under the `Installation` folder.\r +\r +```bash\r +bash Installation/download_dbs.sh -f [Output Directory e.g. ref-dbs] \r +```\r +If you have one or more already in your system, then create a symbolic link pointing\r +at the `ref-dbs` folder or at one of its subfolders/files.\r +\r +The final structure of the DB directory should be like the following:\r +\r +````bash\r +user@server:~/MetaGOflow: ls ref-dbs/\r +db_kofam/ diamond/ eggnog/ GO-slim/ interproscan-5.57-90.0/ kegg_pathways/ kofam_ko_desc.tsv Rfam/ silva_lsu/ silva_ssu/\r +````\r +\r +## How to run\r +\r +### Ensure that `Node.js` is installed on your system before running metaGOflow\r +\r +If you have root access on your system, you can run the commands below to install it:\r +\r +##### DEBIAN/UBUNTU\r +```bash\r +sudo apt-get update -y\r +sudo apt-get install -y nodejs\r +```\r +\r +##### RH/CentOS\r +```bash\r +sudo yum install rh-nodejs (e.g. rh-nodejs10)\r +```\r +\r +### Set up the environment\r +\r +#### Run once - Setup environment\r +\r +- ```bash\r + conda create -n EOSC-CWL python=3.8\r + ```\r +\r +- ```bash\r + conda activate EOSC-CWL\r + ```\r +\r +- ```bash\r + pip install cwlref-runner cwltool[all] rdflib-jsonld rocrate pyyaml\r +\r + ```\r +\r +#### Run every time\r +\r +```bash\r +conda activate EOSC-CWL\r +``` \r +\r +### Run the workflow\r +\r +- Edit the `config.yml` file to set the parameter values of your choice. For selecting all the steps, then set to `true` the variables in lines [2-6].\r +\r +#### Using Singularity\r +\r +##### Standalone\r +- run:\r + ```bash\r + ./run_wf.sh -s -n osd-short -d short-test-case -f test_input/wgs-paired-SRR1620013_1.fastq.gz -r test_input/wgs-paired-SRR1620013_2.fastq.gz\r + ``\r +\r +##### Using a cluster with a queueing system (e.g. SLURM)\r +\r +- Create a job file (e.g., SBATCH file)\r +\r +- Enable Singularity, e.g. 
module load Singularity & all other dependencies \r +\r +- Add the run line to the job file\r +\r +\r +#### Using Docker\r +\r +##### Standalone\r +- run:\r + ``` bash\r + ./run_wf.sh -n osd-short -d short-test-case -f test_input/wgs-paired-SRR1620013_1.fastq.gz -r test_input/wgs-paired-SRR1620013_2.fastq.gz\r + ```\r + HINT: If you are using Docker, you may need to run the above command without the `-s' flag.\r +\r +## Testing samples\r +The samples are available in the `test_input` folder.\r +\r +We provide metaGOflow with partial samples from the Human Metagenome Project ([SRR1620013](https://www.ebi.ac.uk/ena/browser/view/SRR1620013) and [SRR1620014](https://www.ebi.ac.uk/ena/browser/view/SRR1620014))\r +They are partial as only a small part of their sequences have been kept, in terms for the pipeline to test in a fast way. \r +\r +\r +## Hints and tips\r +\r +1. In case you are using Docker, it is strongly recommended to **avoid** installing it through `snap`.\r +\r +2. `RuntimeError`: slurm currently does not support shared caching, because it does not support cleaning up a worker\r + after the last job finishes.\r + Set the `--disableCaching` flag if you want to use this batch system.\r +\r +3. In case you are having errors like:\r +\r +```\r +cwltool.errors.WorkflowException: Singularity is not available for this tool\r +```\r +\r +You may run the following command:\r +\r +```\r +singularity pull --force --name debian:stable-slim.sif docker://debian:stable-sli\r +```\r +\r +## Contribution\r +\r +To make contribution to the project a bit easier, all the MGnify `conditionals` and `subworkflows` under\r +the `workflows/` directory that are not used in the metaGOflow framework, have been removed. 
\r +However, all the MGnify `tools/` and `utils/` are available in this repo, even if they are not invoked in the current\r +version of metaGOflow.\r +This way, we hope we encourage people to implement their own `conditionals` and/or `subworkflows` by exploiting the\r +currently supported `tools` and `utils` as well as by developing new `tools` and/or `utils`.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/384?version=1" ; + schema1:isBasedOn "https://github.com/emo-bon/MetaGOflow.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for A workflow for marine Genomic Observatories data analysis" ; + schema1:sdDatePublished "2024-08-05 10:30:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/384/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5329 ; + schema1:creator , + ; + schema1:dateCreated "2022-09-19T18:00:20Z" ; + schema1:dateModified "2023-05-16T20:35:49Z" ; + schema1:description """# A workflow for marine Genomic Observatories data analysis\r +## An EOSC-Life project\r +\r +The workflows developed in the framework of this project are based on `pipeline-v5` of the MGnify resource. 
\r +This branch is a child of the [`pipeline_5.1`](https://github.com/EBI-Metagenomics/pipeline-v5) branch \r +that contains a part of the CWL descriptions of the MGnify pipeline version 5.1.\r +\r +The following comes from the initial repo and describes how to get the databases required.\r +\r +---\r +![logo](https://raw.githubusercontent.com/hariszaf/metaGOflow-use-case/gh-pages/assets/img/metaGOflow_logo_italics.png)\r +\r +\r +## An EOSC-Life project\r +\r +The workflows developed in the framework of this project are based on `pipeline-v5` of the MGnify resource.\r +\r +> This branch is a child of the [`pipeline_5.1`](https://github.com/hariszaf/pipeline-v5/tree/pipeline_5.1) branch\r +> that contains all CWL descriptions of the MGnify pipeline version 5.1.\r +\r +## Dependencies\r +\r +To run metaGOflow you need to make sure you have the following set on your computing environmnet first:\r +\r +- python3 [v 3.8+]\r +- [Docker](https://www.docker.com) [v 19.+] or [Singularity](https://apptainer.org) [v 3.7.+]/[Apptainer](https://apptainer.org) [v 1.+]\r +- [cwltool](https://github.com/common-workflow-language/cwltool) [v 3.+]\r +- [rdflib](https://rdflib.readthedocs.io/en/stable/) [v 6.+]\r +- [rdflib-jsonld](https://pypi.org/project/rdflib-jsonld/) [v 0.6.2]\r +- [ro-crate-py](https://github.com/ResearchObject/ro-crate-py) [v 0.7.0]\r +- [pyyaml](https://pypi.org/project/PyYAML/) [v 6.0]\r +- [Node.js](https://nodejs.org/) [v 10.24.0+]\r +- Available storage ~235GB for databases\r +\r +### Storage while running\r +\r +Depending on the analysis you are about to run, disk requirements vary.\r +Indicatively, you may have a look at the metaGOflow publication for computing resources used in various cases.\r +\r +## Installation\r +\r +### Get the EOSC-Life marine GOs workflow\r +\r +```bash\r +git clone https://github.com/emo-bon/MetaGOflow\r +cd MetaGOflow\r +```\r +\r +### Download necessary databases (~235GB)\r +\r +You can download databases for the EOSC-Life 
GOs workflow by running the\r +`download_dbs.sh` script under the `Installation` folder.\r +\r +```bash\r +bash Installation/download_dbs.sh -f [Output Directory e.g. ref-dbs] \r +```\r +If you have one or more already in your system, then create a symbolic link pointing\r +at the `ref-dbs` folder or at one of its subfolders/files.\r +\r +The final structure of the DB directory should be like the following:\r +\r +````bash\r +user@server:~/MetaGOflow: ls ref-dbs/\r +db_kofam/ diamond/ eggnog/ GO-slim/ interproscan-5.57-90.0/ kegg_pathways/ kofam_ko_desc.tsv Rfam/ silva_lsu/ silva_ssu/\r +````\r +\r +## How to run\r +\r +### Ensure that `Node.js` is installed on your system before running metaGOflow\r +\r +If you have root access on your system, you can run the commands below to install it:\r +\r +##### DEBIAN/UBUNTU\r +```bash\r +sudo apt-get update -y\r +sudo apt-get install -y nodejs\r +```\r +\r +##### RH/CentOS\r +```bash\r +sudo yum install rh-nodejs (e.g. rh-nodejs10)\r +```\r +\r +### Set up the environment\r +\r +#### Run once - Setup environment\r +\r +- ```bash\r + conda create -n EOSC-CWL python=3.8\r + ```\r +\r +- ```bash\r + conda activate EOSC-CWL\r + ```\r +\r +- ```bash\r + pip install cwlref-runner cwltool[all] rdflib-jsonld rocrate pyyaml\r +\r + ```\r +\r +#### Run every time\r +\r +```bash\r +conda activate EOSC-CWL\r +``` \r +\r +### Run the workflow\r +\r +- Edit the `config.yml` file to set the parameter values of your choice. For selecting all the steps, then set to `true` the variables in lines [2-6].\r +\r +#### Using Singularity\r +\r +##### Standalone\r +- run:\r + ```bash\r + ./run_wf.sh -s -n osd-short -d short-test-case -f test_input/wgs-paired-SRR1620013_1.fastq.gz -r test_input/wgs-paired-SRR1620013_2.fastq.gz\r + ``\r +\r +##### Using a cluster with a queueing system (e.g. SLURM)\r +\r +- Create a job file (e.g., SBATCH file)\r +\r +- Enable Singularity, e.g. 
module load Singularity & all other dependencies \r +\r +- Add the run line to the job file\r +\r +\r +#### Using Docker\r +\r +##### Standalone\r +- run:\r + ``` bash\r + ./run_wf.sh -n osd-short -d short-test-case -f test_input/wgs-paired-SRR1620013_1.fastq.gz -r test_input/wgs-paired-SRR1620013_2.fastq.gz\r + ```\r + HINT: If you are using Docker, you may need to run the above command without the `-s' flag.\r +\r +## Testing samples\r +The samples are available in the `test_input` folder.\r +\r +We provide metaGOflow with partial samples from the Human Metagenome Project ([SRR1620013](https://www.ebi.ac.uk/ena/browser/view/SRR1620013) and [SRR1620014](https://www.ebi.ac.uk/ena/browser/view/SRR1620014))\r +They are partial as only a small part of their sequences have been kept, in terms for the pipeline to test in a fast way. \r +\r +\r +## Hints and tips\r +\r +1. In case you are using Docker, it is strongly recommended to **avoid** installing it through `snap`.\r +\r +2. `RuntimeError`: slurm currently does not support shared caching, because it does not support cleaning up a worker\r + after the last job finishes.\r + Set the `--disableCaching` flag if you want to use this batch system.\r +\r +3. In case you are having errors like:\r +\r +```\r +cwltool.errors.WorkflowException: Singularity is not available for this tool\r +```\r +\r +You may run the following command:\r +\r +```\r +singularity pull --force --name debian:stable-slim.sif docker://debian:stable-sli\r +```\r +\r +## Contribution\r +\r +To make contribution to the project a bit easier, all the MGnify `conditionals` and `subworkflows` under\r +the `workflows/` directory that are not used in the metaGOflow framework, have been removed. 
\r +However, all the MGnify `tools/` and `utils/` are available in this repo, even if they are not invoked in the current\r +version of metaGOflow.\r +This way, we hope we encourage people to implement their own `conditionals` and/or `subworkflows` by exploiting the\r +currently supported `tools` and `utils` as well as by developing new `tools` and/or `utils`.\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/384?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "A workflow for marine Genomic Observatories data analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/384?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.291.2" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python GMX OPLS/AA Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-08-05 10:32:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/291/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1758 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-24T14:20:27Z" ; + schema1:dateModified "2023-01-16T13:58:35Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/291?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python GMX OPLS/AA Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/291?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + schema1:datePublished "2021-12-03T15:25:30.767225" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/fragment-based-docking-scoring" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "fragment-based-docking-scoring/main" ; + schema1:sdDatePublished "2021-12-04 03:00:49 +0000" ; + schema1:softwareVersion "v0.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A pipeline for ortholog fetching and analysis" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1041?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/reportho" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/reportho" ; + schema1:sdDatePublished "2024-08-05 10:22:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1041/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11039 ; + schema1:creator ; + schema1:dateCreated "2024-07-19T03:02:46Z" ; + schema1:dateModified "2024-07-19T03:02:46Z" ; + schema1:description "A pipeline for ortholog fetching and analysis" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1041?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/reportho" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1041?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible ancient and modern DNA pipeline in Nextflow and with cloud support." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "NOASSERTION" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-08-05 10:23:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=9" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9505 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:50Z" ; + schema1:dateModified "2024-06-11T12:54:50Z" ; + schema1:description "A fully reproducible ancient and modern DNA pipeline in Nextflow and with cloud support." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An nf-core demo pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1055?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/demo" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/demo" ; + schema1:sdDatePublished "2024-08-05 10:22:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1055/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9934 ; + schema1:creator ; + schema1:dateCreated "2024-06-21T03:02:42Z" ; + schema1:dateModified "2024-06-21T03:02:42Z" ; + schema1:description "An nf-core demo pipeline" ; + schema1:keywords "demo, minimal-example, training" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/demo" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1055?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/634?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Workflow 1: AbritAMR" ; + schema1:sdDatePublished "2024-08-05 10:24:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/634/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2073 ; + schema1:dateCreated "2023-10-31T10:42:03Z" ; + schema1:dateModified "2024-05-07T09:44:07Z" ; + schema1:description "" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Workflow 1: AbritAMR" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/634?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """MGnify (http://www.ebi.ac.uk/metagenomics) provides a free to use platform for the assembly, analysis and archiving of microbiome data derived from sequencing microbial populations that are present in particular environments. Over the past 2 years, MGnify (formerly EBI Metagenomics) has more than doubled the number of publicly available analysed datasets held within the resource. Recently, an updated approach to data analysis has been unveiled (version 5.0), replacing the previous single pipeline with multiple analysis pipelines that are tailored according to the input data, and that are formally described using the Common Workflow Language, enabling greater provenance, reusability, and reproducibility. MGnify's new analysis pipelines offer additional approaches for taxonomic assertions based on ribosomal internal transcribed spacer regions (ITS1/2) and expanded protein functional annotations. Biochemical pathways and systems predictions have also been added for assembled contigs. MGnify's growing focus on the assembly of metagenomic data has also seen the number of datasets it has assembled and analysed increase six-fold. The non-redundant protein database constructed from the proteins encoded by these assemblies now exceeds 1 billion sequences. 
Meanwhile, a newly developed contig viewer provides fine-grained visualisation of the assembled contigs and their enriched annotations.\r +\r +Documentation: https://docs.mgnify.org/en/latest/analysis.html#assembly-analysis-pipeline""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/360?version=1" ; + schema1:isBasedOn "https://github.com/EBI-Metagenomics/pipeline-v5.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for MGnify - assembly analysis pipeline" ; + schema1:sdDatePublished "2024-08-05 10:32:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/360/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 67287 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7995 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2022-06-07T07:41:18Z" ; + schema1:dateModified "2022-06-07T08:00:07Z" ; + schema1:description """MGnify (http://www.ebi.ac.uk/metagenomics) provides a free to use platform for the assembly, analysis and archiving of microbiome data derived from sequencing microbial populations that are present in particular environments. Over the past 2 years, MGnify (formerly EBI Metagenomics) has more than doubled the number of publicly available analysed datasets held within the resource. Recently, an updated approach to data analysis has been unveiled (version 5.0), replacing the previous single pipeline with multiple analysis pipelines that are tailored according to the input data, and that are formally described using the Common Workflow Language, enabling greater provenance, reusability, and reproducibility. 
MGnify's new analysis pipelines offer additional approaches for taxonomic assertions based on ribosomal internal transcribed spacer regions (ITS1/2) and expanded protein functional annotations. Biochemical pathways and systems predictions have also been added for assembled contigs. MGnify's growing focus on the assembly of metagenomic data has also seen the number of datasets it has assembled and analysed increase six-fold. The non-redundant protein database constructed from the proteins encoded by these assemblies now exceeds 1 billion sequences. Meanwhile, a newly developed contig viewer provides fine-grained visualisation of the assembled contigs and their enriched annotations.\r +\r +Documentation: https://docs.mgnify.org/en/latest/analysis.html#assembly-analysis-pipeline""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/360?version=1" ; + schema1:keywords "Metagenomics, Annotation, workflow, CWL" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "MGnify - assembly analysis pipeline" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/360?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Snakemake workflow: dna-seq-varlociraptor\r +\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥6.3.0-brightgreen.svg)](https://snakemake.github.io)\r +[![GitHub actions status](https://github.com/snakemake-workflows/dna-seq-varlociraptor/workflows/Tests/badge.svg?branch=master)](https://github.com/snakemake-workflows/dna-seq-varlociraptor/actions?query=branch%3Amaster+workflow%3ATests)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.4675661.svg)](https://doi.org/10.5281/zenodo.4675661)\r +\r +\r +A Snakemake workflow for calling small and structural variants under any kind of scenario (tumor/normal, tumor/normal/relapse, germline, pedigree, populations) via the unified statistical model of [Varlociraptor](https://varlociraptor.github.io).\r +\r +\r +## Usage\r +\r +The usage of this workflow is described in the [Snakemake Workflow Catalog](https://snakemake.github.io/snakemake-workflow-catalog/?usage=snakemake-workflows%2Fdna-seq-varlociraptor).\r +\r +If you use this workflow in a paper, don't forget to give credits to the authors by citing the URL of this (original) repository and its DOI (see above).\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/686?version=1" ; + schema1:isBasedOn "https://github.com/snakemake-workflows/dna-seq-varlociraptor" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for dna-seq-varlociraptor" ; + schema1:sdDatePublished "2024-08-05 10:26:09 +0100" ; + schema1:url "https://workflowhub.eu/workflows/686/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1287 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-12-14T08:15:08Z" ; + schema1:dateModified "2023-12-14T08:15:08Z" ; + schema1:description """# Snakemake workflow: dna-seq-varlociraptor\r +\r +[![Snakemake](https://img.shields.io/badge/snakemake-≥6.3.0-brightgreen.svg)](https://snakemake.github.io)\r +[![GitHub actions status](https://github.com/snakemake-workflows/dna-seq-varlociraptor/workflows/Tests/badge.svg?branch=master)](https://github.com/snakemake-workflows/dna-seq-varlociraptor/actions?query=branch%3Amaster+workflow%3ATests)\r +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.4675661.svg)](https://doi.org/10.5281/zenodo.4675661)\r +\r +\r +A Snakemake workflow for calling small and structural variants under any kind of scenario (tumor/normal, tumor/normal/relapse, germline, pedigree, populations) via the unified statistical model of [Varlociraptor](https://varlociraptor.github.io).\r +\r +\r +## Usage\r +\r +The usage of this workflow is described in the [Snakemake Workflow Catalog](https://snakemake.github.io/snakemake-workflow-catalog/?usage=snakemake-workflows%2Fdna-seq-varlociraptor).\r +\r +If you use this workflow in a paper, don't forget to give credits to the authors by citing the URL of this (original) repository and its DOI (see above).\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/686?version=1" ; + schema1:keywords "Bioinformatics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "dna-seq-varlociraptor" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/686?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Image Mass Cytometry analysis pipeline." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/992?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/imcyto" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/imcyto" ; + schema1:sdDatePublished "2024-08-05 10:23:44 +0100" ; + schema1:url "https://workflowhub.eu/workflows/992/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4292 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:57Z" ; + schema1:dateModified "2024-06-11T12:54:57Z" ; + schema1:description "Image Mass Cytometry analysis pipeline." ; + schema1:keywords "cytometry, image-analysis, image-processing, image-segmentation" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/imcyto" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/992?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2023-10-19T16:22:19.479397" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-only-VGP3/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.14" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 2070783 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ChIP-seq peak-calling and differential analysis pipeline." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/971?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/chipseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/chipseq" ; + schema1:sdDatePublished "2024-08-05 10:24:13 +0100" ; + schema1:url "https://workflowhub.eu/workflows/971/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5458 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:44Z" ; + schema1:dateModified "2024-06-11T12:54:44Z" ; + schema1:description "ChIP-seq peak-calling and differential analysis pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/971?version=5" ; + schema1:keywords "ChIP, ChIP-seq, chromatin-immunoprecipitation, macs2, peak-calling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/chipseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/971?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + schema1:datePublished "2024-03-26T19:39:39.619031" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-Trio-phasing-VGP5/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . 
+ + a schema1:Dataset ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-ont-artic-variant-calling" ; + schema1:mainEntity ; + schema1:name "sars-cov-2-ont-artic-variant-calling/COVID-19-ARTIC-ONT (v0.2.1)" ; + schema1:sdDatePublished "2021-07-27 03:00:55 +0100" ; + schema1:softwareVersion "v0.2.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 45247 ; + schema1:name "COVID-19-ARTIC-ONT" ; + schema1:programmingLanguage . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Conformational Transitions calculations tutorial using BioExcel Building Blocks (biobb) and GOdMD\r +\r +This tutorial aims to illustrate the process of computing a conformational transition between two known structural conformations of a protein, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.549.1" ; + schema1:isBasedOn 
"https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_godmd/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Protein Conformational Transitions calculations tutorial" ; + schema1:sdDatePublished "2024-08-05 10:29:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/549/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 31502 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8991 ; + schema1:creator , + ; + schema1:dateCreated "2023-08-02T09:54:17Z" ; + schema1:dateModified "2023-08-02T10:03:39Z" ; + schema1:description """# Protein Conformational Transitions calculations tutorial using BioExcel Building Blocks (biobb) and GOdMD\r +\r +This tutorial aims to illustrate the process of computing a conformational transition between two known structural conformations of a protein, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license 
"https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Protein Conformational Transitions calculations tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_godmd/cwl/workflow.cwl" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-08-05 10:23:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=10" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11079 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:50Z" ; + schema1:dateModified "2024-06-11T12:54:50Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=10" ; + schema1:version 10 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1023?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/smrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/smrnaseq" ; + schema1:sdDatePublished "2024-08-05 10:23:12 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1023/ro_crate?version=8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11092 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:18Z" ; + schema1:dateModified "2024-06-11T12:55:18Z" ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1023?version=10" ; + schema1:keywords "small-rna, smrna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/smrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1023?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:description "Dating the most recent common ancestor (MRCA) of SARS-CoV-2. The workflow is used to extract full length sequences of SARS-CoV-2, tidy up their names in FASTA files, produce a multiple sequences alignment and compute a maximum likelihood tree. 
More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/6?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genomics - MRCA analysis" ; + schema1:sdDatePublished "2024-08-05 10:33:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/6/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 3760 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17202 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T12:44:38Z" ; + schema1:dateModified "2023-01-16T13:39:57Z" ; + schema1:description "Dating the most recent common ancestor (MRCA) of SARS-CoV-2. The workflow is used to extract full length sequences of SARS-CoV-2, tidy up their names in FASTA files, produce a multiple sequences alignment and compute a maximum likelihood tree. More info can be found at https://covid19.galaxyproject.org/genomics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Genomics - MRCA analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/6?version=1" ; + schema1:version 1 ; + ns1:input . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 7247 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Global Run-On sequencing analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1004?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/nascent" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nascent" ; + schema1:sdDatePublished "2024-08-05 10:23:31 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1004/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8566 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:06Z" ; + schema1:dateModified "2024-06-11T12:55:06Z" ; + schema1:description "Global Run-On sequencing analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1004?version=4" ; + schema1:keywords "gro-seq, nascent, pro-seq, rna, transcription, tss" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nascent" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1004?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Quantitative Mass Spectrometry nf-core workflow" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1013?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/quantms" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/quantms" ; + schema1:sdDatePublished "2024-08-05 10:23:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1013/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12787 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:08Z" ; + schema1:dateModified "2024-06-11T12:55:08Z" ; + schema1:description "Quantitative Mass Spectrometry nf-core workflow" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1013?version=3" ; + schema1:keywords "dia, lfq, mass-spec, mass-spectrometry, openms, proteogenomics, Proteomics, tmt" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/quantms" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1013?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 146760 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "BioTranslator performs sequentially pathway analysis and gene prioritization: A specific operator is executed for each task to translate the input gene set into semantic terms and pinpoint the pivotal-role genes on the derived semantic network. The output consists of the set of statistically significant semantic terms and the associated hub genes (the gene signature), prioritized according to their involvement in the underlying semantic topology." 
; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/189?version=2" ; + schema1:isBasedOn "http://www.biotranslator.gr:8080/u/thodk/w/biotranslator-workflow" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for BioTranslator Workflow" ; + schema1:sdDatePublished "2024-08-05 10:33:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/189/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3955 ; + schema1:dateCreated "2021-09-24T14:38:37Z" ; + schema1:dateModified "2021-09-24T15:15:04Z" ; + schema1:description "BioTranslator performs sequentially pathway analysis and gene prioritization: A specific operator is executed for each task to translate the input gene set into semantic terms and pinpoint the pivotal-role genes on the derived semantic network. The output consists of the set of statistically significant semantic terms and the associated hub genes (the gene signature), prioritized according to their involvement in the underlying semantic topology." ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/189?version=2" ; + schema1:keywords "Semantic Network Analysis, Gene Prioritization, Pathway Analysis, Biomedical Ontologies, Semantic Interpretation" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "BioTranslator Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/189?version=2" ; + schema1:version 2 ; + ns1:input ; + ns1:output , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 44291 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Introduction\r +\r +This repository contains all the custom scripts used in the evaluation and comparison of [Katdetectr](https://github.com/ErasmusMC-CCBC/evaluation_katdetectr/tree/main) as described in the corresponding Technical Note (under submission).\r +\r +# Usage\r +\r +All required files were deposited on [Zenodo](https://zenodo.org/record/6623289#.YqBxHi8Rr0o%5D).\r +These can directly be downloaded using `zen4R` and be used as input.\r +\r +```R\r +# Increase the timeout (due to some large files).\r +options(timeout=5000)\r +\r +# Download the required files into the data/ folder (~1GB).\r +zen4R::download_zenodo(doi = "10.5281/zenodo.6810477", path = 'data/')\r +```""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.500.1" ; + schema1:isBasedOn "https://github.com/ErasmusMC-CCBC/evaluation_katdetectr/" ; + schema1:license "LGPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Performance evaluation of Katdetectr and other kataegis detection packages" ; + schema1:sdDatePublished "2024-08-05 10:30:10 +0100" ; + schema1:url "https://workflowhub.eu/workflows/500/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 19301 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-07T17:32:16Z" ; + schema1:dateModified "2023-06-07T17:52:17Z" ; + schema1:description """# Introduction\r +\r +This repository contains all the custom scripts used in the evaluation and comparison of [Katdetectr](https://github.com/ErasmusMC-CCBC/evaluation_katdetectr/tree/main) as described in the corresponding Technical Note (under submission).\r +\r +# Usage\r +\r +All required files were deposited on [Zenodo](https://zenodo.org/record/6623289#.YqBxHi8Rr0o%5D).\r +These can directly be downloaded using `zen4R` and be used as input.\r +\r +```R\r +# Increase the timeout (due to some large files).\r +options(timeout=5000)\r +\r +# Download the required files into the data/ folder (~1GB).\r +zen4R::download_zenodo(doi = "10.5281/zenodo.6810477", path = 'data/')\r +```""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/LGPL-3.0" ; + schema1:name "Performance evaluation of Katdetectr and other kataegis detection packages" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/500?version=1" ; + schema1:version 1 . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 902 ; + schema1:programmingLanguage . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-08-05 10:23:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10237 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:58Z" ; + schema1:dateModified "2024-06-11T12:54:58Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=7" ; + schema1:version 7 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 28082 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 3 ; + schema1:dateModified "2024-01-25T15:45:26" ; + schema1:name "energy.selection" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3 ; + schema1:dateModified "2024-01-25T15:45:26" ; + schema1:name "genion.group" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1044 ; + schema1:dateModified "2024-01-25T15:45:26" ; + schema1:name "ions.mdp" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 971 ; + schema1:dateModified "2024-01-25T15:45:26" ; + schema1:name "minim.mdp" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 109917 ; + schema1:dateModified "2024-01-25T15:45:26" ; + schema1:name "1aki.pdb" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2998215 ; + schema1:dateModified "2024-01-25T15:45:26" ; + schema1:name "1u3m.pdb" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2918025 ; + schema1:dateModified "2024-01-25T15:45:26" ; + schema1:name "1xyw.pdb" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88246 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1aki.gro" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577530 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1aki.top" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1280044 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1aki_em.tpr" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1aki_em_energy.edr" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1279304 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1aki_ions.tpr" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88246 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1aki_newbox.gro" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2123 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1aki_potential.xvg" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1525186 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1aki_solv.gro" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1524475 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1aki_solv_ions.gro" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 82046 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1u3m.gro" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 537864 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1u3m.top" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1416272 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1u3m_em.tpr" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1u3m_em_energy.edr" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1415196 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1u3m_ions.tpr" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 82046 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1u3m_newbox.gro" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2125 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1u3m_potential.xvg" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1837046 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1u3m_solv.gro" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1836965 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1u3m_solv_ions.gro" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 80112 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1xyw.gro" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 523940 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1xyw.top" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1408872 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1xyw_em.tpr" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1xyw_em_energy.edr" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1407844 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1xyw_ions.tpr" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 80112 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1xyw_newbox.gro" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2100 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1xyw_potential.xvg" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1845237 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1xyw_solv.gro" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1845066 ; + schema1:dateModified "2024-01-25T16:09:05" ; + schema1:name "1xyw_solv_ions.gro" ; + schema1:sdDatePublished "2024-01-25T16:09:26+00:00" . + + a schema1:Dataset ; + schema1:datePublished "2024-04-23T15:13:06.571609" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Scaffolding-HiC-VGP8/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# GBMatch_CNN\r +Work in progress...\r +Predicting TS & risk from glioblastoma whole slide images\r +\r +# Reference\r +Upcoming paper: stay tuned...\r +\r +# Dependencies\r +python 3.7.7\r +\r +randaugment by Khrystyna Faryna: https://github.com/tovaroe/pathology-he-auto-augment\r +\r +tensorflow 2.1.0\r +\r +scikit-survival 0.13.1\r +\r +pandas 1.0.3\r +\r +lifelines 0.25.0\r +\r +# Description\r +The pipeline implemented here predicts transcriptional subtypes and survival of glioblastoma patients based on H&E stained whole slide scans. Sample data is provided in this repository. To test the basic functionality with 5-fold-CV simply run train_model_OS.py (for survival) or train_model_TS.py (for transcriptional subtypes). 
Please note that this will not reproduce the results from the manuscript, as only a small fraction of the image data can be provided in this repository due to size constraints. In order to reproduce the results from the manuscript, please refer to the step by step guide below. The whole dataset can be accessed at https://www.medical-epigenomics.org/papers/GBMatch/.\r +If you wish to adopt this pipeline for your own use, please be sure to set the correct parameters in config.py.\r +\r +Moreover, we provide a fully trained model in gbm_predictor.py for predicting new samples (supported WSI formats are ndpi and svs). To use GBMPredictor, simply initialize by calling \r +`gbm_predictor = GBMPredictor()`\r +and predict your sample by calling\r +`(predicted_TS, risk_group, median_riskscore) = gbm_predictor.predict(*path_to_slidescan*)`\r +Heatmaps and detailed results will be automatically saved in a subfolder in your sample path.\r +\r +# Reproducing the manuscript results - step by step guide\r +\r +## Training the CNN model\r +1. Clone this repository and install the dependencies in your environment. Make sure that the path for randaugment is correctly set in the config.py (should be correct by default).\r +2. Download all included image tiles at https://doi.org/10.5281/zenodo.8358673 and replace the data/training/image_tiles folder with the image_tiles folder from zenodo.\r +3. Run train_model_OS.py and/or train_model_TS.py to reproduce the training with 5-fold cross validation. Models and results will be saved in the data/models folder.\r +4. Run train_final_model_OS.py and/or train_final_model_TS.py to train the final model on the whole training dataset.\r +\r +## Validate the CNN model on TCGA data\r +1. Download scans and clinical data of the TCGA glioblastoma cohort from https://www.cbioportal.org/ and/or https://portal.gdc.cancer.gov/\r +2. Copy tumor segmentations from GBMatch_CNN/data/validation/segmentation into the same folder as the TCGA slide scans\r +3. 
Predict TCGA samples with gbm_predictor (see above).\r +(You can also find all prediction results in GBMatch_CNN/data/validation/TCGA_annotation_prediction.csv.)\r +\r +## Evaluation of the tumor microenvironment\r +1. Install qupath 0.3.0 (newer versions should also work): https://qupath.github.io/.\r +2. Download immunohistochemical slides from https://www.medical-epigenomics.org/papers/GBMatch/.\r +3. Download annotation (IHC_geojsons) from https://doi.org/10.5281/zenodo.8358673.\r +4. Create a new project and import all immunohistochemical slides & annotations.\r +5. Copy the CD34 and HLA-DR thresholder from GBMatch_CNN/qupath into your project.\r +6. Run GBMatch_CNN/qupath/IHC_eval.groovy for all slides - immunohistochemistry results will be saved to a IHC_results-folder.\r +7. Create a new project and import all HE image tiles.\r +8. Run GBMatch_CNN/qupath/cellularity.groovy for all slides - cellularity results will be saved to a HE-results-folder.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.883.1" ; + schema1:isBasedOn "https://github.com/tovaroe/GBMatch_CNN.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Training a CNN model for classification of transcriptional subtypes and survival prediction in glioblastoma" ; + schema1:sdDatePublished "2024-08-05 10:24:40 +0100" ; + schema1:url "https://workflowhub.eu/workflows/883/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2551 ; + schema1:creator ; + schema1:dateCreated "2024-05-13T07:10:28Z" ; + schema1:dateModified "2024-05-13T07:11:42Z" ; + schema1:description """# GBMatch_CNN\r +Work in progress...\r +Predicting TS & risk from glioblastoma whole slide images\r +\r +# Reference\r +Upcoming paper: stay tuned...\r +\r +# Dependencies\r +python 3.7.7\r +\r +randaugment by Khrystyna Faryna: https://github.com/tovaroe/pathology-he-auto-augment\r +\r +tensorflow 2.1.0\r +\r +scikit-survival 0.13.1\r +\r +pandas 1.0.3\r +\r +lifelines 0.25.0\r +\r +# Description\r +The pipeline implemented here predicts transcriptional subtypes and survival of glioblastoma patients based on H&E stained whole slide scans. Sample data is provided in this repository. To test the basic functionality with 5-fold-CV simply run train_model_OS.py (for survival) or train_model_TS.py (for transcriptional subtypes). Please note that this will not reproduce the results from the manuscript, as only a small fraction of the image data can be provided in this repository due to size constraints. In order to reproduce the results from the manuscript, please refer to the step by step guide below. The whole dataset can be accessed at https://www.medical-epigenomics.org/papers/GBMatch/.\r +If you wish to adopt this pipeline for your own use, please be sure to set the correct parameters in config.py.\r +\r +Moreover, we provide a fully trained model in gbm_predictor.py for predicting new samples (supported WSI formats are ndpi and svs). 
To use GBMPredictor, simply initialize by calling \r +`gbm_predictor = GBMPredictor()`\r +and predict your sample by calling\r +`(predicted_TS, risk_group, median_riskscore) = gbm_predictor.predict(*path_to_slidescan*)`\r +Heatmaps and detailed results will be automatically saved in a subfolder in your sample path.\r +\r +# Reproducing the manuscript results - step by step guide\r +\r +## Training the CNN model\r +1. Clone this repository and install the dependencies in your environment. Make sure that the path for randaugment is correctly set in the config.py (should be correct by default).\r +2. Download all included image tiles at https://doi.org/10.5281/zenodo.8358673 and replace the data/training/image_tiles folder with the image_tiles folder from zenodo.\r +3. Run train_model_OS.py and/or train_model_TS.py to reproduce the training with 5-fold cross validation. Models and results will be saved in the data/models folder.\r +4. Run train_final_model_OS.py and/or train_final_model_TS.py to train the final model on the whole training dataset.\r +\r +## Validate the CNN model on TCGA data\r +1. Download scans and clinical data of the TCGA glioblastoma cohort from https://www.cbioportal.org/ and/or https://portal.gdc.cancer.gov/\r +2. Copy tumor segmentations from GBMatch_CNN/data/validation/segmentation into the same folder as the TCGA slide scans\r +3. Predict TCGA samples with gbm_predictor (see above).\r +(You can also find all prediction results in GBMatch_CNN/data/validation/TCGA_annotation_prediction.csv.)\r +\r +## Evaluation of the tumor microenvironment\r +1. Install qupath 0.3.0 (newer versions should also work): https://qupath.github.io/.\r +2. Download immunohistochemical slides from https://www.medical-epigenomics.org/papers/GBMatch/.\r +3. Download annotation (IHC_geojsons) from https://doi.org/10.5281/zenodo.8358673.\r +4. Create a new project and import all immunohistochemical slides & annotations.\r +5. 
Copy the CD34 and HLA-DR thresholder from GBMatch_CNN/qupath into your project.\r +6. Run GBMatch_CNN/qupath/IHC_eval.groovy for all slides - immunohistochemistry results will be saved to a IHC_results-folder.\r +7. Create a new project and import all HE image tiles.\r +8. Run GBMatch_CNN/qupath/cellularity.groovy for all slides - cellularity results will be saved to a HE-results-folder.\r +""" ; + schema1:keywords "Bioinformatics, Pathology" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Training a CNN model for classification of transcriptional subtypes and survival prediction in glioblastoma" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/883?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=23" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-08-05 10:23:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=23" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12386 ; + schema1:creator ; + schema1:dateCreated "2024-06-18T03:03:03Z" ; + schema1:dateModified "2024-06-18T03:03:03Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=23" ; + schema1:version 23 . + + a schema1:Dataset ; + schema1:datePublished "2024-07-31T14:40:06.003066" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Purge-duplicates-one-haplotype-VGP6b/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/984?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/epitopeprediction" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/epitopeprediction" ; + schema1:sdDatePublished "2024-08-05 10:23:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/984/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4453 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:51Z" ; + schema1:dateModified "2024-06-11T12:54:51Z" ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/984?version=7" ; + schema1:keywords "epitope, epitope-prediction, mhc-binding-prediction" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/epitopeprediction" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/984?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2023-10-17T19:27:24.855858" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "cutandrun/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.12" . + + a schema1:Dataset ; + schema1:datePublished "2023-09-15T15:02:21.113507" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/rnaseq-sr" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "rnaseq-sr/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.11" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Basic processing of a QC-filtered Anndata Object. 
UMAP, clustering e.t.c " ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/468?version=2" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq_QCtoBasicProcessing" ; + schema1:sdDatePublished "2024-08-05 10:24:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/468/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 33517 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-06-22T06:24:41Z" ; + schema1:dateModified "2023-11-09T03:50:40Z" ; + schema1:description "Basic processing of a QC-filtered Anndata Object. UMAP, clustering e.t.c " ; + schema1:isBasedOn "https://workflowhub.eu/workflows/468?version=2" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "scRNAseq_QCtoBasicProcessing" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/468?version=2" ; + schema1:version 2 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1017?version=14" ; + schema1:isBasedOn "https://github.com/nf-core/rnafusion" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnafusion" ; + schema1:sdDatePublished "2024-08-05 10:22:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1017/ro_crate?version=14" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12036 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:10Z" ; + schema1:dateModified "2024-06-11T12:55:10Z" ; + schema1:description "Nextflow rnafusion analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1017?version=16" ; + schema1:keywords "fusion, fusion-genes, gene-fusion, rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnafusion" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1017?version=14" ; + schema1:version 14 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "This is a Nextflow implementaion of the GATK Somatic Short Variant Calling workflow. This workflow can be used to discover somatic short variants (SNVs and indels) from tumour and matched normal BAM files following GATK's Best Practices Workflow. The workflowis currently optimised to run efficiently and at scale on the National Compute Infrastructure, Gadi." ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.691.1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/Somatic-shortV-nf" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Somatic-ShortV-nf" ; + schema1:sdDatePublished "2024-08-05 10:26:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/691/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3684 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2023-12-20T01:12:30Z" ; + schema1:dateModified "2023-12-20T01:30:37Z" ; + schema1:description "This is a Nextflow implementaion of the GATK Somatic Short Variant Calling workflow. This workflow can be used to discover somatic short variants (SNVs and indels) from tumour and matched normal BAM files following GATK's Best Practices Workflow. The workflowis currently optimised to run efficiently and at scale on the National Compute Infrastructure, Gadi." ; + schema1:keywords "Bioinformatics, FAIR workflows, GATK4, INDELs, Nextflow, variant calling, workflow, cancer, Somatic, snv, Genomics, human, WGS, HPC" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Somatic-ShortV-nf" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/691?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=27" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=27" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12883 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:13Z" ; + schema1:dateModified "2024-06-11T12:55:13Z" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=27" ; + schema1:version 27 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Macromolecular Coarse-Grained Flexibility (FlexServ) tutorial using BioExcel Building Blocks (biobb)\r +\r +This tutorial aims to illustrate the process of generating protein conformational ensembles from 3D structures and analysing its molecular flexibility, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r 
+![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.553.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_flexserv/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Macromolecular Coarse-Grained Flexibility tutorial" ; + schema1:sdDatePublished "2024-08-05 10:29:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/553/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7454 ; + schema1:creator , + ; + schema1:dateCreated "2023-08-02T11:23:28Z" ; + schema1:dateModified "2023-08-02T11:26:04Z" ; + schema1:description """# Macromolecular Coarse-Grained Flexibility (FlexServ) tutorial using BioExcel Building Blocks (biobb)\r +\r +This tutorial aims to illustrate the process of generating protein conformational ensembles from 3D structures and analysing its molecular flexibility, step by step, using the BioExcel Building Blocks library (biobb).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r 
+![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Macromolecular Coarse-Grained Flexibility tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_flexserv/python/workflow.py" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "SPA workflow using cryosparc processing engine" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1073?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CRYOSPARC: acquire -> motionCorr -> ctf -> report" ; + schema1:sdDatePublished "2024-08-05 10:22:30 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1073/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1492 ; + schema1:dateCreated "2024-07-10T13:04:36Z" ; + schema1:dateModified "2024-07-10T13:04:36Z" ; + schema1:description "SPA workflow using cryosparc processing engine" ; + schema1:keywords "spa, cryosparc, Glacios, TalosArctica, TitanKrios" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "CRYOSPARC: acquire -> motionCorr -> ctf -> report" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1073?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1027?version=11" ; + schema1:isBasedOn "https://github.com/nf-core/viralrecon" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/viralrecon" ; + schema1:sdDatePublished "2024-08-05 10:23:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1027/ro_crate?version=11" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10888 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:21Z" ; + schema1:dateModified "2024-06-11T12:55:21Z" ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1027?version=10" ; + schema1:keywords "Amplicon, ARTIC, Assembly, covid-19, covid19, illumina, long-read-sequencing, Metagenomics, nanopore, ONT, oxford-nanopore, SARS-CoV-2, variant-calling, viral, Virus" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/viralrecon" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1027?version=11" ; + schema1:version 11 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.196.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook ABC MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:46 +0100" ; + schema1:url "https://workflowhub.eu/workflows/196/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 101184 ; + schema1:creator , + ; + schema1:dateCreated "2021-09-28T13:24:33Z" ; + schema1:dateModified "2022-09-14T09:07:13Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/196?version=4" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook ABC MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://raw.githubusercontent.com/bioexcel/biobb_wf_amber_md_setup/master/biobb_wf_amber_md_setup/notebooks/abcsetup/biobb_amber_ABC_setup.ipynb" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Take an anndata file, and perform basic QC with scanpy. Produces a filtered AnnData object." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/467?version=1" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq_CellQC" ; + schema1:sdDatePublished "2024-08-05 10:24:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/467/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 21365 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-05-05T06:49:16Z" ; + schema1:dateModified "2023-06-16T04:56:56Z" ; + schema1:description "Take an anndata file, and perform basic QC with scanpy. Produces a filtered AnnData object." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/467?version=3" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "scRNAseq_CellQC" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/467?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + ; + ns1:output , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2023-11-13T09:41:59.582932" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" . 
+ + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.17" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """RNASeq-DE @ NCI-Gadi processes RNA sequencing data (single, paired and/or multiplexed) for differential expression (raw FASTQ to counts). This pipeline consists of multiple stages and is designed for the National Computational Infrastructure's (NCI) Gadi supercompter, leveraging multiple nodes to run each stage in parallel. \r +\r +Infrastructure\\_deployment\\_metadata: Gadi (NCI)""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.152.1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/RNASeq-DE" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for RNASeq-DE @ NCI-Gadi" ; + schema1:sdDatePublished "2024-08-05 10:31:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/152/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9756 ; + schema1:creator , + , + ; + schema1:dateCreated "2021-08-18T23:24:08Z" ; + schema1:dateModified "2023-01-16T13:51:46Z" ; + schema1:description """RNASeq-DE @ NCI-Gadi processes RNA sequencing data (single, paired and/or multiplexed) for differential expression (raw FASTQ to counts). This pipeline consists of multiple stages and is designed for the National Computational Infrastructure's (NCI) Gadi supercompter, leveraging multiple nodes to run each stage in parallel. 
\r +\r +Infrastructure\\_deployment\\_metadata: Gadi (NCI)""" ; + schema1:isPartOf ; + schema1:keywords "RNASEQ, differential_expression, DE, Gadi, NCI, illumina, STAR, SAMTools, RSeQC, HTSeq, MultiQC, FastQC, BBduk, rna, expression, differential expression, FASTQ, counts, NCI-Gadi, rna-seq, workflow, bash, PBS, parallel, scalable" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "RNASeq-DE @ NCI-Gadi" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/152?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Simulations and figures supporting the manuscript \"Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake\"" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.511.2" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake" ; + schema1:sdDatePublished "2024-08-05 10:29:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/511/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 305 ; + schema1:creator , + , + , + , + , + ; + schema1:dateCreated "2023-08-18T11:04:44Z" ; + schema1:dateModified "2023-08-18T11:12:51Z" ; + schema1:description "Simulations and figures supporting the manuscript \"Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake\"" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/511?version=4" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/511?version=2" ; + schema1:version 2 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 75460 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-08T13:31:32.629794" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Purge-duplicate-contigs-VGP6/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicate-contigs-VGP6/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:version "0.3.0" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=21" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-08-05 10:23:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=21" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9644 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:04Z" ; + schema1:dateModified "2024-06-11T12:55:04Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=21" ; + schema1:version 21 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """\r +Author: AMBARISH KUMAR er.ambarish@gmail.com & ambari73_sit@jnu.ac.in\r +\r +This is a proposed standard operating procedure for genomic variant detection using GATK4.\r +\r +It is hoped to be effective and useful for getting SARS-CoV-2 genome variants.\r +\r +\r +\r +It uses Illumina RNASEQ reads and genome sequence.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/30?version=1" ; + schema1:isBasedOn "https://github.com/ambarishK/bio-cwl-tools/blob/release/gatk4W.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genomic variants - SNPs and INDELs detection using GATK4." ; + schema1:sdDatePublished "2024-08-05 10:33:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/30/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4306 ; + schema1:creator ; + schema1:dateCreated "2020-06-17T06:11:59Z" ; + schema1:dateModified "2023-01-16T13:42:19Z" ; + schema1:description """\r +Author: AMBARISH KUMAR er.ambarish@gmail.com & ambari73_sit@jnu.ac.in\r +\r +This is a proposed standard operating procedure for genomic variant detection using GATK4.\r +\r +It is hoped to be effective and useful for getting SARS-CoV-2 genome variants.\r +\r +\r +\r +It uses Illumina RNASEQ reads and genome sequence.\r +""" ; + schema1:image ; + schema1:keywords "CWL, GATK4, SNPs, INDELs" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Genomic variants - SNPs and INDELs detection using GATK4." ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/30?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + ; + ns1:output , + . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 65147 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 231159 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 1583 ; + schema1:dateModified "2024-03-13T10:40:28+00:00" ; + schema1:name "kmeans.csv" ; + schema1:sdDatePublished "2024-03-22T17:53:30+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1027?version=6" ; + schema1:isBasedOn "https://github.com/nf-core/viralrecon" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/viralrecon" ; + schema1:sdDatePublished "2024-08-05 10:23:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1027/ro_crate?version=6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10144 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:20Z" ; + schema1:dateModified "2024-06-11T12:55:20Z" ; + schema1:description "Assembly and intrahost/low-frequency variant calling for viral samples" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1027?version=10" ; + schema1:keywords "Amplicon, ARTIC, Assembly, covid-19, covid19, illumina, long-read-sequencing, Metagenomics, nanopore, ONT, oxford-nanopore, SARS-CoV-2, variant-calling, viral, Virus" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/viralrecon" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1027?version=6" ; + schema1:version 6 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 13385 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + schema1:dateModified "2024-01-22T13:48:25" ; + schema1:hasPart , + , + , + ; + schema1:name "dataset_4f_16mb" ; + schema1:sdDatePublished "2024-01-22T16:19:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777334 ; + schema1:dateModified "2024-01-22T13:48:25" ; + schema1:name "file0.txt" ; + schema1:sdDatePublished "2024-01-22T16:19:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777273 ; + schema1:dateModified "2024-01-22T13:48:25" ; + schema1:name "file1.txt" ; + schema1:sdDatePublished "2024-01-22T16:19:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777443 ; + schema1:dateModified "2024-01-22T13:48:25" ; + schema1:name "file2.txt" ; + schema1:sdDatePublished "2024-01-22T16:19:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16777723 ; + schema1:dateModified "2024-01-22T13:48:25" ; + schema1:name "file3.txt" ; + schema1:sdDatePublished "2024-01-22T16:19:50+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +****\r +\r +Workflow information:\r +* Input = genome.fasta.\r +* Outputs = soft_masked_genome.fasta, hard_masked_genome.fasta, and table of repeats found. \r +* Runs RepeatModeler with default settings, uses the output of this (repeat library) as input into RepeatMasker. \r +* Runs RepeatMasker with default settings except for: Skip masking of simple tandem repeats and low complexity regions. (-nolow) : default set to yes. Perform softmasking instead of hardmasking - set to yes. \r +* Converts the soft-masked genome to hard-masked for for use in other tools if required. \r +* Workflow report displays an edited table of repeats found. Note: a known bug is that sometimes the workflow report text resets to default text. To restore, look for an earlier workflow version with correct workflow report text, and copy and paste report text into current version.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.875.3" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Repeat masking - TSI" ; + schema1:sdDatePublished "2024-08-05 10:22:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/875/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10929 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-20T23:41:31Z" ; + schema1:dateModified "2024-06-20T23:48:44Z" ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +****\r +\r +Workflow information:\r +* Input = genome.fasta.\r +* Outputs = soft_masked_genome.fasta, hard_masked_genome.fasta, and table of repeats found. \r +* Runs RepeatModeler with default settings, uses the output of this (repeat library) as input into RepeatMasker. \r +* Runs RepeatMasker with default settings except for: Skip masking of simple tandem repeats and low complexity regions. (-nolow) : default set to yes. Perform softmasking instead of hardmasking - set to yes. \r +* Converts the soft-masked genome to hard-masked for for use in other tools if required. \r +* Workflow report displays an edited table of repeats found. Note: a known bug is that sometimes the workflow report text resets to default text. 
To restore, look for an earlier workflow version with correct workflow report text, and copy and paste report text into current version.\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/875?version=2" ; + schema1:isPartOf ; + schema1:keywords "TSI-annotation" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Repeat masking - TSI" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/875?version=3" ; + schema1:version 3 ; + ns1:input ; + ns1:output , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 651806 . + + a schema1:Dataset ; + schema1:datePublished "2024-06-10T13:43:17.509153" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "parallel-accession-download/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-wgs-variant-calling" ; + schema1:mainEntity ; + schema1:name "COVID-19-PE-WGS-ILLUMINA (v0.2)" ; + schema1:sdDatePublished "2021-06-22 03:00:47 +0100" ; + schema1:softwareVersion "v0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 34539 ; + schema1:name "COVID-19-PE-WGS-ILLUMINA" ; + schema1:programmingLanguage . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.129.5" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (Fpocket)" ; + schema1:sdDatePublished "2024-08-05 10:30:54 
+0100" ; + schema1:url "https://workflowhub.eu/workflows/129/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 43169 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-03-04T14:36:50Z" ; + schema1:dateModified "2024-05-14T10:12:37Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in 
Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/129?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (Fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_virtual-screening/blob/main/biobb_wf_virtual-screening/notebooks/fpocket/wf_vs_fpocket.ipynb" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Simulations and figures supporting the manuscript \"Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake\"" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.511.3" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake" ; + schema1:sdDatePublished "2024-08-05 10:29:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/511/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 305 ; + schema1:creator , + , + , + , + , + ; + schema1:dateCreated "2023-11-24T13:38:57Z" ; + schema1:dateModified "2023-11-24T13:40:58Z" ; + schema1:description "Simulations and figures supporting the manuscript \"Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake\"" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/511?version=4" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Timing of spring events changes under modelled future climate scenarios in a mesotrophic lake" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/511?version=3" ; + schema1:version 3 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 75460 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Loads a single cell counts matrix into an annData format - adding a column called sample with the sample name. (Input format - matrix.mtx, features.tsv and barcodes.tsv)" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/512?version=1" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "other-closed" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq: Load counts matrix" ; + schema1:sdDatePublished "2024-08-05 10:24:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/512/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14187 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-06-22T06:40:48Z" ; + schema1:dateModified "2023-11-09T03:51:14Z" ; + schema1:description "Loads a single cell counts matrix into an annData format - adding a column called sample with the sample name. (Input format - matrix.mtx, features.tsv and barcodes.tsv)" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/512?version=1" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "" ; + schema1:name "scRNAseq: Load counts matrix" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/512?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + ; + ns1:output . + + a schema1:Dataset ; + schema1:datePublished "2024-05-31T11:30:02.345292" ; + schema1:hasPart , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/baredsc" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + ; + schema1:name "baredsc/baredSC-1d-logNorm" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "baredsc/baredSC-2d-logNorm" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/baredsc" ; + schema1:version "0.5" . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-11-23T13:33:07.963883" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/kmer-profiling-hifi-VGP1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "kmer-profiling-hifi-VGP1/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Molecular Structure Checking using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **checking** a **molecular structure** before using it as an input for a **Molecular Dynamics** simulation. The workflow uses the **BioExcel Building Blocks library (biobb)**. The particular structure used is the crystal structure of **human Adenylate Kinase 1A (AK1A)**, in complex with the **AP5A inhibitor** (PDB code [1Z83](https://www.rcsb.org/structure/1z83)). \r +\r +**Structure checking** is a key step before setting up a protein system for **simulations**. 
A number of **common issues** found in structures at **Protein Data Bank** may compromise the success of the **simulation**, or may suggest that longer **equilibration** procedures are necessary.\r +\r +The **workflow** shows how to:\r +\r +- Run **basic manipulations on structures** (selection of models, chains, alternative locations\r +- Detect and fix **amide assignments** and **wrong chiralities**\r +- Detect and fix **protein backbone** issues (missing fragments, and atoms, capping)\r +- Detect and fix **missing side-chain atoms**\r +- **Add hydrogen atoms** according to several criteria\r +- Detect and classify **atomic clashes**\r +- Detect possible **disulfide bonds (SS)**\r +\r +An implementation of this workflow in a **web-based Graphical User Interface (GUI)** can be found in the [https://mmb.irbbarcelona.org/biobb-wfs/](https://mmb.irbbarcelona.org/biobb-wfs/) server (see [https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check](https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check)).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier 
"https://doi.org/10.48546/workflowhub.workflow.776.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_structure_checking/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Molecular Structure Checking" ; + schema1:sdDatePublished "2024-08-05 10:25:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/776/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 64539 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15044 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-05T08:41:19Z" ; + schema1:dateModified "2024-03-05T08:50:30Z" ; + schema1:description """# Molecular Structure Checking using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **checking** a **molecular structure** before using it as an input for a **Molecular Dynamics** simulation. The workflow uses the **BioExcel Building Blocks library (biobb)**. The particular structure used is the crystal structure of **human Adenylate Kinase 1A (AK1A)**, in complex with the **AP5A inhibitor** (PDB code [1Z83](https://www.rcsb.org/structure/1z83)). \r +\r +**Structure checking** is a key step before setting up a protein system for **simulations**. 
A number of **common issues** found in structures at **Protein Data Bank** may compromise the success of the **simulation**, or may suggest that longer **equilibration** procedures are necessary.\r +\r +The **workflow** shows how to:\r +\r +- Run **basic manipulations on structures** (selection of models, chains, alternative locations\r +- Detect and fix **amide assignments** and **wrong chiralities**\r +- Detect and fix **protein backbone** issues (missing fragments, and atoms, capping)\r +- Detect and fix **missing side-chain atoms**\r +- **Add hydrogen atoms** according to several criteria\r +- Detect and classify **atomic clashes**\r +- Detect possible **disulfide bonds (SS)**\r +\r +An implementation of this workflow in a **web-based Graphical User Interface (GUI)** can be found in the [https://mmb.irbbarcelona.org/biobb-wfs/](https://mmb.irbbarcelona.org/biobb-wfs/) server (see [https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check](https://mmb.irbbarcelona.org/biobb-wfs/help/create/structure#check)).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Molecular Structure Checking" ; 
+ schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_structure_checking/cwl/workflow.cwl" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/457?version=1" ; + schema1:isBasedOn "https://github.com/tianyao-0315/PyUtils" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for PyUtils" ; + schema1:sdDatePublished "2024-08-05 10:31:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/457/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1337 ; + schema1:dateCreated "2023-04-14T02:26:04Z" ; + schema1:dateModified "2023-04-14T02:26:04Z" ; + schema1:description "" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "PyUtils" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/457?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """The tool provides a calculation of the power spectrum of Stochastic Gravitational Wave Backgorund (SGWB) from a first-order cosmological phase transition based on the parameterisations of Roper Pol et al. (2023). 
The power spectrum includes two components: from the sound waves excited by collisions of bubbles of the new phase and from the turbulence that is induced by these collisions.\r +\r +The cosmological epoch of the phase transition is described by the temperature, T_star and by the number(s) of relativistic degrees of freedom, g_star that should be specified as parameters.\r +\r +The phase transition itself is characterised by phenomenological parameters, alpha, beta_H and epsilon_turb, the latent heat, the ratio of the Hubble radius to the bubble size at percolation and the fraction of the energy otuput of the phase transition that goes into turbulence.\r +\r +The product Model spectrum outputs the power spectrum for fixed values of these parameters. The product Phase transition parameters reproduces the constraints on the phase transition parameters from the Pulsar Timing Array gravitational wave detectors, reported by Boyer & Neronov (2024), including the estimate of the cosmological magnetic field induced by turbulence.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.815.2" ; + schema1:isBasedOn "https://gitlab.renkulab.io/astronomy/mmoda/sgwb" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Stochastic Gravitational Wave Backgorund (SGWB) tool" ; + schema1:sdDatePublished "2024-08-05 10:24:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/815/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3955 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-04-18T12:59:25Z" ; + schema1:dateModified "2024-04-18T13:57:10Z" ; + schema1:description """The tool provides a calculation of the power spectrum of Stochastic Gravitational Wave Backgorund (SGWB) from a first-order cosmological phase transition based on the parameterisations of Roper Pol et al. (2023). The power spectrum includes two components: from the sound waves excited by collisions of bubbles of the new phase and from the turbulence that is induced by these collisions.\r +\r +The cosmological epoch of the phase transition is described by the temperature, T_star and by the number(s) of relativistic degrees of freedom, g_star that should be specified as parameters.\r +\r +The phase transition itself is characterised by phenomenological parameters, alpha, beta_H and epsilon_turb, the latent heat, the ratio of the Hubble radius to the bubble size at percolation and the fraction of the energy otuput of the phase transition that goes into turbulence.\r +\r +The product Model spectrum outputs the power spectrum for fixed values of these parameters. The product Phase transition parameters reproduces the constraints on the phase transition parameters from the Pulsar Timing Array gravitational wave detectors, reported by Boyer & Neronov (2024), including the estimate of the cosmological magnetic field induced by turbulence.\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/815?version=1" ; + schema1:keywords "astronomy" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Stochastic Gravitational Wave Backgorund (SGWB) tool" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/815?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """Inclusion Body Myositis Active Subnetwork Identification Workflow\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/681?version=6" ; + schema1:isBasedOn "https://github.com/jdwijnbergen/IBM_ASI_workflow.git" ; + schema1:license "notspecified" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Inclusion Body Myositis Active Subnetwork Identification Workflow" ; + schema1:sdDatePublished "2024-08-05 10:25:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/681/ro_crate?version=6" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 26584 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7536 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-27T16:10:47Z" ; + schema1:dateModified "2023-11-27T16:10:47Z" ; + schema1:description """Inclusion Body Myositis Active Subnetwork Identification Workflow\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/681?version=6" ; + schema1:keywords "Bioinformatics, CWL, Genomics, Transcriptomics, Protein-Protein Interaction" ; + schema1:license "https://choosealicense.com/no-permission/" ; + schema1:name "Inclusion Body Myositis Active Subnetwork Identification Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/681?version=6" ; + schema1:version 6 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """A hecatomb is a great sacrifice or an extensive loss. 
Heactomb the software empowers an analyst to make data driven decisions to 'sacrifice' false-positive viral reads from metagenomes to enrich for true-positive viral reads. This process frequently results in a great loss of suspected viral sequences / contigs.\r +\r +For information about installation, usage, tutorial etc please refer to the documentation: https://hecatomb.readthedocs.io/en/latest/\r +\r +### Quick start guide\r +\r +Install Hecatomb from Bioconda\r +```bash\r +# create an env called hecatomb and install Hecatomb in it\r +conda create -n hecatomb -c conda-forge -c bioconda hecatomb\r +\r +# activate conda env\r +conda activate hecatomb\r +\r +# check the installation\r +hecatomb -h\r +\r +# download the databases - you only have to do this once\r +hecatomb install\r +\r +# Run the test dataset\r +hecatomb run --test\r +```""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.235.1" ; + schema1:isBasedOn "https://github.com/shandley/hecatomb" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Hecatomb" ; + schema1:sdDatePublished "2024-08-05 10:24:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/235/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10077 ; + schema1:creator , + , + ; + schema1:dateCreated "2021-11-11T03:37:33Z" ; + schema1:dateModified "2024-05-13T02:08:43Z" ; + schema1:description """A hecatomb is a great sacrifice or an extensive loss. Heactomb the software empowers an analyst to make data driven decisions to 'sacrifice' false-positive viral reads from metagenomes to enrich for true-positive viral reads. 
This process frequently results in a great loss of suspected viral sequences / contigs.\r +\r +For information about installation, usage, tutorial etc please refer to the documentation: https://hecatomb.readthedocs.io/en/latest/\r +\r +### Quick start guide\r +\r +Install Hecatomb from Bioconda\r +```bash\r +# create an env called hecatomb and install Hecatomb in it\r +conda create -n hecatomb -c conda-forge -c bioconda hecatomb\r +\r +# activate conda env\r +conda activate hecatomb\r +\r +# check the installation\r +hecatomb -h\r +\r +# download the databases - you only have to do this once\r +hecatomb install\r +\r +# Run the test dataset\r +hecatomb run --test\r +```""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Hecatomb" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/235?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/986?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/fetchngs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fetchngs" ; + schema1:sdDatePublished "2024-08-05 10:23:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/986/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6458 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:52Z" ; + schema1:dateModified "2024-06-11T12:54:52Z" ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/986?version=13" ; + schema1:keywords "ddbj, download, ena, FASTQ, geo, sra, synapse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fetchngs" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/986?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=11" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-08-05 10:23:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=11" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14588 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:59Z" ; + schema1:dateModified "2024-06-11T12:54:59Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=11" ; + schema1:version 11 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:description """Workflow for quality assessment of paired reads and classification using NGTax 2.0 and functional annotation using picrust2. \r +In addition files are exported to their respective subfolders for easier data management in a later stage.\r +Steps: \r + - FastQC (read quality control)\r + - NGTax 2.0\r + - Picrust 2\r + - Export module for ngtax\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.154.2" ; + schema1:isBasedOn "https://git.wur.nl/unlock/cwl/-/raw/master/cwl/workflows/workflow_ngtax_picrust2.cwl" ; + schema1:license "AFL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Quality assessment, amplicon classification and functional prediction" ; + schema1:sdDatePublished "2024-08-05 10:33:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/154/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 37489 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6821 ; + schema1:creator , + ; + schema1:dateCreated "2021-09-30T11:15:19Z" ; + schema1:dateModified "2023-01-16T13:51:47Z" ; + schema1:description """Workflow for quality assessment of paired reads and classification using NGTax 2.0 and functional annotation using picrust2. \r +In addition files are exported to their respective subfolders for easier data management in a later stage.\r +Steps: \r + - FastQC (read quality control)\r + - NGTax 2.0\r + - Picrust 2\r + - Export module for ngtax\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/154?version=1" ; + schema1:keywords "Amplicon, Classification, CWL" ; + schema1:license "https://spdx.org/licenses/AFL-3.0" ; + schema1:name "Quality assessment, amplicon classification and functional prediction" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/154?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/protein-ligand-docking).\r +***\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.296.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_virtual_screening/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Protein-ligand Docking tutorial (Fpocket)" ; + 
schema1:sdDatePublished "2024-08-05 10:30:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/296/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 26851 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-24T14:58:31Z" ; + schema1:dateModified "2022-11-22T10:12:44Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/protein-ligand-docking).\r +***\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 
[675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/296?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Protein-ligand Docking tutorial (Fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_virtual_screening/galaxy/biobb_wf_virtual_screening.ga" ; + schema1:version 1 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# CLAWS (CNAG's Long-read Assembly Workflow in Snakemake)\r + Snakemake Pipeline used for de novo genome assembly @CNAG. It has been developed for Snakemake v6.0.5.\r +\r +It accepts Oxford Nanopore Technologies (ONT) reads, PacBio HFi reads, illumina paired-end data, illumina 10X data and Hi-C reads. It does the preprocessing of the reads, assembly, polishing, purge_dups, scaffodling and different evaluation steps. By default it will preprocess the reads, run Flye + Hypo + purge_dups + yahs and evaluate the resulting assemblies with BUSCO, MERQURY, Nseries and assembly_stats. It needs a config file and a spec file (json file with instructions on which resources should slurm use for each of the jobs). Both files are created by the script "create_config_assembly.py" that is located in the bin directory. 
To check all the options accepted by the script, do:\r +\r +```\r +bin/create_config_assembly.py -h\r +```\r +\r +Once the 2 config files are produced, the pipeline can be launched using snakemake like this:\r +\r +``snakemake --notemp -j 999 --snakefile assembly_pipeline.smk --configfile assembly.config --is --cluster-conf assembly.spec --use-conda --use-envmodules``\r +\r +If you are using an HPC cluster, please check how should you run snakemake to launch the jobs to the cluster. \r +\r +Most of the tools used will be installed via conda using the environments of the "envs" directory after providing the "--use-conda" option to snakemake. However, a few tools cannot be installed via conda and will have to be available in your PATH, or as a module in the cluster. Those tools are:\r +\r +- NextDenovo/2.5.0\r +- NextPolish/1.4.1\r +\r +# How to provide input data:\r +\r +There are several ways of providing the reads.\r +\r +### 1- ONT reads\r +\r +1.1 Using the option ``--ont-dir {DIR}`` in create_config_assembly.py.\r +\r +If you do so, it will look for all the files in the directory that end in '.fastq.gz' and will add the basenames to "ONT_wildcards". These wildcards will be processed by the pipeline that will:\r +\r +- Concatenate all the files into a single file\r +\r +- Run filtlong with the default or specified parameters. \r +\r +- Use the resulting file for assembly, polishing and/or purging.\r +\r +You can also specify the basenames of the files that you want to use with the ``--ont-list `` option. 
In this case, the pipeline will use the wildcards that you're providing instead of merging all the files in the directory.\r +\r +1.2 Using the option ```--ont-reads {FILE}``` in create_config_assembly.py.\r +\r +If you do so, it will consider that you already have all the reads in one file and will: \r +\r +- Run filtlong with the default or specified parameters.\r +\r +- Use the resulting file for assembly, polishing and/or purging.\r +\r +1.3 Using the option ```--ont-filt {FILE}```. It will use this file as the output from filtlong. Hence, it will skip the preprocessing steps and directly use it for assembly, polishing and/or purging. \r +\r +\r +\r +### 2-Illumina 10X-linked data\r +\r +2.1 Using the ```--raw-10X {DIR:list}``` option. \r +\r +Dictionary with 10X raw read directories, it has to be the mkfastq dir. You must specify as well the sampleIDs from this run. Example: '{"mkfastq- dir":"sample1,sample2,sample3"}'...\r +\r +It will take each basename in the list to get the fastqs from the corresponding directory and run longranger on each sample. Afterwards, it will build meryldbs for each "barcoded" file. Finally, it will concatenate all the meryldbs and "barcoded" files. Resulting "barcoded" file will be used for polishing. \r +\r +2.2 Using the ``--processed-10X {DIR}`` parameter. \r +\r +This directory can already be there or be produced by the pipeline as described in step 2.1. Once all the "barcoded" fastq files are there, meryldbs will be built for each "barcoded" file. Finally, it will concatenate all the meryldbs and "barcoded" files. Resulting "barcoded" file will be used for polishing. \r +\r +2.3 Using the ``--10X`` option. \r +\r +The argument to this is the path to the concatenated ".barcoded" file that needs to be used for polishing. If the pre-concatenated files are not given, meryldbs will be directly generated with this file, but it may run out of memory. 
\r +\r +### 3- Illumina short-read data\r +\r +3.1 Using the ``--illumina-dir {DIR}`` option, that will look for all the files in the directory that end in '.1.fastq.gz' and will add the basenames to "illumina_wildcards". These wildcards will be processed by the pipeline that will: \r +\r +- Trim adaptors with Trimgalore\r +\r +- Concatenate all the trimmed *.1.fastq.gz and the *2.fastq.gz in one file per pair. \r +\r +- The resulting reads will be used for building meryldbs and polishing. \r +\r +3.2 Using the ``--processed-illumina`` option. If the directory exists and contains files, the pipeline will look for all the files in the directory that end in '.1.fastq.gz' and will add the basenames to "illumina_wildcards". These wildcards will be processed by the pipeline that will:\r +\r +- Concatenate all the trimmed *.1.fastq.gz and the *2.fastq.gz in one file per pair. \r +\r +- The resulting reads will be used for building meryldbs and polishing. \r +\r +3.3 Using the ``--pe1 {FILE} and --pe2 {FILE}`` options. That will consider that these are the paired files containing all the illumina reads ready to be used and will build meryldbs and polish with them.\r +\r +### 4- Input assemblies\r +\r +If you want to polish an already assembled assembly, you can give it to the pipeline by using the option ``--assembly-in ASSEMBLY_IN [ASSEMBLY_IN ...]\r + Dictionary with assemblies that need to be polished but not assembled and directory where they should\r + be polished. Example: '{"assembly1":"polishing_dir1"}' '{"assembly2"="polishing_dir2"}' ...``\r + \r +If you want to start the pipeline after polishing on an already existing assembly, you can give it to the pipeline by using the option ``--postpolish-assemblies POSTPOLISH_ASSEMBLIES [POSTPOLISH_ASSEMBLIES ...]\r + Dictionary with assemblies for which postpolishing steps need to be run but that are not assembled and\r + base step for the directory where the first postpolishing step should be run. 
Example:\r +       '{"assembly1":"s04.1_p03.1"}' '{"assembly2"="s04.2_p03.2"}' ...``\r +\r +To evaluate and produce the final pretext file on a curated assembly, use ``--curated-assemblies CURATED_ASSEMBLIES [CURATED_ASSEMBLIES ...]\r +         Dictionary with assemblies that have already been curated. Evaluations and read alignment will be performed. Example:\r +         '{"assembly1":"s04.1_p03.1"}' '{"assembly2":"s04.2_p03.2"}' ...``\r +\r +\r +\r +# Description of implemented rules\r +\r +1- Preprocessing:\r + \r +- **Read concatenation:**\r +\r +``zcat {input.fastqs} | pigz -p {threads} -c > {output.final_fastq}``\r + \r +- **Longranger for 10X reads**: it uses the Longranger version installed in the path specified in the configfile\r +\r +``longranger basic --id={params.sample} --sample={params.sample} --fastqs={input.mkfastq_dir} --localcores={threads}``\r +\r +- **Trimgalore:** By default it gives the ``--max_n 0 --gzip -q 20 --paired --retain_unpaired`` options, but it can be changed with the ``--trim-galore-opts `` argument. \r +\r +``trim_galore -j {threads} {params.opts} {input.read1} {input.read2}``\r +\r +- **Filtlong:** it uses the Filtlong version installed in the path specified in the configfile. By default it gives the min_length and min_mean_q parameters, but extra parameters can be added with the ``--filtlong-opts`` option.\r +\r +``filtlong --min_length {params.minlen} --min_mean_q {params.min_mean_q} {params.opts} {input.reads} | pigz -p {threads} -c > {output.outreads}``\r + \r +- **Build meryldb**: it uses the merqury conda environment specified in the configfile. It takes as argument the `--meryl-k` value that needs to be estimated first for the genome size. It can run either on the illumina reads, the ont reads or both, default behaviour is both. 
\r +\r +``meryl k={params.kmer} count output {output.out_dir} {input.fastq}``\r + \r +- Concat meryldbs: with the merqury conda environment specified in the configfile\r +\r +``meryl union-sum output {output.meryl_all} {input.input_run}``\r + \r +- **Align ONT (Minimap2):** it aligns the reads using minimap2 and outputs the alignment either in bam or in paf.gz formats. It uses the minimap2 conda environment specified in the configfile\r +\r +``minimap2 -{params.align_opts} -t {threads} {input.genome} {input.reads} ``\r +\r +- **Align Illumina (BWA-MEM):** it aligns the reads with BWA-mem and outputs a bam file\r +\r +``bwa mem -Y {params.options} -t {threads} {input.genome} {input.reads} | samtools view -Sb - | samtools sort -@ {threads} -o {output.mapping} -``\r +\r +2- Assembly\r +\r +- **Flye (default)**. It is run by default, if you don't want the pipeline to run it, you can give `--no-flye` option when creating the config. It uses the conda environment specified in the config. By default it is set to 2 polishing iterations and gives the genome-size estimate that has been given when creating the config. Extra options can be provided with the `--flye-opts`.\r +\r +``flye --{params.readtype} {input.reads} -o {params.outdir}out -t {threads} -i {params.pol_iterations} {params.other_flye_opts} ``\r + \r +- **Nextdenovo (if ``run-nextdenovo``):** It uses the cluster module specified in the config. If nextdenovo option is turned on, the create_config script will also create the nextdenovo config file. Check the create_config help to see which options can be modified on it. \r +\r +``nextDenovo {input.config}``\r +\r +3- Polishing\r +\r +- **Hypo (default):** It is the polisher that the pipeline uses by default, it can be turned off specifying ``--no-hypo`` when creating the config. If selected, the reads will be aligned in previous rules and then hypo will be run, it requires illumina data. It uses the conda environment specified in the config. 
\r +\r +``hypo -r @short_reads.list.txt -d {input.genome} -b {input.sr_bam} -c {coverage} -s {params.genome_size} -B {input.lr_bam} -t {threads} -o {output.polished} -p {params.proc} {params.opts} ``\r + \r +- **Nextpolish ont (if turned on):** to run nextpolish with ONT reads, specify ``--nextpolish-ont-rounds`` and the number of rounds you want to run of it. \r +\r +``"python /apps/NEXTPOLISH/1.3.1/lib/nextpolish2.py -g {input.genome} -p {threads} -l lgs.fofn -r {params.lrtype} > {output.polished}``\r + \r +- **Nextpolish illumina (if turned on):** to run nextpolish with ONT reads, specify ``--nextpolish-ill-rounds`` and the number of rounds you want to run of it. \r +\r +``"python /apps/NEXTPOLISH/1.3.1/lib/nextpolish1.py -g {input.genome} -p {threads} -s {input.bam} -t {params.task} > {output.polished}``\r +\r +4- Post-assembly\r +\r +- **Purge_dups (by default):** select ``--no-purgedups`` if you don't want to run it. If no manual cutoffs are given, it'll run purgedups with automatic cutoffs and then will rerun it selecting the mean cutoff as 0.75\\*cov. It uses the version installed in the cluster module specified in the config. \r +\r +5- Evaluations\r + \r +- **Merqury:** It runs on each 'terminal' assembly. This is, the base assembly and the resulting assembly from each branch of the pipeline. \r + \r +- **Busco:** It can be run only in the terminal assemblies or on all the assemblies produced by the pipeline. It uses the conda environment specified in the config as well as the parameters specified. \r + \r +- **Nseries:** This is run during the *finalize* on all the assemblies that are evaluated. After it, that rule combines the statistics produced by all the evaluation rules. 
\r +\r +# Description of all options\r +```\r + bin/create_config_assembly.py -h\r +usage: create_configuration_file [-h] [--configFile configFile] [--specFile specFile] [--ndconfFile ndconfFile] [--concat-cores concat_cores]\r + [--genome-size genome_size] [--lr-type lr_type] [--basename base_name] [--species species] [--keep-intermediate]\r + [--preprocess-lr-step PREPROCESS_ONT_STEP] [--preprocess-10X-step PREPROCESS_10X_STEP]\r + [--preprocess-illumina-step PREPROCESS_ILLUMINA_STEP] [--preprocess-hic-step PREPROCESS_HIC_STEP]\r + [--flye-step FLYE_STEP] [--no-flye] [--nextdenovo-step NEXTDENOVO_STEP] [--run-nextdenovo]\r + [--nextpolish-cores nextpolish_cores] [--minimap2-cores minimap2_cores] [--bwa-cores bwa_cores]\r + [--hypo-cores hypo_cores] [--pairtools-cores pairtools_cores] [--busco-cores busco_cores]\r + [--nextpolish-ont-rounds nextpolish_ont_rounds] [--nextpolish-ill-rounds nextpolish_ill_rounds]\r + [--hypo-rounds hypo_rounds] [--longranger-cores longranger_cores] [--longranger-path longranger_path]\r + [--genomescope-opts genomescope_additional] [--no-purgedups] [--ploidy ploidy] [--run-tigmint] [--run-kraken2]\r + [--no-yahs] [--scripts-dir SCRIPTS_DIR] [--ont-reads ONT_READS] [--ont-dir ONT_DIR] [--ont-filt ONT_FILTERED]\r + [--pe1 PE1] [--pe2 PE2] [--processed-illumina PROCESSED_ILLUMINA] [--raw-10X RAW_10X [RAW_10X ...]]\r + [--processed-10X PROCESSED_10X] [--10X R10X] [--illumina-dir ILLUMINA_DIR]\r + [--assembly-in ASSEMBLY_IN [ASSEMBLY_IN ...]]\r + [--postpolish-assemblies POSTPOLISH_ASSEMBLIES [POSTPOLISH_ASSEMBLIES ...]]\r + [--curated-assemblies CURATED_ASSEMBLIES [CURATED_ASSEMBLIES ...]] [--hic-dir HIC_DIR]\r + [--pipeline-workdir PIPELINE_WORKDIR] [--filtlong-dir FILTLONG_DIR] [--concat-hic-dir CONCAT_HIC_DIR]\r + [--flye-dir FLYE_DIR] [--nextdenovo-dir NEXTDENOVO_DIR] [--flye-polishing-dir POLISH_FLYE_DIR]\r + [--nextdenovo-polishing-dir POLISH_NEXTDENOVO_DIR] [--eval-dir eval_dir] [--stats-out stats_out]\r + [--hic-qc-dir hic_qc_dir] 
[--filtlong-minlen filtlong_minlen] [--filtlong-min-mean-q filtlong_min_mean_q]\r + [--filtlong-opts filtlong_opts] [--kraken2-db kraken2_db] [--kraken2-kmer kraken2_kmers]\r + [--kraken2-opts additional_kraken2_opts] [--kraken2-cores kraken2_threads] [--trim-galore-opts trim_galore_opts]\r + [--trim-Illumina-cores Trim_Illumina_cores] [--flye-cores flye_cores] [--flye-polishing-iterations flye_pol_it]\r + [--other-flye-opts other_flye_opts] [--nextdenovo-cores nextdenovo_cores] [--nextdenovo-jobtype nextdenovo_type]\r + [--nextdenovo-task nextdenovo_task] [--nextdenovo-rewrite nextdenovo_rewrite]\r + [--nextdenovo-parallel_jobs nextdenovo_parallel_jobs] [--nextdenovo-minreadlen nextdenovo_minreadlen]\r + [--nextdenovo-seeddepth nextdenovo_seeddepth] [--nextdenovo-seedcutoff nextdenovo_seedcutoff]\r + [--nextdenovo-blocksize nextdenovo_blocksize] [--nextdenovo-pa-correction nextdenovo_pa_correction]\r + [--nextdenovo-minimap_raw nextdenovo_minimap_raw] [--nextdenovo-minimap_cns nextdenovo_minimap_cns]\r + [--nextdenovo-minimap_map nextdenovo_minimap_map] [--nextdenovo-sort nextdenovo_sort]\r + [--nextdenovo-correction_opts nextdenovo_correction_opts] [--nextdenovo-nextgraph_opt nextdenovo_nextgraph_opt]\r + [--sr-cov ill_cov] [--hypo-proc hypo_processes] [--hypo-no-lr] [--hypo-opts hypo_opts]\r + [--purgedups-cores purgedups_cores] [--purgedups-calcuts-opts calcuts_opts] [--tigmint-cores tigmint_cores]\r + [--tigmint-opts tigmint_opts] [--hic-qc] [--no-pretext] [--assembly-qc assembly_qc] [--yahs-cores yahs_cores]\r + [--yahs-mq yahs_mq] [--yahs-opts yahs_opts] [--hic-map-opts hic_map_opts] [--mq mq [mq ...]]\r + [--hic-qc-assemblylen hic_qc_assemblylen] [--blast-cores blast_cores] [--hic-blastdb blastdb]\r + [--hic-readsblast hic_readsblast] [--no-final-evals] [--busco-lin busco_lineage] [--merqury-db merqury_db]\r + [--merqury-plot-opts merqury_plot_opts] [--meryl-k meryl_k] [--meryl-threads meryl_threads]\r + [--meryl-reads meryl_reads [meryl_reads ...]] 
[--ont-list ONT_wildcards] [--illumina-list illumina_wildcards]\r + [--r10X-list r10X_wildcards] [--hic-list hic_wildcards]\r +\r +Create a configuration json file for the assembly pipeline.\r +\r +options:\r + -h, --help show this help message and exit\r +\r +General Parameters:\r + --configFile configFile\r + Configuration JSON to be generated. Default assembly.config\r + --specFile specFile Cluster specifications JSON fileto be generated. Default assembly.spec\r + --ndconfFile ndconfFile\r + Name pf the nextdenovo config file. Default nextdenovo.config\r + --concat-cores concat_cores\r + Number of threads to concatenate reads and to run filtlong. Default 4\r + --genome-size genome_size\r + Approximate genome size. Example: 615m or 2.6g. Default None\r + --lr-type lr_type Type of long reads (options are flye read-type options). Default nano-hq\r + --basename base_name Base name for the project. Default None\r + --species species Name of the species to be assembled. Default None\r + --keep-intermediate Set this to True if you do not want intermediate files to be removed. Default False\r + --preprocess-lr-step PREPROCESS_ONT_STEP\r + Step for preprocessing long-reads. Default 02.1\r + --preprocess-10X-step PREPROCESS_10X_STEP\r + Step for preprocessing 10X reads. Default 02.2\r + --preprocess-illumina-step PREPROCESS_ILLUMINA_STEP\r + Step for preprocessing illumina reads. Default 02.2\r + --preprocess-hic-step PREPROCESS_HIC_STEP\r + Step for preprocessing hic reads. Default 02.3\r + --flye-step FLYE_STEP\r + Step for running flye. Default 03.1\r + --no-flye Give this option if you do not want to run Flye.\r + --nextdenovo-step NEXTDENOVO_STEP\r + Step for running nextdenovo. Default 03.2\r + --run-nextdenovo Give this option if you do want to run Nextdenovo.\r + --nextpolish-cores nextpolish_cores\r + Number of threads to run the nextpolish step. Default 24\r + --minimap2-cores minimap2_cores\r + Number of threads to run the alignment with minimap2. 
Default 32\r + --bwa-cores bwa_cores\r + Number of threads to run the alignments with BWA-Mem2. Default 16\r + --hypo-cores hypo_cores\r + Number of threads to run the hypo step. Default 24\r + --pairtools-cores pairtools_cores\r + Number of threads to run the pairtools step. Default 100\r + --busco-cores busco_cores\r + Number of threads to run BUSCO. Default 32\r + --nextpolish-ont-rounds nextpolish_ont_rounds\r + Number of rounds to run the Nextpolish with ONT step. Default 0\r + --nextpolish-ill-rounds nextpolish_ill_rounds\r + Number of rounds to run the Nextpolish with illumina step. Default 0\r + --hypo-rounds hypo_rounds\r + Number of rounds to run the Hypostep. Default 1\r + --longranger-cores longranger_cores\r + Number of threads to run longranger. Default 16\r + --longranger-path longranger_path\r + Path to longranger executable. Default /scratch/project/devel/aateam/src/10X/longranger-2.2.2\r + --genomescope-opts genomescope_additional\r + Additional options to run Genomescope2 with. Default -m 10000\r + --no-purgedups Give this option if you do not want to run Purgedups.\r + --ploidy ploidy Expected ploidy. Default 2\r + --run-tigmint Give this option if you want to run the scaffolding with 10X reads step.\r + --run-kraken2 Give this option if you want to run Kraken2 on the input reads.\r + --no-yahs Give this option if you do not want to run yahs.\r +\r +Inputs:\r + --scripts-dir SCRIPTS_DIR\r + Directory with the different scripts for the pipeline. Default\r + /software/assembly/pipelines/Assembly_pipeline/CLAWSv2.2/bin/../scripts/\r + --ont-reads ONT_READS\r + File with all the ONT reads. Default None\r + --ont-dir ONT_DIR Directory where the ONT fastqs are stored. Default None\r + --ont-filt ONT_FILTERED\r + File with the ONT reads after running filtlong on them. 
Default None\r +  --pe1 PE1             File with the illumina paired-end fastqs, already trimmed, pair 1.\r +  --pe2 PE2             File with the illumina paired-end fastqs, already trimmed, pair 2.\r +  --processed-illumina PROCESSED_ILLUMINA\r +                        Directory to Processed illumina reads. Already there or to be produced by the pipeline.\r +  --raw-10X RAW_10X [RAW_10X ...]\r +                        Dictionary with 10X raw read directories, it has to be the mkfastq dir. You must specify as well the sampleIDs from this run.\r +                        Example: '{"mkfastq-dir":"sample1,sample2,sample3"}'...\r +  --processed-10X PROCESSED_10X\r +                        Directory to Processed 10X reads. Already there or to be produced by the pipeline.\r +  --10X R10X            File with barcoded 10X reads in fastq.gz format, concatenated.\r +  --illumina-dir ILLUMINA_DIR\r +                        Directory where the raw illumina fastqs are stored. Default None\r +  --assembly-in ASSEMBLY_IN [ASSEMBLY_IN ...]\r +                        Dictionary with assemblies that need to be polished but not assembled and directory where they should be polished. Example:\r +                        '{"assembly1":"polishing_dir1"}' '{"assembly2"="polishing_dir2"}' ...\r +  --postpolish-assemblies POSTPOLISH_ASSEMBLIES [POSTPOLISH_ASSEMBLIES ...]\r +                        Dictionary with assemblies for which postpolishing steps need to be run but that are not assembled and base step for the\r +                        directory where the first postpolishing step should be run. Example: '{"assembly1":"s04.1_p03.1"}'\r +                        '{"assembly2":"s04.2_p03.2"}' ...\r +  --curated-assemblies CURATED_ASSEMBLIES [CURATED_ASSEMBLIES ...]\r +                        Dictionary with assemblies that have already been curated. Evaluations and read alignment will be performed. Example:\r +                        '{"assembly1":"s04.1_p03.1"}' '{"assembly2":"s04.2_p03.2"}' ...\r +  --hic-dir HIC_DIR     Directory where the HiC fastqs are stored. Default None\r +\r +Outputs:\r +  --pipeline-workdir PIPELINE_WORKDIR\r +                        Base directory for the pipeline run. 
Default /scratch_isilon/groups/assembly/jgomez/test_CLAWSv2/ilErePala/assembly/\r + --filtlong-dir FILTLONG_DIR\r + Directory to process the ONT reads with filtlong. Default s02.1_p01.1_Filtlong\r + --concat-hic-dir CONCAT_HIC_DIR\r + Directory to concatenate the HiC reads. Default s02.3_p01.1_Concat_HiC\r + --flye-dir FLYE_DIR Directory to run flye. Default s03.1_p02.1_flye/\r + --nextdenovo-dir NEXTDENOVO_DIR\r + Directory to run nextdenovo. Default s03.2_p02.1_nextdenovo/\r + --flye-polishing-dir POLISH_FLYE_DIR\r + Directory to polish the flye assembly. Default s04.1_p03.1_polishing/\r + --nextdenovo-polishing-dir POLISH_NEXTDENOVO_DIR\r + Directory to run nextdenovo. Default s04.2_p03.2_polishing/\r + --eval-dir eval_dir Base directory for the evaluations. Default evaluations/\r + --stats-out stats_out\r + Path to the file with the final statistics.\r + --hic-qc-dir hic_qc_dir\r + Directory to run the hic_qc. Default hic_qc/\r +\r +Filtlong:\r + --filtlong-minlen filtlong_minlen\r + Minimum read length to use with Filtlong. Default 1000\r + --filtlong-min-mean-q filtlong_min_mean_q\r + Minimum mean quality to use with Filtlong. Default 80\r + --filtlong-opts filtlong_opts\r + Extra options to run Filtlong (eg. -t 4000000000)\r +\r +Kraken2:\r + --kraken2-db kraken2_db\r + Database to be used for running Kraken2. Default None\r + --kraken2-kmer kraken2_kmers\r + Database to be used for running Kraken2. Default None\r + --kraken2-opts additional_kraken2_opts\r + Optional parameters for the rule Kraken2. Default\r + --kraken2-cores kraken2_threads\r + Number of threads to run the Kraken2 step. Default 16\r +\r +Trim_Galore:\r + --trim-galore-opts trim_galore_opts\r + Optional parameters for the rule trim_galore. Default --max_n 0 --gzip -q 20 --paired --retain_unpaired\r + --trim-Illumina-cores Trim_Illumina_cores\r + Number of threads to run the Illumina trimming step. Default 8\r +\r +Flye:\r + --flye-cores flye_cores\r + Number of threads to run FLYE. 
Default 128\r + --flye-polishing-iterations flye_pol_it\r + Number of polishing iterations to use with FLYE. Default 2\r + --other-flye-opts other_flye_opts\r + Additional options to run Flye. Default --scaffold\r +\r +Nextdenovo:\r + --nextdenovo-cores nextdenovo_cores\r + Number of threads to run nextdenovo. Default 2\r + --nextdenovo-jobtype nextdenovo_type\r + Job_type for nextdenovo. Default slurm\r + --nextdenovo-task nextdenovo_task\r + Task need to run. Default all\r + --nextdenovo-rewrite nextdenovo_rewrite\r + Overwrite existing directory. Default yes\r + --nextdenovo-parallel_jobs nextdenovo_parallel_jobs\r + Number of tasks used to run in parallel. Default 50\r + --nextdenovo-minreadlen nextdenovo_minreadlen\r + Filter reads with length < minreadlen. Default 1k\r + --nextdenovo-seeddepth nextdenovo_seeddepth\r + Expected seed depth, used to calculate seed_cutoff, co-use with genome_size, you can try to set it 30-45 to get a better\r + assembly result. Default 45\r + --nextdenovo-seedcutoff nextdenovo_seedcutoff\r + Minimum seed length, <=0 means calculate it automatically using bin/seq_stat. Default 0\r + --nextdenovo-blocksize nextdenovo_blocksize\r + Block size for parallel running, split non-seed reads into small files, the maximum size of each file is blocksize. Default 1g\r + --nextdenovo-pa-correction nextdenovo_pa_correction\r + number of corrected tasks used to run in parallel, each corrected task requires ~TOTAL_INPUT_BASES/4 bytes of memory usage,\r + overwrite parallel_jobs only for this step. Default 100\r + --nextdenovo-minimap_raw nextdenovo_minimap_raw\r + minimap2 options, used to find overlaps between raw reads, see minimap2-nd for details. Default -t 30\r + --nextdenovo-minimap_cns nextdenovo_minimap_cns\r + minimap2 options, used to find overlaps between corrected reads. Default -t 30\r + --nextdenovo-minimap_map nextdenovo_minimap_map\r + minimap2 options, used to map reads back to the assembly. 
Default -t 30 --no-kalloc\r + --nextdenovo-sort nextdenovo_sort\r + sort options, see ovl_sort for details. Default -m 400g -t 20\r + --nextdenovo-correction_opts nextdenovo_correction_opts\r + Correction options. Default -p 30 -dbuf\r + --nextdenovo-nextgraph_opt nextdenovo_nextgraph_opt\r + nextgraph options, see nextgraph for details. Default -a 1\r +\r +Hypo:\r + --sr-cov ill_cov Approximate short read coverage for hypo. Default 0\r + --hypo-proc hypo_processes\r + Number of contigs to be processed in parallel by HyPo. Default 6\r + --hypo-no-lr Set this to false if you don't want to run hypo with long reads. Default True\r + --hypo-opts hypo_opts\r + Additional options to run Hypo. Default None\r +\r +Purge_dups:\r + --purgedups-cores purgedups_cores\r + Number of threads to run purgedups. Default 8\r + --purgedups-calcuts-opts calcuts_opts\r + Adjusted values to run calcuts for purgedups. Default None\r +\r +Scaffold_with_10X:\r + --tigmint-cores tigmint_cores\r + Number of threads to run the 10X scaffolding step. Default 12\r + --tigmint-opts tigmint_opts\r + Adjusted values to run the scaffolding with 10X reads. Default None\r +\r +HiC:\r + --hic-qc Give this option if only QC of the HiC data needs to be done.\r + --no-pretext Give this option if you do not want to generate the pretext file\r + --assembly-qc assembly_qc\r + Path to the assembly to be used to perform the QC of the HiC reads.\r + --yahs-cores yahs_cores\r + Number of threads to run YAHS. Default 48\r + --yahs-mq yahs_mq Mapping quality to use when running yahs. Default 40\r + --yahs-opts yahs_opts\r + Additional options to give to YAHS. Default\r + --hic-map-opts hic_map_opts\r + Options to use with bwa mem when aligning the HiC reads. Default -5SP -T0\r + --mq mq [mq ...] Mapping qualities to use for processing the hic mappings. 
Default [0, 40]\r + --hic-qc-assemblylen hic_qc_assemblylen\r + Length of the assembly to be used for HiC QC\r + --blast-cores blast_cores\r + Number of threads to run blast with the HiC unmapped reads. Default 8\r + --hic-blastdb blastdb\r + BLAST Database to use to classify the hic unmapped reads. Default /scratch_isilon/groups/assembly/data/blastdbs\r + --hic-readsblast hic_readsblast\r + Number of unmapped hic reads to classify with blast. Default 100\r +\r +Finalize:\r + --no-final-evals If specified, do not run evaluations on final assemblies. Default True\r + --busco-lin busco_lineage\r + Path to the lineage directory to run Busco with. Default None\r + --merqury-db merqury_db\r + Meryl database. Default None\r + --merqury-plot-opts merqury_plot_opts\r + Meryl database. Default None\r + --meryl-k meryl_k Merqury plot additional options, for example " -m 200 -n 6000|". Default None\r + --meryl-threads meryl_threads\r + Number of threads to run meryl and merqury. Default 4\r + --meryl-reads meryl_reads [meryl_reads ...]\r + Type of reads to be used to build the meryldb. Default ont illumina\r +\r +Wildcards:\r + --ont-list ONT_wildcards\r + List with basename of the ONT fastqs that will be used. Default None\r + --illumina-list illumina_wildcards\r + List with basename of the illumina fastqs. Default None\r + --r10X-list r10X_wildcards\r + List with basename of the raw 10X fastqs. Default None\r + --hic-list hic_wildcards\r + List with basename of the raw hic fastqs. Default None\r +```\r +# Changes made to v2.2: \r +\r +1. General: \r +\r + Now default read_type is nano-hq \r +\r +2. Rule trim_galore: \r +\r + "--max_n 0" has been added to the default behaviour of "--trim-galore-opts" \r +\r +3. Meryl: \r +\r + New option "--meryl-reads" has been added to the config. Default is "Illumina ont" to build the meryl database using both types of reads, it can be changed to one or the other \r +\r +4. 
Merqury: \r +\r + Option "--merqury-plot-opts" has been added to config file. It can be used to modify the x and y axis maximum values (eg. --merqury-plot-opts " -m 200 -n 6000") \r +\r +5. Genomescope: \r +\r + "-m 10000" is now part of the default behavior of "--genomescope-opts" \r +\r +6. Hic_statistics: \r +\r + This is now running for each assembly and mq for which a pretext file is generated \r +\r +7. Assembly inputs for different steps: \r +\r + a. "--assembly-in" to start after assembly step (eg. Evaluation, polishing, purging and scaffolding) \r +\r + b. "--postpolish-assemblies" to start after polishing step (eg. Evaluation, purging and scaffolding) \r +\r + c. "--curated-assemblies" to start after scaffolding step (eg. Evaluation and pretext generation) \r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.567.2" ; + schema1:isBasedOn "https://github.com/cnag-aat/assembly_pipeline.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CLAWS (CNAG's long-read assembly workflow in Snakemake)" ; + schema1:sdDatePublished "2024-08-05 10:25:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/567/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3879 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-02-02T12:24:07Z" ; + schema1:dateModified "2024-02-02T12:24:51Z" ; + schema1:description """# CLAWS (CNAG's Long-read Assembly Workflow in Snakemake)\r + Snakemake Pipeline used for de novo genome assembly @CNAG. It has been developed for Snakemake v6.0.5.\r +\r +It accepts Oxford Nanopore Technologies (ONT) reads, PacBio HFi reads, illumina paired-end data, illumina 10X data and Hi-C reads. It does the preprocessing of the reads, assembly, polishing, purge_dups, scaffodling and different evaluation steps. 
By default it will preprocess the reads, run Flye + Hypo + purge_dups + yahs and evaluate the resulting assemblies with BUSCO, MERQURY, Nseries and assembly_stats. It needs a config file and a spec file (json file with instructions on which resources should slurm use for each of the jobs). Both files are created by the script "create_config_assembly.py" that is located in the bin directory. To check all the options accepted by the script, do:\r +\r +```\r +bin/create_config_assembly.py -h\r +```\r +\r +Once the 2 config files are produced, the pipeline can be launched using snakemake like this:\r +\r +``snakemake --notemp -j 999 --snakefile assembly_pipeline.smk --configfile assembly.config --is --cluster-conf assembly.spec --use-conda --use-envmodules``\r +\r +If you are using an HPC cluster, please check how should you run snakemake to launch the jobs to the cluster. \r +\r +Most of the tools used will be installed via conda using the environments of the "envs" directory after providing the "--use-conda" option to snakemake. However, a few tools cannot be installed via conda and will have to be available in your PATH, or as a module in the cluster. Those tools are:\r +\r +- NextDenovo/2.5.0\r +- NextPolish/1.4.1\r +\r +# How to provide input data:\r +\r +There are several ways of providing the reads.\r +\r +### 1- ONT reads\r +\r +1.1 Using the option ``--ont-dir {DIR}`` in create_config_assembly.py.\r +\r +If you do so, it will look for all the files in the directory that end in '.fastq.gz' and will add the basenames to "ONT_wildcards". These wildcards will be processed by the pipeline that will:\r +\r +- Concatenate all the files into a single file\r +\r +- Run filtlong with the default or specified parameters. \r +\r +- Use the resulting file for assembly, polishing and/or purging.\r +\r +You can also specify the basenames of the files that you want to use with the ``--ont-list `` option. 
In this case, the pipeline will use the wildcards that you're providing instead of merging all the files in the directory.\r +\r +1.2 Using the option ```--ont-reads {FILE}``` in create_config_assembly.py.\r +\r +If you do so, it will consider that you already have all the reads in one file and will: \r +\r +- Run filtlong with the default or specified parameters.\r +\r +- Use the resulting file for assembly, polishing and/or purging.\r +\r +1.3 Using the option ```--ont-filt {FILE}```. It will use this file as the output from filtlong. Hence, it will skip the preprocessing steps and directly use it for assembly, polishing and/or purging. \r +\r +\r +\r +### 2-Illumina 10X-linked data\r +\r +2.1 Using the ```--raw-10X {DIR:list}``` option. \r +\r +Dictionary with 10X raw read directories, it has to be the mkfastq dir. You must specify as well the sampleIDs from this run. Example: '{"mkfastq- dir":"sample1,sample2,sample3"}'...\r +\r +It will take each basename in the list to get the fastqs from the corresponding directory and run longranger on each sample. Afterwards, it will build meryldbs for each "barcoded" file. Finally, it will concatenate all the meryldbs and "barcoded" files. Resulting "barcoded" file will be used for polishing. \r +\r +2.2 Using the ``--processed-10X {DIR}`` parameter. \r +\r +This directory can already be there or be produced by the pipeline as described in step 2.1. Once all the "barcoded" fastq files are there, meryldbs will be built for each "barcoded" file. Finally, it will concatenate all the meryldbs and "barcoded" files. Resulting "barcoded" file will be used for polishing. \r +\r +2.3 Using the ``--10X`` option. \r +\r +The argument to this is the path to the concatenated ".barcoded" file that needs to be used for polishing. If the pre-concatenated files are not given, meryldbs will be directly generated with this file, but it may run out of memory. 
\r +\r +### 3- Illumina short-read data\r +\r +3.1 Using the ``--illumina-dir {DIR}`` option, that will look for all the files in the directory that end in '.1.fastq.gz' and will add the basenames to "illumina_wildcards". These wildcards will be processed by the pipeline that will: \r +\r +- Trim adaptors with Trimgalore\r +\r +- Concatenate all the trimmed *.1.fastq.gz and the *2.fastq.gz in one file per pair. \r +\r +- The resulting reads will be used for building meryldbs and polishing. \r +\r +3.2 Using the ``--processed-illumina`` option. If the directory exists and contains files, the pipeline will look for all the files in the directory that end in '.1.fastq.gz' and will add the basenames to "illumina_wildcards". These wildcards will be processed by the pipeline that will:\r +\r +- Concatenate all the trimmed *.1.fastq.gz and the *2.fastq.gz in one file per pair. \r +\r +- The resulting reads will be used for building meryldbs and polishing. \r +\r +3.3 Using the ``--pe1 {FILE} and --pe2 {FILE}`` options. That will consider that these are the paired files containing all the illumina reads ready to be used and will build meryldbs and polish with them.\r +\r +### 4- Input assemblies\r +\r +If you want to polish an already assembled assembly, you can give it to the pipeline by using the option ``--assembly-in ASSEMBLY_IN [ASSEMBLY_IN ...]\r + Dictionary with assemblies that need to be polished but not assembled and directory where they should\r + be polished. Example: '{"assembly1":"polishing_dir1"}' '{"assembly2"="polishing_dir2"}' ...``\r + \r +If you want to start the pipeline after polishing on an already existing assembly, you can give it to the pipeline by using the option ``--postpolish-assemblies POSTPOLISH_ASSEMBLIES [POSTPOLISH_ASSEMBLIES ...]\r + Dictionary with assemblies for which postpolishing steps need to be run but that are not assembled and\r + base step for the directory where the first postpolishing step should be run. 
Example:\r + '{"assembly1":"s04.1_p03.1"}' '{"assembly2"="s04.2_p03.2"}' ...``\r +\r +To evaluate and produce the final pretext file on a curated assembly, use ``--curated-assemblies CURATED_ASSEMBLIES [CURATED_ASSEMBLIES ...]\r + Dictionary with assemblies that have already been curated. Evaluations and read alignment will be perforder. Example:\r + '{"assembly1":"s04.1_p03.1"}' '{"assembly2":"s04.2_p03.2"}' ...``\r +\r +\r +\r +# Description of implemented rules\r +\r +1- Preprocessing:\r + \r +- **Read concatenation:**\r +\r +``zcat {input.fastqs} | pigz -p {threads} -c > {output.final_fastq}``\r + \r +- **Longranger for 10X reads**: it uses the Longranger version installed in the path specified in the configfile\r +\r +``longranger basic --id={params.sample} --sample={params.sample} --fastqs={input.mkfastq_dir} --localcores={threads}``\r +\r +- **Trimgalore:** By default it gives the ``--max_n 0 --gzip -q 20 --paired --retain_unpaired`` options, but it can be changed with the ``--trim-galore-opts `` argument. \r +\r +``trim_galore -j {threads} {params.opts} {input.read1} {input.read2}``\r +\r +- **Filtlong:** it uses the Filtlong version installed in the path specified in the configfile. By default it gives the min_length and min_mean_q parameters, but extra parameters can be added with the ``--filtlong-opts`` option.\r +\r +``filtlong --min_length {params.minlen} --min_mean_q {params.min_mean_q} {params.opts} {input.reads} | pigz -p {threads} -c > {output.outreads}``\r + \r +- **Build meryldb**: it uses the merqury conda environment specified in the configfile. It takes as argument the `--mery-k` value that needs to be estimated first for the genome size. It can run either on the illumina reads, the ont reads or both, default behaviour is both. 
\r +\r +``meryl k={params.kmer} count output {output.out_dir} {input.fastq}``\r + \r +- Concat meryldbs: with the merqury conda environment specified in the configfile\r +\r +``meryl union-sum output {output.meryl_all} {input.input_run}``\r + \r +- **Align ONT (Minimap2):** it aligns the reads using minimap2 and outputs the alignment either in bam or in paf.gz formats. It uses the minimap2 conda environment specified in the configfile\r +\r +``minimap2 -{params.align_opts} -t {threads} {input.genome} {input.reads} ``\r +\r +- **Align Illumina (BWA-MEM):** it aligns the reads with BWA-mem and outputs a bam file\r +\r +``bwa mem -Y {params.options} -t {threads} {input.genome} {input.reads} | samtools view -Sb - | samtools sort -@ {threads} -o {output.mapping} -``\r +\r +2- Assembly\r +\r +- **Flye (default)**. It is run by default, if you don't want the pipeline to run it, you can give `--no-flye` option when creating the config. It uses the conda environment specified in the config. By default it is set to 2 polishing iterations and gives the genome-size estimate that has been given when creating the config. Extra options can be provided with the `--flye-opts`.\r +\r +``flye --{params.readtype} {input.reads} -o {params.outdir}out -t {threads} -i {params.pol_iterations} {params.other_flye_opts} ``\r + \r +- **Nextdenovo (if ``run-nextdenovo``):** It uses the cluster module specified in the config. If nextdenovo option is turned on, the create_config script will also create the nextdenovo config file. Check the create_config help to see which options can be modified on it. \r +\r +``nextDenovo {input.config}``\r +\r +3- Polishing\r +\r +- **Hypo (default):** It is the polisher that the pipeline uses by default, it can be turned off specifying ``--no-hypo`` when creating the config. If selected, the reads will be aligned in previous rules and then hypo will be run, it requires illumina data. It uses the conda environment specified in the config. 
\r +\r +``hypo -r @short_reads.list.txt -d {input.genome} -b {input.sr_bam} -c {coverage} -s {params.genome_size} -B {input.lr_bam} -t {threads} -o {output.polished} -p {params.proc} {params.opts} ``\r + \r +- **Nextpolish ont (if turned on):** to run nextpolish with ONT reads, specify ``--nextpolish-ont-rounds`` and the number of rounds you want to run of it. \r +\r +``"python /apps/NEXTPOLISH/1.3.1/lib/nextpolish2.py -g {input.genome} -p {threads} -l lgs.fofn -r {params.lrtype} > {output.polished}``\r + \r +- **Nextpolish illumina (if turned on):** to run nextpolish with ONT reads, specify ``--nextpolish-ill-rounds`` and the number of rounds you want to run of it. \r +\r +``"python /apps/NEXTPOLISH/1.3.1/lib/nextpolish1.py -g {input.genome} -p {threads} -s {input.bam} -t {params.task} > {output.polished}``\r +\r +4- Post-assembly\r +\r +- **Purge_dups (by default):** select ``--no-purgedups`` if you don't want to run it. If no manual cutoffs are given, it'll run purgedups with automatic cutoffs and then will rerun it selecting the mean cutoff as 0.75\\*cov. It uses the version installed in the cluster module specified in the config. \r +\r +5- Evaluations\r + \r +- **Merqury:** It runs on each 'terminal' assembly. This is, the base assembly and the resulting assembly from each branch of the pipeline. \r + \r +- **Busco:** It can be run only in the terminal assemblies or on all the assemblies produced by the pipeline. It uses the conda environment specified in the config as well as the parameters specified. \r + \r +- **Nseries:** This is run during the *finalize* on all the assemblies that are evaluated. After it, that rule combines the statistics produced by all the evaluation rules. 
\r +\r +# Description of all options\r +```\r + bin/create_config_assembly.py -h\r +usage: create_configuration_file [-h] [--configFile configFile] [--specFile specFile] [--ndconfFile ndconfFile] [--concat-cores concat_cores]\r + [--genome-size genome_size] [--lr-type lr_type] [--basename base_name] [--species species] [--keep-intermediate]\r + [--preprocess-lr-step PREPROCESS_ONT_STEP] [--preprocess-10X-step PREPROCESS_10X_STEP]\r + [--preprocess-illumina-step PREPROCESS_ILLUMINA_STEP] [--preprocess-hic-step PREPROCESS_HIC_STEP]\r + [--flye-step FLYE_STEP] [--no-flye] [--nextdenovo-step NEXTDENOVO_STEP] [--run-nextdenovo]\r + [--nextpolish-cores nextpolish_cores] [--minimap2-cores minimap2_cores] [--bwa-cores bwa_cores]\r + [--hypo-cores hypo_cores] [--pairtools-cores pairtools_cores] [--busco-cores busco_cores]\r + [--nextpolish-ont-rounds nextpolish_ont_rounds] [--nextpolish-ill-rounds nextpolish_ill_rounds]\r + [--hypo-rounds hypo_rounds] [--longranger-cores longranger_cores] [--longranger-path longranger_path]\r + [--genomescope-opts genomescope_additional] [--no-purgedups] [--ploidy ploidy] [--run-tigmint] [--run-kraken2]\r + [--no-yahs] [--scripts-dir SCRIPTS_DIR] [--ont-reads ONT_READS] [--ont-dir ONT_DIR] [--ont-filt ONT_FILTERED]\r + [--pe1 PE1] [--pe2 PE2] [--processed-illumina PROCESSED_ILLUMINA] [--raw-10X RAW_10X [RAW_10X ...]]\r + [--processed-10X PROCESSED_10X] [--10X R10X] [--illumina-dir ILLUMINA_DIR]\r + [--assembly-in ASSEMBLY_IN [ASSEMBLY_IN ...]]\r + [--postpolish-assemblies POSTPOLISH_ASSEMBLIES [POSTPOLISH_ASSEMBLIES ...]]\r + [--curated-assemblies CURATED_ASSEMBLIES [CURATED_ASSEMBLIES ...]] [--hic-dir HIC_DIR]\r + [--pipeline-workdir PIPELINE_WORKDIR] [--filtlong-dir FILTLONG_DIR] [--concat-hic-dir CONCAT_HIC_DIR]\r + [--flye-dir FLYE_DIR] [--nextdenovo-dir NEXTDENOVO_DIR] [--flye-polishing-dir POLISH_FLYE_DIR]\r + [--nextdenovo-polishing-dir POLISH_NEXTDENOVO_DIR] [--eval-dir eval_dir] [--stats-out stats_out]\r + [--hic-qc-dir hic_qc_dir] 
[--filtlong-minlen filtlong_minlen] [--filtlong-min-mean-q filtlong_min_mean_q]\r + [--filtlong-opts filtlong_opts] [--kraken2-db kraken2_db] [--kraken2-kmer kraken2_kmers]\r + [--kraken2-opts additional_kraken2_opts] [--kraken2-cores kraken2_threads] [--trim-galore-opts trim_galore_opts]\r + [--trim-Illumina-cores Trim_Illumina_cores] [--flye-cores flye_cores] [--flye-polishing-iterations flye_pol_it]\r + [--other-flye-opts other_flye_opts] [--nextdenovo-cores nextdenovo_cores] [--nextdenovo-jobtype nextdenovo_type]\r + [--nextdenovo-task nextdenovo_task] [--nextdenovo-rewrite nextdenovo_rewrite]\r + [--nextdenovo-parallel_jobs nextdenovo_parallel_jobs] [--nextdenovo-minreadlen nextdenovo_minreadlen]\r + [--nextdenovo-seeddepth nextdenovo_seeddepth] [--nextdenovo-seedcutoff nextdenovo_seedcutoff]\r + [--nextdenovo-blocksize nextdenovo_blocksize] [--nextdenovo-pa-correction nextdenovo_pa_correction]\r + [--nextdenovo-minimap_raw nextdenovo_minimap_raw] [--nextdenovo-minimap_cns nextdenovo_minimap_cns]\r + [--nextdenovo-minimap_map nextdenovo_minimap_map] [--nextdenovo-sort nextdenovo_sort]\r + [--nextdenovo-correction_opts nextdenovo_correction_opts] [--nextdenovo-nextgraph_opt nextdenovo_nextgraph_opt]\r + [--sr-cov ill_cov] [--hypo-proc hypo_processes] [--hypo-no-lr] [--hypo-opts hypo_opts]\r + [--purgedups-cores purgedups_cores] [--purgedups-calcuts-opts calcuts_opts] [--tigmint-cores tigmint_cores]\r + [--tigmint-opts tigmint_opts] [--hic-qc] [--no-pretext] [--assembly-qc assembly_qc] [--yahs-cores yahs_cores]\r + [--yahs-mq yahs_mq] [--yahs-opts yahs_opts] [--hic-map-opts hic_map_opts] [--mq mq [mq ...]]\r + [--hic-qc-assemblylen hic_qc_assemblylen] [--blast-cores blast_cores] [--hic-blastdb blastdb]\r + [--hic-readsblast hic_readsblast] [--no-final-evals] [--busco-lin busco_lineage] [--merqury-db merqury_db]\r + [--merqury-plot-opts merqury_plot_opts] [--meryl-k meryl_k] [--meryl-threads meryl_threads]\r + [--meryl-reads meryl_reads [meryl_reads ...]] 
[--ont-list ONT_wildcards] [--illumina-list illumina_wildcards]\r + [--r10X-list r10X_wildcards] [--hic-list hic_wildcards]\r +\r +Create a configuration json file for the assembly pipeline.\r +\r +options:\r + -h, --help show this help message and exit\r +\r +General Parameters:\r + --configFile configFile\r + Configuration JSON to be generated. Default assembly.config\r + --specFile specFile Cluster specifications JSON fileto be generated. Default assembly.spec\r + --ndconfFile ndconfFile\r + Name pf the nextdenovo config file. Default nextdenovo.config\r + --concat-cores concat_cores\r + Number of threads to concatenate reads and to run filtlong. Default 4\r + --genome-size genome_size\r + Approximate genome size. Example: 615m or 2.6g. Default None\r + --lr-type lr_type Type of long reads (options are flye read-type options). Default nano-hq\r + --basename base_name Base name for the project. Default None\r + --species species Name of the species to be assembled. Default None\r + --keep-intermediate Set this to True if you do not want intermediate files to be removed. Default False\r + --preprocess-lr-step PREPROCESS_ONT_STEP\r + Step for preprocessing long-reads. Default 02.1\r + --preprocess-10X-step PREPROCESS_10X_STEP\r + Step for preprocessing 10X reads. Default 02.2\r + --preprocess-illumina-step PREPROCESS_ILLUMINA_STEP\r + Step for preprocessing illumina reads. Default 02.2\r + --preprocess-hic-step PREPROCESS_HIC_STEP\r + Step for preprocessing hic reads. Default 02.3\r + --flye-step FLYE_STEP\r + Step for running flye. Default 03.1\r + --no-flye Give this option if you do not want to run Flye.\r + --nextdenovo-step NEXTDENOVO_STEP\r + Step for running nextdenovo. Default 03.2\r + --run-nextdenovo Give this option if you do want to run Nextdenovo.\r + --nextpolish-cores nextpolish_cores\r + Number of threads to run the nextpolish step. Default 24\r + --minimap2-cores minimap2_cores\r + Number of threads to run the alignment with minimap2. 
Default 32\r + --bwa-cores bwa_cores\r + Number of threads to run the alignments with BWA-Mem2. Default 16\r + --hypo-cores hypo_cores\r + Number of threads to run the hypo step. Default 24\r + --pairtools-cores pairtools_cores\r + Number of threads to run the pairtools step. Default 100\r + --busco-cores busco_cores\r + Number of threads to run BUSCO. Default 32\r + --nextpolish-ont-rounds nextpolish_ont_rounds\r + Number of rounds to run the Nextpolish with ONT step. Default 0\r + --nextpolish-ill-rounds nextpolish_ill_rounds\r + Number of rounds to run the Nextpolish with illumina step. Default 0\r + --hypo-rounds hypo_rounds\r + Number of rounds to run the Hypostep. Default 1\r + --longranger-cores longranger_cores\r + Number of threads to run longranger. Default 16\r + --longranger-path longranger_path\r + Path to longranger executable. Default /scratch/project/devel/aateam/src/10X/longranger-2.2.2\r + --genomescope-opts genomescope_additional\r + Additional options to run Genomescope2 with. Default -m 10000\r + --no-purgedups Give this option if you do not want to run Purgedups.\r + --ploidy ploidy Expected ploidy. Default 2\r + --run-tigmint Give this option if you want to run the scaffolding with 10X reads step.\r + --run-kraken2 Give this option if you want to run Kraken2 on the input reads.\r + --no-yahs Give this option if you do not want to run yahs.\r +\r +Inputs:\r + --scripts-dir SCRIPTS_DIR\r + Directory with the different scripts for the pipeline. Default\r + /software/assembly/pipelines/Assembly_pipeline/CLAWSv2.2/bin/../scripts/\r + --ont-reads ONT_READS\r + File with all the ONT reads. Default None\r + --ont-dir ONT_DIR Directory where the ONT fastqs are stored. Default None\r + --ont-filt ONT_FILTERED\r + File with the ONT reads after running filtlong on them. 
Default None\r + --pe1 PE1 File with the illumina paired-end fastqs, already trimmed, pair 1.\r + --pe2 PE2 File with the illumina paired-end fastqs, already trimmed, pair 2.\r + --processed-illumina PROCESSED_ILLUMINA\r + Directory to Processed illumina reads. Already there or to be produced by the pipeline.\r + --raw-10X RAW_10X [RAW_10X ...]\r + Dictionary with 10X raw read directories, it has to be the mkfastq dir. You must specify as well the sampleIDs from this run.\r + Example: '{"mkfastq-dir":"sample1,sample2,sample3"}'...\r + --processed-10X PROCESSED_10X\r + Directory to Processed 10X reads. Already there or to be produced by the pipeline.\r + --10X R10X File with barcoded 10X reads in fastq.gz format, concatenated.\r + --illumina-dir ILLUMINA_DIR\r + Directory where the raw illumina fastqs are stored. Default None\r + --assembly-in ASSEMBLY_IN [ASSEMBLY_IN ...]\r + Dictionary with assemblies that need to be polished but not assembled and directory where they should be polished. Example:\r + '{"assembly1":"polishing_dir1"}' '{"assembly2"="polishing_dir2"}' ...\r + --postpolish-assemblies POSTPOLISH_ASSEMBLIES [POSTPOLISH_ASSEMBLIES ...]\r + Dictionary with assemblies for whic postpolishing steps need to be run but that are not assembled and base step for the\r + directory where the first postpolishing step should be run. Example: '{"assembly1":"s04.1_p03.1"}'\r + '{"assembly2":"s04.2_p03.2"}' ...\r + --curated-assemblies CURATED_ASSEMBLIES [CURATED_ASSEMBLIES ...]\r + Dictionary with assemblies that have already been curated. Evaluations and read alignment will be perforder. Example:\r + '{"assembly1":"s04.1_p03.1"}' '{"assembly2":"s04.2_p03.2"}' ...\r + --hic-dir HIC_DIR Directory where the HiC fastqs are stored. Default None\r +\r +Outputs:\r + --pipeline-workdir PIPELINE_WORKDIR\r + Base directory for the pipeline run. 
Default /scratch_isilon/groups/assembly/jgomez/test_CLAWSv2/ilErePala/assembly/\r + --filtlong-dir FILTLONG_DIR\r + Directory to process the ONT reads with filtlong. Default s02.1_p01.1_Filtlong\r + --concat-hic-dir CONCAT_HIC_DIR\r + Directory to concatenate the HiC reads. Default s02.3_p01.1_Concat_HiC\r + --flye-dir FLYE_DIR Directory to run flye. Default s03.1_p02.1_flye/\r + --nextdenovo-dir NEXTDENOVO_DIR\r + Directory to run nextdenovo. Default s03.2_p02.1_nextdenovo/\r + --flye-polishing-dir POLISH_FLYE_DIR\r + Directory to polish the flye assembly. Default s04.1_p03.1_polishing/\r + --nextdenovo-polishing-dir POLISH_NEXTDENOVO_DIR\r + Directory to run nextdenovo. Default s04.2_p03.2_polishing/\r + --eval-dir eval_dir Base directory for the evaluations. Default evaluations/\r + --stats-out stats_out\r + Path to the file with the final statistics.\r + --hic-qc-dir hic_qc_dir\r + Directory to run the hic_qc. Default hic_qc/\r +\r +Filtlong:\r + --filtlong-minlen filtlong_minlen\r + Minimum read length to use with Filtlong. Default 1000\r + --filtlong-min-mean-q filtlong_min_mean_q\r + Minimum mean quality to use with Filtlong. Default 80\r + --filtlong-opts filtlong_opts\r + Extra options to run Filtlong (eg. -t 4000000000)\r +\r +Kraken2:\r + --kraken2-db kraken2_db\r + Database to be used for running Kraken2. Default None\r + --kraken2-kmer kraken2_kmers\r + Database to be used for running Kraken2. Default None\r + --kraken2-opts additional_kraken2_opts\r + Optional parameters for the rule Kraken2. Default\r + --kraken2-cores kraken2_threads\r + Number of threads to run the Kraken2 step. Default 16\r +\r +Trim_Galore:\r + --trim-galore-opts trim_galore_opts\r + Optional parameters for the rule trim_galore. Default --max_n 0 --gzip -q 20 --paired --retain_unpaired\r + --trim-Illumina-cores Trim_Illumina_cores\r + Number of threads to run the Illumina trimming step. Default 8\r +\r +Flye:\r + --flye-cores flye_cores\r + Number of threads to run FLYE. 
Default 128\r + --flye-polishing-iterations flye_pol_it\r + Number of polishing iterations to use with FLYE. Default 2\r + --other-flye-opts other_flye_opts\r + Additional options to run Flye. Default --scaffold\r +\r +Nextdenovo:\r + --nextdenovo-cores nextdenovo_cores\r + Number of threads to run nextdenovo. Default 2\r + --nextdenovo-jobtype nextdenovo_type\r + Job_type for nextdenovo. Default slurm\r + --nextdenovo-task nextdenovo_task\r + Task need to run. Default all\r + --nextdenovo-rewrite nextdenovo_rewrite\r + Overwrite existing directory. Default yes\r + --nextdenovo-parallel_jobs nextdenovo_parallel_jobs\r + Number of tasks used to run in parallel. Default 50\r + --nextdenovo-minreadlen nextdenovo_minreadlen\r + Filter reads with length < minreadlen. Default 1k\r + --nextdenovo-seeddepth nextdenovo_seeddepth\r + Expected seed depth, used to calculate seed_cutoff, co-use with genome_size, you can try to set it 30-45 to get a better\r + assembly result. Default 45\r + --nextdenovo-seedcutoff nextdenovo_seedcutoff\r + Minimum seed length, <=0 means calculate it automatically using bin/seq_stat. Default 0\r + --nextdenovo-blocksize nextdenovo_blocksize\r + Block size for parallel running, split non-seed reads into small files, the maximum size of each file is blocksize. Default 1g\r + --nextdenovo-pa-correction nextdenovo_pa_correction\r + number of corrected tasks used to run in parallel, each corrected task requires ~TOTAL_INPUT_BASES/4 bytes of memory usage,\r + overwrite parallel_jobs only for this step. Default 100\r + --nextdenovo-minimap_raw nextdenovo_minimap_raw\r + minimap2 options, used to find overlaps between raw reads, see minimap2-nd for details. Default -t 30\r + --nextdenovo-minimap_cns nextdenovo_minimap_cns\r + minimap2 options, used to find overlaps between corrected reads. Default -t 30\r + --nextdenovo-minimap_map nextdenovo_minimap_map\r + minimap2 options, used to map reads back to the assembly. 
Default -t 30 --no-kalloc\r + --nextdenovo-sort nextdenovo_sort\r + sort options, see ovl_sort for details. Default -m 400g -t 20\r + --nextdenovo-correction_opts nextdenovo_correction_opts\r + Correction options. Default -p 30 -dbuf\r + --nextdenovo-nextgraph_opt nextdenovo_nextgraph_opt\r + nextgraph options, see nextgraph for details. Default -a 1\r +\r +Hypo:\r + --sr-cov ill_cov Approximate short read coverage for hypo Default 0\r + --hypo-proc hypo_processes\r + Number of contigs to be processed in parallel by HyPo. Default 6\r + --hypo-no-lr Set this to false if you don¡t want to run hypo with long reads. Default True\r + --hypo-opts hypo_opts\r + Additional options to run Hypo. Default None\r +\r +Purge_dups:\r + --purgedups-cores purgedups_cores\r + Number of threads to run purgedups. Default 8\r + --purgedups-calcuts-opts calcuts_opts\r + Adjusted values to run calcuts for purgedups. Default None\r +\r +Scaffold_with_10X:\r + --tigmint-cores tigmint_cores\r + Number of threads to run the 10X scaffolding step. Default 12\r + --tigmint-opts tigmint_opts\r + Adjusted values to run the scaffolding with 10X reads. Default None\r +\r +HiC:\r + --hic-qc Give this option if only QC of the HiC data needs to be done.\r + --no-pretext Give this option if you do not want to generate the pretext file\r + --assembly-qc assembly_qc\r + Path to the assembly to be used perfom the QC of the HiC reads.\r + --yahs-cores yahs_cores\r + Number of threads to run YAHS. Default 48\r + --yahs-mq yahs_mq Mapping quality to use when running yahs.Default 40\r + --yahs-opts yahs_opts\r + Additional options to give to YAHS.Default\r + --hic-map-opts hic_map_opts\r + Options to use with bwa mem when aligning the HiC reads. Deafault -5SP -T0\r + --mq mq [mq ...] Mapping qualities to use for processing the hic mappings. 
Default [0, 40]\r + --hic-qc-assemblylen hic_qc_assemblylen\r + Lentgh of the assembly to be used for HiC QC\r + --blast-cores blast_cores\r + Number of threads to run blast with the HiC unmapped reads.Default 8\r + --hic-blastdb blastdb\r + BLAST Database to use to classify the hic unmapped reads. Default /scratch_isilon/groups/assembly/data/blastdbs\r + --hic-readsblast hic_readsblast\r + Number of unmapped hic reads to classify with blast. Default 100\r +\r +Finalize:\r + --no-final-evals If specified, do not run evaluations on final assemblies. Default True\r + --busco-lin busco_lineage\r + Path to the lineage directory to run Busco with. Default None\r + --merqury-db merqury_db\r + Meryl database. Default None\r + --merqury-plot-opts merqury_plot_opts\r + Meryl database. Default None\r + --meryl-k meryl_k Merqury plot additional options, for example " -m 200 -n 6000|". Default None\r + --meryl-threads meryl_threads\r + Number of threads to run meryl and merqury. Default 4\r + --meryl-reads meryl_reads [meryl_reads ...]\r + Type of reads to be used to build the meryldb. Default ont illumina\r +\r +Wildcards:\r + --ont-list ONT_wildcards\r + List with basename of the ONT fastqs that will be used. Default None\r + --illumina-list illumina_wildcards\r + List with basename of the illumina fastqs. Default None\r + --r10X-list r10X_wildcards\r + List with basename of the raw 10X fastqs. Default None\r + --hic-list hic_wildcards\r + List with basename of the raw hic fastqs. Default None\r +```\r +# Changes made to v2.2: \r +\r +1. General: \r +\r + Now default read_type is nano-hq \r +\r +2. Rule trim_galore: \r +\r + "--max_n 0" has been added to the default behaviour of "--trim-galore-opts" \r +\r +3. Meryl: \r +\r + New option "--meryl-reads" has been added to the config. Default is "Illumina ont" to build the meryl database using both type of reads, it can be changed to one or the other \r +\r +4. 
Merqury: \r +\r + Option "--merqury-plot-opts" has been added to config file. It can be used to modify the x and y axis maximum values (eg. --merqury-plot-opts " -m 200 -n 6000") \r +\r +5. Genomescope: \r +\r + "-m 10000" is now part of the default behavior of "--genomescope-opts" \r +\r +6. Hic_statistics: \r +\r + This is now running for each assembly and mq for which a pretext file is generated \r +\r +7. Assembly inputs for different steps: \r +\r + a. "--assembly-in" to start after assembly step (eg. Evaluation, polishing, purging and scaffolding) \r +\r + b. "--postpolish-assemblies" to start after polishing step (eg. Evaluation, purging and scaffolding) \r +\r + c. "--curated-assemblies" to start after scaffolding step (eg. Evaluation and pretext generation) \r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/567?version=1" ; + schema1:keywords "Bioinformatics, Genomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "CLAWS (CNAG's long-read assembly workflow in Snakemake)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/567?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """\r +This repository hosts Metabolome Annotation Workflow (MAW). The workflow takes MS2 .mzML format data files as an input in R. It performs spectral database dereplication using R Package Spectra and compound database dereplication using SIRIUS OR MetFrag . 
Final candidate selection is done in Python using RDKit and PubChemPy.""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.510.2" ; + schema1:isBasedOn "https://github.com/zmahnoor14/MAW" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Metabolome Annotation Workflow (MAW)" ; + schema1:sdDatePublished "2024-08-05 10:29:49 +0100" ; + schema1:url "https://workflowhub.eu/workflows/510/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1506 ; + schema1:creator , + , + , + , + , + ; + schema1:dateCreated "2023-08-01T14:21:50Z" ; + schema1:dateModified "2023-08-01T14:22:09Z" ; + schema1:description """\r +This repository hosts Metabolome Annotation Workflow (MAW). The workflow takes MS2 .mzML format data files as an input in R. It performs spectral database dereplication using R Package Spectra and compound database dereplication using SIRIUS OR MetFrag . Final candidate selection is done in Python using RDKit and PubChemPy.""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/510?version=1" ; + schema1:keywords "Metabolomics, Annotation, mass-spectrometry, identification, Bioinformatics, FAIR workflows, workflow, gnps, massbank, hmdb, spectra, rdkit, Cheminformatics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Metabolome Annotation Workflow (MAW)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/510?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + ; + ns1:output , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 10427 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5136 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:11Z" ; + schema1:dateModified "2024-06-11T12:55:11Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/965?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/atacseq" ; + schema1:sdDatePublished "2024-08-05 10:24:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/965/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5755 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:41Z" ; + schema1:dateModified "2024-06-11T12:54:41Z" ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/965?version=8" ; + schema1:keywords "ATAC-seq, chromatin-accessibiity" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/atacseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/965?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-23T13:33:07.958178" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Purge-duplicate-contigs-VGP6/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicate-contigs-VGP6/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:version "0.3.3" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5725 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:11Z" ; + schema1:dateModified "2024-06-11T12:55:11Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + schema1:datePublished "2021-07-26T10:22:23.309483" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:sdDatePublished "2021-10-27 15:45:10 +0100" ; + schema1:softwareVersion "v0.4.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.6" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Workflow for Spliced RNAseq data\r +**Steps:**\r +\r +* workflow_quality.cwl:\r + * FastQC (Read Quality Control)\r + * fastp (Read Trimming)\r +* STAR (Read mapping)\r +* featurecounts (transcript read counts)\r +* kallisto (transcript [pseudo]counts)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/95?version=1" ; + schema1:isBasedOn "https://git.wur.nl/unlock/cwl/-/blob/master/cwl/workflows/workflow_RNAseq_Spliced.cwl" ; + schema1:license "CC0-1.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Spliced RNAseq workflow" ; + schema1:sdDatePublished "2024-08-05 10:33:13 +0100" ; + schema1:url "https://workflowhub.eu/workflows/95/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 33079 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6752 ; + schema1:creator , + ; + schema1:dateCreated "2020-12-22T15:53:49Z" ; + schema1:dateModified "2023-01-16T13:46:30Z" ; + schema1:description """Workflow for Spliced RNAseq data\r +**Steps:**\r +\r +* workflow_quality.cwl:\r + * FastQC (Read Quality Control)\r + * fastp (Read Trimming)\r +* STAR (Read mapping)\r +* featurecounts (transcript read counts)\r +* kallisto (transcript [pseudo]counts)\r +""" ; + schema1:image ; + schema1:keywords "RNASEQ, rna, rna-seq, kallisto, STAR" ; + schema1:license "https://spdx.org/licenses/CC0-1.0" ; + schema1:name "Spliced RNAseq workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/95?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-06-18T14:17:17.498298" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/amr_gene_detection" ; + schema1:license "GPL-3.0-or-later" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "amr_gene_detection/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# metaGOflow: A workflow for marine Genomic Observatories' data analysis\r +\r +![logo](https://raw.githubusercontent.com/hariszaf/metaGOflow-use-case/gh-pages/assets/img/metaGOflow_logo_italics.png)\r +\r +\r +## An EOSC-Life project\r +\r +The workflows developed in the framework of this project are based on `pipeline-v5` of the MGnify resource.\r +\r +> This branch is a child of the [`pipeline_5.1`](https://github.com/hariszaf/pipeline-v5/tree/pipeline_5.1) branch\r +> that contains all CWL descriptions of the MGnify pipeline version 5.1.\r +\r +## Dependencies\r +\r +To run metaGOflow you need to make sure you have the following set on your computing environmnet first:\r +\r +- python3 [v 3.8+]\r +- [Docker](https://www.docker.com) [v 19.+] or [Singularity](https://apptainer.org) [v 3.7.+]/[Apptainer](https://apptainer.org) [v 1.+]\r +- [cwltool](https://github.com/common-workflow-language/cwltool) [v 3.+]\r +- [rdflib](https://rdflib.readthedocs.io/en/stable/) [v 6.+]\r +- [rdflib-jsonld](https://pypi.org/project/rdflib-jsonld/) [v 0.6.2]\r +- [ro-crate-py](https://github.com/ResearchObject/ro-crate-py) [v 0.7.0]\r +- [pyyaml](https://pypi.org/project/PyYAML/) [v 6.0]\r +- [Node.js](https://nodejs.org/) [v 10.24.0+]\r +- Available storage ~235GB for databases\r +\r +### Storage while running\r +\r +Depending on the analysis you are about to run, disk requirements vary.\r +Indicatively, you may have a look 
at the metaGOflow publication for computing resources used in various cases.\r +\r +## Installation\r +\r +### Get the EOSC-Life marine GOs workflow\r +\r +```bash\r +git clone https://github.com/emo-bon/MetaGOflow\r +cd MetaGOflow\r +```\r +\r +### Download necessary databases (~235GB)\r +\r +You can download databases for the EOSC-Life GOs workflow by running the\r +`download_dbs.sh` script under the `Installation` folder.\r +\r +```bash\r +bash Installation/download_dbs.sh -f [Output Directory e.g. ref-dbs] \r +```\r +If you have one or more already in your system, then create a symbolic link pointing\r +at the `ref-dbs` folder or at one of its subfolders/files.\r +\r +The final structure of the DB directory should be like the following:\r +\r +````bash\r +user@server:~/MetaGOflow: ls ref-dbs/\r +db_kofam/ diamond/ eggnog/ GO-slim/ interproscan-5.57-90.0/ kegg_pathways/ kofam_ko_desc.tsv Rfam/ silva_lsu/ silva_ssu/\r +````\r +\r +## How to run\r +\r +### Ensure that `Node.js` is installed on your system before running metaGOflow\r +\r +If you have root access on your system, you can run the commands below to install it:\r +\r +##### DEBIAN/UBUNTU\r +```bash\r +sudo apt-get update -y\r +sudo apt-get install -y nodejs\r +```\r +\r +##### RH/CentOS\r +```bash\r +sudo yum install rh-nodejs (e.g. rh-nodejs10)\r +```\r +\r +### Set up the environment\r +\r +#### Run once - Setup environment\r +\r +- ```bash\r + conda create -n EOSC-CWL python=3.8\r + ```\r +\r +- ```bash\r + conda activate EOSC-CWL\r + ```\r +\r +- ```bash\r + pip install cwlref-runner cwltool[all] rdflib-jsonld rocrate pyyaml\r +\r + ```\r +\r +#### Run every time\r +\r +```bash\r +conda activate EOSC-CWL\r +``` \r +\r +### Run the workflow\r +\r +- Edit the `config.yml` file to set the parameter values of your choice. 
For selecting all the steps, then set to `true` the variables in lines [2-6].\r +\r +#### Using Singularity\r +\r +##### Standalone\r +- run:\r + ```bash\r + ./run_wf.sh -s -n osd-short -d short-test-case -f test_input/wgs-paired-SRR1620013_1.fastq.gz -r test_input/wgs-paired-SRR1620013_2.fastq.gz\r + ``\r +\r +##### Using a cluster with a queueing system (e.g. SLURM)\r +\r +- Create a job file (e.g., SBATCH file)\r +\r +- Enable Singularity, e.g. module load Singularity & all other dependencies \r +\r +- Add the run line to the job file\r +\r +\r +#### Using Docker\r +\r +##### Standalone\r +- run:\r + ``` bash\r + ./run_wf.sh -n osd-short -d short-test-case -f test_input/wgs-paired-SRR1620013_1.fastq.gz -r test_input/wgs-paired-SRR1620013_2.fastq.gz\r + ```\r + HINT: If you are using Docker, you may need to run the above command without the `-s' flag.\r +\r +## Testing samples\r +The samples are available in the `test_input` folder.\r +\r +We provide metaGOflow with partial samples from the Human Metagenome Project ([SRR1620013](https://www.ebi.ac.uk/ena/browser/view/SRR1620013) and [SRR1620014](https://www.ebi.ac.uk/ena/browser/view/SRR1620014))\r +They are partial as only a small part of their sequences have been kept, in terms for the pipeline to test in a fast way. \r +\r +\r +## Hints and tips\r +\r +1. In case you are using Docker, it is strongly recommended to **avoid** installing it through `snap`.\r +\r +2. `RuntimeError`: slurm currently does not support shared caching, because it does not support cleaning up a worker\r + after the last job finishes.\r + Set the `--disableCaching` flag if you want to use this batch system.\r +\r +3. 
In case you are having errors like:\r +\r +```\r +cwltool.errors.WorkflowException: Singularity is not available for this tool\r +```\r +\r +You may run the following command:\r +\r +```\r +singularity pull --force --name debian:stable-slim.sif docker://debian:stable-sli\r +```\r +\r +## Contribution\r +\r +To make contribution to the project a bit easier, all the MGnify `conditionals` and `subworkflows` under\r +the `workflows/` directory that are not used in the metaGOflow framework, have been removed. \r +However, all the MGnify `tools/` and `utils/` are available in this repo, even if they are not invoked in the current\r +version of metaGOflow.\r +This way, we hope we encourage people to implement their own `conditionals` and/or `subworkflows` by exploiting the\r +currently supported `tools` and `utils` as well as by developing new `tools` and/or `utils`.\r +\r +\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.384.3" ; + schema1:isBasedOn "https://github.com/emo-bon/MetaGOflow.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for A workflow for marine Genomic Observatories data analysis" ; + schema1:sdDatePublished "2024-08-05 10:30:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/384/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7006 ; + schema1:creator , + ; + schema1:dateCreated "2023-05-16T20:41:04Z" ; + schema1:dateModified "2023-05-16T22:01:12Z" ; + schema1:description """# metaGOflow: A workflow for marine Genomic Observatories' data analysis\r +\r +![logo](https://raw.githubusercontent.com/hariszaf/metaGOflow-use-case/gh-pages/assets/img/metaGOflow_logo_italics.png)\r +\r +\r +## An EOSC-Life project\r +\r +The workflows developed in the framework of this project are based on `pipeline-v5` of the MGnify resource.\r +\r +> This branch is a child of the [`pipeline_5.1`](https://github.com/hariszaf/pipeline-v5/tree/pipeline_5.1) branch\r +> that contains all CWL descriptions of the MGnify pipeline version 5.1.\r +\r +## Dependencies\r +\r +To run metaGOflow you need to make sure you have the following set on your computing environmnet first:\r +\r +- python3 [v 3.8+]\r +- [Docker](https://www.docker.com) [v 19.+] or [Singularity](https://apptainer.org) [v 3.7.+]/[Apptainer](https://apptainer.org) [v 1.+]\r +- [cwltool](https://github.com/common-workflow-language/cwltool) [v 3.+]\r +- [rdflib](https://rdflib.readthedocs.io/en/stable/) [v 6.+]\r +- [rdflib-jsonld](https://pypi.org/project/rdflib-jsonld/) [v 0.6.2]\r +- [ro-crate-py](https://github.com/ResearchObject/ro-crate-py) [v 0.7.0]\r +- [pyyaml](https://pypi.org/project/PyYAML/) [v 6.0]\r +- [Node.js](https://nodejs.org/) [v 10.24.0+]\r +- Available storage ~235GB for databases\r +\r +### Storage while running\r +\r +Depending on the analysis you are about to run, disk requirements vary.\r +Indicatively, you may have a look at the metaGOflow publication for computing resources used in various cases.\r +\r +## Installation\r +\r +### Get the EOSC-Life marine GOs workflow\r +\r +```bash\r +git clone https://github.com/emo-bon/MetaGOflow\r +cd MetaGOflow\r +```\r 
+\r +### Download necessary databases (~235GB)\r +\r +You can download databases for the EOSC-Life GOs workflow by running the\r +`download_dbs.sh` script under the `Installation` folder.\r +\r +```bash\r +bash Installation/download_dbs.sh -f [Output Directory e.g. ref-dbs] \r +```\r +If you have one or more already in your system, then create a symbolic link pointing\r +at the `ref-dbs` folder or at one of its subfolders/files.\r +\r +The final structure of the DB directory should be like the following:\r +\r +````bash\r +user@server:~/MetaGOflow: ls ref-dbs/\r +db_kofam/ diamond/ eggnog/ GO-slim/ interproscan-5.57-90.0/ kegg_pathways/ kofam_ko_desc.tsv Rfam/ silva_lsu/ silva_ssu/\r +````\r +\r +## How to run\r +\r +### Ensure that `Node.js` is installed on your system before running metaGOflow\r +\r +If you have root access on your system, you can run the commands below to install it:\r +\r +##### DEBIAN/UBUNTU\r +```bash\r +sudo apt-get update -y\r +sudo apt-get install -y nodejs\r +```\r +\r +##### RH/CentOS\r +```bash\r +sudo yum install rh-nodejs (e.g. rh-nodejs10)\r +```\r +\r +### Set up the environment\r +\r +#### Run once - Setup environment\r +\r +- ```bash\r + conda create -n EOSC-CWL python=3.8\r + ```\r +\r +- ```bash\r + conda activate EOSC-CWL\r + ```\r +\r +- ```bash\r + pip install cwlref-runner cwltool[all] rdflib-jsonld rocrate pyyaml\r +\r + ```\r +\r +#### Run every time\r +\r +```bash\r +conda activate EOSC-CWL\r +``` \r +\r +### Run the workflow\r +\r +- Edit the `config.yml` file to set the parameter values of your choice. For selecting all the steps, then set to `true` the variables in lines [2-6].\r +\r +#### Using Singularity\r +\r +##### Standalone\r +- run:\r + ```bash\r + ./run_wf.sh -s -n osd-short -d short-test-case -f test_input/wgs-paired-SRR1620013_1.fastq.gz -r test_input/wgs-paired-SRR1620013_2.fastq.gz\r + ``\r +\r +##### Using a cluster with a queueing system (e.g. 
SLURM)\r +\r +- Create a job file (e.g., SBATCH file)\r +\r +- Enable Singularity, e.g. module load Singularity & all other dependencies \r +\r +- Add the run line to the job file\r +\r +\r +#### Using Docker\r +\r +##### Standalone\r +- run:\r + ``` bash\r + ./run_wf.sh -n osd-short -d short-test-case -f test_input/wgs-paired-SRR1620013_1.fastq.gz -r test_input/wgs-paired-SRR1620013_2.fastq.gz\r + ```\r + HINT: If you are using Docker, you may need to run the above command without the `-s' flag.\r +\r +## Testing samples\r +The samples are available in the `test_input` folder.\r +\r +We provide metaGOflow with partial samples from the Human Metagenome Project ([SRR1620013](https://www.ebi.ac.uk/ena/browser/view/SRR1620013) and [SRR1620014](https://www.ebi.ac.uk/ena/browser/view/SRR1620014))\r +They are partial as only a small part of their sequences have been kept, in terms for the pipeline to test in a fast way. \r +\r +\r +## Hints and tips\r +\r +1. In case you are using Docker, it is strongly recommended to **avoid** installing it through `snap`.\r +\r +2. `RuntimeError`: slurm currently does not support shared caching, because it does not support cleaning up a worker\r + after the last job finishes.\r + Set the `--disableCaching` flag if you want to use this batch system.\r +\r +3. In case you are having errors like:\r +\r +```\r +cwltool.errors.WorkflowException: Singularity is not available for this tool\r +```\r +\r +You may run the following command:\r +\r +```\r +singularity pull --force --name debian:stable-slim.sif docker://debian:stable-sli\r +```\r +\r +## Contribution\r +\r +To make contribution to the project a bit easier, all the MGnify `conditionals` and `subworkflows` under\r +the `workflows/` directory that are not used in the metaGOflow framework, have been removed. 
\r +However, all the MGnify `tools/` and `utils/` are available in this repo, even if they are not invoked in the current\r +version of metaGOflow.\r +This way, we hope we encourage people to implement their own `conditionals` and/or `subworkflows` by exploiting the\r +currently supported `tools` and `utils` as well as by developing new `tools` and/or `utils`.\r +\r +\r +\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/384?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "A workflow for marine Genomic Observatories data analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/384?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# workflow-partial-cstacks-sstacks-gstacks\r +\r +These workflows are part of a set designed to work for RAD-seq data on the Galaxy platform, using the tools from the Stacks program. \r +\r +Galaxy Australia: https://usegalaxy.org.au/\r +\r +Stacks: http://catchenlab.life.illinois.edu/stacks/\r +\r +This workflow takes in ustacks output, and runs cstacks, sstacks and gstacks. \r +\r +To generate ustacks output see https://workflowhub.eu/workflows/349\r +\r +For the full de novo workflow see https://workflowhub.eu/workflows/348\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/350?version=1" ; + schema1:isBasedOn "https://github.com/AnnaSyme/workflow-partial-cstacks-sstacks-gstacks.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Partial de novo workflow: c-s-g-pops only" ; + schema1:sdDatePublished "2024-08-05 10:32:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/350/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 23360 ; + schema1:creator ; + schema1:dateCreated "2022-05-31T07:56:39Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """# workflow-partial-cstacks-sstacks-gstacks\r +\r +These workflows are part of a set designed to work for RAD-seq data on the Galaxy platform, using the tools from the Stacks program. \r +\r +Galaxy Australia: https://usegalaxy.org.au/\r +\r +Stacks: http://catchenlab.life.illinois.edu/stacks/\r +\r +This workflow takes in ustacks output, and runs cstacks, sstacks and gstacks. \r +\r +To generate ustacks output see https://workflowhub.eu/workflows/349\r +\r +For the full de novo workflow see https://workflowhub.eu/workflows/348\r +""" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Partial de novo workflow: c-s-g-pops only" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/350?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A pipeline to demultiplex, QC and map Nanopore data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1002?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/nanoseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/nanoseq" ; + schema1:sdDatePublished "2024-08-05 10:23:33 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1002/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6467 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:05Z" ; + schema1:dateModified "2024-06-11T12:55:05Z" ; + schema1:description "A pipeline to demultiplex, QC and map Nanopore data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1002?version=5" ; + schema1:keywords "Alignment, demultiplexing, nanopore, QC" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/nanoseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1002?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.280.4" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_ligand_parameterization/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python GMX Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-08-05 10:31:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/280/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1728 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T07:55:42Z" ; + schema1:dateModified "2023-04-14T07:56:58Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/280?version=3" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python GMX Automatic 
Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_ligand_parameterization/python/workflow.py" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=29" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:28 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=29" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13149 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:13Z" ; + schema1:dateModified "2024-06-11T12:55:13Z" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=29" ; + schema1:version 29 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """## Purge dups\r +\r +This snakemake pipeline is designed to be run using as input a contig-level genome and pacbio reads. This pipeline has been tested with `snakemake v7.32.4`. 
Raw long-read sequencing files and the input contig genome assembly must be given in the `config.yaml` file. To execute the workflow run:\r +\r +`snakemake --use-conda --cores N`\r +\r +Or configure the cluster.json and run using the `./run_cluster` command""" ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.506.2" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/snakemake/3.Purging/purge-dups" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Purge retained haplotypes using Purge-Dups" ; + schema1:sdDatePublished "2024-08-05 10:25:08 +0100" ; + schema1:url "https://workflowhub.eu/workflows/506/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1741 ; + schema1:creator ; + schema1:dateCreated "2024-03-16T07:45:11Z" ; + schema1:dateModified "2024-03-20T16:10:05Z" ; + schema1:description """## Purge dups\r +\r +This snakemake pipeline is designed to be run using as input a contig-level genome and pacbio reads. This pipeline has been tested with `snakemake v7.32.4`. Raw long-read sequencing files and the input contig genome assembly must be given in the `config.yaml` file. 
To execute the workflow run:\r +\r +`snakemake --use-conda --cores N`\r +\r +Or configure the cluster.json and run using the `./run_cluster` command""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/506?version=1" ; + schema1:isPartOf ; + schema1:keywords "Assembly, Genomics, Snakemake, Bioinformatics, Genome assembly" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Purge retained haplotypes using Purge-Dups" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/snakemake/3.Purging/purge-dups/Snakefile" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A pipeline to investigate horizontal gene transfer from NGS data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/988?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/hgtseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hgtseq" ; + schema1:sdDatePublished "2024-08-05 10:23:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/988/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8564 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:54Z" ; + schema1:dateModified "2024-06-11T12:54:54Z" ; + schema1:description "A pipeline to investigate horizontal gene transfer from NGS data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/988?version=1" ; + schema1:keywords "BWA-mem, bwa-mem2, FastQC, genomics-visualization, ggbio, horizontal-gene-transfer, kraken2, krona, MultiQC, NGS, SAMTools, taxonomies, tidyverse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hgtseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/988?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# TronFlow alignment pipeline\r +\r +![GitHub tag (latest SemVer)](https://img.shields.io/github/v/release/tron-bioinformatics/tronflow-bwa?sort=semver)\r +[![Run tests](https://github.com/TRON-Bioinformatics/tronflow-bwa/actions/workflows/automated_tests.yml/badge.svg?branch=master)](https://github.com/TRON-Bioinformatics/tronflow-bwa/actions/workflows/automated_tests.yml)\r +[![DOI](https://zenodo.org/badge/327943420.svg)](https://zenodo.org/badge/latestdoi/327943420)\r +[![License](https://img.shields.io/badge/license-MIT-green)](https://opensource.org/licenses/MIT)\r +[![Powered by Nextflow](https://img.shields.io/badge/powered%20by-Nextflow-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://www.nextflow.io/)\r +\r +The TronFlow alignment pipeline is part of a collection of computational workflows for tumor-normal pair \r +somatic variant calling.\r +\r +Find the documentation here [![Documentation 
Status](https://readthedocs.org/projects/tronflow-docs/badge/?version=latest)](https://tronflow-docs.readthedocs.io/en/latest/?badge=latest)\r +\r +This pipeline aligns paired and single end FASTQ files with BWA aln and mem algorithms and with BWA mem 2.\r +For RNA-seq STAR is also supported. To increase sensitivity of novel junctions use `--star_two_pass_mode` (recommended for RNAseq variant calling).\r +It also includes an initial step of read trimming using FASTP.\r +\r +\r +## How to run it\r +\r +Run it from GitHub as follows:\r +```\r +nextflow run tron-bioinformatics/tronflow-alignment -profile conda --input_files $input --output $output --algorithm aln --library paired\r +```\r +\r +Otherwise download the project and run as follows:\r +```\r +nextflow main.nf -profile conda --input_files $input --output $output --algorithm aln --library paired\r +```\r +\r +Find the help as follows:\r +```\r +$ nextflow run tron-bioinformatics/tronflow-alignment --help\r +N E X T F L O W ~ version 19.07.0\r +Launching `main.nf` [intergalactic_shannon] - revision: e707c77d7b\r +\r +Usage:\r + nextflow main.nf --input_files input_files [--reference reference.fasta]\r +\r +Input:\r + * input_fastq1: the path to a FASTQ file (incompatible with --input_files)\r + * input_files: the path to a tab-separated values file containing in each row the sample name and two paired FASTQs (incompatible with --fastq1 and --fastq2)\r + when `--library paired`, or a single FASTQ file when `--library single`\r + Example input file:\r + name1 fastq1.1 fastq1.2\r + name2 fastq2.1 fastq2.2\r + * reference: path to the indexed FASTA genome reference or the star reference folder in case of using star\r +\r +Optional input:\r + * input_fastq2: the path to a second FASTQ file (incompatible with --input_files, incompatible with --library paired)\r + * output: the folder where to publish output (default: output)\r + * algorithm: determines the BWA algorithm, either `aln`, `mem`, `mem2` or `star` 
(default `aln`)\r + * library: determines whether the sequencing library is paired or single end, either `paired` or `single` (default `paired`)\r + * cpus: determines the number of CPUs for each job, with the exception of bwa sampe and samse steps which are not parallelized (default: 8)\r + * memory: determines the memory required by each job (default: 32g)\r + * inception: if enabled it uses an inception, only valid for BWA aln, it requires a fast file system such as flash (default: false)\r + * skip_trimming: skips the read trimming step\r + * star_two_pass_mode: activates STAR two-pass mode, increasing sensitivity of novel junction discovery, recommended for RNA variant calling (default: false)\r + * additional_args: additional alignment arguments, only effective in BWA mem, BWA mem 2 and STAR (default: none) \r +\r +Output:\r + * A BAM file \\${name}.bam and its index\r + * FASTP read trimming stats report in HTML format \\${name.fastp_stats.html}\r + * FASTP read trimming stats report in JSON format \\${name.fastp_stats.json}\r +```\r +\r +### Input tables\r +\r +The table with FASTQ files expects two tab-separated columns without a header\r +\r +| Sample name | FASTQ 1 | FASTQ 2 |\r +|----------------------|---------------------------------|------------------------------|\r +| sample_1 | /path/to/sample_1.1.fastq | /path/to/sample_1.2.fastq |\r +| sample_2 | /path/to/sample_2.1.fastq | /path/to/sample_2.2.fastq |\r +\r +\r +### Reference genome\r +\r +The reference genome has to be provided in FASTA format and it requires two set of indexes:\r +* FAI index. Create with `samtools faidx your.fasta`\r +* BWA indexes. Create with `bwa index your.fasta`\r +\r +For bwa-mem2 a specific index is needed:\r +```\r +bwa-mem2 index your.fasta\r +```\r +\r +For star a reference folder prepared with star has to be provided. In order to prepare it will need the reference\r +genome in FASTA format and the gene annotations in GTF format. 
Run a command as follows:\r +```\r +STAR --runMode genomeGenerate --genomeDir $YOUR_FOLDER --genomeFastaFiles $YOUR_FASTA --sjdbGTFfile $YOUR_GTF\r +```\r +\r +## References\r +\r +* Li H. and Durbin R. (2010) Fast and accurate long-read alignment with Burrows-Wheeler Transform. Bioinformatics, Epub. https://doi.org/10.1093/bioinformatics/btp698 \r +* Shifu Chen, Yanqing Zhou, Yaru Chen, Jia Gu; fastp: an ultra-fast all-in-one FASTQ preprocessor, Bioinformatics, Volume 34, Issue 17, 1 September 2018, Pages i884–i890, https://doi.org/10.1093/bioinformatics/bty560\r +* Vasimuddin Md, Sanchit Misra, Heng Li, Srinivas Aluru. Efficient Architecture-Aware Acceleration of BWA-MEM for Multicore Systems. IEEE Parallel and Distributed Processing Symposium (IPDPS), 2019.\r +* Dobin A, Davis CA, Schlesinger F, Drenkow J, Zaleski C, Jha S, Batut P, Chaisson M, Gingeras TR. STAR: ultrafast universal RNA-seq aligner. Bioinformatics. 2013 Jan 1;29(1):15-21. doi: 10.1093/bioinformatics/bts635. Epub 2012 Oct 25. PMID: 23104886; PMCID: PMC3530905.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/418?version=1" ; + schema1:isBasedOn "https://github.com/TRON-Bioinformatics/tronflow-alignment" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for TronFlow alignment pipeline" ; + schema1:sdDatePublished "2024-08-05 10:31:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/418/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4781 ; + schema1:dateCreated "2023-01-17T16:51:42Z" ; + schema1:dateModified "2023-01-17T16:51:42Z" ; + schema1:description """# TronFlow alignment pipeline\r +\r +![GitHub tag (latest SemVer)](https://img.shields.io/github/v/release/tron-bioinformatics/tronflow-bwa?sort=semver)\r +[![Run tests](https://github.com/TRON-Bioinformatics/tronflow-bwa/actions/workflows/automated_tests.yml/badge.svg?branch=master)](https://github.com/TRON-Bioinformatics/tronflow-bwa/actions/workflows/automated_tests.yml)\r +[![DOI](https://zenodo.org/badge/327943420.svg)](https://zenodo.org/badge/latestdoi/327943420)\r +[![License](https://img.shields.io/badge/license-MIT-green)](https://opensource.org/licenses/MIT)\r +[![Powered by Nextflow](https://img.shields.io/badge/powered%20by-Nextflow-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://www.nextflow.io/)\r +\r +The TronFlow alignment pipeline is part of a collection of computational workflows for tumor-normal pair \r +somatic variant calling.\r +\r +Find the documentation here [![Documentation Status](https://readthedocs.org/projects/tronflow-docs/badge/?version=latest)](https://tronflow-docs.readthedocs.io/en/latest/?badge=latest)\r +\r +This pipeline aligns paired and single end FASTQ files with BWA aln and mem algorithms and with BWA mem 2.\r +For RNA-seq STAR is also supported. 
To increase sensitivity of novel junctions use `--star_two_pass_mode` (recommended for RNAseq variant calling).\r +It also includes an initial step of read trimming using FASTP.\r +\r +\r +## How to run it\r +\r +Run it from GitHub as follows:\r +```\r +nextflow run tron-bioinformatics/tronflow-alignment -profile conda --input_files $input --output $output --algorithm aln --library paired\r +```\r +\r +Otherwise download the project and run as follows:\r +```\r +nextflow main.nf -profile conda --input_files $input --output $output --algorithm aln --library paired\r +```\r +\r +Find the help as follows:\r +```\r +$ nextflow run tron-bioinformatics/tronflow-alignment --help\r +N E X T F L O W ~ version 19.07.0\r +Launching `main.nf` [intergalactic_shannon] - revision: e707c77d7b\r +\r +Usage:\r + nextflow main.nf --input_files input_files [--reference reference.fasta]\r +\r +Input:\r + * input_fastq1: the path to a FASTQ file (incompatible with --input_files)\r + * input_files: the path to a tab-separated values file containing in each row the sample name and two paired FASTQs (incompatible with --fastq1 and --fastq2)\r + when `--library paired`, or a single FASTQ file when `--library single`\r + Example input file:\r + name1 fastq1.1 fastq1.2\r + name2 fastq2.1 fastq2.2\r + * reference: path to the indexed FASTA genome reference or the star reference folder in case of using star\r +\r +Optional input:\r + * input_fastq2: the path to a second FASTQ file (incompatible with --input_files, incompatible with --library paired)\r + * output: the folder where to publish output (default: output)\r + * algorithm: determines the BWA algorithm, either `aln`, `mem`, `mem2` or `star` (default `aln`)\r + * library: determines whether the sequencing library is paired or single end, either `paired` or `single` (default `paired`)\r + * cpus: determines the number of CPUs for each job, with the exception of bwa sampe and samse steps which are not parallelized (default: 8)\r + * 
memory: determines the memory required by each job (default: 32g)\r + * inception: if enabled it uses an inception, only valid for BWA aln, it requires a fast file system such as flash (default: false)\r + * skip_trimming: skips the read trimming step\r + * star_two_pass_mode: activates STAR two-pass mode, increasing sensitivity of novel junction discovery, recommended for RNA variant calling (default: false)\r + * additional_args: additional alignment arguments, only effective in BWA mem, BWA mem 2 and STAR (default: none) \r +\r +Output:\r + * A BAM file \\${name}.bam and its index\r + * FASTP read trimming stats report in HTML format \\${name.fastp_stats.html}\r + * FASTP read trimming stats report in JSON format \\${name.fastp_stats.json}\r +```\r +\r +### Input tables\r +\r +The table with FASTQ files expects two tab-separated columns without a header\r +\r +| Sample name | FASTQ 1 | FASTQ 2 |\r +|----------------------|---------------------------------|------------------------------|\r +| sample_1 | /path/to/sample_1.1.fastq | /path/to/sample_1.2.fastq |\r +| sample_2 | /path/to/sample_2.1.fastq | /path/to/sample_2.2.fastq |\r +\r +\r +### Reference genome\r +\r +The reference genome has to be provided in FASTA format and it requires two set of indexes:\r +* FAI index. Create with `samtools faidx your.fasta`\r +* BWA indexes. Create with `bwa index your.fasta`\r +\r +For bwa-mem2 a specific index is needed:\r +```\r +bwa-mem2 index your.fasta\r +```\r +\r +For star a reference folder prepared with star has to be provided. In order to prepare it will need the reference\r +genome in FASTA format and the gene annotations in GTF format. Run a command as follows:\r +```\r +STAR --runMode genomeGenerate --genomeDir $YOUR_FOLDER --genomeFastaFiles $YOUR_FASTA --sjdbGTFfile $YOUR_GTF\r +```\r +\r +## References\r +\r +* Li H. and Durbin R. (2010) Fast and accurate long-read alignment with Burrows-Wheeler Transform. Bioinformatics, Epub. 
https://doi.org/10.1093/bioinformatics/btp698 \r +* Shifu Chen, Yanqing Zhou, Yaru Chen, Jia Gu; fastp: an ultra-fast all-in-one FASTQ preprocessor, Bioinformatics, Volume 34, Issue 17, 1 September 2018, Pages i884–i890, https://doi.org/10.1093/bioinformatics/bty560\r +* Vasimuddin Md, Sanchit Misra, Heng Li, Srinivas Aluru. Efficient Architecture-Aware Acceleration of BWA-MEM for Multicore Systems. IEEE Parallel and Distributed Processing Symposium (IPDPS), 2019.\r +* Dobin A, Davis CA, Schlesinger F, Drenkow J, Zaleski C, Jha S, Batut P, Chaisson M, Gingeras TR. STAR: ultrafast universal RNA-seq aligner. Bioinformatics. 2013 Jan 1;29(1):15-21. doi: 10.1093/bioinformatics/bts635. Epub 2012 Oct 25. PMID: 23104886; PMCID: PMC3530905.\r +""" ; + schema1:isPartOf ; + schema1:keywords "Alignment, BWA, STAR, Bioinformatics, fastp" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "TronFlow alignment pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/418?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/976?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/cutandrun" ; + schema1:sdDatePublished "2024-08-05 10:24:08 +0100" ; + schema1:url "https://workflowhub.eu/workflows/976/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10375 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:46Z" ; + schema1:dateModified "2024-06-11T12:54:46Z" ; + schema1:description "Analysis pipeline for CUT&RUN and CUT&TAG experiments that includes sequencing QC, spike-in normalisation, IgG control normalisation, peak calling and downstream peak analysis." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/976?version=7" ; + schema1:keywords "cutandrun, cutandrun-seq, cutandtag, cutandtag-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/cutandrun" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/976?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2023-09-01T14:29:11.916146" ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + , + , + ; + schema1:name "hic-hicup-cooler/hic-fastq-to-cool-hicup-cooler" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.10" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "hic-hicup-cooler/chic-fastq-to-cool-hicup-cooler" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:version "0.2.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.10" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.10" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "hic-hicup-cooler/hic-fastq-to-pairs-hicup" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:version "0.2.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.10" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "hic-hicup-cooler/hic-juicermediumtabix-to_cool-cooler" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:version "0.2.1" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/amber-protein-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r 
+[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.297.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy Amber Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/297/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 80553 ; + schema1:creator , + ; + schema1:dateCreated "2023-05-03T13:49:59Z" ; + schema1:dateModified "2023-05-03T13:51:37Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. 
Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/amber-protein-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/297?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy Amber Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_md_setup/galaxy/biobb_wf_amber_md_setup.ga" ; + schema1:version 3 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-10-17T15:33:20.525539" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-sr/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.12" . + + a schema1:Dataset ; + schema1:datePublished "2024-06-23T17:52:56.378217" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Plot-Nx-Size" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Plot-Nx-Size/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ChIP-seq peak-calling and differential analysis pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/971?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/chipseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/chipseq" ; + schema1:sdDatePublished "2024-08-05 10:24:12 +0100" ; + schema1:url "https://workflowhub.eu/workflows/971/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4753 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:44Z" ; + schema1:dateModified "2024-06-11T12:54:44Z" ; + schema1:description "ChIP-seq peak-calling and differential analysis pipeline." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/971?version=5" ; + schema1:keywords "ChIP, ChIP-seq, chromatin-immunoprecipitation, macs2, peak-calling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/chipseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/971?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.120.5" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_md_setup" ; + schema1:license 
"Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:31:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/120/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 57824 ; + schema1:creator , + ; + schema1:dateCreated "2023-07-26T09:17:16Z" ; + schema1:dateModified "2023-07-26T09:18:43Z" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/120?version=5" ; + schema1:isPartOf , + , + ; + schema1:keywords "" ; + schema1:license 
"https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_md_setup/blob/master/biobb_wf_md_setup/notebooks/biobb_MDsetup_tutorial.ipynb" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 
[Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.282.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_virtual_screening/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein-ligand Docking tutorial (Fpocket)" ; + schema1:sdDatePublished "2024-08-05 10:30:52 +0100" ; + schema1:url "https://workflowhub.eu/workflows/282/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2666 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-17T10:16:30Z" ; + schema1:dateModified "2022-04-11T09:29:43Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/282?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein-ligand Docking tutorial (Fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_virtual_screening/python/workflow.py" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2021-07-26T10:22:23.313479" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-se-illumina-wgs-variant-calling" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:sdDatePublished "2021-10-27 15:45:08 +0100" ; + schema1:softwareVersion "v0.1.2" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.6" . + + a schema1:Dataset ; + schema1:datePublished "2024-04-22T10:09:30.195105" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.22" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=26" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-08-05 10:24:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=26" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13457 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:51Z" ; + schema1:dateModified "2024-06-11T12:54:51Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=26" ; + schema1:version 26 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, related to the Galaxy training tutorial "Deep learning to predict animal behavior" .\r +\r +This workflow allows to analyze animal behavior data through deep learning.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/659?version=1" ; + schema1:isBasedOn "https://ecology.usegalaxy.eu/u/ylebras/w/workflow-constructed-from-history-imported-animal-dive-prediction-using-deep-learning" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Animal dive prediction using deep learning" ; + schema1:sdDatePublished "2024-08-05 10:27:17 +0100" ; + schema1:url "https://workflowhub.eu/workflows/659/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 78381 ; + schema1:dateCreated "2023-11-09T21:12:11Z" ; + schema1:dateModified "2023-11-09T21:12:11Z" ; + schema1:description """Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, related to the Galaxy training tutorial "Deep learning to predict animal behavior" .\r +\r +This workflow allows to analyze animal behavior data through deep learning.\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Animal dive prediction using deep learning" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/659?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/986?version=14" ; + schema1:isBasedOn "https://github.com/nf-core/fetchngs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fetchngs" ; + schema1:sdDatePublished "2024-08-05 10:23:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/986/ro_crate?version=14" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9364 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:53Z" ; + schema1:dateModified "2024-06-11T12:54:53Z" ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/986?version=13" ; + schema1:keywords "ddbj, download, ena, FASTQ, geo, sra, synapse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fetchngs" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/986?version=14" ; + schema1:version 14 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1021?version=10" ; + schema1:isBasedOn "https://github.com/nf-core/scrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/scrnaseq" ; + schema1:sdDatePublished "2024-08-05 10:23:15 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1021/ro_crate?version=10" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10337 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:17Z" ; + schema1:dateModified "2024-06-11T12:55:17Z" ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:keywords "10x-genomics, 10xgenomics, alevin, bustools, Cellranger, kallisto, rna-seq, single-cell, star-solo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/scrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1021?version=10" ; + schema1:version 10 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +****\r +\r +About this workflow:\r +\r +* Inputs: transdecoder-peptides.fasta, transdecoder-nucleotides.fasta\r +* Runs many steps to convert outputs into the formats required for Fgenesh - .pro, .dat and .cdna""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.880.1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Convert formats - TSI" ; + schema1:sdDatePublished "2024-08-05 10:24:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/880/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 77130 ; + schema1:creator , + ; + schema1:dateCreated "2024-05-08T07:23:51Z" ; + schema1:dateModified "2024-05-09T04:09:41Z" ; + schema1:description """This is part of a series of workflows to annotate a genome, tagged with `TSI-annotation`. \r +These workflows are based on command-line code by Luke Silver, converted into Galaxy Australia workflows. \r +\r +The workflows can be run in this order: \r +* Repeat masking\r +* RNAseq QC and read trimming\r +* Find transcripts\r +* Combine transcripts\r +* Extract transcripts\r +* Convert formats\r +* Fgenesh annotation\r +\r +****\r +\r +About this workflow:\r +\r +* Inputs: transdecoder-peptides.fasta, transdecoder-nucleotides.fasta\r +* Runs many steps to convert outputs into the formats required for Fgenesh - .pro, .dat and .cdna""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "TSI-annotation" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Convert formats - TSI" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/880?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 522976 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# mvgwas-nf\r +\r +[![nextflow](https://img.shields.io/badge/nextflow-%E2%89%A520.04.1-blue.svg)](http://nextflow.io)\r +[![CI-checks](https://github.com/guigolab/sqtlseeker2-nf/actions/workflows/ci.yaml/badge.svg)](https://github.com/guigolab/sqtlseeker2-nf/actions/workflows/ci.yaml)\r +\r +A pipeline for multi-trait genome-wide association studies (GWAS) using [MANTA](https://github.com/dgarrimar/manta).\r +\r +The pipeline performs the following analysis steps:\r +\r +* Split genotype file \r +* Preprocess phenotype and covariate data\r +* Test for association between phenotypes and genetic variants\r +* Collect summary statistics\r +\r +The pipeline uses [Nextflow](http://www.nextflow.io) as the execution backend. Please check [Nextflow documentation](http://www.nextflow.io/docs/latest/index.html) for more information.\r +\r +## Requirements\r +\r +- Unix-like operating system (Linux, MacOS, etc.)\r +- Java 8 or later \r +- [Docker](https://www.docker.com/) (v1.10.0 or later) or [Singularity](http://singularity.lbl.gov) (v2.5.0 or later)\r +\r +## Quickstart (~2 min)\r +\r +1. Install Nextflow:\r + ```\r + curl -fsSL get.nextflow.io | bash\r + ```\r +\r +2. Make a test run:\r + ```\r + nextflow run dgarrimar/mvgwas-nf -with-docker\r + ```\r +\r +**Notes**: move the `nextflow` executable to a directory in your `$PATH`. Set `-with-singularity` to use Singularity instead of Docker. 
\r +\r +(*) Alternatively you can clone this repository:\r +```\r +git clone https://github.com/dgarrimar/mvgwas-nf\r +cd mvgwas-nf\r +nextflow run mvgwas.nf -with-docker\r +```\r +\r +## Pipeline usage\r +\r +Launching the pipeline with the `--help` parameter shows the help message:\r +\r +```\r +nextflow run mvgwas.nf --help\r +```\r +\r +```\r +N E X T F L O W ~ version 20.04.1\r +Launching `mvgwas.nf` [amazing_roentgen] - revision: 56125073b7\r +\r +mvgwas-nf: A pipeline for multivariate Genome-Wide Association Studies\r +==============================================================================================\r +Performs multi-trait GWAS using using MANTA (https://github.com/dgarrimar/manta)\r +\r +Usage:\r +nextflow run mvgwas.nf [options]\r +\r +Parameters:\r +--pheno PHENOTYPES phenotype file\r +--geno GENOTYPES indexed genotype VCF file\r +--cov COVARIATES covariate file\r +--l VARIANTS/CHUNK variants tested per chunk (default: 10000)\r +--t TRANSFOMATION phenotype transformation: none, sqrt, log (default: none)\r +--i INTERACTION test for interaction with a covariate: none, (default: none)\r +--ng INDIVIDUALS/GENOTYPE minimum number of individuals per genotype group (default: 10)\r +--dir DIRECTORY output directory (default: result)\r +--out OUTPUT output file (default: mvgwas.tsv)\r +```\r +\r +## Input files and format\r +\r +`mvgwas-nf` requires the following input files:\r +\r +* **Genotypes.** \r +[bgzip](http://www.htslib.org/doc/bgzip.html)-compressed and indexed [VCF](https://samtools.github.io/hts-specs/VCFv4.3.pdf) genotype file.\r +\r +* **Phenotypes.**\r +Tab-separated file with phenotype measurements (quantitative) for each sample (i.e. *n* samples x *q* phenotypes).\r +The first column should contain sample IDs. Columns should be named.\r +\r +* **Covariates.**\r +Tab-separated file with covariate measurements (quantitative or categorical) for each sample (i.e. *n* samples x *k* covariates). 
\r +The first column should contain sample IDs. Columns should be named. \r +\r +Example [data](data) is available for the test run.\r +\r +## Pipeline results\r +\r +An output text file containing the multi-trait GWAS summary statistics (default: `./result/mvgwas.tsv`), with the following information:\r +\r +* `CHR`: chromosome\r +* `POS`: position\r +* `ID`: variant ID\r +* `REF`: reference allele\r +* `ALT`: alternative allele\r +* `F`: pseudo-F statistic\r +* `R2`: fraction of variance explained by the variant\r +* `P`: P-value\r +\r +The output folder and file names can be modified with the `--dir` and `--out` parameters, respectively.\r +\r +## Cite mvgwas-nf\r +\r +If you find `mvgwas-nf` useful in your research please cite the related publication:\r +\r +Garrido-Martín, D., Calvo, M., Reverter, F., Guigó, R. A fast non-parametric test of association for multiple traits. *bioRxiv* (2022). [https://doi.org/10.1101/2022.06.06.493041](https://doi.org/10.1101/2022.06.06.493041)\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/436?version=1" ; + schema1:isBasedOn "https://github.com/dgarrimar/mvgwas-nf.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for mvgwas-nf" ; + schema1:sdDatePublished "2024-08-05 10:31:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/436/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5754 ; + schema1:creator , + ; + schema1:dateCreated "2023-02-15T11:58:32Z" ; + schema1:dateModified "2023-02-15T12:09:57Z" ; + schema1:description """# mvgwas-nf\r +\r +[![nextflow](https://img.shields.io/badge/nextflow-%E2%89%A520.04.1-blue.svg)](http://nextflow.io)\r +[![CI-checks](https://github.com/guigolab/sqtlseeker2-nf/actions/workflows/ci.yaml/badge.svg)](https://github.com/guigolab/sqtlseeker2-nf/actions/workflows/ci.yaml)\r +\r +A pipeline for multi-trait genome-wide association studies (GWAS) using [MANTA](https://github.com/dgarrimar/manta).\r +\r +The pipeline performs the following analysis steps:\r +\r +* Split genotype file \r +* Preprocess phenotype and covariate data\r +* Test for association between phenotypes and genetic variants\r +* Collect summary statistics\r +\r +The pipeline uses [Nextflow](http://www.nextflow.io) as the execution backend. Please check [Nextflow documentation](http://www.nextflow.io/docs/latest/index.html) for more information.\r +\r +## Requirements\r +\r +- Unix-like operating system (Linux, MacOS, etc.)\r +- Java 8 or later \r +- [Docker](https://www.docker.com/) (v1.10.0 or later) or [Singularity](http://singularity.lbl.gov) (v2.5.0 or later)\r +\r +## Quickstart (~2 min)\r +\r +1. Install Nextflow:\r + ```\r + curl -fsSL get.nextflow.io | bash\r + ```\r +\r +2. Make a test run:\r + ```\r + nextflow run dgarrimar/mvgwas-nf -with-docker\r + ```\r +\r +**Notes**: move the `nextflow` executable to a directory in your `$PATH`. Set `-with-singularity` to use Singularity instead of Docker. 
\r +\r +(*) Alternatively you can clone this repository:\r +```\r +git clone https://github.com/dgarrimar/mvgwas-nf\r +cd mvgwas-nf\r +nextflow run mvgwas.nf -with-docker\r +```\r +\r +## Pipeline usage\r +\r +Launching the pipeline with the `--help` parameter shows the help message:\r +\r +```\r +nextflow run mvgwas.nf --help\r +```\r +\r +```\r +N E X T F L O W ~ version 20.04.1\r +Launching `mvgwas.nf` [amazing_roentgen] - revision: 56125073b7\r +\r +mvgwas-nf: A pipeline for multivariate Genome-Wide Association Studies\r +==============================================================================================\r +Performs multi-trait GWAS using using MANTA (https://github.com/dgarrimar/manta)\r +\r +Usage:\r +nextflow run mvgwas.nf [options]\r +\r +Parameters:\r +--pheno PHENOTYPES phenotype file\r +--geno GENOTYPES indexed genotype VCF file\r +--cov COVARIATES covariate file\r +--l VARIANTS/CHUNK variants tested per chunk (default: 10000)\r +--t TRANSFOMATION phenotype transformation: none, sqrt, log (default: none)\r +--i INTERACTION test for interaction with a covariate: none, (default: none)\r +--ng INDIVIDUALS/GENOTYPE minimum number of individuals per genotype group (default: 10)\r +--dir DIRECTORY output directory (default: result)\r +--out OUTPUT output file (default: mvgwas.tsv)\r +```\r +\r +## Input files and format\r +\r +`mvgwas-nf` requires the following input files:\r +\r +* **Genotypes.** \r +[bgzip](http://www.htslib.org/doc/bgzip.html)-compressed and indexed [VCF](https://samtools.github.io/hts-specs/VCFv4.3.pdf) genotype file.\r +\r +* **Phenotypes.**\r +Tab-separated file with phenotype measurements (quantitative) for each sample (i.e. *n* samples x *q* phenotypes).\r +The first column should contain sample IDs. Columns should be named.\r +\r +* **Covariates.**\r +Tab-separated file with covariate measurements (quantitative or categorical) for each sample (i.e. *n* samples x *k* covariates). 
\r +The first column should contain sample IDs. Columns should be named. \r +\r +Example [data](data) is available for the test run.\r +\r +## Pipeline results\r +\r +An output text file containing the multi-trait GWAS summary statistics (default: `./result/mvgwas.tsv`), with the following information:\r +\r +* `CHR`: chromosome\r +* `POS`: position\r +* `ID`: variant ID\r +* `REF`: reference allele\r +* `ALT`: alternative allele\r +* `F`: pseudo-F statistic\r +* `R2`: fraction of variance explained by the variant\r +* `P`: P-value\r +\r +The output folder and file names can be modified with the `--dir` and `--out` parameters, respectively.\r +\r +## Cite mvgwas-nf\r +\r +If you find `mvgwas-nf` useful in your research please cite the related publication:\r +\r +Garrido-Martín, D., Calvo, M., Reverter, F., Guigó, R. A fast non-parametric test of association for multiple traits. *bioRxiv* (2022). [https://doi.org/10.1101/2022.06.06.493041](https://doi.org/10.1101/2022.06.06.493041)\r +""" ; + schema1:keywords "GWAS, Multivariate, Non-parametric, Nextflow" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "mvgwas-nf" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/436?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2023-10-20T09:57:48.562544" ; + schema1:hasPart , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/baredsc" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + ; + schema1:name "baredsc/baredSC-1d-logNorm" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.14" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.14" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "baredsc/baredSC-2d-logNorm" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/baredsc" ; + schema1:version "0.1" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.130.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Amber Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:25:35 +0100" ; + schema1:url 
"https://workflowhub.eu/workflows/130/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 66118 ; + schema1:creator , + ; + schema1:dateCreated "2022-09-15T12:29:23Z" ; + schema1:dateModified "2023-01-16T13:50:19Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/130?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Amber Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://raw.githubusercontent.com/bioexcel/biobb_wf_amber_md_setup/master/biobb_wf_amber_md_setup/notebooks/mdsetup/biobb_amber_setup_notebook.ipynb" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# Galaxy Workflow Documentation: MS Finder Pipeline\r +\r +This document outlines a MSFinder Galaxy workflow designed for peak annotation. The workflow consists of several steps aimed at preprocessing MS data, filtering, enhancing, and running MSFinder.\r +\r +## Step 1: Data Collection and Preprocessing\r +Collect if the inchi and smiles are missing from the dataset, and subsequently filter out the spectra which are missing inchi and smiles.\r +\r +### 1.1 MSMetaEnhancer: Collect InChi, Isomeric_smiles, and Nominal_mass\r +- Utilizes MSMetaEnhancer to collect InChi and Isomeric_smiles using PubChem and IDSM databases.\r +- Utilizes MSMetaEnhancer to collect MW using RDkit (For GOLM).\r +\r +### 1.2 replace key\r +- replace isomeric_smiles key to smiles using replace text tool\r +- replace MW key to parent_mass using replace text tool (For GOLM)\r +\r +### 1.3 Matchms Filtering\r +- Filters out invalid SMILES and InChi from the dataset using Matchms filtering.\r +\r +## Step 2: Complex Removal and Subsetting Dataset\r +Removes coordination complexes from the dataset.\r +\r +### 2.1 Remove Complexes and Subset Data\r +- Removes complexes from the dataset.\r +- Exports metadata using Matchms metadata export, cuts the SMILES column, removes complexes using Rem_Complex tool, and updates the dataset using Matchms subsetting.\r +\r +## Step 3: Data Key Manipulation\r +Add missing metadata required by the MSFinder for annotation.\r +\r +### 3.1 Matchms Remove Key\r +- Removes existing keys such as adduct, charge, and ionmode from the dataset.\r +\r +### 3.2 Matchms Add Key\r +- Adds necessary keys like charge, ionmode, and adduct to the 
dataset.\r +\r +### 3.3 Matchms Filtering\r +- Derives precursor m/z using parent mass and adduct information using matchms filtering.\r +\r +### 3.4 Matchms Convert\r +- Converts the dataset to Riken format for compatibility with MSFinder using matchms convert.\r +\r +## Step 4: Peak Annotation\r +### 4.1 Recetox-MSFinder\r +- Executes MSFinder with a 0.5 Da tolerance for both MS1 and MS2, including all element checks and an extended range for peak annotation.\r +\r +## Step 5: Error Handling and Refinement\r +Check the MSFinder output to see if the output is the results or the log file. If the output is log file remove the smile from the dataset using matchms subsetting tool and rerun MSFinder.\r +\r +### 5.1 Error Handling\r +- Handles errors in peak annotation by removing SMILES that are not accepted by MSFinder.\r +- Reruns MSFinder after error correction or with different parameter (if applicable).\r +\r +## Step 6: High-res Annotation\r +### 6.1 High-Res Peak Overwriting\r +- Utilizes the Use_Theoretical_mz_Annotations tool to Overwrite experimentally measured mz values for peaks with theoretical values from peak comments.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.888.2" ; + schema1:isBasedOn "https://github.com/RECETOX/workflow-testing/blob/main/msfinder_workflow/Galaxy_Workflow_MsFinder_Workflow_GOLM_V2.ga" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Theoretical fragment substructure generation and in silico mass spectral library high-resolution upcycling workflow" ; + schema1:sdDatePublished "2024-08-05 10:23:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/888/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 41205 ; + schema1:creator , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-06T10:18:12Z" ; + schema1:dateModified "2024-06-19T09:35:34Z" ; + schema1:description """# Galaxy Workflow Documentation: MS Finder Pipeline\r +\r +This document outlines a MSFinder Galaxy workflow designed for peak annotation. The workflow consists of several steps aimed at preprocessing MS data, filtering, enhancing, and running MSFinder.\r +\r +## Step 1: Data Collection and Preprocessing\r +Collect if the inchi and smiles are missing from the dataset, and subsequently filter out the spectra which are missing inchi and smiles.\r +\r +### 1.1 MSMetaEnhancer: Collect InChi, Isomeric_smiles, and Nominal_mass\r +- Utilizes MSMetaEnhancer to collect InChi and Isomeric_smiles using PubChem and IDSM databases.\r +- Utilizes MSMetaEnhancer to collect MW using RDkit (For GOLM).\r +\r +### 1.2 replace key\r +- replace isomeric_smiles key to smiles using replace text tool\r +- replace MW key to parent_mass using replace text tool (For GOLM)\r +\r +### 1.3 Matchms Filtering\r +- Filters out invalid SMILES and InChi from the dataset using Matchms filtering.\r +\r +## Step 2: Complex Removal and Subsetting Dataset\r +Removes coordination complexes from the dataset.\r +\r +### 2.1 Remove Complexes and Subset Data\r +- Removes complexes from the dataset.\r +- Exports metadata using Matchms metadata export, cuts the SMILES column, removes complexes using Rem_Complex tool, and updates the dataset using Matchms subsetting.\r +\r +## Step 3: Data Key Manipulation\r +Add missing metadata required by the MSFinder for annotation.\r +\r +### 3.1 Matchms Remove Key\r +- Removes existing keys such as adduct, charge, and ionmode from the dataset.\r +\r +### 3.2 Matchms Add Key\r +- Adds necessary keys like charge, ionmode, and adduct to 
the dataset.\r +\r +### 3.3 Matchms Filtering\r +- Derives precursor m/z using parent mass and adduct information using matchms filtering.\r +\r +### 3.4 Matchms Convert\r +- Converts the dataset to Riken format for compatibility with MSFinder using matchms convert.\r +\r +## Step 4: Peak Annotation\r +### 4.1 Recetox-MSFinder\r +- Executes MSFinder with a 0.5 Da tolerance for both MS1 and MS2, including all element checks and an extended range for peak annotation.\r +\r +## Step 5: Error Handling and Refinement\r +Check the MSFinder output to see if the output is the results or the log file. If the output is log file remove the smile from the dataset using matchms subsetting tool and rerun MSFinder.\r +\r +### 5.1 Error Handling\r +- Handles errors in peak annotation by removing SMILES that are not accepted by MSFinder.\r +- Reruns MSFinder after error correction or with different parameter (if applicable).\r +\r +## Step 6: High-res Annotation\r +### 6.1 High-Res Peak Overwriting\r +- Utilizes the Use_Theoretical_mz_Annotations tool to Overwrite experimentally measured mz values for peaks with theoretical values from peak comments.\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/888?version=1" ; + schema1:keywords "Bioinformatics, Cheminformatics, Metabolomics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Theoretical fragment substructure generation and in silico mass spectral library high-resolution upcycling workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/888?version=2" ; + schema1:version 2 ; + ns1:input ; + ns1:output , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description "This is a Galaxy workflow for performing molecular dynamics simulations and analysis with flavivirus helicases in the Apo or unbound state. 
The associated input files can be found at: https://zenodo.org/records/7493015 The associated output files can be found at: https://zenodo.org/records/7850935" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.762.1" ; + schema1:isBasedOn "https://zenodo.org/records/7493015" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for flavivirushelicase_apo" ; + schema1:sdDatePublished "2024-08-05 10:25:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/762/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 72163 ; + schema1:creator ; + schema1:dateCreated "2024-02-16T17:30:27Z" ; + schema1:dateModified "2024-03-02T16:24:20Z" ; + schema1:description "This is a Galaxy workflow for performing molecular dynamics simulations and analysis with flavivirus helicases in the Apo or unbound state. The associated input files can be found at: https://zenodo.org/records/7493015 The associated output files can be found at: https://zenodo.org/records/7850935" ; + schema1:keywords "zika, dengue, west nile, helicase, rna virus, molecular dynamics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "flavivirushelicase_apo" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/762?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.196.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook ABC MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/196/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 101184 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-23T08:57:03Z" ; + schema1:dateModified "2023-01-16T13:53:15Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/196?version=4" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook ABC MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_wf_amber_md_setup/blob/master/biobb_wf_amber_md_setup/notebooks/abcsetup/biobb_amber_ABC_setup.ipynb" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1023?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/smrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/smrnaseq" ; + schema1:sdDatePublished "2024-08-05 10:23:10 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1023/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4039 ; + schema1:creator , + , + , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:18Z" ; + schema1:dateModified "2024-06-11T12:55:18Z" ; + schema1:description "Small RNA-Seq Best Practice Analysis Pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1023?version=10" ; + schema1:keywords "small-rna, smrna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/smrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1023?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-02-29T18:36:07.286557" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/fluorescence-nuclei-segmentation-and-counting" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "fluorescence-nuclei-segmentation-and-counting/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.63.1" ; + schema1:isBasedOn "https://github.com/arvados/bh20-seq-resource" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for COVID-19 PubSeq Pangenome Generate" ; + schema1:sdDatePublished "2024-08-05 10:31:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/63/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3464 ; + schema1:creator , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-10-07T09:36:13Z" ; + schema1:dateModified "2023-01-16T13:44:54Z" ; + schema1:description "" ; + schema1:image ; + schema1:keywords "covid-19, CWL, pangenome" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "COVID-19 PubSeq Pangenome Generate" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/63?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 30852 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """To discover causal mutations of inherited diseases it’s common practice to do a trio analysis. In a trio analysis DNA is sequenced of both the patient and parents. Using this method, it’s possible to identify multiple inheritance patterns. Some examples of these patterns are autosomal recessive, autosomal dominant, and de-novo variants, which are represented in the figure below. 
To elaborate, the most left tree shows an autosomal dominant inhertitance pattern where the offspring inherits a faulty copy of the gene from one of the parents.\r +\r +To discover these mutations either whole exome sequencing (WES) or whole genome sequencing (WGS) can be used. With these technologies it is possible to uncover the DNA of the parents and offspring to find (shared) mutations in the DNA. These mutations can include insertions/deletions (indels), loss of heterozygosity (LOH), single nucleotide variants (SNVs), copy number variations (CNVs), and fusion genes.\r +\r +In this workflow we will also make use of the HTSGET protocol, which is a program to download our data securely and savely. This protocol has been implemented in the EGA Download Client Tool: toolshed.g2.bx.psu.edu/repos/iuc/ega_download_client/pyega3/4.0.0+galaxy0 tool, so we don’t have to leave Galaxy to retrieve our data.\r +\r +We will not start our analysis from scratch, since the main goal of this tutorial is to use the HTSGET protocol to download variant information from an online archive and to find the causative variant from those variants. If you want to learn how to do the analysis from scratch, using the raw reads, you can have a look at the Exome sequencing data analysis for diagnosing a genetic disease tutorial.""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/363?version=1" ; + schema1:isBasedOn "https://github.com/galaxyproject/training-material/blob/main/topics/variant-analysis/tutorials/trio-analysis/workflows/main_workflow.ga" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Trio Analysis" ; + schema1:sdDatePublished "2024-08-05 10:31:15 +0100" ; + schema1:url "https://workflowhub.eu/workflows/363/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 26615 ; + schema1:creator ; + schema1:dateCreated "2022-06-09T08:32:07Z" ; + schema1:dateModified "2023-03-01T15:35:23Z" ; + schema1:description """To discover causal mutations of inherited diseases it’s common practice to do a trio analysis. In a trio analysis DNA is sequenced of both the patient and parents. Using this method, it’s possible to identify multiple inheritance patterns. Some examples of these patterns are autosomal recessive, autosomal dominant, and de-novo variants, which are represented in the figure below. To elaborate, the most left tree shows an autosomal dominant inhertitance pattern where the offspring inherits a faulty copy of the gene from one of the parents.\r +\r +To discover these mutations either whole exome sequencing (WES) or whole genome sequencing (WGS) can be used. With these technologies it is possible to uncover the DNA of the parents and offspring to find (shared) mutations in the DNA. These mutations can include insertions/deletions (indels), loss of heterozygosity (LOH), single nucleotide variants (SNVs), copy number variations (CNVs), and fusion genes.\r +\r +In this workflow we will also make use of the HTSGET protocol, which is a program to download our data securely and savely. This protocol has been implemented in the EGA Download Client Tool: toolshed.g2.bx.psu.edu/repos/iuc/ega_download_client/pyega3/4.0.0+galaxy0 tool, so we don’t have to leave Galaxy to retrieve our data.\r +\r +We will not start our analysis from scratch, since the main goal of this tutorial is to use the HTSGET protocol to download variant information from an online archive and to find the causative variant from those variants. 
If you want to learn how to do the analysis from scratch, using the raw reads, you can have a look at the Exome sequencing data analysis for diagnosing a genetic disease tutorial.""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/363?version=1" ; + schema1:keywords "variant-analysis" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Trio Analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/363?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2022-10-24T13:41:51.909409" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "rnaseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.11" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Dual RNA-seq pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/982?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/dualrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/dualrnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/982/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8839 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:49Z" ; + schema1:dateModified "2024-06-11T12:54:49Z" ; + schema1:description "Dual RNA-seq pipeline" ; + schema1:keywords "dualrna-seq, host-pathogen, quantification, readmapping, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/dualrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/982?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-07-24T13:33:35.024740" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/kmer-profiling-hifi-VGP1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "kmer-profiling-hifi-VGP1/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . + + a schema1:Dataset ; + schema1:datePublished "2022-02-11T15:29:50.854615" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-pe-illumina-artic-variant-calling/COVID-19-PE-ARTIC-ILLUMINA" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """This workflow demonstrates the usage of the [Community Earth System Model](https://www.cesm.ucar.edu/) on Galaxy Europe. 
\r +\r +A fully coupled B1850 compset with resolution f19_g17 is run for 1 month.\r +\r +![](https://nordicesmhub.github.io/GEO4962/fig/newcase.png)""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/364?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Workflow for running the Community Earth System Model in fully coupled mode" ; + schema1:sdDatePublished "2024-08-05 10:32:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/364/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 2639 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 18008 ; + schema1:dateCreated "2022-06-12T19:01:32Z" ; + schema1:dateModified "2023-01-16T14:01:23Z" ; + schema1:description """This workflow demonstrates the usage of the [Community Earth System Model](https://www.cesm.ucar.edu/) on Galaxy Europe. \r +\r +A fully coupled B1850 compset with resolution f19_g17 is run for 1 month.\r +\r +![](https://nordicesmhub.github.io/GEO4962/fig/newcase.png)""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Workflow for running the Community Earth System Model in fully coupled mode" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/364?version=1" ; + schema1:version 1 ; + ns1:input , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 6568 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/987?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/funcscan" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/funcscan" ; + schema1:sdDatePublished "2024-08-05 10:23:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/987/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16015 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:54Z" ; + schema1:dateModified "2024-06-11T12:54:54Z" ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/987?version=8" ; + schema1:keywords "amp, AMR, antibiotic-resistance, antimicrobial-peptides, antimicrobial-resistance-genes, arg, Assembly, bgc, biosynthetic-gene-clusters, contigs, function, Metagenomics, natural-products, screening, secondary-metabolites" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/funcscan" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/987?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=23" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=23" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10903 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:13Z" ; + schema1:dateModified "2024-06-11T12:55:13Z" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=23" ; + schema1:version 23 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description "This is a Galaxy workflow for performing molecular dynamics simulations and analysis with coronavirus helicases bound to a ligand/drug molecule. The associated input files can be found at: https://zenodo.org/records/7492987. The associated output files can be found at: https://zenodo.org/records/7851000." ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.763.1" ; + schema1:isBasedOn "https://zenodo.org/records/7492987" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for coronavirushelicase_proteindrugcomplex" ; + schema1:sdDatePublished "2024-08-05 10:25:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/763/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 78464 ; + schema1:creator ; + schema1:dateCreated "2024-02-16T19:04:32Z" ; + schema1:dateModified "2024-03-02T17:06:31Z" ; + schema1:description "This is a Galaxy workflow for performing molecular dynamics simulations and analysis with coronavirus helicases bound to a ligand/drug molecule. The associated input files can be found at: https://zenodo.org/records/7492987. The associated output files can be found at: https://zenodo.org/records/7851000." ; + schema1:keywords "covid-19, coronavirus, helicase, rna virus, molecular dynamics, SARS-CoV-2, MERS, NSP13, covid19.galaxyproject.org" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "coronavirushelicase_proteindrugcomplex" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/763?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "MNase-seq analysis pipeline using BWA and DANPOS2." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1000?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/mnaseseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mnaseseq" ; + schema1:sdDatePublished "2024-08-05 10:23:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1000/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5436 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:05Z" ; + schema1:dateModified "2024-06-11T12:55:05Z" ; + schema1:description "MNase-seq analysis pipeline using BWA and DANPOS2." ; + schema1:keywords "mnase-seq, nucleosome, nucleosome-maps, nucleosome-positioning" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mnaseseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1000?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/965?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/atacseq" ; + schema1:sdDatePublished "2024-08-05 10:24:18 +0100" ; + schema1:url "https://workflowhub.eu/workflows/965/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4439 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:41Z" ; + schema1:dateModified "2024-06-11T12:54:41Z" ; + schema1:description "ATACSeq peak-calling and differential analysis pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/965?version=8" ; + schema1:keywords "ATAC-seq, chromatin-accessibiity" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/atacseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/965?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for analysis of Molecular Pixelation assays" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1010?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/pixelator" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/pixelator" ; + schema1:sdDatePublished "2024-08-05 10:23:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1010/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10964 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:07Z" ; + schema1:dateModified "2024-06-11T12:55:07Z" ; + schema1:description "Pipeline for analysis of Molecular Pixelation assays" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1010?version=7" ; + schema1:keywords "molecular-pixelation, pixelator, pixelgen-technologies, proteins, single-cell, single-cell-omics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/pixelator" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1010?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Genes and transcripts annotation with Isoseq using uLTRA and TAMA" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/993?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/isoseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/isoseq" ; + schema1:sdDatePublished "2024-08-05 10:23:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/993/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8438 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:58Z" ; + schema1:dateModified "2024-06-11T12:54:58Z" ; + schema1:description "Genes and transcripts annotation with Isoseq using uLTRA and TAMA" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/993?version=6" ; + schema1:keywords "isoseq, isoseq-3, rna, tama, ultra" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/isoseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/993?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Quantitative Mass Spectrometry nf-core workflow" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1013?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/quantms" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/quantms" ; + schema1:sdDatePublished "2024-08-05 10:23:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1013/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14567 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:08Z" ; + schema1:dateModified "2024-06-11T12:55:08Z" ; + schema1:description "Quantitative Mass Spectrometry nf-core workflow" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1013?version=3" ; + schema1:keywords "dia, lfq, mass-spec, mass-spectrometry, openms, proteogenomics, Proteomics, tmt" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/quantms" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1013?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + schema1:datePublished "2022-10-24T11:08:23.961762" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-consensus-from-variation" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-consensus-from-variation/COVID-19-CONSENSUS-CONSTRUCTION" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.11" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# SNP-Calling\r +GATK Variant calling pipeline for genomic data using Nextflow\r +\r +[![nextflow](https://img.shields.io/badge/nextflow-%E2%89%A522.04.5-brightgreen.svg)](http://nextflow.io)\r +\r +## Quickstart\r +\r +Install Nextflow using the following command: \r +\r + curl -s https://get.nextflow.io | bash\r + \r +Index reference genome:\r +\r + `$ bwa index /path/to/reference/genome.fa`\r + \r + `$ samtools faidx /path/to/reference/genome.fa`\r + \r + `$ gatk CreateSequenceDictionary -R /path/to/genome.fa -O genome.dict`\r +\r +Launch the pipeline execution with the following command:\r +\r + nextflow run jdetras/snp-calling -r main -profile docker\r + \r +## Pipeline Description\r +\r +The variant calling pipeline follows the recommended practices from GATK. The input genomic data are aligned to a reference genome using BWA. The alignemnt files are processed using Picard Tools. Variant calling is done using samtools and GATK. \r +\r +## Input files\r +\r +The input files required to run the pipeline:\r +* Genomic sequence paired reads, `*_{1,2}.fq.gz`\r +* Reference genome, `*.fa`\r +\r +## Pipeline parameters\r +\r +### Usage\r +Usage: `nextflow run jdetras/snp-calling -profile docker [options]`\r +\r +Options:\r +\r +* `--reads` \r +* `--genome`\r +* `--output`\r +\r +Example: \r + `$ nextflow run jdetras/snp-calling -profile docker --reads '/path/to/reads/*_{1,2}.fq.gz' --genome '/path/to/reference/genome.fa' --output '/path/to/output'`\r +\r +#### `--reads`\r +\r +* The path to the FASTQ read files.\r +* Wildcards (*, ?) can be used to declare multiple reads. Use single quotes when wildcards are used. 
\r +* Default parameter: `$projectDir/data/reads/*_{1,2}.fq.gz`\r +\r +Example: \r + `$ nextflow run jdetras/snp-calling -profile docker --reads '/path/to/reads/*_{1,2}.fq.gz'`\r + \r +#### `--genome`\r +\r +* The path to the genome file in fasta format.\r +* The extension is `.fa`.\r +* Default parameter: `$projectDir/data/reference/genome.fa`\r +\r +Example:\r + `$ nextflow run jdetras/snp-calling -profile docker --genome /path/to/reference/genome.fa`\r + \r +#### `--output`\r +\r +* The path to the directory for the output files.\r +* Default parameter: `$projectDir/output`\r +\r +## Software\r +\r +* [BWA 0.7.17](http://bio-bwa.sourceforge.net/)\r +* [Samtools 1.3.1](http://www.htslib.org/)\r +* [GATK 4.2.6.1](https://gatk.broadinstitute.org/) \r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/442?version=1" ; + schema1:isBasedOn "https://github.com/jdetras/SNP-Calling.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for SNP-Calling Workflow" ; + schema1:sdDatePublished "2024-08-05 10:31:13 +0100" ; + schema1:url "https://workflowhub.eu/workflows/442/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 1737 ; + schema1:dateCreated "2023-03-21T05:30:15Z" ; + schema1:dateModified "2023-03-21T05:30:15Z" ; + schema1:description """# SNP-Calling\r +GATK Variant calling pipeline for genomic data using Nextflow\r +\r +[![nextflow](https://img.shields.io/badge/nextflow-%E2%89%A522.04.5-brightgreen.svg)](http://nextflow.io)\r +\r +## Quickstart\r +\r +Install Nextflow using the following command: \r +\r + curl -s https://get.nextflow.io | bash\r + \r +Index reference genome:\r +\r + `$ bwa index /path/to/reference/genome.fa`\r + \r + `$ samtools faidx /path/to/reference/genome.fa`\r + \r + `$ gatk CreateSequenceDictionary -R /path/to/genome.fa -O genome.dict`\r +\r +Launch the pipeline execution with the following command:\r +\r + nextflow run jdetras/snp-calling -r main -profile docker\r + \r +## Pipeline Description\r +\r +The variant calling pipeline follows the recommended practices from GATK. The input genomic data are aligned to a reference genome using BWA. The alignemnt files are processed using Picard Tools. Variant calling is done using samtools and GATK. \r +\r +## Input files\r +\r +The input files required to run the pipeline:\r +* Genomic sequence paired reads, `*_{1,2}.fq.gz`\r +* Reference genome, `*.fa`\r +\r +## Pipeline parameters\r +\r +### Usage\r +Usage: `nextflow run jdetras/snp-calling -profile docker [options]`\r +\r +Options:\r +\r +* `--reads` \r +* `--genome`\r +* `--output`\r +\r +Example: \r + `$ nextflow run jdetras/snp-calling -profile docker --reads '/path/to/reads/*_{1,2}.fq.gz' --genome '/path/to/reference/genome.fa' --output '/path/to/output'`\r +\r +#### `--reads`\r +\r +* The path to the FASTQ read files.\r +* Wildcards (*, ?) can be used to declare multiple reads. Use single quotes when wildcards are used. 
\r +* Default parameter: `$projectDir/data/reads/*_{1,2}.fq.gz`\r +\r +Example: \r + `$ nextflow run jdetras/snp-calling -profile docker --reads '/path/to/reads/*_{1,2}.fq.gz'`\r + \r +#### `--genome`\r +\r +* The path to the genome file in fasta format.\r +* The extension is `.fa`.\r +* Default parameter: `$projectDir/data/reference/genome.fa`\r +\r +Example:\r + `$ nextflow run jdetras/snp-calling -profile docker --genome /path/to/reference/genome.fa`\r + \r +#### `--output`\r +\r +* The path to the directory for the output files.\r +* Default parameter: `$projectDir/output`\r +\r +## Software\r +\r +* [BWA 0.7.17](http://bio-bwa.sourceforge.net/)\r +* [Samtools 1.3.1](http://www.htslib.org/)\r +* [GATK 4.2.6.1](https://gatk.broadinstitute.org/) \r +""" ; + schema1:keywords "variant calling, GATK4, BWA-mem, rice" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "SNP-Calling Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/442?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """## EBP-Nor Genome Assembly pipeline\r +\r +This repository contains the EBP-Nor genome assembly pipeline. This pipeline is implemented in snakemake.\r +This pipeline is developed to create haplotype-resolved genome assemblies from PacBio HiFi reads and HiC reads,\r +and is primarly designed for diploid eukaryotic organisms. 
The pipeline is designed to work on a linux cluster with slurm as workload manager.\r +\r +## Requirements & Setup\r +\r +Some software need to be configured/installed before the pipeline can be run\r +\r +### Conda setup\r +\r +Most required software, including snakemake itself, can be installed using [conda](https://conda.io/projects/conda/en/latest/user-guide/install/index.html).\r +\r +Once conda is installed, you can create a new environment containing most necessary software from the provided asm_pipeline.yaml file as follows:\r +\r +```shell\r +conda create -n asm_pipeline --file=worfklow/envs/asm_pipeline.yaml\r +```\r +\r +### Other software setup\r +\r +The following software need to be installed manually:\r +\r +- KMC v3.1.1 (https://github.com/tbenavi1/KMC)\r +- HiFiAdapterFilt (https://github.com/sheinasim/HiFiAdapterFilt)\r +- Oatk (https://github.com/c-zhou/oatk)\r +- OatkDB (https://github.com/c-zhou/OatkDB)\r +- NCBI FCS-Adaptor (https://github.com/ncbi/fcs/wiki/FCS-adaptor)\r +- NCBI FCS-GX (https://github.com/ncbi/fcs/wiki/FCS-GX)\r +\r +Please refer to their respective installation instructions to properly install them. 
You will need to privide the installation paths of these software to the config file (see Parameter section).\r +\r +### BUSCO database setup\r +\r +As in general, computing nodes are not connected to the internet, BUSCO lineage datasets need to be downloaded manually before running the pipeline.\r +This can easily be done by running\r +\r +```shell\r +busco --download eukaryota\r +```\r +\r +You will need to specify the folder where you downloaded the busco lineages in the config file (see Parameter section).\r +\r +### Data\r +\r +This pipeline is created for using PacBio HiFi reads together with paired-end Hi-C data.\r +You will need to specify the absolute paths to these files in the config file (see Parameters section).\r +\r +### Parameters\r +\r +The necessary config files for running the pipeline can be found in the config folder.\r +\r +General snakemake and cluster submission parameters are defined in ```config/config.yaml```, \r +data- and software-specfic parameters are defined in ```config/asm_params.yaml```.\r +\r +First, define the paths of the input files you want to use:\r +- pacbio: path to the location of the PacBio HiFi reads (```.fastq.gz```)\r +- hicF and hicR: path to the forward and reverse HiC reads respectively\r +\r +For software not installed by conda, the installation path needs to be provided to the Snakemake pipeline by editing following parameters in the ```config/asm_params.yaml```:\r +\r +- Set the "adapterfilt_install_dir" parameter to the installation path of HiFiAdapterFilt\r +- Set the "KMC_path" parameter to the installation path of KMC\r +- Set the "oatk_dir" parameter to the installation path of oatk\r +- Set the "oatk_db" parameter to the directory where you downloaded the oatk_db files\r +- Set the "fcs_path" parameter to the location of the ```run_fcsadaptor.sh``` and ```fcs.py``` scripts\r +- Set the "fcs_adaptor_image" and "fcs_gx_image" parameters to the paths to the ```fcs-adaptor.sif``` and ```fcs-gx.sif``` files 
respectively\r +- Set the "fcs_gx_db" parameter to the path of the fcs-gx database\r +\r +A couple of other parameters need to be verified as well in the config/asm_params.yaml file before running the pipeline:\r +\r +- The location of the input data (```input_dir```) should be set to the folder containing the input data.\r +- The location of the downloaded busco lineages (```busco_db_dir```) should be set to the folder containing the busco lineages files downloaded earlier\r +- The required BUSCO lineage for running the BUSCO analysis needs to set (```busco_lineage``` parameter). Run ```busco --list-datasets``` to get an overview of all available datasets.\r +- The required oatk lineage for running organelle genome assembly (```oatk_lineage``` parameter). Check https://github.com/c-zhou/OatkDB for an overview of available lineages.\r +- A boolean value wether the species is plant (for plastid prediction) or not (```oatk_isPlant```; set to either True or False)\r +- The NCBI taxid of your species, required for the decontamination step (```taxid``` parameter)\r +\r +## Usage and run modes\r +\r +Before running, make sure to activate the conda environment containing the necessary software: ```conda activate asm_assembly```.\r +To run the pipeline, run the following command:\r +\r +```\r +snakemake --profile config/ --configfile config/asm_params.yaml --snakefile workflow/Snakefile {run_mode}\r +```\r +\r +If you invoke the snakemake command in another directory than the one containing the ```workflow``` and ```config``` folders, \r +or if the config files (```config.yaml``` and ```asm_params.yaml```) are in another location, you need to specify their correct paths on the command line.\r +\r +The workflow parameters can be modified in 3 ways:\r +- Directly modifying the ```config/asm_parameters.yaml``` file\r +- Overriding the default parameters on the command line: ```--config parameter=new_value```\r +- Overriding the default parameters using a different yaml file: 
```--configfile path_to_parameters.yaml```\r +\r +The pipeline has different runing modes, and the run mode should always be the last argument on the command line:\r +\r +- "all" (default): will run the full workflow including pre-assembly (genomescope & smudgeplot), assembly, scaffolding, decontamination, and organelle assembly\r +- "pre_assembly": will run only the pre-assembly steps (genomescope & smudgeplot)\r +- "assembly": will filter the HiFi reads and assemble them using hifiasm (also using the Hi-C reads), and run busco\r +- "scaffolding": will run all steps necessary for scaffolding (filtering, assembly, HiC filtering, scaffolding, busco), but without pre-assembly\r +- "decontamination": will run assembly, scaffolding, and decontamination, but without pre-assembly and busco analyses\r +- "organelles": will run only organnelle genome assembly\r +\r +## Output\r +\r +All generated output will be present in the "results" directory, which will be created in the folder from where you invoke the snakemake command.\r +This results directory contains different subdirectories related to the different steps in the assembly:\r +- results/pre_assembly: genomescope and smudgeplot output (each in its own subfolder)\r +- results/assembly: Hifiasm assembly output and corresponding busco results\r +- results/scaffolding: scaffolding output, separated in two folders:\r + - meryl: meryl databases used for filtering HiC reads\r + - yahs: scaffolding output, including final scaffolds and their corresponding busco results\r +- results/decontamination: decontamination output of the final scaffolded assembly\r +- results/organelles: assembled organellar genomes\r +\r +Additionally, a text file containing all software versions will be created in the specified input directory.\r +The log files of the different steps in the workflow can be found in the ```logs``` directory that will be created.""" ; + schema1:hasPart , + ; + schema1:identifier 
"https://workflowhub.eu/workflows/740?version=1" ; + schema1:isBasedOn "https://github.com/ebp-nor/GenomeAssembly" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for EBP-Nor Genome Assembly Pipeline" ; + schema1:sdDatePublished "2024-08-05 10:25:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/740/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 19252 ; + schema1:dateCreated "2024-02-13T09:44:35Z" ; + schema1:dateModified "2024-02-13T09:44:35Z" ; + schema1:description """## EBP-Nor Genome Assembly pipeline\r +\r +This repository contains the EBP-Nor genome assembly pipeline. This pipeline is implemented in snakemake.\r +This pipeline is developed to create haplotype-resolved genome assemblies from PacBio HiFi reads and HiC reads,\r +and is primarly designed for diploid eukaryotic organisms. The pipeline is designed to work on a linux cluster with slurm as workload manager.\r +\r +## Requirements & Setup\r +\r +Some software need to be configured/installed before the pipeline can be run\r +\r +### Conda setup\r +\r +Most required software, including snakemake itself, can be installed using [conda](https://conda.io/projects/conda/en/latest/user-guide/install/index.html).\r +\r +Once conda is installed, you can create a new environment containing most necessary software from the provided asm_pipeline.yaml file as follows:\r +\r +```shell\r +conda create -n asm_pipeline --file=worfklow/envs/asm_pipeline.yaml\r +```\r +\r +### Other software setup\r +\r +The following software need to be installed manually:\r +\r +- KMC v3.1.1 (https://github.com/tbenavi1/KMC)\r +- HiFiAdapterFilt (https://github.com/sheinasim/HiFiAdapterFilt)\r +- Oatk (https://github.com/c-zhou/oatk)\r +- OatkDB (https://github.com/c-zhou/OatkDB)\r +- NCBI FCS-Adaptor 
(https://github.com/ncbi/fcs/wiki/FCS-adaptor)\r +- NCBI FCS-GX (https://github.com/ncbi/fcs/wiki/FCS-GX)\r +\r +Please refer to their respective installation instructions to properly install them. You will need to privide the installation paths of these software to the config file (see Parameter section).\r +\r +### BUSCO database setup\r +\r +As in general, computing nodes are not connected to the internet, BUSCO lineage datasets need to be downloaded manually before running the pipeline.\r +This can easily be done by running\r +\r +```shell\r +busco --download eukaryota\r +```\r +\r +You will need to specify the folder where you downloaded the busco lineages in the config file (see Parameter section).\r +\r +### Data\r +\r +This pipeline is created for using PacBio HiFi reads together with paired-end Hi-C data.\r +You will need to specify the absolute paths to these files in the config file (see Parameters section).\r +\r +### Parameters\r +\r +The necessary config files for running the pipeline can be found in the config folder.\r +\r +General snakemake and cluster submission parameters are defined in ```config/config.yaml```, \r +data- and software-specfic parameters are defined in ```config/asm_params.yaml```.\r +\r +First, define the paths of the input files you want to use:\r +- pacbio: path to the location of the PacBio HiFi reads (```.fastq.gz```)\r +- hicF and hicR: path to the forward and reverse HiC reads respectively\r +\r +For software not installed by conda, the installation path needs to be provided to the Snakemake pipeline by editing following parameters in the ```config/asm_params.yaml```:\r +\r +- Set the "adapterfilt_install_dir" parameter to the installation path of HiFiAdapterFilt\r +- Set the "KMC_path" parameter to the installation path of KMC\r +- Set the "oatk_dir" parameter to the installation path of oatk\r +- Set the "oatk_db" parameter to the directory where you downloaded the oatk_db files\r +- Set the "fcs_path" parameter to the 
location of the ```run_fcsadaptor.sh``` and ```fcs.py``` scripts\r +- Set the "fcs_adaptor_image" and "fcs_gx_image" parameters to the paths to the ```fcs-adaptor.sif``` and ```fcs-gx.sif``` files respectively\r +- Set the "fcs_gx_db" parameter to the path of the fcs-gx database\r +\r +A couple of other parameters need to be verified as well in the config/asm_params.yaml file before running the pipeline:\r +\r +- The location of the input data (```input_dir```) should be set to the folder containing the input data.\r +- The location of the downloaded busco lineages (```busco_db_dir```) should be set to the folder containing the busco lineages files downloaded earlier\r +- The required BUSCO lineage for running the BUSCO analysis needs to set (```busco_lineage``` parameter). Run ```busco --list-datasets``` to get an overview of all available datasets.\r +- The required oatk lineage for running organelle genome assembly (```oatk_lineage``` parameter). Check https://github.com/c-zhou/OatkDB for an overview of available lineages.\r +- A boolean value wether the species is plant (for plastid prediction) or not (```oatk_isPlant```; set to either True or False)\r +- The NCBI taxid of your species, required for the decontamination step (```taxid``` parameter)\r +\r +## Usage and run modes\r +\r +Before running, make sure to activate the conda environment containing the necessary software: ```conda activate asm_assembly```.\r +To run the pipeline, run the following command:\r +\r +```\r +snakemake --profile config/ --configfile config/asm_params.yaml --snakefile workflow/Snakefile {run_mode}\r +```\r +\r +If you invoke the snakemake command in another directory than the one containing the ```workflow``` and ```config``` folders, \r +or if the config files (```config.yaml``` and ```asm_params.yaml```) are in another location, you need to specify their correct paths on the command line.\r +\r +The workflow parameters can be modified in 3 ways:\r +- Directly modifying the 
```config/asm_parameters.yaml``` file\r +- Overriding the default parameters on the command line: ```--config parameter=new_value```\r +- Overriding the default parameters using a different yaml file: ```--configfile path_to_parameters.yaml```\r +\r +The pipeline has different runing modes, and the run mode should always be the last argument on the command line:\r +\r +- "all" (default): will run the full workflow including pre-assembly (genomescope & smudgeplot), assembly, scaffolding, decontamination, and organelle assembly\r +- "pre_assembly": will run only the pre-assembly steps (genomescope & smudgeplot)\r +- "assembly": will filter the HiFi reads and assemble them using hifiasm (also using the Hi-C reads), and run busco\r +- "scaffolding": will run all steps necessary for scaffolding (filtering, assembly, HiC filtering, scaffolding, busco), but without pre-assembly\r +- "decontamination": will run assembly, scaffolding, and decontamination, but without pre-assembly and busco analyses\r +- "organelles": will run only organnelle genome assembly\r +\r +## Output\r +\r +All generated output will be present in the "results" directory, which will be created in the folder from where you invoke the snakemake command.\r +This results directory contains different subdirectories related to the different steps in the assembly:\r +- results/pre_assembly: genomescope and smudgeplot output (each in its own subfolder)\r +- results/assembly: Hifiasm assembly output and corresponding busco results\r +- results/scaffolding: scaffolding output, separated in two folders:\r + - meryl: meryl databases used for filtering HiC reads\r + - yahs: scaffolding output, including final scaffolds and their corresponding busco results\r +- results/decontamination: decontamination output of the final scaffolded assembly\r +- results/organelles: assembled organellar genomes\r +\r +Additionally, a text file containing all software versions will be created in the specified input directory.\r +The 
log files of the different steps in the workflow can be found in the ```logs``` directory that will be created.""" ; + schema1:image ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "EBP-Nor Genome Assembly Pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/740?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 22814 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# deepconsensus 1.2 snakemake pipeline\r +This snakemake-based workflow takes in a subreads.bam and results in a deepconsensus.fastq\r +- no methylation calls !\r +\r +The metadata id of the subreads file needs to be: "m[numeric]_[numeric]_[numeric].subreads.bam"\r +\r +Chunking (how many subjobs) and ccs min quality filter can be adjusted in the config.yaml\r +\r +the checkpoint model for deepconsensus1.2 should be accessible like this:\r +gsutil cp -r gs://brain-genomics-public/research/deepconsensus/models/v1.2/model_checkpoint/* "${QS_DIR}"/model/\r +if that does not work, try to download all at:\r +https://console.cloud.google.com/storage/browser/brain-genomics-public/research/deepconsensus/models?pageState=(%22StorageObjectListTable%22:(%22f%22:%22%255B%255D%22))&prefix=&forceOnObjectsSortingFiltering=false\r +\r +A run example is included in the run_snake.sh\r +\r +Feedback / pull requests welcome!\r +\r +Developed by Daniel Rickert @ WGGC Düsseldorf\r +\r +more to look at:\r +\r +https://www.youtube.com/watch?v=TlWtIao2i9E\r +\r +https://www.nature.com/articles/s41587-022-01435-7\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1075?version=1" ; + schema1:isBasedOn "https://github.com/WestGermanGenomeCenter/deep_snake.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Deepconsensus for Sequel2/2e 
subreads" ; + schema1:sdDatePublished "2024-08-05 10:22:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1075/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4578 ; + schema1:dateCreated "2024-07-12T08:59:51Z" ; + schema1:dateModified "2024-07-17T12:25:36Z" ; + schema1:description """# deepconsensus 1.2 snakemake pipeline\r +This snakemake-based workflow takes in a subreads.bam and results in a deepconsensus.fastq\r +- no methylation calls !\r +\r +The metadata id of the subreads file needs to be: "m[numeric]_[numeric]_[numeric].subreads.bam"\r +\r +Chunking (how many subjobs) and ccs min quality filter can be adjusted in the config.yaml\r +\r +the checkpoint model for deepconsensus1.2 should be accessible like this:\r +gsutil cp -r gs://brain-genomics-public/research/deepconsensus/models/v1.2/model_checkpoint/* "${QS_DIR}"/model/\r +if that does not work, try to download all at:\r +https://console.cloud.google.com/storage/browser/brain-genomics-public/research/deepconsensus/models?pageState=(%22StorageObjectListTable%22:(%22f%22:%22%255B%255D%22))&prefix=&forceOnObjectsSortingFiltering=false\r +\r +A run example is included in the run_snake.sh\r +\r +Feedback / pull requests welcome!\r +\r +Developed by Daniel Rickert @ WGGC Düsseldorf\r +\r +more to look at:\r +\r +https://www.youtube.com/watch?v=TlWtIao2i9E\r +\r +https://www.nature.com/articles/s41587-022-01435-7\r +""" ; + schema1:keywords "Bioinformatics, Genomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Deepconsensus for Sequel2/2e subreads" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1075?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Automated quantitative analysis of DIA proteomics mass spectrometry measurements." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/980?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/diaproteomics" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/diaproteomics" ; + schema1:sdDatePublished "2024-08-05 10:24:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/980/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6543 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "Automated quantitative analysis of DIA proteomics mass spectrometry measurements." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/980?version=6" ; + schema1:keywords "data-independent-proteomics, dia-proteomics, openms, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/diaproteomics" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/980?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/984?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/epitopeprediction" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/epitopeprediction" ; + schema1:sdDatePublished "2024-08-05 10:23:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/984/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5704 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:51Z" ; + schema1:dateModified "2024-06-11T12:54:51Z" ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/984?version=7" ; + schema1:keywords "epitope, epitope-prediction, mhc-binding-prediction" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/epitopeprediction" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/984?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "The workflow takes a paired-reads collection (like illumina WGS or HiC), runs FastQC and SeqKit, trims with Fastp, and creates a MultiQC report. The main outputs are a paired collection of trimmed reads, a report with raw and trimmed reads stats, and a table with raw reads stats." ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.601.1" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/galaxy" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ERGA DataQC Illumina v2309 (WF0)" ; + schema1:sdDatePublished "2024-08-05 10:25:10 +0100" ; + schema1:url "https://workflowhub.eu/workflows/601/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 19048 ; + schema1:creator , + ; + schema1:dateCreated "2023-10-06T13:03:57Z" ; + schema1:dateModified "2024-03-13T09:02:48Z" ; + schema1:description "The workflow takes a paired-reads collection (like illumina WGS or HiC), runs FastQC and SeqKit, trims with Fastp, and creates a MultiQC report. The main outputs are a paired collection of trimmed reads, a report with raw and trimmed reads stats, and a table with raw reads stats." ; + schema1:image ; + schema1:isPartOf , + , + ; + schema1:keywords "ERGA, DataQC, illumina" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ERGA DataQC Illumina v2309 (WF0)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/0.Data_QC/Galaxy-Workflow-ERGA_DataQC_Illumina_v2309_(WF0).ga" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 169490 ; + schema1:url "https://github.com/ERGA-consortium/pipelines/blob/main/assembly/galaxy/0.Data_QC/pics/QC_illu_2309.png" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and modern ancient DNA pipeline in Nextflow and with cloud support." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-08-05 10:23:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3775 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:49Z" ; + schema1:dateModified "2024-06-11T12:54:49Z" ; + schema1:description "A fully reproducible and modern ancient DNA pipeline in Nextflow and with cloud support." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# rquest-omop-worker\r +Executes BC|RQuest Availability queries via an open source implementation of BC|Link against a target PostgreSQL data source containing OMOP CDM 5.3 data.\r +\r +## Inputs\r +\r +### Body\r +Sample input payload:\r +\r +```json\r +{\r + "task_id": "job-2023-01-13-14: 20: 38-",\r + "project": "",\r + "owner": "",\r + "cohort": {\r + "groups": [\r + {\r + "rules": [\r + {\r + "varname": "OMOP",\r + "varcat": "Person",\r + "type": "TEXT",\r + "oper": "=",\r + "value": "8507"\r + }\r + ],\r + "rules_oper": "AND"\r + }\r + ],\r + "groups_oper": "OR"\r + },\r + "collection": "",\r + "protocol_version": "",\r + "char_salt": "",\r + "uuid": ""\r +}\r +```\r +\r +### Database access\r +\r +Currently this workflow requires inputs for connecting to the database it will run queries against using SQL Alchemy compatible options:\r +- `db_host` - the postgres db server hostname\r +- `db_name` - the database name\r +- `db_user` - the username for postgres\r +- 
`db_password` the password for postgres\r +\r +It is not currently possible to use a port other than the postgres default (`5432`)\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.471.2" ; + schema1:isBasedOn "https://github.com/HDRUK/rquest-omop-worker-workflows" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for rquest-omop-worker" ; + schema1:sdDatePublished "2024-08-05 10:27:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/471/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 8282 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 871 ; + schema1:creator ; + schema1:dateCreated "2023-10-10T15:03:58Z" ; + schema1:dateModified "2023-10-10T15:04:32Z" ; + schema1:description """# rquest-omop-worker\r +Executes BC|RQuest Availability queries via an open source implementation of BC|Link against a target PostgreSQL data source containing OMOP CDM 5.3 data.\r +\r +## Inputs\r +\r +### Body\r +Sample input payload:\r +\r +```json\r +{\r + "task_id": "job-2023-01-13-14: 20: 38-",\r + "project": "",\r + "owner": "",\r + "cohort": {\r + "groups": [\r + {\r + "rules": [\r + {\r + "varname": "OMOP",\r + "varcat": "Person",\r + "type": "TEXT",\r + "oper": "=",\r + "value": "8507"\r + }\r + ],\r + "rules_oper": "AND"\r + }\r + ],\r + "groups_oper": "OR"\r + },\r + "collection": "",\r + "protocol_version": "",\r + "char_salt": "",\r + "uuid": ""\r +}\r +```\r +\r +### Database access\r +\r +Currently this workflow requires inputs for connecting to the database it will run queries against using SQL Alchemy compatible options:\r +- `db_host` - the postgres db server hostname\r +- `db_name` - the database name\r +- `db_user` - the username for postgres\r +- `db_password` the password for postgres\r +\r +It is 
not currently possible to use a port other than the postgres default (`5432`)\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/471?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "rquest-omop-worker" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/471?version=2" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + ; + ns1:output . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=15" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-08-05 10:23:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=15" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17481 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:59Z" ; + schema1:dateModified "2024-06-11T12:54:59Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=15" ; + schema1:version 15 . 
+ + a schema1:Dataset ; + schema1:datePublished "2022-11-30T12:14:23.298144" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/rnaseq-sr" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "rnaseq-sr/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.3" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "B cell repertoire analysis pipeline with immcantation framework." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/963?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/airrflow" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/bcellmagic" ; + schema1:sdDatePublished "2024-08-05 10:24:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/963/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4117 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:36Z" ; + schema1:dateModified "2024-06-11T12:54:36Z" ; + schema1:description "B cell repertoire analysis pipeline with immcantation framework." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/963?version=13" ; + schema1:keywords "airr, b-cell, immcantation, immunorepertoire, repseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/bcellmagic" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/963?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Analyse Bulk RNA-Seq data in preparation for downstream Pathways analysis with MINERVA" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/689?version=1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for mRNA-Seq BY-COVID Pipeline: Analysis" ; + schema1:sdDatePublished "2024-08-05 10:26:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/689/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 31727 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2023-12-19T10:10:54Z" ; + schema1:dateModified "2024-01-24T09:43:21Z" ; + schema1:description "Analyse Bulk RNA-Seq data in preparation for downstream Pathways analysis with MINERVA" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "BY-COVID, covid-19" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "mRNA-Seq BY-COVID Pipeline: Analysis" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/689?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 945135 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """The workflows in this collection are from the '16S Microbial Analysis with mothur' tutorial for analysis of 16S data (Saskia Hiltemann, Bérénice Batut, Dave Clements), adapted for pipeline use on galaxy australia (Ahmed Mehdi). The workflows developed in galaxy use mothur software package developed by Schloss et al https://pubmed.ncbi.nlm.nih.gov/19801464/. 
\r +\r +Please also refer to the 16S tutorials available at Galaxy https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html and [https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop/tutorial.html\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/651?version=1" ; + schema1:isBasedOn "https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Workflow 5: OTU Clustering [16S Microbial Analysis With Mothur]" ; + schema1:sdDatePublished "2024-08-05 10:27:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/651/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17106 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2023-11-09T05:20:20Z" ; + schema1:dateModified "2023-11-09T05:20:20Z" ; + schema1:description """The workflows in this collection are from the '16S Microbial Analysis with mothur' tutorial for analysis of 16S data (Saskia Hiltemann, Bérénice Batut, Dave Clements), adapted for pipeline use on galaxy australia (Ahmed Mehdi). The workflows developed in galaxy use mothur software package developed by Schloss et al https://pubmed.ncbi.nlm.nih.gov/19801464/. 
\r +\r +Please also refer to the 16S tutorials available at Galaxy https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop-short/tutorial.html and [https://training.galaxyproject.org/training-material/topics/metagenomics/tutorials/mothur-miseq-sop/tutorial.html\r +""" ; + schema1:isPartOf ; + schema1:keywords "Metagenomics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Workflow 5: OTU Clustering [16S Microbial Analysis With Mothur]" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/651?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). 
\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.258.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_protein_complex_md_setup/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Protein Ligand Complex MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:32:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/258/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 192643 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 36316 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-07T10:54:43Z" ; + schema1:dateModified "2023-06-07T11:04:49Z" ; + schema1:description """# Protein Ligand Complex MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/complex/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation system** containing a **protein in complex with a ligand**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **T4 lysozyme** L99A/M102Q protein (PDB code 3HTB), in complex with the **2-propylphenol** small molecule (3-letter Code JZ4). \r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/258?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Protein Ligand Complex MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + 
schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_protein_complex_md_setup/cwl/workflow.cwl" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=19" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-08-05 10:22:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=19" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13451 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:40Z" ; + schema1:dateModified "2024-06-11T12:54:40Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=19" ; + schema1:version 19 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Example workflow which allows the use of Mothra\r +\r +Accepts (e.g.) [these](https://github.com/machine-shop/mothra-data/tree/main/test_images) input files, bundled as a collection.""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/413?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Mothra" ; + schema1:sdDatePublished "2024-08-05 10:31:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/413/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3782 ; + schema1:dateCreated "2022-12-14T16:03:30Z" ; + schema1:dateModified "2023-01-16T14:04:58Z" ; + schema1:description """Example workflow which allows the use of Mothra\r +\r +Accepts (e.g.) 
[these](https://github.com/machine-shop/mothra-data/tree/main/test_images) input files, bundled as a collection.""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Mothra" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/413?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """\r +Author: AMBARISH KUMAR er.ambarish@gmail.com; ambari73_sit@jnu.ac.in\r +\r +This is a proposed standard operating procedure for genomic variant detection using SAMTools.\r +\r +It is hoped to be effective and useful for getting SARS-CoV-2 genome variants.\r +\r +\r +\r +It uses Illumina RNASEQ reads and genome sequence.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/34?version=1" ; + schema1:isBasedOn "https://github.com/ambarishK/bio-cwl-tools/blob/release/samtoolsW.cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Genomic variants - SNPs and INDELs detection using SAMTools." ; + schema1:sdDatePublished "2024-08-05 10:33:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/34/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 54065 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2761 ; + schema1:dateCreated "2020-06-17T07:41:06Z" ; + schema1:dateModified "2023-01-16T13:42:36Z" ; + schema1:description """\r +Author: AMBARISH KUMAR er.ambarish@gmail.com; ambari73_sit@jnu.ac.in\r +\r +This is a proposed standard operating procedure for genomic variant detection using SAMTools.\r +\r +It is hoped to be effective and useful for getting SARS-CoV-2 genome variants.\r +\r +\r +\r +It uses Illumina RNASEQ reads and genome sequence.\r +""" ; + schema1:image ; + schema1:keywords "CWL, SAMTools, SNPs, INDELs, covid-19" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Genomic variants - SNPs and INDELs detection using SAMTools." ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/34?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + ; + ns1:output , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and modern ancient DNA pipeline in Nextflow and with cloud support." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-08-05 10:23:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3894 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:49Z" ; + schema1:dateModified "2024-06-11T12:54:49Z" ; + schema1:description "A fully reproducible and modern ancient DNA pipeline in Nextflow and with cloud support." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + schema1:datePublished "2022-10-21T10:14:20.880368" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/rnaseq-sr" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "rnaseq-sr/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.11" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/984?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/epitopeprediction" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/epitopeprediction" ; + schema1:sdDatePublished "2024-08-05 10:23:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/984/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7710 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:51Z" ; + schema1:dateModified "2024-06-11T12:54:51Z" ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/984?version=7" ; + schema1:keywords "epitope, epitope-prediction, mhc-binding-prediction" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/epitopeprediction" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/984?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=25" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-08-05 10:24:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=25" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13457 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:51Z" ; + schema1:dateModified "2024-06-11T12:54:51Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=25" ; + schema1:version 25 . + + a schema1:Dataset ; + schema1:datePublished "2024-07-31T15:58:18.805158" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Scaffolding-HiC-VGP8/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.24" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/987?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/funcscan" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/funcscan" ; + schema1:sdDatePublished "2024-08-05 10:23:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/987/ro_crate?version=7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16624 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:54Z" ; + schema1:dateModified "2024-06-11T12:54:54Z" ; + schema1:description "Pipeline for screening for functional components of assembled contigs" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/987?version=8" ; + schema1:keywords "amp, AMR, antibiotic-resistance, antimicrobial-peptides, antimicrobial-resistance-genes, arg, Assembly, bgc, biosynthetic-gene-clusters, contigs, function, Metagenomics, natural-products, screening, secondary-metabolites" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/funcscan" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/987?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r 
+* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.262.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_abc_md_setup/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL ABC MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:32:30 +0100" ; + schema1:url "https://workflowhub.eu/workflows/262/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 230004 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 46603 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-12T08:27:43Z" ; + schema1:dateModified "2023-06-12T08:35:34Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 
[823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/262?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL ABC MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_abc_md_setup/cwl/workflow.cwl" ; + schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 49084 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Summary\r +\r +This tutorial aims to illustrate how to compute a fast-growth mutation free energy calculation, step by step, using the BioExcel Building Blocks library (biobb). The particular example used is the Staphylococcal nuclease protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +Workflow engine is a jupyter notebook. Auxiliary libraries used are nb\\_conda\\_kernels, os, and plotly. Environment setup can be carried out using the environment.yml in the code repository. The tutorial uses docker for running pmx - a local setup can be used instead, see notes in the tutorial.\r +\r +# Parameters\r +\r +## Inputs\r +\r +Workflow Input files needed:\r +\r +* **stateA_traj**: Equilibrium trajectory for the WT protein.\r +\r +* **stateB_traj**: Equilibrium trajectory for the Mutated protein.\r +\r +* **stateA_tpr**: WT protein topology (GROMACS tpr format).\r +\r +* **stateB_tpr**: Mutated protein topology (GROMACS tpr format).\r +\r +Auxiliar force field libraries needed:\r +\r +* **mutff45 (folder)**: pmx mutation force field libraries. \r +\r +\r +## Outputs\r +\r +* **pmx.outputs**: Final free energy estimation. Summary of information got applying the different methods.\r +\r +* **pmx.plots.png**: Final free energy plot of the Mutation free energy pipeline.\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.55.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_pmx_tutorial" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Mutation Free Energy Calculations using BioExcel Building Blocks (biobb) (jupyter notebook)" ; + schema1:sdDatePublished "2024-08-05 10:25:29 +0100" ; + schema1:url "https://workflowhub.eu/workflows/55/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 48053 ; + schema1:creator , + ; + schema1:dateCreated "2021-05-07T13:22:40Z" ; + schema1:dateModified "2021-05-13T08:15:28Z" ; + schema1:description """# Summary\r +\r +This tutorial aims to illustrate how to compute a fast-growth mutation free energy calculation, step by step, using the BioExcel Building Blocks library (biobb). The particular example used is the Staphylococcal nuclease protein (PDB code 1STN), a small, minimal protein, appropriate for a short tutorial.\r +\r +Workflow engine is a jupyter notebook. Auxiliary libraries used are nb\\_conda\\_kernels, os, and plotly. Environment setup can be carried out using the environment.yml in the code repository. The tutorial uses docker for running pmx - a local setup can be used instead, see notes in the tutorial.\r +\r +# Parameters\r +\r +## Inputs\r +\r +Workflow Input files needed:\r +\r +* **stateA_traj**: Equilibrium trajectory for the WT protein.\r +\r +* **stateB_traj**: Equilibrium trajectory for the Mutated protein.\r +\r +* **stateA_tpr**: WT protein topology (GROMACS tpr format).\r +\r +* **stateB_tpr**: Mutated protein topology (GROMACS tpr format).\r +\r +Auxiliar force field libraries needed:\r +\r +* **mutff45 (folder)**: pmx mutation force field libraries. \r +\r +\r +## Outputs\r +\r +* **pmx.outputs**: Final free energy estimation. 
Summary of information got applying the different methods.\r +\r +* **pmx.plots.png**: Final free energy plot of the Mutation free energy pipeline.\r +\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/55?version=5" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Mutation Free Energy Calculations using BioExcel Building Blocks (biobb) (jupyter notebook)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/55?version=2" ; + schema1:version 2 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 80077 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ChIP-seq peak-calling and differential analysis pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/971?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/chipseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/chipseq" ; + schema1:sdDatePublished "2024-08-05 10:24:13 +0100" ; + schema1:url "https://workflowhub.eu/workflows/971/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5685 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:44Z" ; + schema1:dateModified "2024-06-11T12:54:44Z" ; + schema1:description "ChIP-seq peak-calling and differential analysis pipeline." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/971?version=5" ; + schema1:keywords "ChIP, ChIP-seq, chromatin-immunoprecipitation, macs2, peak-calling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/chipseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/971?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 
2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.129.4" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_virtual-screening" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein-ligand Docking tutorial (Fpocket)" ; + schema1:sdDatePublished "2024-08-05 10:30:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/129/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 43078 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-07-26T09:30:15Z" ; + schema1:dateModified "2023-07-26T09:33:11Z" ; + schema1:description """# Protein-ligand Docking tutorials using BioExcel Building Blocks (biobb)\r +\r +This tutorials aim to illustrate the process of **protein-ligand docking**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular examples used are based on the **Mitogen-activated protein kinase 14** (p38-α) protein (PDB code [3HEC](https://www.rcsb.org/structure/3HEC)), a well-known **Protein Kinase enzyme**,\r + in complex with the FDA-approved **Imatinib** (PDB Ligand code [STI](https://www.rcsb.org/ligand/STI), DrugBank Ligand Code [DB00619](https://go.drugbank.com/drugs/DB00619)) and **Dasatinib** (PDB Ligand code [1N1](https://www.rcsb.org/ligand/1N1), DrugBank Ligand Code [DB01254](https://go.drugbank.com/drugs/DB01254)), small **kinase inhibitors** molecules used to treat certain types of **cancer**.\r +\r +The tutorials will guide you through the process of identifying the **active site cavity** (pocket) without previous knowledge, and the final prediction of the **protein-ligand complex**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/129?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Protein-ligand Docking tutorial (Fpocket)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_wf_virtual-screening/blob/master/biobb_wf_virtual-screening/notebooks/fpocket/wf_vs_fpocket.ipynb" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# ESCALIBUR\r +\r +Escalibur Population Genomic Analysis Pipeline is able to explore key aspects centering the population genetics of organisms, and automates three key bioinformatic components in population genomic analysis using Workflow Definition Language (WDL: https://openwdl.org/), and customised R, Perl, Python and Unix shell scripts. Associated programs are packaged into a platform independent singularity image, for which the definition file is provided.\r +\r +The workflow for analysis using Escalibur consists of three steps - each step can be run in a separate workflow in a sequential manner; step 2 is optional.\r +\r + 1. Trimming and mapping the raw data - selection of the best reference genome;\r + 2. Removing the contamination from mapped data;\r + 3. Recalibration, variant calling and filtering;\r +\r +This implementation runs both locally and in a distributed environment that uses SLURM job scheduler.\r +\r +## Dependencies\r +Following software dependencies are required:\r +\r +* Git\r +* SLURM scheduler required for distributed HPC environment (https://slurm.schedmd.com/documentation.html)\r +* Python3.7: (https://www.python.org/)\r +* Perl 5.26.2: (https://www.perl.org/)\r +* Java 1.8\r +* Singularity 3.7.3: (https://sylabs.io/singularity/)\r +\r +## Step 1: Installation\r +\r +Typically, the installation of Singularity requires root rights. You should therefore contact your administrator to get it correctly installed. 
Minimum Linux kernel version requirement is 3.8, thought >= 3.18 would be preferred (https://sylabs.io/guides/3.5/admin-guide/installation.html).\r +\r +Clone the git repository to a directory on your cluster or stand-alone server.\r +```\r +> git clone --depth 1 -b v0.3-beta https://gitlab.unimelb.edu.au/bioscience/escalibur.git\r +> cd escalibur\r +```\r +\r +### Description of Files\r +* `workflow-main.local.config`: main configuration file for stand alone server runtime environment\r +* `workflow-main.slurm.config`: main configuration file for HPC runtime environment that support Slurm job scheduler\r +* `workflow-mapping.json`: defines location of input files, has behavioral settings and sets resource allocations\r +* `workflow-cleaning.json`: defines location of input files and sets resource allocations\r +* `workflow-variants.json`: defines location of input files, has behavioral settings and sets resource allocations\r +* `workflow-mapping.wdl`: main workflow file to trim and map PE reads into the genome\r +* `workflow-cleaning.wdl`: main workflow file to clean contamination from mapped PE reads against genomes representing putative contamination\r +* `workflow-variants.wdl`: main workflow file to call variants using mapped and cleaned reads\r +* `workflow-mapping.outputs.json`: defines location for resultant outputs and logs from mapping workflow\r +* `workflow-cleaning.outputs.json`: defines location for resultant outputs and logs from cleaning workflow\r +* `workflow-variants.outputs.json`: defines location for resultant outputs and logs from variants workflow\r +* `inputReads.txt`: example input file for fastq read files to mapping step\r +* `cleanup.conf`: example configuration file for putative host contamination to cleaning step\r +* `inputBams.txt`: example input file for resultant BAM files to variant calling step\r +* `references.txt`: contains list of example references genomes\r +* `perl_scripts`: contains Perl scripts used by the pipeline\r +* 
`scripts`: contains Python scripts used by the pipeline\r +* `R_scripts`: contains R scripts used by the pipeline\r +* `sub_workflows`: sub-workflows, one for each of the workflow steps\r +* `tasks`: workflow tasks\r +* `cromwell-50.jar`: java archive file required to run the workflow.\r +\r +Two config files have been created. One for stand alone server (`workflow-runtime.local.config`) and another one for HPC environment that supports Slurm scheduler (`workflow-runtime.slurm.config`).\r +These files have already been optimised. For slurm configuration you only need to define the HPC partition in line 35: "String rt_queue"\r +Change this to the partition you have access to on HPC environment.\r +\r +Files `workflow-mapping.outputs.json`, `workflow-cleaning.outputs.json` and `workflow-variants.outputs.json` define the directories to copy the result files to. Modify if you want to change default output directories `outputMapping`, `outputCleaning` and `outputVariants`. These output directories are generated to the directory `escalibur`.\r +#### NOTE: delete output directories from previous runs. If you have files there already and a name matches during the copy, the workflow may fail.\r +\r +`Singularity` directory contains the definition file for the software used in Escalibur. 
Pre-built singularity image can be downloaded from `library://pakorhon/workflows/escalibur:0.0.1-beta`.\r +```\r +> singularity pull escalibur.sif library://pakorhon/workflows/escalibur:0.0.1-beta\r +```\r +\r +## Step 2: Test run\r +\r +To confirm correct function of the workflows (`mapping`, `cleaning` and `variant calling`), fix the required absolute paths, marked by three dots `...` in `workflow-mapping.json`, `workflow-cleaning.json` and `workflow-variants.json` and configuration files `cleanup.conf` and `inputBams.txt`, and run the workflow with the provided test and configuration files, and parameter settings.\r +```\r +> java -Dconfig.file=./workflow-runtime.local.config -jar ./cromwell-50.jar run workflow-mapping.wdl -i workflow-mapping.json -o workflow-mapping.outputs.json > out.mapping 2> err.mapping\r +> java -Dconfig.file=./workflow-runtime.local.config -jar ./cromwell-50.jar run workflow-cleaning.wdl -i workflow-cleaning.json -o workflow-cleaning.outputs.json > out.cleaning 2> err.cleaning\r +> java -Dconfig.file=./workflow-runtime.local.config -jar ./cromwell-50.jar run workflow-variants.wdl -i workflow-variants.json -o workflow-variants.outputs.json > out.variants 2> err.variants\r +```\r +Slurm file templates `runMapping.slurm`, `runCleaning.slurm` and `runVariants.slurm` are available for each workflow.\r +#### NOTE: default parameter settings for run-times, memory usage and module loading may require adjustment in these files if run in HPC environment using slurm. Current settings should account for the test run.\r +\r +After the runs are complete, the results will be at the output directories: `outputMapping`, `outputCleaning` and `outputVariants`.\r +You can compare the result of `outputVariants/full_genotype_output.vcf` to that or pre-run `TestResults/full_genotype_output.vcf`.\r +\r +## Step 3: Mapping\r +\r +Make a directory for your fastq files e.g. 
`Reads` and copy your paired end raw data in there.\r +```\r +> mkdir Reads\r +```\r +\r +It should look something like below\r +```\r +> ls TestReads/\r +1-1_r1.fastq.gz 32-1_r1.fastq.gz 44-1_r1.fastq.gz\r +1-1_r2.fastq.gz 32-1_r2.fastq.gz 44-1_r2.fastq.gz\r +```\r +Run the python script to create a file of your input samples and edit the resulting file to match your sample identifiers and libraries.\r +```\r +> python3 scripts/inputArgMaker.py -d Reads/ -p -ps 33 -pq 20 -pl ILLUMINA -ml 50 -o inputReads.txt \r +```\r +\r +The edited output file is shown below. The script will automatically sort the files by size.\r +```\r +> cat inputReads.txt\r +# Prefix PE/SE MinLen PhredS Sequencer PhredQ Library Read Group ID Sample Platform Unit First pair of PE reads Second pair of PE reads\r +test1 PE 50 33 ILLUMINA 28 LIB1 CL100082180L1 SM1 CL100082180L1 ./TestReads/1-1_r1.fastq.gz ./TestReads/1-1_r2.fastq.gz\r +test2 PE 50 33 ILLUMINA 20 LIB2 CL100082180L1 SM2 CL100082180L1 ./TestReads/44-1_r1.fastq.gz ./TestReads/44-1_r2.fastq.gz\r +test3 PE 50 33 ILLUMINA 20 LIB3 CL100034574L1 SM2 CL100034574L1 ./TestReads/32-1_r1.fastq.gz ./TestReads/32-1_r2.fastq.gz\r +```\r +#### NOTE: If several libraries are embedded in a single read file, library-specific reads have to be separated into own files before create the inputReads.txt file. In contrast, inputReads.txt file format can accommodate multiple library files to a single sample.\r +\r +* `Prefix`: Prefix for the resultant files from trimming.\r +* `PE/SE`: Paired-End/Single-End reads as input.\r +* `MinLen`: Minimum Length of reads after trimming.\r +* `PhredS`: Used Phred coding by the sequencer (33 or 64).\r +* `Sequencer`: Name of the sequencer.\r +* `PhredQ`: Phred cut-off score used in trimming.\r +* `Library`: Identifier for the library.\r +* `Read Group ID`: Identifier for the read groups required by GATK (inputArgMaker tries to find this from FASTQ reads). 
Refer to (https://gatk.broadinstitute.org/hc/en-us/articles/360035890671-Read-groups).\r +* `Sample`: Identifier for the sample. Defined prefix for resultant sample specific files.\r +* `Platform Unit (optional)`: Information about flow cell, lane and sample. Helps GATK in recalibration (inputArgMaker copies Read Group ID here). Refer to (https://gatk.broadinstitute.org/hc/en-us/articles/360035890671-Read-groups).\r +* `First pair of PE reads`: Relative path to the forward pair of PE reads.\r +* `Second pair of PE reads`: Relative path to the reverse pair of PE reads.\r +\r +Create a file listing reference genomes and configure `workflow-mapping.json` file.\r +An example reference file (`references.txt`) has been created for you. Use this as an example to create your own.\r +Ensure there are no whitespaces at the end of the line or else the cromwell engine will throw an error.\r +Reads are mapped to these reference files and the best matching reference will be selected for variant calling.\r +```\r +> cat references.txt\r +scf00001 ./TestReferences/scf00001.fa\r +scf00013 ./TestReferences/scf00013.fa\r +```\r +#### NOTE: Reference label (e.g. `scf00001`) must be a substring found in the reference fasta file (`scf00001.fa`)\r +\r +The figure below illustrates the flow of the information, and appearance of labels (`Prefix`, `Sample`, `Label`) in file names, as defined in `inputReads.txt` and `references.txt`.\r +![](figures/labelFlow.png)\r +\r +### workflow-mapping.json config file\r +Add the path of your fastq and reference genome input files and change parameters as appropriate, and adjust the absolute paths for singularity image. If `mapping_workflow.readQc` is set to `yes`, reads are trimmed both for quality and the adapters. Adapters to trim are given in `mapping_workflow.pe_filtering_workflow.trimmomatic_pe_task.truseq_pe_adapter`. 
If you want to use custom adapters, copy them to `adapters` directory and instead of default `TruSeq3-PE.fa`, refer to your custom file. If you don't want to use adapters, use `empty.fa` file instead. For BGISEQ adapters, refer to (https://en.mgitech.cn/Download/download_file/id/71).\r +```\r +{\r + "## CONFIG FILE": "WDL",\r + "mapping_workflow.inputSampleFile": "./inputReads.txt",\r + "mapping_workflow.inputReferenceFile": "./references.txt",\r +\r + "## Parameters for samtools read filtering": "-F 4 does filters unmapped reads from resultant files",\r + "mapping_workflow.samtoolsParameters": "-F 4",\r + \r + "## Is read QC required": "yes or no",\r + "mapping_workflow.readQc": "yes",\r + "## What is the ploidy of given genome": "1 for haploid, 2 for diploid, etc.",\r + "mapping_workflow.ploidy": 2,\r + \r + "## Singularity parameters": "absolute paths to the container and the directory to bind visible inside singularity",\r + "mapping_workflow.singularityContainerPath": "/home/.../escalibur/escalibur.sif",\r + "mapping_workflow.singularityBindPath": "/home/.../escalibur/",\r +\r + "## trimmomatic adapters": "",\r + "mapping_workflow.pe_filtering_workflow.trimmomatic_pe_task.truseq_pe_adapter":"./adapters/TruSeq3-PE.fa",\r + "mapping_workflow.pe_filtering_workflow.trimmomatic_se_task.truseq_se_adapter":"./adapters/TruSeq3-SE.fa",\r + \r + "## Indexing sub workflow task parameters": "Samtools index run time parameters",\r + "mapping_workflow.index_sub_workflow.indexing_sam_task.IST_minutes": 300,\r + "mapping_workflow.index_sub_workflow.indexing_sam_task.IST_threads": 16,\r + "mapping_workflow.index_sub_workflow.indexing_sam_task.IST_mem": 30000,\r + .\r + .\r + .\r +}\r +```\r +\r +Run the mapping workflow.\r +```\r +> java -Dconfig.file=./workflow-runtime.local.config -jar ./cromwell-50.jar run workflow-mapping.wdl -i workflow-mapping.json -o workflow-mapping.outputs.json > out.mapping 2> err.mapping\r +```\r +The resultant BAM files will be copied to 
`outputMapping` directory.\r +\r +## Step 4 (optional): Cleaning\r +\r +If you suspect 'host' contamination in your data, you can remove that using the cleaning workflow.\r +Define the file representing the contamination. First column defines the sample identifier, second the resultant BAM file from mapping workflow and third the putative contaminant genome assembly.\r +```\r +> cat cleanup.conf\r +SM1 /home/.../escalibur/outputMapping/SM1.scf00001.MarkDup.bam /home/.../escalibur/Hosts/host1.fa\r +SM2 /home/.../escalibur/outputMapping/SM2.scf00001.MarkDup.bam /home/.../escalibur/Hosts/host1.fa\r +```\r +#### NOTE: you have to use absolute paths both to BAM files and the contaminant reference genome (here `host1.fa` and `host2.fa`).\r +\r +### workflow-cleaning.json config file\r +Add the path of your cleaning config file (here `cleanup.conf`) and adjust the absolute paths for singularity image.\r +```\r +{\r + "## CONFIG FILE": "WDL",\r + "cleaning_workflow.inputContaminantFile": "./cleanup.conf",\r + \r + "## Singularity parameters": "absolute paths to the container and the directory to bind visible inside singularity",\r + "cleaning_workflow.singularityContainerPath": "/home/.../escalibur/escalibur.sif",\r + "cleaning_workflow.singularityBindPath": "/home/.../escalibur/",\r +\r + "cleaning_workflow.indexing_bwa_task.IBT_minutes": 60,\r + "cleaning_workflow.indexing_bwa_task.IBT_threads": 1,\r + "cleaning_workflow.indexing_bwa_task.IBT_mem": 16000,\r +\r + "######################################":"########################################",\r + "CLEANING":"PARAMETERS",\r + "######################################":"########################################",\r + "cleaning_workflow.clean_bams_workflow.cleanBams_task.CLEAN_BAMS_minutes": 600,\r + "cleaning_workflow.clean_bams_workflow.cleanBams_task.CLEAN_BAMS_threads": 4,\r + "cleaning_workflow.clean_bams_workflow.cleanBams_task.CLEAN_BAMS_mem": 32000,\r +\r + 
"cleaning_workflow.create_cleaned_bams_workflow.createCleanedBams_task.CREATE_CLEAN_BAMS_minutes": 300,\r + "cleaning_workflow.create_cleaned_bams_workflow.createCleanedBams_task.CREATE_CLEAN_BAMS_threads": 4,\r + "cleaning_workflow.create_cleaned_bams_workflow.createCleanedBams_task.CREATE_CLEAN_BAMS_mem": 32000,\r +\r + "cleaning_workflow.refsBySample.RBS_minutes": 5,\r + "cleaning_workflow.refsBySample.RBS_threads": 1,\r + "cleaning_workflow.refsBySample.RBS_mem": 4000\r +}\r +```\r +\r +Run the cleaning workflow.\r +```\r +> java -Dconfig.file=./workflow-runtime.local.config -jar ./cromwell-50.jar run workflow-cleaning.wdl -i workflow-cleaning.json -o workflow-cleaning.outputs.json > out.cleaning 2> err.cleaning\r +```\r +The resultant cleaned BAM files will be copied to `outputCleaning` directory. You can repeat the workflow if you suspect that there may be more than one contaminant genomes per each sample. In that case you have to take care of the properly configured `cleanup.conf` file that should describe the BAM files from previous cleaning round but also define new output directory for each round in `workflow-cleaning.outputs.json` file.\r +\r +## Step 5: Variant calling\r +\r +Define the file listing the BAM files used for variant calling. First column defines the sample identifier, and second the resultant BAM file either from mapping of cleaning workflow.\r +```\r +> cat inputBams.txt\r +SM1 /home/.../escalibur/outputMapping/SM1.scf00001.MarkDup.bam\r +SM2 /home/.../escalibur/outputCleaned/SM2.scf00001.MarkDup.cleaned.bam\r +```\r +\r +### workflow-variants.json config file\r +Add the path of your file listing the locations of BAM files (here `inputBams.txt`), and add the location to selected reference genome (found in `outputMapping/best.ref`) and it's label, as defined in `references.txt` file. 
Adjust the absolute paths for singularity image and adjust other parameters, especially define if you want to recalibrate the BAM files by selecting value "independent" to "variants_workflow.call_type".\r +```\r +{\r + "## CONFIG FILE": "WDL",\r + "variants_workflow.inputSampleFile": "./inputBams.txt",\r + "variants_workflow.selectedRefFile": "TestReferences/scf00001.fa",\r + "variants_workflow.selectedRefLabel": "scf00001",\r + \r + "## Singularity parameters": "absolute paths to the container and the directory to bind visible inside singularity",\r + "variants_workflow.singularityContainerPath": "/home/.../escalibur/escalibur.sif",\r + "variants_workflow.singularityBindPath": "/home/.../escalibur/",\r +\r + "## Which variant call workflow to use": "fast or independent",\r + "variants_workflow.call_type": "fast",\r + \r + "## Variant filtering expressions": "For SNPs and INDELs",\r + "variants_workflow.SNP_filt_exp": "QD < 2.0 || FS > 60.0 || MQ < 40.0 || MQRankSum < -12.5 || ReadPosRankSum < -8.0",\r + "variants_workflow.INDEL_filt_exp": "QD < 2.0 || FS > 200.0 || ReadPosRankSum < -20.0",\r +\r + "## Variant Filter params": "Variant filter, indel, snps, report making: Safe to leave as default",\r + "variants_workflow.ploidy": 2,\r + "variants_workflow.maxIndelSize": 60,\r + "variants_workflow.scafNumLim": 95,\r + "variants_workflow.scafNumCo": 2,\r + "variants_workflow.scafLenCutOff": 0,\r + "variants_workflow.ldWinSize": 10,\r + "variants_workflow.ldWinStep": 5,\r + "variants_workflow.ldCutOff": 0.3,\r + "variants_workflow.snp_indel_var_filtering_workflow.indelFilterName": "Indel_filter",\r + "variants_workflow.snp_indel_var_filtering_workflow.indelFilterExpression": "QD < 2.0 || FS > 200.0 || ReadPosRankSum < -20.0",\r + "variants_workflow.snp_indel_var_filtering_workflow.snpFilterName": "Snp_filter",\r + "variants_workflow.snp_indel_var_filtering_workflow.snpFilterExpression": "QD < 2.0 || FS > 60.0 || MQ < 40.0 || MQRankSum < -12.5 || ReadPosRankSum < 
-8.0",\r + "variants_workflow.snp_indel_var_filtering_workflow.vfindel_tk.selectType": "",\r + "variants_workflow.snp_indel_var_filtering_workflow.vfsnp_tk.selectType": "",\r +\r + "## Build chromosome map":"map_def_scf_lim_task",\r + "variants_workflow.snp_indel_var_filtering_workflow.map_def_scf_lim_task.scafLenCutOff": 1000000,\r + "variants_workflow.snp_indel_var_filtering_workflow.map_def_scf_lim_task.scafNumCo": 3,\r +\r + "## Indexing sub workflow task parameters": "Samtools index run time parameters",\r + "variants_workflow.ref_index.IST_minutes": 300,\r + "variants_workflow.ref_index.IST_threads": 2,\r + "variants_workflow.ref_index.IST_mem": 8000,\r + .\r + .\r + .\r +}\r +```\r +\r +Run the variant calling workflow.\r +```\r +> java -Dconfig.file=./workflow-runtime.local.config -jar ./cromwell-50.jar run workflow-variants.wdl -i workflow-variants.json -o workflow-variants.outputs.json > out.variants 2> err.variants\r +```\r +The resultant files will be copied to `outputVariants` directory. That includes filtered variants calls (`full_genotype_output.vcf`) and recalibrated BAM files (if independent call_type is selected).\r +\r +## Other considerations\r +\r +### Resource allocation in HPC environment\r +Wall time, memory usage and thread count (`_minutes`, `_mem`, `_threads`) given in `.json` files for each workflow can vary substantially and may require adjusting in HPC environment and slurm. This may lead to frequent restarting of the workflow after each adjustment. We have automated this task by providing scripts that automatically check the failed resource allocations and double them for each round. 
These scripts are located in `Automation` directory and can be run as follows:\r +```\r +> cd Automation\r +> sh init.sh # Copies the content of ../tasks directory to tasksOrig directory\r +> sbatch runMapping.slurm # Runs runLoopMapping.sh in a worker node\r +> sbatch runCleaning.slurm # Runs runLoopCleaning.sh in a worker node\r +> sbatch runVariants.slurm # Runs runLoopVariants.sh in a worker node\r +```\r +Scripts `runLoop*.sh` copy resource allocations from collective `runtimes.json` file to the files in `../tasks` directory, run the workflow and double the failed resource allocations in `../tasks` files, and reruns the workflow until it succeeds or until ten rounds have passed. Copying of resource allocations directly to the files in `../tasks` directory is necessary to guarantee proper function of call-caching.\r +#### NOTE: automated resource allocation adjustment is experimental, should be monitored when running and may require modifications to scripts to function properly.\r +\r +### Disk usage\r +Cromwell will create duplicate copies of files while running the workflows. It is therefore recommended to remove `cromwell-executions` directory after each workflow is run, if disk space is getting sparse.\r +```\r +> rm -r cromwell-executions\r +```\r +Especially, if there are hundreds of samples that may sum up to terabytes of data, disk space might become an issue if unused files are not removed.\r +\r +### Troubleshooting\r +If the output text does not reveal the error, you can try to find an error message using command(s):\r +```\r +> find cromwell-executions/ -name stderr -exec cat {} \\; | grep -i fatal\r +> find cromwell-executions/ -name stderr -exec cat {} \\; | less\r +```\r +\r +Most commonly encountered error cases:\r +\r +* Singularity is not running correctly. 
Typically you require help from your administrator to get singularity properly installed.\r +* Singularity image `escalibur.sif` was not downloaded\r +* Check that you are using correct runtime configuration file `workflow-runtime.local.config` or `workflow-runtime.slurm.config` when calling `cromwell-50.jar`\r +* Absolute file paths for Singularity/Trimmomatic, input files or contaminant genomes are not updated or are wrong in `workflow-*.json`, `inputBams.txt` or `cleanup.conf` configuration files, respectively.\r +* Defined run-time and memory requirements for some tasks are not sufficient in `.json` configuration files to run the pipeline in HPC environment.\r +* If you are using slurm job scheduler and want to run the pipeline in HPC environment, you have to create the related configuration file yourselves.\r +* Pipeline has not been tested in other environments but Linux and we expect that users encounter challenges if trying to run the pipeline e.g. in Mac environment.\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/335?version=1" ; + schema1:isBasedOn "https://gitlab.unimelb.edu.au/bioscience/escalibur.git" ; + schema1:license "BSD-3-Clause" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Escalibur" ; + schema1:sdDatePublished "2024-08-05 10:32:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/335/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8987 ; + schema1:dateCreated "2022-04-20T23:21:34Z" ; + schema1:dateModified "2023-01-16T13:59:45Z" ; + schema1:description """# ESCALIBUR\r +\r +Escalibur Population Genomic Analysis Pipeline is able to explore key aspects centering the population genetics of organisms, and automates three key bioinformatic components in population genomic analysis using Workflow Definition Language (WDL: https://openwdl.org/), and customised R, Perl, Python and Unix shell scripts. Associated programs are packaged into a platform independent singularity image, for which the definition file is provided.\r +\r +The workflow for analysis using Escalibur consists of three steps - each step can be run in a separate workflow in a sequential manner; step 2 is optional.\r +\r + 1. Trimming and mapping the raw data - selection of the best reference genome;\r + 2. Removing the contamination from mapped data;\r + 3. Recalibration, variant calling and filtering;\r +\r +This implementation runs both locally and in a distributed environment that uses SLURM job scheduler.\r +\r +## Dependencies\r +Following software dependencies are required:\r +\r +* Git\r +* SLURM scheduler required for distributed HPC environment (https://slurm.schedmd.com/documentation.html)\r +* Python3.7: (https://www.python.org/)\r +* Perl 5.26.2: (https://www.perl.org/)\r +* Java 1.8\r +* Singularity 3.7.3: (https://sylabs.io/singularity/)\r +\r +## Step 1: Installation\r +\r +Typically, the installation of Singularity requires root rights. You should therefore contact your administrator to get it correctly installed. 
Minimum Linux kernel version requirement is 3.8, though >= 3.18 would be preferred (https://sylabs.io/guides/3.5/admin-guide/installation.html).\r +\r +Clone the git repository to a directory on your cluster or stand-alone server.\r +```\r +> git clone --depth 1 -b v0.3-beta https://gitlab.unimelb.edu.au/bioscience/escalibur.git\r +> cd escalibur\r +```\r +\r +### Description of Files\r +* `workflow-main.local.config`: main configuration file for stand alone server runtime environment\r +* `workflow-main.slurm.config`: main configuration file for HPC runtime environment that supports Slurm job scheduler\r +* `workflow-mapping.json`: defines location of input files, has behavioral settings and sets resource allocations\r +* `workflow-cleaning.json`: defines location of input files and sets resource allocations\r +* `workflow-variants.json`: defines location of input files, has behavioral settings and sets resource allocations\r +* `workflow-mapping.wdl`: main workflow file to trim and map PE reads into the genome\r +* `workflow-cleaning.wdl`: main workflow file to clean contamination from mapped PE reads against genomes representing putative contamination\r +* `workflow-variants.wdl`: main workflow file to call variants using mapped and cleaned reads\r +* `workflow-mapping.outputs.json`: defines location for resultant outputs and logs from mapping workflow\r +* `workflow-cleaning.outputs.json`: defines location for resultant outputs and logs from cleaning workflow\r +* `workflow-variants.outputs.json`: defines location for resultant outputs and logs from variants workflow\r +* `inputReads.txt`: example input file for fastq read files to mapping step\r +* `cleanup.conf`: example configuration file for putative host contamination to cleaning step\r +* `inputBams.txt`: example input file for resultant BAM files to variant calling step\r +* `references.txt`: contains list of example reference genomes\r +* `perl_scripts`: contains Perl scripts used by the pipeline\r +* 
`scripts`: contains Python scripts used by the pipeline\r +* `R_scripts`: contains R scripts used by the pipeline\r +* `sub_workflows`: sub-workflows, one for each of the workflow steps\r +* `tasks`: workflow tasks\r +* `cromwell-50.jar`: java archive file required to run the workflow.\r +\r +Two config files have been created. One for stand alone server (`workflow-runtime.local.config`) and another one for HPC environment that supports Slurm scheduler (`workflow-runtime.slurm.config`).\r +These files have already been optimised. For slurm configuration you only need to define the HPC partition in line 35: "String rt_queue"\r +Change this to the partition you have access to on HPC environment.\r +\r +Files `workflow-mapping.outputs.json`, `workflow-cleaning.outputs.json` and `workflow-variants.outputs.json` define the directories to copy the result files to. Modify if you want to change default output directories `outputMapping`, `outputCleaning` and `outputVariants`. These output directories are generated to the directory `escalibur`.\r +#### NOTE: delete output directories from previous runs. If you have files there already and a name matches during the copy, the workflow may fail.\r +\r +`Singularity` directory contains the definition file for the software used in Escalibur. 
Pre-built singularity image can be downloaded from `library://pakorhon/workflows/escalibur:0.0.1-beta`.\r +```\r +> singularity pull escalibur.sif library://pakorhon/workflows/escalibur:0.0.1-beta\r +```\r +\r +## Step 2: Test run\r +\r +To confirm correct function of the workflows (`mapping`, `cleaning` and `variant calling`), fix the required absolute paths, marked by three dots `...` in `workflow-mapping.json`, `workflow-cleaning.json` and `workflow-variants.json` and configuration files `cleanup.conf` and `inputBams.txt`, and run the workflow with the provided test and configuration files, and parameter settings.\r +```\r +> java -Dconfig.file=./workflow-runtime.local.config -jar ./cromwell-50.jar run workflow-mapping.wdl -i workflow-mapping.json -o workflow-mapping.outputs.json > out.mapping 2> err.mapping\r +> java -Dconfig.file=./workflow-runtime.local.config -jar ./cromwell-50.jar run workflow-cleaning.wdl -i workflow-cleaning.json -o workflow-cleaning.outputs.json > out.cleaning 2> err.cleaning\r +> java -Dconfig.file=./workflow-runtime.local.config -jar ./cromwell-50.jar run workflow-variants.wdl -i workflow-variants.json -o workflow-variants.outputs.json > out.variants 2> err.variants\r +```\r +Slurm file templates `runMapping.slurm`, `runCleaning.slurm` and `runVariants.slurm` are available for each workflow.\r +#### NOTE: default parameter settings for run-times, memory usage and module loading may require adjustment in these files if run in HPC environment using slurm. Current settings should account for the test run.\r +\r +After the runs are complete, the results will be at the output directories: `outputMapping`, `outputCleaning` and `outputVariants`.\r +You can compare the result of `outputVariants/full_genotype_output.vcf` to that of pre-run `TestResults/full_genotype_output.vcf`.\r +\r +## Step 3: Mapping\r +\r +Make a directory for your fastq files e.g. 
`Reads` and copy your paired end raw data in there.\r +```\r +> mkdir Reads\r +```\r +\r +It should look something like below\r +```\r +> ls TestReads/\r +1-1_r1.fastq.gz 32-1_r1.fastq.gz 44-1_r1.fastq.gz\r +1-1_r2.fastq.gz 32-1_r2.fastq.gz 44-1_r2.fastq.gz\r +```\r +Run the python script to create a file of your input samples and edit the resulting file to match your sample identifiers and libraries.\r +```\r +> python3 scripts/inputArgMaker.py -d Reads/ -p -ps 33 -pq 20 -pl ILLUMINA -ml 50 -o inputReads.txt \r +```\r +\r +The edited output file is shown below. The script will automatically sort the files by size.\r +```\r +> cat inputReads.txt\r +# Prefix PE/SE MinLen PhredS Sequencer PhredQ Library Read Group ID Sample Platform Unit First pair of PE reads Second pair of PE reads\r +test1 PE 50 33 ILLUMINA 28 LIB1 CL100082180L1 SM1 CL100082180L1 ./TestReads/1-1_r1.fastq.gz ./TestReads/1-1_r2.fastq.gz\r +test2 PE 50 33 ILLUMINA 20 LIB2 CL100082180L1 SM2 CL100082180L1 ./TestReads/44-1_r1.fastq.gz ./TestReads/44-1_r2.fastq.gz\r +test3 PE 50 33 ILLUMINA 20 LIB3 CL100034574L1 SM2 CL100034574L1 ./TestReads/32-1_r1.fastq.gz ./TestReads/32-1_r2.fastq.gz\r +```\r +#### NOTE: If several libraries are embedded in a single read file, library-specific reads have to be separated into own files before create the inputReads.txt file. In contrast, inputReads.txt file format can accommodate multiple library files to a single sample.\r +\r +* `Prefix`: Prefix for the resultant files from trimming.\r +* `PE/SE`: Paired-End/Single-End reads as input.\r +* `MinLen`: Minimum Length of reads after trimming.\r +* `PhredS`: Used Phred coding by the sequencer (33 or 64).\r +* `Sequencer`: Name of the sequencer.\r +* `PhredQ`: Phred cut-off score used in trimming.\r +* `Library`: Identifier for the library.\r +* `Read Group ID`: Identifier for the read groups required by GATK (inputArgMaker tries to find this from FASTQ reads). 
Refer to (https://gatk.broadinstitute.org/hc/en-us/articles/360035890671-Read-groups).\r +* `Sample`: Identifier for the sample. Defined prefix for resultant sample specific files.\r +* `Platform Unit (optional)`: Information about flow cell, lane and sample. Helps GATK in recalibration (inputArgMaker copies Read Group ID here). Refer to (https://gatk.broadinstitute.org/hc/en-us/articles/360035890671-Read-groups).\r +* `First pair of PE reads`: Relative path to the forward pair of PE reads.\r +* `Second pair of PE reads`: Relative path to the reverse pair of PE reads.\r +\r +Create a file listing reference genomes and configure `workflow-mapping.json` file.\r +An example reference file (`references.txt`) has been created for you. Use this as an example to create your own.\r +Ensure there are no whitespaces at the end of the line or else the cromwell engine will throw an error.\r +Reads are mapped to these reference files and the best matching reference will be selected for variant calling.\r +```\r +> cat references.txt\r +scf00001 ./TestReferences/scf00001.fa\r +scf00013 ./TestReferences/scf00013.fa\r +```\r +#### NOTE: Reference label (e.g. `scf00001`) must be a substring found in the reference fasta file (`scf00001.fa`)\r +\r +The figure below illustrates the flow of the information, and appearance of labels (`Prefix`, `Sample`, `Label`) in file names, as defined in `inputReads.txt` and `references.txt`.\r +![](figures/labelFlow.png)\r +\r +### workflow-mapping.json config file\r +Add the path of your fastq and reference genome input files and change parameters as appropriate, and adjust the absolute paths for singularity image. If `mapping_workflow.readQc` is set to `yes`, reads are trimmed both for quality and the adapters. Adapters to trim are given in `mapping_workflow.pe_filtering_workflow.trimmomatic_pe_task.truseq_pe_adapter`. 
If you want to use custom adapters, copy them to `adapters` directory and instead of default `TruSeq3-PE.fa`, refer to your custom file. If you don't want to use adapters, use `empty.fa` file instead. For BGISEQ adapters, refer to (https://en.mgitech.cn/Download/download_file/id/71).\r +```\r +{\r + "## CONFIG FILE": "WDL",\r + "mapping_workflow.inputSampleFile": "./inputReads.txt",\r + "mapping_workflow.inputReferenceFile": "./references.txt",\r +\r + "## Parameters for samtools read filtering": "-F 4 does filters unmapped reads from resultant files",\r + "mapping_workflow.samtoolsParameters": "-F 4",\r + \r + "## Is read QC required": "yes or no",\r + "mapping_workflow.readQc": "yes",\r + "## What is the ploidy of given genome": "1 for haploid, 2 for diploid, etc.",\r + "mapping_workflow.ploidy": 2,\r + \r + "## Singularity parameters": "absolute paths to the container and the directory to bind visible inside singularity",\r + "mapping_workflow.singularityContainerPath": "/home/.../escalibur/escalibur.sif",\r + "mapping_workflow.singularityBindPath": "/home/.../escalibur/",\r +\r + "## trimmomatic adapters": "",\r + "mapping_workflow.pe_filtering_workflow.trimmomatic_pe_task.truseq_pe_adapter":"./adapters/TruSeq3-PE.fa",\r + "mapping_workflow.pe_filtering_workflow.trimmomatic_se_task.truseq_se_adapter":"./adapters/TruSeq3-SE.fa",\r + \r + "## Indexing sub workflow task parameters": "Samtools index run time parameters",\r + "mapping_workflow.index_sub_workflow.indexing_sam_task.IST_minutes": 300,\r + "mapping_workflow.index_sub_workflow.indexing_sam_task.IST_threads": 16,\r + "mapping_workflow.index_sub_workflow.indexing_sam_task.IST_mem": 30000,\r + .\r + .\r + .\r +}\r +```\r +\r +Run the mapping workflow.\r +```\r +> java -Dconfig.file=./workflow-runtime.local.config -jar ./cromwell-50.jar run workflow-mapping.wdl -i workflow-mapping.json -o workflow-mapping.outputs.json > out.mapping 2> err.mapping\r +```\r +The resultant BAM files will be copied to 
`outputMapping` directory.\r +\r +## Step 4 (optional): Cleaning\r +\r +If you suspect 'host' contamination in your data, you can remove that using the cleaning workflow.\r +Define the file representing the contamination. First column defines the sample identifier, second the resultant BAM file from mapping workflow and third the putative contaminant genome assembly.\r +```\r +> cat cleanup.conf\r +SM1 /home/.../escalibur/outputMapping/SM1.scf00001.MarkDup.bam /home/.../escalibur/Hosts/host1.fa\r +SM2 /home/.../escalibur/outputMapping/SM2.scf00001.MarkDup.bam /home/.../escalibur/Hosts/host1.fa\r +```\r +#### NOTE: you have to use absolute paths both to BAM files and the contaminant reference genome (here `host1.fa` and `host2.fa`).\r +\r +### workflow-cleaning.json config file\r +Add the path of your cleaning config file (here `cleanup.conf`) and adjust the absolute paths for singularity image.\r +```\r +{\r + "## CONFIG FILE": "WDL",\r + "cleaning_workflow.inputContaminantFile": "./cleanup.conf",\r + \r + "## Singularity parameters": "absolute paths to the container and the directory to bind visible inside singularity",\r + "cleaning_workflow.singularityContainerPath": "/home/.../escalibur/escalibur.sif",\r + "cleaning_workflow.singularityBindPath": "/home/.../escalibur/",\r +\r + "cleaning_workflow.indexing_bwa_task.IBT_minutes": 60,\r + "cleaning_workflow.indexing_bwa_task.IBT_threads": 1,\r + "cleaning_workflow.indexing_bwa_task.IBT_mem": 16000,\r +\r + "######################################":"########################################",\r + "CLEANING":"PARAMETERS",\r + "######################################":"########################################",\r + "cleaning_workflow.clean_bams_workflow.cleanBams_task.CLEAN_BAMS_minutes": 600,\r + "cleaning_workflow.clean_bams_workflow.cleanBams_task.CLEAN_BAMS_threads": 4,\r + "cleaning_workflow.clean_bams_workflow.cleanBams_task.CLEAN_BAMS_mem": 32000,\r +\r + 
"cleaning_workflow.create_cleaned_bams_workflow.createCleanedBams_task.CREATE_CLEAN_BAMS_minutes": 300,\r + "cleaning_workflow.create_cleaned_bams_workflow.createCleanedBams_task.CREATE_CLEAN_BAMS_threads": 4,\r + "cleaning_workflow.create_cleaned_bams_workflow.createCleanedBams_task.CREATE_CLEAN_BAMS_mem": 32000,\r +\r + "cleaning_workflow.refsBySample.RBS_minutes": 5,\r + "cleaning_workflow.refsBySample.RBS_threads": 1,\r + "cleaning_workflow.refsBySample.RBS_mem": 4000\r +}\r +```\r +\r +Run the cleaning workflow.\r +```\r +> java -Dconfig.file=./workflow-runtime.local.config -jar ./cromwell-50.jar run workflow-cleaning.wdl -i workflow-cleaning.json -o workflow-cleaning.outputs.json > out.cleaning 2> err.cleaning\r +```\r +The resultant cleaned BAM files will be copied to `outputCleaning` directory. You can repeat the workflow if you suspect that there may be more than one contaminant genome per sample. In that case you have to take care of the properly configured `cleanup.conf` file that should describe the BAM files from previous cleaning round but also define new output directory for each round in `workflow-cleaning.outputs.json` file.\r +\r +## Step 5: Variant calling\r +\r +Define the file listing the BAM files used for variant calling. First column defines the sample identifier, and second the resultant BAM file either from mapping or cleaning workflow.\r +```\r +> cat inputBams.txt\r +SM1 /home/.../escalibur/outputMapping/SM1.scf00001.MarkDup.bam\r +SM2 /home/.../escalibur/outputCleaned/SM2.scf00001.MarkDup.cleaned.bam\r +```\r +\r +### workflow-variants.json config file\r +Add the path of your file listing the locations of BAM files (here `inputBams.txt`), and add the location to selected reference genome (found in `outputMapping/best.ref`) and its label, as defined in `references.txt` file. 
Adjust the absolute paths for singularity image and adjust other parameters, especially define if you want to recalibrate the BAM files by selecting value "independent" to "variants_workflow.call_type".\r +```\r +{\r + "## CONFIG FILE": "WDL",\r + "variants_workflow.inputSampleFile": "./inputBams.txt",\r + "variants_workflow.selectedRefFile": "TestReferences/scf00001.fa",\r + "variants_workflow.selectedRefLabel": "scf00001",\r + \r + "## Singularity parameters": "absolute paths to the container and the directory to bind visible inside singularity",\r + "variants_workflow.singularityContainerPath": "/home/.../escalibur/escalibur.sif",\r + "variants_workflow.singularityBindPath": "/home/.../escalibur/",\r +\r + "## Which variant call workflow to use": "fast or independent",\r + "variants_workflow.call_type": "fast",\r + \r + "## Variant filtering expressions": "For SNPs and INDELs",\r + "variants_workflow.SNP_filt_exp": "QD < 2.0 || FS > 60.0 || MQ < 40.0 || MQRankSum < -12.5 || ReadPosRankSum < -8.0",\r + "variants_workflow.INDEL_filt_exp": "QD < 2.0 || FS > 200.0 || ReadPosRankSum < -20.0",\r +\r + "## Variant Filter params": "Variant filter, indel, snps, report making: Safe to leave as default",\r + "variants_workflow.ploidy": 2,\r + "variants_workflow.maxIndelSize": 60,\r + "variants_workflow.scafNumLim": 95,\r + "variants_workflow.scafNumCo": 2,\r + "variants_workflow.scafLenCutOff": 0,\r + "variants_workflow.ldWinSize": 10,\r + "variants_workflow.ldWinStep": 5,\r + "variants_workflow.ldCutOff": 0.3,\r + "variants_workflow.snp_indel_var_filtering_workflow.indelFilterName": "Indel_filter",\r + "variants_workflow.snp_indel_var_filtering_workflow.indelFilterExpression": "QD < 2.0 || FS > 200.0 || ReadPosRankSum < -20.0",\r + "variants_workflow.snp_indel_var_filtering_workflow.snpFilterName": "Snp_filter",\r + "variants_workflow.snp_indel_var_filtering_workflow.snpFilterExpression": "QD < 2.0 || FS > 60.0 || MQ < 40.0 || MQRankSum < -12.5 || ReadPosRankSum < 
-8.0",\r + "variants_workflow.snp_indel_var_filtering_workflow.vfindel_tk.selectType": "",\r + "variants_workflow.snp_indel_var_filtering_workflow.vfsnp_tk.selectType": "",\r +\r + "## Build chromosome map":"map_def_scf_lim_task",\r + "variants_workflow.snp_indel_var_filtering_workflow.map_def_scf_lim_task.scafLenCutOff": 1000000,\r + "variants_workflow.snp_indel_var_filtering_workflow.map_def_scf_lim_task.scafNumCo": 3,\r +\r + "## Indexing sub workflow task parameters": "Samtools index run time parameters",\r + "variants_workflow.ref_index.IST_minutes": 300,\r + "variants_workflow.ref_index.IST_threads": 2,\r + "variants_workflow.ref_index.IST_mem": 8000,\r + .\r + .\r + .\r +}\r +```\r +\r +Run the variant calling workflow.\r +```\r +> java -Dconfig.file=./workflow-runtime.local.config -jar ./cromwell-50.jar run workflow-variants.wdl -i workflow-variants.json -o workflow-variants.outputs.json > out.variants 2> err.variants\r +```\r +The resultant files will be copied to `outputVariants` directory. That includes filtered variants calls (`full_genotype_output.vcf`) and recalibrated BAM files (if independent call_type is selected).\r +\r +## Other considerations\r +\r +### Resource allocation in HPC environment\r +Wall time, memory usage and thread count (`_minutes`, `_mem`, `_threads`) given in `.json` files for each workflow can vary substantially and may require adjusting in HPC environment and slurm. This may lead to frequent restarting of the workflow after each adjustment. We have automated this task by providing scripts that automatically check the failed resource allocations and double them for each round. 
These scripts are located in `Automation` directory and can be run as follows:\r +```\r +> cd Automation\r +> sh init.sh # Copies the content of ../tasks directory to tasksOrig directory\r +> sbatch runMapping.slurm # Runs runLoopMapping.sh in a worker node\r +> sbatch runCleaning.slurm # Runs runLoopCleaning.sh in a worker node\r +> sbatch runVariants.slurm # Runs runLoopVariants.sh in a worker node\r +```\r +Scripts `runLoop*.sh` copy resource allocations from collective `runtimes.json` file to the files in `../tasks` directory, run the workflow and double the failed resource allocations in `../tasks` files, and reruns the workflow until it succeeds or until ten rounds have passed. Copying of resource allocations directly to the files in `../tasks` directory is necessary to guarantee proper function of call-caching.\r +#### NOTE: automated resource allocation adjustment is experimental, should be monitored when running and may require modifications to scripts to function properly.\r +\r +### Disk usage\r +Cromwell will create duplicate copies of files while running the workflows. It is therefore recommended to remove `cromwell-executions` directory after each workflow is run, if disk space is getting sparse.\r +```\r +> rm -r cromwell-executions\r +```\r +Especially, if there are hundreds of samples that may sum up to terabytes of data, disk space might become an issue if unused files are not removed.\r +\r +### Troubleshooting\r +If the output text does not reveal the error, you can try to find an error message using command(s):\r +```\r +> find cromwell-executions/ -name stderr -exec cat {} \\; | grep -i fatal\r +> find cromwell-executions/ -name stderr -exec cat {} \\; | less\r +```\r +\r +Most commonly encountered error cases:\r +\r +* Singularity is not running correctly. 
Typically you require help from your administrator to get singularity properly installed.\r +* Singularity image `escalibur.sif` was not downloaded\r +* Check that you are using correct runtime configuration file `workflow-runtime.local.config` or `workflow-runtime.slurm.config` when calling `cromwell-50.jar`\r +* Absolute file paths for Singularity/Trimmomatic, input files or contaminant genomes are not updated or are wrong in `workflow-*.json`, `inputBams.txt` or `cleanup.conf` configuration files, respectively.\r +* Defined run-time and memory requirements for some tasks are not sufficient in `.json` configuration files to run the pipeline in HPC environment.\r +* If you are using slurm job scheduler and want to run the pipeline in HPC environment, you have to create the related configuration file yourselves.\r +* Pipeline has not been tested in other environments but Linux and we expect that users encounter challenges if trying to run the pipeline e.g. in Mac environment.\r +\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/BSD-3-Clause" ; + schema1:name "Escalibur" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/335?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """# ABR\\_Threshold_Detection\r +\r +## What is this?\r +\r +This code can be used to automatically determine hearing thresholds from ABR hearing curves. 
\r +\r +One of the following methods can be used for this purpose:\r + \r ++ neural network (NN) training, \r ++ calibration of a self-supervised sound level regression (SLR) method \r +\r +on given data sets with manually determined hearing thresholds.\r +\r +## Installation:\r +\r +Run inside the [src](./src) directory:\r +\r +### Installation as python package\r +\r +```\r +pip install -e ./src (Installation as python package)\r +```\r +\r +### Installation as conda virtual environment\r +```\r +conda create -n abr_threshold_detection python=3.7\r +conda activate abr_threshold_detection\r +conda install pip\r +pip install -e ./src\r +```\r +\r +## Usage:\r +Data files can be downloaded here: [https://zenodo.org/deposit/5779876](https://zenodo.org/deposit/5779876).\r +\r +For the Jupyter Notebooks (see the [`notebooks`](./notebooks) directory) to run, the path to the data has to be defined. For this, see the corresponding documentation of the respective notebooks.\r +\r +### Using NNs (`./src/ABR_ThresholdFinder_NN`)\r +\r +The neural network models were trained in `./src/notebooks/GMCtrained_NN*_training.ipynb` with GMC data and in `./src/notebooks/INGtrained_NN*_training.ipynb` with ING data.\r +\r +```\r +import ABR_ThresholdFinder_NN.data_preparation as dataprep\r +from ABR_ThresholdFinder_NN.models import create_model_1, compile_model_1\r +```\r +For automatic threshold detection based on NNs, `GMCtrained_NN_threshold_detection.ipynb` and `INGtrained_NN_threshold_detection.ipynb` in `./src/notebooks` can be used.\r +\r +```\r +import ABR_ThresholdFinder_NN.data_preparation as dataprep\r +import ABR_ThresholdFinder_NN.thresholder as abrthr\r +```\r +\r +### Using the SLR method (`./src/ABR_ThresholdFinder_SLR`)\r +\r +In `./src/notebooks/GMCcalibrated_SLR_threshold_detection.ipynb` and `./src/notebooks/INGcalibrated_SLR_threshold_detection.ipynb` it is shown how to use the module to:\r +\r ++ train a threshold detector on a data set and estimate the 
thresholds\r ++ save a trained model\r ++ load a model\r ++ apply a trained threshold estimator to a data set\r ++ evaluate thresholds by comparing it to a ground truth\r ++ evaluate thresholds by analysing signal averages\r +\r +```\r +import pandas as pd\r +import numpy as np\r +\r +from ABR_ThresholdFinder_SLR import ABR_Threshold_Detector_multi_stimulus\r +from ABR_ThresholdFinder_SLR.evaluations import evaluate_classification_against_ground_truth, plot_evaluation_curve_for_specific_stimulus\r +```\r +\r +##### Evaluate thresholds by comparing it with a 'ground truth' (a human set threshold in this case)\r +\r +For example:\r +\r +```\r +# 5dB buffer\r +evaluation = evaluate_classification_against_ground_truth(GMC_data2, 5, \r + frequency = 'frequency',\r + mouse_id = 'mouse_id',\r + sound_level = 'sound_level',\r + threshold_estimated = 'slr_estimated_thr',\r + threshold_ground_truth = 'threshold')\r +``` \r +### Compute and plot evaluation curves that allow to judge the quality of a thresholding\r +\r +Four threshold types are evaluated and compared:\r +\r ++ the thresholds predicted with neural networks ('threshold NN')\r ++ the thresholds estimated by a sound level regression method ('threshold SLR')\r ++ the human ground truth ('threshold manual')\r ++ a constant threshold ('50')\r +\r +For more details, please see `Evaluation_of_ML_detected_thresholds.ipynb` in `./src/notebooks`.\r +\r +## Folder structure:\r +\r +### [`data`](./data)\r +Contains the preprocessed ABR and mouse phenotyping datasets from GMC and Ingham et al. 
in csv format, as well as the mouse ID distributions stored as numpy arrays for neural networks training, validation and testing.\r +\r +### [`models`](./models)\r +Contains the trained models of the two neural networks and the SLR method, but also the predictions of the first neural network with which the second neural network was fed.\r +\r +### [`models_cross-validation`](./models_cross-validation)\r +Contains the models that resulted from the cross-validation of the neural networks.\r +\r +### [`notebooks`](./notebooks)\r +Contains the Jupyter notebooks used for training, testing and evaluation of the neural networks and the SLR method, as well as those used for the hearing curve analysis.\r +\r +### [`notebooks_reports`](./notebooks_reports)\r +Contains the contents of Jupyter notebooks in html format.\r +\r +### [`results`](./results)\r +Contains the predictions or estimates made by the neural networks or the SLR method for the two data sets from GMC and Ingham et al. but also all the plots made to analyse the results.\r +\r +### [`src`](./src)\r +Contains the Python scripts used in the Jupyter notebooks.""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/376?version=1" ; + schema1:isBasedOn "https://github.com/ExperimentalGenetics/ABR_thresholder.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ABR Threshold Detection" ; + schema1:sdDatePublished "2024-08-05 10:31:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/376/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4654 ; + schema1:dateCreated "2022-07-18T08:08:04Z" ; + schema1:dateModified "2023-01-16T14:02:01Z" ; + schema1:description """# ABR\\_Threshold_Detection\r +\r +## What is this?\r +\r +This code can be used to automatically determine hearing thresholds from ABR hearing curves. \r +\r +One of the following methods can be used for this purpose:\r + \r ++ neural network (NN) training, \r ++ calibration of a self-supervised sound level regression (SLR) method \r +\r +on given data sets with manually determined hearing thresholds.\r +\r +## Installation:\r +\r +Run inside the [src](./src) directory:\r +\r +### Installation as python package\r +\r +```\r +pip install -e ./src (Installation as python package)\r +```\r +\r +### Installation as conda virtual environment\r +```\r +conda create -n abr_threshold_detection python=3.7\r +conda activate abr_threshold_detection\r +conda install pip\r +pip install -e ./src\r +```\r +\r +## Usage:\r +Data files can be downloaded here: [https://zenodo.org/deposit/5779876](https://zenodo.org/deposit/5779876).\r +\r +For the Jupyter Notebooks (see the [`notebooks`](./notebooks) directory) to run, the path to the data has to be defined. 
For this, see the corresponding documentation of the respective notebooks.\r +\r +### Using NNs (`./src/ABR_ThresholdFinder_NN`)\r +\r +The neural network models were trained in `./src/notebooks/GMCtrained_NN*_training.ipynb` with GMC data and in `./src/notebooks/INGtrained_NN*_training.ipynb` with ING data.\r +\r +```\r +import ABR_ThresholdFinder_NN.data_preparation as dataprep\r +from ABR_ThresholdFinder_NN.models import create_model_1, compile_model_1\r +```\r +For automatic threshold detection based on NNs, `GMCtrained_NN_threshold_detection.ipynb` and `INGtrained_NN_threshold_detection.ipynb` in `./src/notebooks` can be used.\r +\r +```\r +import ABR_ThresholdFinder_NN.data_preparation as dataprep\r +import ABR_ThresholdFinder_NN.thresholder as abrthr\r +```\r +\r +### Using the SLR method (`./src/ABR_ThresholdFinder_SLR`)\r +\r +In `./src/notebooks/GMCcalibrated_SLR_threshold_detection.ipynb` and `./src/notebooks/INGcalibrated_SLR_threshold_detection.ipynb` it is shown how to use the module to:\r +\r ++ train a threshold detector on a data set and estimate the thresholds\r ++ save a trained model\r ++ load a model\r ++ apply a trained threshold estimator to a data set\r ++ evaluate thresholds by comparing it to a ground truth\r ++ evaluate thresholds by analysing signal averages\r +\r +```\r +import pandas as pd\r +import numpy as np\r +\r +from ABR_ThresholdFinder_SLR import ABR_Threshold_Detector_multi_stimulus\r +from ABR_ThresholdFinder_SLR.evaluations import evaluate_classification_against_ground_truth, plot_evaluation_curve_for_specific_stimulus\r +```\r +\r +##### Evaluate thresholds by comparing it with a 'ground truth' (a human set threshold in this case)\r +\r +For example:\r +\r +```\r +# 5dB buffer\r +evaluation = evaluate_classification_against_ground_truth(GMC_data2, 5, \r + frequency = 'frequency',\r + mouse_id = 'mouse_id',\r + sound_level = 'sound_level',\r + threshold_estimated = 'slr_estimated_thr',\r + threshold_ground_truth = 
'threshold')\r +``` \r +### Compute and plot evaluation curves that allow to judge the quality of a thresholding\r +\r +Four threshold types are evaluated and compared:\r +\r ++ the threshols predicted with neural networks ('threshold NN')\r ++ the thresholds estimated by a sound level regression method ('threshold SLR')\r ++ the human ground truth ('threshold manual')\r ++ a constant threshold ('50')\r +\r +For more details, please see `Evaluation_of_ML_detected_thresholds.ipynb` in `./src/notebooks`.\r +\r +## Folder structure:\r +\r +### [`data`](./data)\r +Contains the preprocessed ABR and mouse phenotyping datasets from GMC and Ingham et al. in csv format, as well as the mouse ID distributions stored as numpy arrays for neural networks training, validation and testing.\r +\r +### [`models`](./models)\r +Contains the trained models of the two neural networks and the SLR method, but also the predictions of the first neural network with which the second neural network was fed.\r +\r +### [`models_cross-validation`](./models_cross-validation)\r +Contains the models that resulted from the cross-validation of the neural networks.\r +\r +### [`notebooks`](./notebooks)\r +Contains the Jupyter notebooks used for training, testing and evaluation of the neural networks and the SLR method, as well as those used for the hearing curve analysis.\r +\r +### [`notebooks_reports`](./notebooks_reports)\r +Contains the contents of Jupyter notebooks in html format.\r +\r +### [`results`](./results)\r +Contains the predictions or estimates made by the neural networks or the SLR method for the two data sets from GMC and Ingham et al. 
but also all the plots made to analyse the results.\r +\r +### [`src`](./src)\r +Contains the Python scripts used in the Jupyter notebooks.""" ; + schema1:keywords "Machine Learning" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "ABR Threshold Detection" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/376?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Quantitative Mass Spectrometry nf-core workflow" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1013?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/quantms" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/quantms" ; + schema1:sdDatePublished "2024-08-05 10:23:24 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1013/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 16977 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:08Z" ; + schema1:dateModified "2024-06-11T12:55:08Z" ; + schema1:description "Quantitative Mass Spectrometry nf-core workflow" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1013?version=3" ; + schema1:keywords "dia, lfq, mass-spec, mass-spectrometry, openms, proteogenomics, Proteomics, tmt" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/quantms" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1013?version=4" ; + schema1:version 4 . 
+ + a schema1:Dataset ; + schema1:datePublished "2021-07-26T10:22:23.303007" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:sdDatePublished "2021-10-27 15:45:11 +0100" ; + schema1:softwareVersion "v0.1.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.6" . + + a schema1:Dataset ; + schema1:datePublished "2024-03-26T20:18:42.365340" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.21" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 169594 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + schema1:datePublished "2023-02-23T16:26:06.211606" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/pox-virus-amplicon" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "pox-virus-amplicon/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "pox-virus-amplicon/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/pox-virus-amplicon" ; + schema1:version "0.1" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Differential abundance analysis" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/981?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/differentialabundance" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/differentialabundance" ; + schema1:sdDatePublished "2024-08-05 10:24:02 +0100" ; + schema1:url "https://workflowhub.eu/workflows/981/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11181 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:48Z" ; + schema1:dateModified "2024-06-11T12:54:48Z" ; + schema1:description "Differential abundance analysis" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/981?version=8" ; + schema1:keywords "ATAC-seq, ChIP-seq, deseq2, differential-abundance, differential-expression, gsea, limma, microarray, rna-seq, shiny" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/differentialabundance" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/981?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/989?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/hic" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hic" ; + schema1:sdDatePublished "2024-08-05 10:23:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/989/ro_crate?version=7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9650 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:54Z" ; + schema1:dateModified "2024-06-11T12:54:54Z" ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/989?version=7" ; + schema1:keywords "chromosome-conformation-capture, Hi-C" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hic" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/989?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# ERGA Protein-coding gene annotation workflow.\r +Adapted from the work of Sagane Joye:\r +\r +https://github.com/sdind/genome_annotation_workflow\r +\r +## Prerequisites\r +\r +The following programs are required to run the workflow and the listed version were tested. 
It should be noted that older versions of snakemake are not compatible with newer versions of singularity as is noted here: [https://github.com/nextflow-io/nextflow/issues/1659](https://github.com/nextflow-io/nextflow/issues/1659).\r +\r +`conda v 23.7.3`\r +\r +`singularity v 3.7.3`\r +\r +`snakemake v 7.32.3` \r +\r +You will also need to acquire a licence key for Genemark and place this in your home directory with name `~/.gm_key` The key file can be obtained from the following location, where the licence should be read and agreed to: http://topaz.gatech.edu/GeneMark/license_download.cgi\r +\r +## Workflow\r +\r +The pipeline is based on braker3 and was tested on the following dataset from Drosophila melanogaster: [https://doi.org/10.5281/zenodo.8013373](https://doi.org/10.5281/zenodo.8013373)\r +\r +### Input data\r +\r +- Reference genome in fasta format\r +\r +- RNAseq data in paired-end zipped fastq format\r +\r +- uniprot fasta sequences in zipped fasta format\r +\r +### Pipeline steps\r +\r +- **Repeat Model and Mask** Run RepeatModeler using the genome as input, filter any repeats also annotated as protein sequences in the uniprot database and use this filtered libray to mask the genome with RepeatMasker\r +\r +- **Map RNAseq data** Trim any remaining adapter sequences and map the trimmed reads to the input genome\r +\r +- **Run gene prediction software** Use the mapped RNAseq reads and the uniprot sequences to create hints for gene prediction using Braker3 on the masked genome\r +\r +- **Evaluate annotation** Run BUSCO to evaluate the completeness of the annotation produced\r +\r +### Output data\r +\r +- FastQC reports for input RNAseq data before and after adapter trimming\r +\r +- RepeatMasker report containing quantity of masked sequence and distribution among TE families\r +\r +- Protein-coding gene annotation file in gff3 format\r +\r +- BUSCO summary of annotated sequences\r +\r +## Setup\r +\r +Your data should be placed in the `data` folder, 
with the reference genome in the folder `data/ref` and the transcript data in the foler `data/rnaseq`.\r +\r +The config file requires the following to be given:\r +\r +```\r +asm: 'absolute path to reference fasta'\r +snakemake_dir_path: 'path to snakemake working directory'\r +name: 'name for project, e.g. mHomSap1'\r +RNA_dir: 'absolute path to rnaseq directory'\r +busco_phylum: 'busco database to use for evaluation e.g. mammalia_odb10'\r +```\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.569.1" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/annotation/snakemake" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ERGA Protein-coding gene annotation workflow" ; + schema1:sdDatePublished "2024-08-05 10:27:51 +0100" ; + schema1:url "https://workflowhub.eu/workflows/569/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 562 ; + schema1:creator ; + schema1:dateCreated "2023-09-12T19:29:55Z" ; + schema1:dateModified "2023-09-13T13:41:08Z" ; + schema1:description """# ERGA Protein-coding gene annotation workflow.\r +Adapted from the work of Sagane Joye:\r +\r +https://github.com/sdind/genome_annotation_workflow\r +\r +## Prerequisites\r +\r +The following programs are required to run the workflow and the listed version were tested. 
It should be noted that older versions of snakemake are not compatible with newer versions of singularity as is noted here: [https://github.com/nextflow-io/nextflow/issues/1659](https://github.com/nextflow-io/nextflow/issues/1659).\r +\r +`conda v 23.7.3`\r +\r +`singularity v 3.7.3`\r +\r +`snakemake v 7.32.3` \r +\r +You will also need to acquire a licence key for Genemark and place this in your home directory with name `~/.gm_key` The key file can be obtained from the following location, where the licence should be read and agreed to: http://topaz.gatech.edu/GeneMark/license_download.cgi\r +\r +## Workflow\r +\r +The pipeline is based on braker3 and was tested on the following dataset from Drosophila melanogaster: [https://doi.org/10.5281/zenodo.8013373](https://doi.org/10.5281/zenodo.8013373)\r +\r +### Input data\r +\r +- Reference genome in fasta format\r +\r +- RNAseq data in paired-end zipped fastq format\r +\r +- uniprot fasta sequences in zipped fasta format\r +\r +### Pipeline steps\r +\r +- **Repeat Model and Mask** Run RepeatModeler using the genome as input, filter any repeats also annotated as protein sequences in the uniprot database and use this filtered libray to mask the genome with RepeatMasker\r +\r +- **Map RNAseq data** Trim any remaining adapter sequences and map the trimmed reads to the input genome\r +\r +- **Run gene prediction software** Use the mapped RNAseq reads and the uniprot sequences to create hints for gene prediction using Braker3 on the masked genome\r +\r +- **Evaluate annotation** Run BUSCO to evaluate the completeness of the annotation produced\r +\r +### Output data\r +\r +- FastQC reports for input RNAseq data before and after adapter trimming\r +\r +- RepeatMasker report containing quantity of masked sequence and distribution among TE families\r +\r +- Protein-coding gene annotation file in gff3 format\r +\r +- BUSCO summary of annotated sequences\r +\r +## Setup\r +\r +Your data should be placed in the `data` folder, 
with the reference genome in the folder `data/ref` and the transcript data in the foler `data/rnaseq`.\r +\r +The config file requires the following to be given:\r +\r +```\r +asm: 'absolute path to reference fasta'\r +snakemake_dir_path: 'path to snakemake working directory'\r +name: 'name for project, e.g. mHomSap1'\r +RNA_dir: 'absolute path to rnaseq directory'\r +busco_phylum: 'busco database to use for evaluation e.g. mammalia_odb10'\r +```\r +""" ; + schema1:keywords "Annotation, Genomics, Transcriptomics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "ERGA Protein-coding gene annotation workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/ERGA-consortium/pipelines/tree/main/annotation/snakemake/Snakefile" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for the analysis of CRISPR data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/975?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/crisprseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/crisprseq" ; + schema1:sdDatePublished "2024-08-05 10:22:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/975/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11023 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:46Z" ; + schema1:dateModified "2024-06-11T12:54:46Z" ; + schema1:description "Pipeline for the analysis of CRISPR data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/975?version=5" ; + schema1:keywords "crispr, crispr-analysis, crispr-cas, NGS" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/crisprseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/975?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + schema1:datePublished "2023-08-31T07:14:38.838218" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-sr/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/984?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/epitopeprediction" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/epitopeprediction" ; + schema1:sdDatePublished "2024-08-05 10:23:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/984/ro_crate?version=7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11406 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:52Z" ; + schema1:dateModified "2024-06-11T12:54:52Z" ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/984?version=7" ; + schema1:keywords "epitope, epitope-prediction, mhc-binding-prediction" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/epitopeprediction" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/984?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + schema1:datePublished "2024-06-24T12:34:13.326994" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/bacterial_genome_annotation" ; + schema1:license "GPL-3.0-or-later" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "bacterial_genome_annotation/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + schema1:datePublished "2023-11-13T11:31:30.254075" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + , + ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.17" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-atac-cutandrun" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.2" . 
+ + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.17" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-pe" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.2" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.17" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-ligand-parameterization).\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. 
With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.294.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_ligand_parameterization/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy GMX Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/294/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9948 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-22T09:50:36Z" ; + schema1:dateModified "2023-01-16T13:58:36Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. 
Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/gmx-ligand-parameterization).\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/294?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy GMX Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_ligand_parameterization/galaxy/biobb_wf_ligand_parameterization.ga" ; + schema1:version 2 ; + ns1:output , + , + , 
+ , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/984?version=4" ; + schema1:isBasedOn "https://github.com/nf-core/epitopeprediction" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/epitopeprediction" ; + schema1:sdDatePublished "2024-08-05 10:23:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/984/ro_crate?version=4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8593 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:51Z" ; + schema1:dateModified "2024-06-11T12:54:51Z" ; + schema1:description "A fully reproducible and state of the art epitope prediction pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/984?version=7" ; + schema1:keywords "epitope, epitope-prediction, mhc-binding-prediction" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/epitopeprediction" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/984?version=4" ; + schema1:version 4 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# EukRecover\r +Pipeline to recover eukaryotic MAGs using CONCOCT, metaBAT2 and EukCC's merging algorythm.\r +\r +Needs paired end shotgun metagenomic reads.\r +\r +## Environment\r +\r +Eukrecover requires an environment with snakemake and metaWRAP.\r +\r +## Quickstart\r +\r +Define your samples in the file `samples.csv`.\r +This file needs to have the columns project and run to identify each metagenome. \r +\r +This pipeline does not support co-binning, but feel free to change it. 
\r +\r +Clone this repro wherever you want to run the pipeline:\r +```\r +git clone https://github.com/openpaul/eukrecover/\r +```\r +\r +\r +You can then run the snakemake like so\r +\r +```\r +snakemake --use-singularity\r +```\r +\r +The pipeline used dockerhub to fetch all tools, so make sure you have singularity installed.\r +\r +\r +\r +## Prepare databases\r +The pipeline will setup databases for you, but if you already have a EukCC or a BUSCO 5 database you can use them \r +by specifying the location in the file `config/config.yaml`\r +\r +\r +## Output:\r +In the folder results you will find a folder `MAGs` which will contain a folder\r +`fa` containing the actual MAG fastas.\r +In addition you will find stats for each MAG in the table `QC.csv`.\r +\r +This table contains the following columns:\r +\r +name,eukcc_compl,eukcc_cont,BUSCO_C,BUSCO_M,BUSCO_D,BUSCO_F,BUSCO_tax,N50,bp\r +\r +\r +\r +## Citation:\r +\r +If you use this pipeline please make sure to cite all used software. \r +\r +For this please reffer to the used rules.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/475?version=1" ; + schema1:isBasedOn "https://github.com/EBI-Metagenomics/eukrecover.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for EukRecover" ; + schema1:sdDatePublished "2024-08-05 10:30:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/475/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6502 ; + schema1:dateCreated "2023-05-19T14:02:24Z" ; + schema1:dateModified "2023-05-19T14:02:24Z" ; + schema1:description """# EukRecover\r +Pipeline to recover eukaryotic MAGs using CONCOCT, metaBAT2 and EukCC's merging algorythm.\r +\r +Needs paired end shotgun metagenomic reads.\r +\r +## Environment\r +\r +Eukrecover requires an environment with snakemake and metaWRAP.\r +\r +## Quickstart\r +\r +Define your samples in the file `samples.csv`.\r +This file needs to have the columns project and run to identify each metagenome. \r +\r +This pipeline does not support co-binning, but feel free to change it. \r +\r +Clone this repro wherever you want to run the pipeline:\r +```\r +git clone https://github.com/openpaul/eukrecover/\r +```\r +\r +\r +You can then run the snakemake like so\r +\r +```\r +snakemake --use-singularity\r +```\r +\r +The pipeline used dockerhub to fetch all tools, so make sure you have singularity installed.\r +\r +\r +\r +## Prepare databases\r +The pipeline will setup databases for you, but if you already have a EukCC or a BUSCO 5 database you can use them \r +by specifying the location in the file `config/config.yaml`\r +\r +\r +## Output:\r +In the folder results you will find a folder `MAGs` which will contain a folder\r +`fa` containing the actual MAG fastas.\r +In addition you will find stats for each MAG in the table `QC.csv`.\r +\r +This table contains the following columns:\r +\r +name,eukcc_compl,eukcc_cont,BUSCO_C,BUSCO_M,BUSCO_D,BUSCO_F,BUSCO_tax,N50,bp\r +\r +\r +\r +## Citation:\r +\r +If you use this pipeline please make sure to cite all used software. 
\r +\r +For this please reffer to the used rules.\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "EukRecover" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/475?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=24" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:27 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=24" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10903 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:13Z" ; + schema1:dateModified "2024-06-11T12:55:13Z" ; + schema1:description "RNA sequencing analysis pipeline for gene/isoform quantification and extensive quality control." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=24" ; + schema1:version 24 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "MARS-seq 1/2 preprocessing pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/996?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/marsseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/marsseq" ; + schema1:sdDatePublished "2024-08-05 10:23:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/996/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6424 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:59Z" ; + schema1:dateModified "2024-06-11T12:54:59Z" ; + schema1:description "MARS-seq 1/2 preprocessing pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/996?version=4" ; + schema1:keywords "facs-sorting, MARS-seq, single-cell, single-cell-rna-seq, star-solo, transcriptional-dynamics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/marsseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/996?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1025?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/taxprofiler" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/taxprofiler" ; + schema1:sdDatePublished "2024-08-05 10:23:09 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1025/ro_crate?version=9" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15939 ; + schema1:creator , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:20Z" ; + schema1:dateModified "2024-06-11T12:55:20Z" ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1025?version=10" ; + schema1:keywords "Classification, illumina, long-reads, Metagenomics, microbiome, nanopore, pathogen, Profiling, shotgun, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/taxprofiler" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1025?version=9" ; + schema1:version 9 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. 
Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/abcix-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.299.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_abc_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy ABC MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/299/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 107797 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-23T09:03:51Z" ; + schema1:dateModified "2023-01-16T13:59:01Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/abcix-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/299?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy ABC MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + 
schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_abc_md_setup/galaxy/biobb_wf_amber_abc_md_setup.ga" ; + schema1:version 2 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Take a scRNAseq counts matrix from a single sample, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. Produces a processed AnnData \r +object.\r +\r +Depreciated: use individual workflows insead for multiple samples""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/514?version=3" ; + schema1:isBasedOn "https://github.com/swbioinf/scrnaseq_howto_ga_workflows.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for scRNAseq Single Sample Processing Counts Matrix" ; + schema1:sdDatePublished "2024-08-05 10:24:32 +0100" ; + schema1:url "https://workflowhub.eu/workflows/514/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 105641 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-05-30T05:19:29Z" ; + schema1:dateModified "2024-05-30T05:55:19Z" ; + schema1:description """Take a scRNAseq counts matrix from a single sample, and perform basic QC with scanpy. Then, do further processing by making a UMAP and clustering. 
Produces a processed AnnData \r +object.\r +\r +Depreciated: use individual workflows insead for multiple samples""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/514?version=2" ; + schema1:isPartOf ; + schema1:keywords "scRNAseq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "scRNAseq Single Sample Processing Counts Matrix" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/514?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-23T15:28:30+00:00" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:mainEntity . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:name "main" ; + schema1:programmingLanguage . + + a schema1:Dataset ; + schema1:datePublished "2023-11-27T11:45:55.271114" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions , + , + ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-atac-cutandrun" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.3" . 
+ + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-pe" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.3" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/986?version=12" ; + schema1:isBasedOn "https://github.com/nf-core/fetchngs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/fetchngs" ; + schema1:sdDatePublished "2024-08-05 10:23:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/986/ro_crate?version=12" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9517 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:53Z" ; + schema1:dateModified "2024-06-11T12:54:53Z" ; + schema1:description "Pipeline to fetch metadata and raw FastQ files from public databases" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/986?version=13" ; + schema1:keywords "ddbj, download, ena, FASTQ, geo, sra, synapse" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/fetchngs" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/986?version=12" ; + schema1:version 12 . 
+ + a schema1:Dataset ; + schema1:datePublished "2023-11-08T17:33:58.391559" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.17" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=23" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-08-05 10:24:00 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=23" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13434 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:51Z" ; + schema1:dateModified "2024-06-11T12:54:51Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=23" ; + schema1:version 23 . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-02-05T13:35:16.224942" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.20" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# BAM-to-FASTQ-QC\r +\r +## General recommendations for using BAM-to-FASTQ-QC\r +Please see the [`Genome assembly with hifiasm on Galaxy Australia`](https://australianbiocommons.github.io/how-to-guides/genome_assembly/hifi_assembly) guide.\r +\r +## Acknowledgements\r +\r +The workflow & the [doc_guidelines template used](https://github.com/AustralianBioCommons/doc_guidelines) are supported by the Australian BioCommons via Bioplatforms Australia funding, the Australian Research Data Commons (https://doi.org/10.47486/PL105) and the Queensland Government RICF programme. Bioplatforms Australia and the Australian Research Data Commons are enabled by the National Collaborative Research Infrastructure Strategy (NCRIS).\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.220.2" ; + schema1:isBasedOn "https://github.com/AustralianBioCommons/BAM-to-FASTQ-QC" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for BAM to FASTQ + QC v1.0" ; + schema1:sdDatePublished "2024-08-05 10:31:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/220/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 10441 ; + schema1:creator ; + schema1:dateCreated "2022-10-17T02:51:00Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """# BAM-to-FASTQ-QC\r +\r +## General recommendations for using BAM-to-FASTQ-QC\r +Please see the [`Genome assembly with hifiasm on Galaxy Australia`](https://australianbiocommons.github.io/how-to-guides/genome_assembly/hifi_assembly) guide.\r +\r +## Acknowledgements\r +\r +The workflow & the [doc_guidelines template used](https://github.com/AustralianBioCommons/doc_guidelines) are supported by the Australian BioCommons via Bioplatforms Australia funding, the Australian Research Data Commons (https://doi.org/10.47486/PL105) and the Queensland Government RICF programme. Bioplatforms Australia and the Australian Research Data Commons are enabled by the National Collaborative Research Infrastructure Strategy (NCRIS).\r +""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/220?version=1" ; + schema1:isPartOf , + ; + schema1:keywords "BAM, FASTQ, Conversion, QC" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "BAM to FASTQ + QC v1.0" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/220?version=2" ; + schema1:version 2 ; + ns1:input ; + ns1:output , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2023-11-23T06:34:49+00:00" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/AustralianBioCommons/Genome-assessment-post-assembly.git" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Genome-assessment-post-assembly" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Genome-assessment-post-assembly" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/AustralianBioCommons/Genome-assessment-post-assembly.git" ; + schema1:version "main" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.276.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for 
Python Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:31:03 +0100" ; + schema1:url "https://workflowhub.eu/workflows/276/ro_crate?version=3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6826 ; + schema1:creator , + ; + schema1:dateCreated "2023-04-14T07:46:27Z" ; + schema1:dateModified "2023-04-14T07:49:00Z" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/276?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; 
+ schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_md_setup/python/workflow.py" ; + schema1:version 3 . + + a schema1:Dataset ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """This is an experimental KNIME workflow of using the BioExcel building blocks to implement the Protein MD Setup tutorial for molecular dynamics with GROMACS.\r +\r +Note that this workflow won't import in KNIME without the [experimental KNIME nodes](https://bioexcel.eu/research/projects/biobb_knime/) for BioBB - contact Adam Hospital for details.""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.201.1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Protein MD Setup tutorial using BioExcel Building Blocks (biobb) in KNIME" ; + schema1:sdDatePublished "2024-08-05 10:33:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/201/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:creator ; + schema1:dateCreated "2021-09-29T14:25:42Z" ; + schema1:dateModified "2022-04-11T09:27:55Z" ; + schema1:description """This is an experimental KNIME workflow of using the BioExcel building blocks to implement the Protein MD Setup tutorial for molecular dynamics with GROMACS.\r +\r +Note that this workflow won't import in KNIME without the [experimental KNIME nodes](https://bioexcel.eu/research/projects/biobb_knime/) for BioBB - contact Adam Hospital for details.""" ; + schema1:image ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Protein MD Setup tutorial using BioExcel Building Blocks (biobb) in KNIME" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/201?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 329944 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/233?version=1" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for 16S_biodiversity_for_nonoverlap_paired_end" ; + schema1:sdDatePublished "2024-08-05 10:32:55 +0100" ; + schema1:url "https://workflowhub.eu/workflows/233/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 59678 ; + schema1:dateCreated "2021-11-10T00:16:43Z" ; + schema1:dateModified "2024-04-17T04:17:52Z" ; + schema1:description "" ; + schema1:isPartOf ; + schema1:keywords "MetaDEGalaxy" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "16S_biodiversity_for_nonoverlap_paired_end" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/233?version=1" ; + schema1:version 1 ; + ns1:input ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r 
+![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.285.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_abc_md_setup/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python ABC MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/285/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8001 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-17T10:52:38Z" ; + schema1:dateModified "2022-04-11T09:29:43Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 
2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/285?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python ABC MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_abc_md_setup/python/workflow.py" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-27T10:02:38.084617" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/gromacs-mmgbsa" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "gromacs-mmgbsa/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:Dataset ; + schema1:datePublished "2024-06-27T11:47:36.658528" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Plot-Nx-Size" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Plot-Nx-Size/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# ONTViSc (ONT-based Viral Screening for Biosecurity)\r +\r +## Introduction\r +eresearchqut/ontvisc is a Nextflow-based bioinformatics pipeline designed to help diagnostics of viruses and viroid pathogens for biosecurity. 
It takes fastq files generated from either amplicon or whole-genome sequencing using Oxford Nanopore Technologies as input.\r +\r +The pipeline can either: 1) perform a direct search on the sequenced reads, 2) generate clusters, 3) assemble the reads to generate longer contigs or 4) directly map reads to a known reference. \r +\r +The reads can optionally be filtered from a plant host before performing downstream analysis.\r +\r +## Pipeline overview\r +- Data quality check (QC) and preprocessing\r + - Merge fastq files (optional)\r + - Raw fastq file QC (Nanoplot)\r + - Trim adaptors (PoreChop ABI - optional)\r + - Filter reads based on length and/or quality (Chopper - optional)\r + - Reformat fastq files so read names are trimmed after the first whitespace (bbmap)\r + - Processed fastq file QC (if PoreChop and/or Chopper is run) (Nanoplot)\r +- Host read filtering\r + - Align reads to host reference provided (Minimap2)\r + - Extract reads that do not align for downstream analysis (seqtk)\r +- QC report\r + - Derive read counts recovered pre and post data processing and post host filtering\r +- Read classification analysis mode\r +- Clustering mode\r + - Read clustering (Rattle)\r + - Convert fastq to fasta format (seqtk)\r + - Cluster scaffolding (Cap3)\r + - Megablast homology search against ncbi or custom database (blast)\r + - Derive top candidate viral hits\r +- De novo assembly mode\r + - De novo assembly (Canu or Flye)\r + - Megablast homology search against ncbi or custom database or reference (blast)\r + - Derive top candidate viral hits\r +- Read classification mode\r + - Option 1 Nucleotide-based taxonomic classification of reads (Kraken2, Braken)\r + - Option 2 Protein-based taxonomic classification of reads (Kaiju, Krona)\r + - Option 3 Convert fastq to fasta format (seqtk) and perform direct homology search using megablast (blast)\r +- Map to reference mode\r + - Align reads to reference fasta file (Minimap2) and derive bam file and alignment 
statistics (Samtools)\r +\r +Detailed instructions can be found on [GitHub](https://github.com/eresearchqut/ontvisc/).\r +A step-by-step guide with instructions on how to set up and execute the ONTvisc pipeline on one of the HPC systems: Lyra (Queensland University of Technology), Setonix (Pawsey) and Gadi (National Computational Infrastructure) can be found [here](https://mantczakaus.github.io/ontvisc_guide/).\r +\r +### Authors\r +Marie-Emilie Gauthier \r +Craig Windell \r +Magdalena Antczak \r +Roberto Barrero """ ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.683.1" ; + schema1:isBasedOn "https://github.com/eresearchqut/ontvisc.git" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ONTViSc (ONT-based Viral Screening for Biosecurity)" ; + schema1:sdDatePublished "2024-08-05 10:25:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/683/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 1243142 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 35839 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2023-12-04T01:42:40Z" ; + schema1:dateModified "2024-02-19T05:24:24Z" ; + schema1:description """# ONTViSc (ONT-based Viral Screening for Biosecurity)\r +\r +## Introduction\r +eresearchqut/ontvisc is a Nextflow-based bioinformatics pipeline designed to help diagnostics of viruses and viroid pathogens for biosecurity. It takes fastq files generated from either amplicon or whole-genome sequencing using Oxford Nanopore Technologies as input.\r +\r +The pipeline can either: 1) perform a direct search on the sequenced reads, 2) generate clusters, 3) assemble the reads to generate longer contigs or 4) directly map reads to a known reference. 
\r +\r +The reads can optionally be filtered from a plant host before performing downstream analysis.\r +\r +## Pipeline overview\r +- Data quality check (QC) and preprocessing\r + - Merge fastq files (optional)\r + - Raw fastq file QC (Nanoplot)\r + - Trim adaptors (PoreChop ABI - optional)\r + - Filter reads based on length and/or quality (Chopper - optional)\r + - Reformat fastq files so read names are trimmed after the first whitespace (bbmap)\r + - Processed fastq file QC (if PoreChop and/or Chopper is run) (Nanoplot)\r +- Host read filtering\r + - Align reads to host reference provided (Minimap2)\r + - Extract reads that do not align for downstream analysis (seqtk)\r +- QC report\r + - Derive read counts recovered pre and post data processing and post host filtering\r +- Read classification analysis mode\r +- Clustering mode\r + - Read clustering (Rattle)\r + - Convert fastq to fasta format (seqtk)\r + - Cluster scaffolding (Cap3)\r + - Megablast homology search against ncbi or custom database (blast)\r + - Derive top candidate viral hits\r +- De novo assembly mode\r + - De novo assembly (Canu or Flye)\r + - Megablast homology search against ncbi or custom database or reference (blast)\r + - Derive top candidate viral hits\r +- Read classification mode\r + - Option 1 Nucleotide-based taxonomic classification of reads (Kraken2, Braken)\r + - Option 2 Protein-based taxonomic classification of reads (Kaiju, Krona)\r + - Option 3 Convert fastq to fasta format (seqtk) and perform direct homology search using megablast (blast)\r +- Map to reference mode\r + - Align reads to reference fasta file (Minimap2) and derive bam file and alignment statistics (Samtools)\r +\r +Detailed instructions can be found on [GitHub](https://github.com/eresearchqut/ontvisc/).\r +A step-by-step guide with instructions on how to set up and execute the ONTvisc pipeline on one of the HPC systems: Lyra (Queensland University of Technology), Setonix (Pawsey) and Gadi (National Computational 
Infrastructure) can be found [here](https://mantczakaus.github.io/ontvisc_guide/).\r +\r +### Authors\r +Marie-Emilie Gauthier \r +Craig Windell \r +Magdalena Antczak \r +Roberto Barrero """ ; + schema1:image ; + schema1:keywords "Assembly, Bioinformatics, Virology, blast, Nextflow, ONT, singularity, Virus" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "ONTViSc (ONT-based Viral Screening for Biosecurity)" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/683?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.286.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_dna_helparms/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Structural DNA helical parameters tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:43 +0100" ; + schema1:url "https://workflowhub.eu/workflows/286/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 22900 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-17T11:10:00Z" ; + schema1:dateModified "2022-04-11T09:29:43Z" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/286?version=3" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Structural DNA helical parameters tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_dna_helparms/python/workflow.py" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """## Introduction\r +\r +**vibbits/rnaseq-editing** is a bioinformatics pipeline that can be used to analyse RNA sequencing data obtained from organisms with a reference genome and annotation followed by a prediction step of editing sites using RDDpred.\r +\r +The pipeline is largely based on the [nf-core RNAseq pipeline](https://nf-co.re/rnaseq/).\r +\r +The initial nf-core pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +## Pipeline summary\r +\r +1. Merge re-sequenced FastQ files ([`cat`](http://www.linfo.org/cat.html))\r +2. Read QC ([`FastQC`](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/))\r +3. Adapter and quality trimming ([`Trimmomatics`](https://www.bioinformatics.babraham.ac.uk/projects/trim_galore/))\r +4. Use of STAR for multiple alignment and quantification: [`STAR`](https://github.com/alexdobin/STAR)\r +5. Sort and index alignments ([`SAMtools`](https://sourceforge.net/projects/samtools/files/samtools/))\r +6. Prediction of editing sites using RDDpred ([`RDDpred`](https://github.com/vibbits/RDDpred))\r +7. Extensive quality control:\r + 1. [`RSeQC`](http://rseqc.sourceforge.net/)\r + 2. [`Qualimap`](http://qualimap.bioinfo.cipf.es/)\r + 3. [`dupRadar`](https://bioconductor.org/packages/release/bioc/html/dupRadar.html)\r +8. 
Present QC for raw read, alignment, gene biotype, sample similarity, and strand-specificity checks ([`MultiQC`](http://multiqc.info/), [`R`](https://www.r-project.org/))\r +\r +## Quick Start\r +\r +1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.04.0`)\r +\r +2. Install [`Docker`](https://docs.docker.com/engine/installation/) on a Linux operating system.\r + Note: This pipeline does not currently support running with macOS.\r +\r +3. Download the pipeline via git clone, download the associated training data files for RDDpred into the assets folder, download the reference data to \r +\r + ```console\r + git clone https://github.com/vibbits/rnaseq-editing.git\r + cd $(pwd)/rnaseq-editing/assets\r + # download training data file for RDDpred\r + wget -c \r + # download reference data for your genome, we provide genome and indexed genome for STAR 2.7.3a\r + \r + ```\r +\r + > * Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment.\r +\r +4. 
Start running your own analysis using Docker locally!\r +\r + ```console\r + nextflow run vibbits/rnaseq-editing \\\r + --input samplesheet.csv \\\r + --genome hg19 \\\r + -profile docker\r + ```\r +\r + * An executable Python script called [`fastq_dir_to_samplesheet.py`](https://github.com/nf-core/rnaseq/blob/master/bin/fastq_dir_to_samplesheet.py) has been provided if you would like to auto-create an input samplesheet based on a directory containing FastQ files **before** you run the pipeline (requires Python 3 installed locally) e.g.\r +\r + ```console\r + wget -L https://raw.githubusercontent.com/nf-core/rnaseq/master/bin/fastq_dir_to_samplesheet.py\r + ./fastq_dir_to_samplesheet.py samplesheet.csv --strandedness reverse\r + ```\r +\r + * The final analysis has been executed on the Azure platform using Azure Kubernetes Services (AKS). AKS has to be set up on the Azure platform by defining a standard node pool called sys next to the scalable node pool cpumem using Standard_E8ds_v4 as node size for calculation.\r + Furthermore, persistent volume claims (PVCs) have been setup for input and work folders of the nextflow runs. In the PVC `input` the reference data as well as the fastqc files have been stored where the PVC `work`, the temporary nextflow files for the individual runs as well as the output files have been stored.\r + * The config file for the final execution run for [RNAseq editing for the human samples and reference genome hg19](https://github.com/vibbits/rnaseq-editing/blob/master/nextflow.config.as-executed). \r +\r +## Documentation\r +\r +The nf-core/rnaseq pipeline comes with documentation about the pipeline [usage](https://nf-co.re/rnaseq/usage), [parameters](https://nf-co.re/rnaseq/parameters) and [output](https://nf-co.re/rnaseq/output).\r +\r +## Credits\r +These scripts were written to provide a reproducible data analysis pipeline until the downstream processing using dedicated R scripts for exploratory analysis and plotting. 
The general structure of pipeline is based on the data analysis steps of the our recent paper [ADAR1 interaction with Z-RNA promotes editing of endogenous double-stranded RNA and prevents MDA5-dependent immune activation](https://pubmed.ncbi.nlm.nih.gov/34380029/).\r +\r +Note: The nf-core scripts this pipeline is based on were originally written for use at the [National Genomics Infrastructure](https://ngisweden.scilifelab.se), part of [SciLifeLab](http://www.scilifelab.se/) in Stockholm, Sweden, by Phil Ewels ([@ewels](https://github.com/ewels)) and Rickard Hammarén ([@Hammarn](https://github.com/Hammarn)).\r +\r +The RNAseq pipeline was re-written in Nextflow DSL2 by Harshil Patel ([@drpatelh](https://github.com/drpatelh)) from [The Bioinformatics & Biostatistics Group](https://www.crick.ac.uk/research/science-technology-platforms/bioinformatics-and-biostatistics/) at [The Francis Crick Institute](https://www.crick.ac.uk/), London.\r +\r +## Citations\r +\r +The `nf-core` publication is cited here as follows:\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/264?version=1" ; + schema1:isBasedOn "https://github.com/vibbits/rnaseq-editing.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for RNA sequencing data obtained from organisms with a reference genome and annotation followed by a prediction step of editing sites using RDDpred" ; + schema1:sdDatePublished "2024-08-05 10:31:37 +0100" ; + schema1:url "https://workflowhub.eu/workflows/264/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2455 ; + schema1:dateCreated "2022-01-27T10:44:25Z" ; + schema1:dateModified "2023-01-16T13:57:29Z" ; + schema1:description """## Introduction\r +\r +**vibbits/rnaseq-editing** is a bioinformatics pipeline that can be used to analyse RNA sequencing data obtained from organisms with a reference genome and annotation followed by a prediction step of editing sites using RDDpred.\r +\r +The pipeline is largely based on the [nf-core RNAseq pipeline](https://nf-co.re/rnaseq/).\r +\r +The initial nf-core pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community!\r +\r +## Pipeline summary\r +\r +1. Merge re-sequenced FastQ files ([`cat`](http://www.linfo.org/cat.html))\r +2. Read QC ([`FastQC`](https://www.bioinformatics.babraham.ac.uk/projects/fastqc/))\r +3. Adapter and quality trimming ([`Trimmomatics`](https://www.bioinformatics.babraham.ac.uk/projects/trim_galore/))\r +4. Use of STAR for multiple alignment and quantification: [`STAR`](https://github.com/alexdobin/STAR)\r +5. Sort and index alignments ([`SAMtools`](https://sourceforge.net/projects/samtools/files/samtools/))\r +6. Prediction of editing sites using RDDpred ([`RDDpred`](https://github.com/vibbits/RDDpred))\r +7. 
Extensive quality control:\r + 1. [`RSeQC`](http://rseqc.sourceforge.net/)\r + 2. [`Qualimap`](http://qualimap.bioinfo.cipf.es/)\r + 3. [`dupRadar`](https://bioconductor.org/packages/release/bioc/html/dupRadar.html)\r +8. Present QC for raw read, alignment, gene biotype, sample similarity, and strand-specificity checks ([`MultiQC`](http://multiqc.info/), [`R`](https://www.r-project.org/))\r +\r +## Quick Start\r +\r +1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.04.0`)\r +\r +2. Install [`Docker`](https://docs.docker.com/engine/installation/) on a Linux operating system.\r + Note: This pipeline does not currently support running with macOS.\r +\r +3. Download the pipeline via git clone, download the associated training data files for RDDpred into the assets folder, download the reference data to \r +\r + ```console\r + git clone https://github.com/vibbits/rnaseq-editing.git\r + cd $(pwd)/rnaseq-editing/assets\r + # download training data file for RDDpred\r + wget -c \r + # download reference data for your genome, we provide genome and indexed genome for STAR 2.7.3a\r + \r + ```\r +\r + > * Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment.\r +\r +4. 
Start running your own analysis using Docker locally!\r +\r + ```console\r + nextflow run vibbits/rnaseq-editing \\\r + --input samplesheet.csv \\\r + --genome hg19 \\\r + -profile docker\r + ```\r +\r + * An executable Python script called [`fastq_dir_to_samplesheet.py`](https://github.com/nf-core/rnaseq/blob/master/bin/fastq_dir_to_samplesheet.py) has been provided if you would like to auto-create an input samplesheet based on a directory containing FastQ files **before** you run the pipeline (requires Python 3 installed locally) e.g.\r +\r + ```console\r + wget -L https://raw.githubusercontent.com/nf-core/rnaseq/master/bin/fastq_dir_to_samplesheet.py\r + ./fastq_dir_to_samplesheet.py samplesheet.csv --strandedness reverse\r + ```\r +\r + * The final analysis has been executed on the Azure platform using Azure Kubernetes Services (AKS). AKS has to be set up on the Azure platform by defining a standard node pool called sys next to the scalable node pool cpumem using Standard_E8ds_v4 as node size for calculation.\r + Furthermore, persistent volume claims (PVCs) have been setup for input and work folders of the nextflow runs. In the PVC `input` the reference data as well as the fastqc files have been stored where the PVC `work`, the temporary nextflow files for the individual runs as well as the output files have been stored.\r + * The config file for the final execution run for [RNAseq editing for the human samples and reference genome hg19](https://github.com/vibbits/rnaseq-editing/blob/master/nextflow.config.as-executed). \r +\r +## Documentation\r +\r +The nf-core/rnaseq pipeline comes with documentation about the pipeline [usage](https://nf-co.re/rnaseq/usage), [parameters](https://nf-co.re/rnaseq/parameters) and [output](https://nf-co.re/rnaseq/output).\r +\r +## Credits\r +These scripts were written to provide a reproducible data analysis pipeline until the downstream processing using dedicated R scripts for exploratory analysis and plotting. 
The general structure of pipeline is based on the data analysis steps of the our recent paper [ADAR1 interaction with Z-RNA promotes editing of endogenous double-stranded RNA and prevents MDA5-dependent immune activation](https://pubmed.ncbi.nlm.nih.gov/34380029/).\r +\r +Note: The nf-core scripts this pipeline is based on were originally written for use at the [National Genomics Infrastructure](https://ngisweden.scilifelab.se), part of [SciLifeLab](http://www.scilifelab.se/) in Stockholm, Sweden, by Phil Ewels ([@ewels](https://github.com/ewels)) and Rickard Hammarén ([@Hammarn](https://github.com/Hammarn)).\r +\r +The RNAseq pipeline was re-written in Nextflow DSL2 by Harshil Patel ([@drpatelh](https://github.com/drpatelh)) from [The Bioinformatics & Biostatistics Group](https://www.crick.ac.uk/research/science-technology-platforms/bioinformatics-and-biostatistics/) at [The Francis Crick Institute](https://www.crick.ac.uk/), London.\r +\r +## Citations\r +\r +The `nf-core` publication is cited here as follows:\r +\r +> **The nf-core framework for community-curated bioinformatics pipelines.**\r +>\r +> Philip Ewels, Alexander Peltzer, Sven Fillinger, Harshil Patel, Johannes Alneberg, Andreas Wilm, Maxime Ulysse Garcia, Paolo Di Tommaso & Sven Nahnsen.\r +>\r +> _Nat Biotechnol._ 2020 Feb 13. doi: [10.1038/s41587-020-0439-x](https://dx.doi.org/10.1038/s41587-020-0439-x).\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "RNA sequencing data obtained from organisms with a reference genome and annotation followed by a prediction step of editing sites using RDDpred" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/264?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# HiC scaffolding pipeline\r +\r +Snakemake pipeline for scaffolding of a genome using HiC reads using yahs.\r +\r +## Prerequisites\r +\r +This pipeine has been tested using `Snakemake v7.32.4` and requires conda for installation of required tools. To run the pipline use the command:\r +\r +`snakemake --use-conda --cores N`\r +\r +where N is number of cores to use. There are provided a set of configuration and running scripts for exectution on a slurm queueing system. After configuring the `cluster.json` file run:\r +\r +`./run_cluster`\r +\r +## Before starting\r +\r +You need to create a temporary folder and specify the path in the `config.yaml` file. This should be able to hold the temporary files created when sorting the `.pairsam` file (100s of GB or even many TBs)\r +\r +The path to the genome assemly must be given in the `config.yaml`.\r +\r +The HiC reads should be paired and named as follows: `Library_1.fastq.gz Library_2.fastq.gz`. The pipeline can accept any number of paired HiC read files, but the naming must be consistent. The folder containing these files must be provided in the `config.yaml`.""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.796.2" ; + schema1:isBasedOn "https://github.com/ERGA-consortium/pipelines/tree/main/assembly/snakemake/4.Scaffolding/yahs" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for HiC scaffolding pipeline" ; + schema1:sdDatePublished "2024-08-05 10:25:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/796/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4471 ; + schema1:creator ; + schema1:dateCreated "2024-06-21T09:42:46Z" ; + schema1:dateModified "2024-06-21T09:43:16Z" ; + schema1:description """# HiC scaffolding pipeline\r +\r +Snakemake pipeline for scaffolding of a genome using HiC reads using yahs.\r +\r +## Prerequisites\r +\r +This pipeine has been tested using `Snakemake v7.32.4` and requires conda for installation of required tools. To run the pipline use the command:\r +\r +`snakemake --use-conda --cores N`\r +\r +where N is number of cores to use. There are provided a set of configuration and running scripts for exectution on a slurm queueing system. After configuring the `cluster.json` file run:\r +\r +`./run_cluster`\r +\r +## Before starting\r +\r +You need to create a temporary folder and specify the path in the `config.yaml` file. This should be able to hold the temporary files created when sorting the `.pairsam` file (100s of GB or even many TBs)\r +\r +The path to the genome assemly must be given in the `config.yaml`.\r +\r +The HiC reads should be paired and named as follows: `Library_1.fastq.gz Library_2.fastq.gz`. The pipeline can accept any number of paired HiC read files, but the naming must be consistent. The folder containing these files must be provided in the `config.yaml`.""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/796?version=1" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "HiC scaffolding pipeline" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/796?version=2" ; + schema1:version 2 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/278?version=8" ; + schema1:isBasedOn "https://gitlab.bsc.es/lrodrig1/structuralvariants_poc/-/tree/1.1.3/structuralvariants/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CNV_pipeline" ; + schema1:sdDatePublished "2024-08-05 10:31:59 +0100" ; + schema1:url "https://workflowhub.eu/workflows/278/ro_crate?version=8" . + + a schema1:ComputerLanguage ; + schema1:alternateName "CWL" ; + schema1:identifier ; + schema1:name "Common Workflow Language" ; + schema1:url . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 9694 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9443 ; + schema1:creator , + ; + schema1:dateCreated "2022-06-30T12:00:53Z" ; + schema1:dateModified "2022-06-30T12:00:53Z" ; + schema1:description """# StructuralVariants Workflow\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/278?version=10" ; + schema1:keywords "cancer, CODEX2, ExomeDepth, manta, TransBioNet, variant calling, GRIDSS, structural variants" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CNV_pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/278?version=8" ; + schema1:version 8 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 72963 . + + a schema1:Dataset ; + schema1:datePublished "2023-09-25T12:10:50.925708" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/atacseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "atacseq/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.12" . + + a schema1:Dataset ; + schema1:datePublished "2023-11-20T09:17:15.968072" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.18" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/991?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/hlatyping" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hlatyping" ; + schema1:sdDatePublished "2024-08-05 10:22:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/991/ro_crate?version=7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3910 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:56Z" ; + schema1:dateModified "2024-06-11T12:54:56Z" ; + schema1:description "Precision HLA typing from next-generation sequencing data." 
; + schema1:isBasedOn "https://workflowhub.eu/workflows/991?version=9" ; + schema1:keywords "DNA, hla, hla-typing, immunology, optitype, personalized-medicine, rna" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hlatyping" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/991?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + schema1:datePublished "2022-06-13T14:36:29.891474" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/VGP-meryldb-creation" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "VGP-meryldb-creation/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.11" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description """# workflow-partial-gstacks-populations\r +\r +These workflows are part of a set designed to work for RAD-seq data on the Galaxy platform, using the tools from the Stacks program. \r +\r +Galaxy Australia: https://usegalaxy.org.au/\r +\r +Stacks: http://catchenlab.life.illinois.edu/stacks/\r +\r +This workflow is part of the reference-guided stacks workflow, https://workflowhub.eu/workflows/347\r +\r + This workflow takes in bam files and a population map. \r +\r +To generate bam files see: https://workflowhub.eu/workflows/351\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/352?version=1" ; + schema1:isBasedOn "https://github.com/AnnaSyme/workflow-partial-gstacks-populations.git" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Partial ref-guided workflow - gstacks and pops" ; + schema1:sdDatePublished "2024-08-05 10:32:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/352/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 14430 ; + schema1:creator ; + schema1:dateCreated "2022-05-31T08:12:30Z" ; + schema1:dateModified "2023-01-30T18:21:31Z" ; + schema1:description """# workflow-partial-gstacks-populations\r +\r +These workflows are part of a set designed to work for RAD-seq data on the Galaxy platform, using the tools from the Stacks program. \r +\r +Galaxy Australia: https://usegalaxy.org.au/\r +\r +Stacks: http://catchenlab.life.illinois.edu/stacks/\r +\r +This workflow is part of the reference-guided stacks workflow, https://workflowhub.eu/workflows/347\r +\r + This workflow takes in bam files and a population map. \r +\r +To generate bam files see: https://workflowhub.eu/workflows/351\r +""" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Partial ref-guided workflow - gstacks and pops" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/352?version=1" ; + schema1:version 1 ; + ns1:input , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 28082 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . 
+ + a schema1:Dataset ; + schema1:datePublished "2021-11-04T15:32:26.200967" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-ivar-analysis" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-pe-illumina-artic-ivar-analysis/SARS-COV-2-ILLUMINA-AMPLICON-IVAR-PANGOLIN-NEXTCLADE" ; + schema1:sdDatePublished "2021-11-05 03:00:46 +0000" ; + schema1:softwareVersion "v0.2.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.9" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=21" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-08-05 10:23:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=21" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 18834 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:16Z" ; + schema1:dateModified "2024-06-11T12:55:16Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=21" ; + schema1:version 21 . + + a schema1:Dataset ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-ont-artic-variant-calling" ; + schema1:mainEntity ; + schema1:name "COVID-19-ARTIC-ONT (v0.2)" ; + schema1:sdDatePublished "2021-04-09 03:00:41 +0100" ; + schema1:softwareVersion "v0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 45245 ; + schema1:name "COVID-19-ARTIC-ONT" ; + schema1:programmingLanguage . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1021?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/scrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/scrnaseq" ; + schema1:sdDatePublished "2024-08-05 10:23:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1021/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8849 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:17Z" ; + schema1:dateModified "2024-06-11T12:55:17Z" ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:keywords "10x-genomics, 10xgenomics, alevin, bustools, Cellranger, kallisto, rna-seq, single-cell, star-solo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/scrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1021?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. 
Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/abcix-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.299.3" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_amber_abc_md_setup/galaxy" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Galaxy ABC MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/299/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 102801 ; + schema1:creator , + ; + schema1:dateCreated "2023-05-03T13:54:07Z" ; + schema1:dateModified "2023-05-03T13:55:38Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +***\r +## This workflow must be run in **biobb.usegalaxy.es**. Please, [click here to access](https://biobb.usegalaxy.es/u/gbayarri/w/abcix-md-setup).\r +***\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/299?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Galaxy ABC MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + 
schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_amber_abc_md_setup/galaxy/biobb_wf_amber_abc_md_setup.ga" ; + schema1:version 3 ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/989?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/hic" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hic" ; + schema1:sdDatePublished "2024-08-05 10:23:47 +0100" ; + schema1:url "https://workflowhub.eu/workflows/989/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3921 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:54Z" ; + schema1:dateModified "2024-06-11T12:54:54Z" ; + schema1:description "Analysis of Chromosome Conformation Capture data (Hi-C)" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/989?version=7" ; + schema1:keywords "chromosome-conformation-capture, Hi-C" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hic" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/989?version=2" ; + schema1:version 2 . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 2771028 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. 
The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. 
Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. \r +- At the **biological level**, it is important to link observed **conformational ensembles**, to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDKe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.486.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_flexdyn" ; + schema1:license "other-open" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Protein conformational ensembles generation" ; + schema1:sdDatePublished "2024-08-05 10:30:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/486/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 108347 ; + schema1:creator , + ; + schema1:dateCreated "2023-07-26T09:35:19Z" ; + schema1:dateModified "2023-07-26T09:36:15Z" ; + schema1:description """# Protein Conformational ensembles generation\r +\r +## Workflow included in the [ELIXIR 3D-Bioinfo](https://elixir-europe.org/communities/3d-bioinfo) Implementation Study:\r +\r +### Building on PDBe-KB to chart and characterize the conformation landscape of native proteins\r +\r +This tutorial aims to illustrate the process of generating **protein conformational ensembles** from** 3D structures **and analysing its **molecular flexibility**, step by step, using the **BioExcel Building Blocks library (biobb)**.\r +\r +## Conformational landscape of native proteins\r +**Proteins** are **dynamic** systems that adopt multiple **conformational states**, a property essential for many **biological processes** (e.g. binding other proteins, nucleic acids, small molecule ligands, or switching between functionaly active and inactive states). Characterizing the different **conformational states** of proteins and the transitions between them is therefore critical for gaining insight into their **biological function** and can help explain the effects of genetic variants in **health** and **disease** and the action of drugs.\r +\r +**Structural biology** has become increasingly efficient in sampling the different **conformational states** of proteins. The **PDB** has currently archived more than **170,000 individual structures**, but over two thirds of these structures represent **multiple conformations** of the same or related protein, observed in different crystal forms, when interacting with other proteins or other macromolecules, or upon binding small molecule ligands. 
Charting this conformational diversity across the PDB can therefore be employed to build a useful approximation of the **conformational landscape** of native proteins.\r +\r +A number of resources and **tools** describing and characterizing various often complementary aspects of protein **conformational diversity** in known structures have been developed, notably by groups in Europe. These tools include algorithms with varying degree of sophistication, for aligning the 3D structures of individual protein chains or domains, of protein assemblies, and evaluating their degree of **structural similarity**. Using such tools one can **align structures pairwise**, compute the corresponding **similarity matrix**, and identify ensembles of **structures/conformations** with a defined **similarity level** that tend to recur in different PDB entries, an operation typically performed using **clustering** methods. Such workflows are at the basis of resources such as **CATH, Contemplate, or PDBflex** that offer access to **conformational ensembles** comprised of similar **conformations** clustered according to various criteria. Other types of tools focus on differences between **protein conformations**, identifying regions of proteins that undergo large **collective displacements** in different PDB entries, those that act as **hinges or linkers**, or regions that are inherently **flexible**.\r +\r +To build a meaningful approximation of the **conformational landscape** of native proteins, the **conformational ensembles** (and the differences between them), identified on the basis of **structural similarity/dissimilarity** measures alone, need to be **biophysically characterized**. This may be approached at **two different levels**. 
\r +- At the **biological level**, it is important to link observed **conformational ensembles**, to their **functional roles** by evaluating the correspondence with **protein family classifications** based on sequence information and **functional annotations** in public databases e.g. Uniprot, PDKe-Knowledge Base (KB). These links should provide valuable mechanistic insights into how the **conformational and dynamic properties** of proteins are exploited by evolution to regulate their **biological function**.

\r +\r +- At the **physical level** one needs to introduce **energetic consideration** to evaluate the likelihood that the identified **conformational ensembles** represent **conformational states** that the protein (or domain under study) samples in isolation. Such evaluation is notoriously **challenging** and can only be roughly approximated by using **computational methods** to evaluate the extent to which the observed **conformational ensembles** can be reproduced by algorithms that simulate the **dynamic behavior** of protein systems. These algorithms include the computationally expensive **classical molecular dynamics (MD) simulations** to sample local thermal fluctuations but also faster more approximate methods such as **Elastic Network Models** and **Normal Node Analysis** (NMA) to model low energy **collective motions**. Alternatively, **enhanced sampling molecular dynamics** can be used to model complex types of **conformational changes** but at a very high computational cost. \r +\r +The **ELIXIR 3D-Bioinfo Implementation Study** *Building on PDBe-KB to chart and characterize the conformation landscape of native proteins* focuses on:\r +\r +1. Mapping the **conformational diversity** of proteins and their homologs across the PDB. \r +2. Characterize the different **flexibility properties** of protein regions, and link this information to sequence and functional annotation.\r +3. Benchmark **computational methods** that can predict a biophysical description of protein motions.\r +\r +This notebook is part of the third objective, where a list of **computational resources** that are able to predict **protein flexibility** and **conformational ensembles** have been collected, evaluated, and integrated in reproducible and interoperable workflows using the **BioExcel Building Blocks library**. 
Note that the list is not meant to be exhaustive, it is built following the expertise of the implementation study partners.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/486?version=2" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "" ; + schema1:name "Jupyter Notebook Protein conformational ensembles generation" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_flexdyn/blob/main/biobb_wf_flexdyn/notebooks/biobb_wf_flexdyn.ipynb" ; + schema1:version 2 . + + a schema1:Dataset ; + schema1:datePublished "2023-09-14T22:03:45.620920" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/rnaseq-sr" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "rnaseq-sr/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.11" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1021?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/scrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/scrnaseq" ; + schema1:sdDatePublished "2024-08-05 10:23:14 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1021/ro_crate?version=8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9948 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:17Z" ; + schema1:dateModified "2024-06-11T12:55:17Z" ; + schema1:description "Pipeline for processing 10x Genomics single cell rnaseq data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:keywords "10x-genomics, 10xgenomics, alevin, bustools, Cellranger, kallisto, rna-seq, single-cell, star-solo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/scrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1021?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.825.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/main/biobb_wf_ligand_parameterization/docker" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Docker GMX Notebook Automatic Ligand Parameterization tutorial" ; + schema1:sdDatePublished "2024-08-05 10:24:54 +0100" ; + schema1:url "https://workflowhub.eu/workflows/825/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 794 ; + schema1:creator , + ; + schema1:dateCreated "2024-04-22T10:54:09Z" ; + schema1:dateModified "2024-05-22T13:39:14Z" ; + schema1:description """# Automatic Ligand parameterization tutorial using BioExcel Building Blocks (biobb)\r +\r +***\r +\r +This tutorial aims to illustrate the process of **ligand parameterization** for a **small molecule**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Sulfasalazine** protein (3-letter code SAS), used to treat rheumatoid arthritis, ulcerative colitis, and Crohn's disease.\r +\r +**OpenBabel and ACPype** packages are used to **add hydrogens, energetically minimize the structure**, and **generate parameters** for the **GROMACS** package. With *Generalized Amber Force Field (GAFF) forcefield and AM1-BCC* charges.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Docker GMX Notebook Automatic Ligand Parameterization tutorial" ; + schema1:producer ; + 
schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/main/biobb_wf_ligand_parameterization/docker/Dockerfile" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Deprecated" ; + schema1:description "A pipeline for mapping, calling, and annotation of SARS-CoV2 variants." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/105?version=1" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ENA SARS-CoV2 Variant Calling" ; + schema1:sdDatePublished "2024-08-05 10:27:13 +0100" ; + schema1:url "https://workflowhub.eu/workflows/105/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 8162 ; + schema1:creator ; + schema1:dateCreated "2021-02-15T09:19:44Z" ; + schema1:dateModified "2023-11-24T10:02:53Z" ; + schema1:description "A pipeline for mapping, calling, and annotation of SARS-CoV2 variants." ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "ENA SARS-CoV2 Variant Calling" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/105?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "SLAMseq analysis using Slamdunk with various T>C conversion quantifications and QC" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1022?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/slamseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/slamseq" ; + schema1:sdDatePublished "2024-08-05 10:23:13 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1022/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4642 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:18Z" ; + schema1:dateModified "2024-06-11T12:55:18Z" ; + schema1:description "SLAMseq analysis using Slamdunk with various T>C conversion quantifications and QC" ; + schema1:keywords "differential-expression, quantseq, slamseq, Transcriptomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/slamseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1022?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """A CWL-based pipeline for processing RNA-Seq data (FASTQ format) and performing differential gene/transcript expression analysis. \r +\r +On the respective GitHub folder are available:\r +\r +- The CWL wrappers for the workflow\r +- A pre-configured YAML template, based on validation analysis of publicly available HTS data\r +- A table of metadata (``mrna_cll_subsets_phenotypes.csv``), based on the same validation analysis, to serve as an input example for the design of comparisons during differential expression analysis\r +\r +Briefly, the workflow performs the following steps:\r +\r +1. Quality control of Illumina reads (FastQC)\r +2. Trimming of the reads (e.g., removal of adapter and/or low quality sequences) (Trim galore)\r +3. (Optional) custom processing of the reads using FASTA/Q Trimmer (part of the FASTX-toolkit) \r +4. Mapping to reference genome (HISAT2)\r +5. Convertion of mapped reads from SAM (Sequence Alignment Map) to BAM (Binary Alignment Map) format (samtools)\r +6. 
Sorting mapped reads based on chromosomal coordinates (samtools)\r +\r +Subsequently, two independent workflows are implemented for differential expression analysis at the transcript and gene level. \r +\r +**First**, following the [reference protocol](https://doi.org/10.1038/nprot.2016.095) for HISAT, StringTie and Ballgown transcript expression analysis, StringTie along with a reference transcript annotation GTF (Gene Transfer Format) file (if one is available) is used to:\r +\r +- Assemble transcripts for each RNA-Seq sample using the previous read alignments (BAM files)\r +- Generate a global, non-redundant set of transcripts observed in any of the RNA-Seq samples\r +- Estimate transcript abundances and generate read coverage tables for each RNA-Seq sample, based on the global, merged set of transcripts (rather than the reference) which is observed across all samples\r +\r +Ballgown program is then used to load the coverage tables generated in the previous step and perform statistical analyses for differential expression at the transcript level. Notably, the StringTie - Ballgown protocol applied here was selected to include potentially novel transcripts in the analysis. \r +\r +**Second**, featureCounts is used to count reads that are mapped to selected genomic features, in this case genes by default, and generate a table of read counts per gene and sample. This table is passed as input to DESeq2 to perform differential expression analysis at the gene level. 
Both Ballgown and DESeq2 R scripts, along with their respective CWL wrappers, were designed to receive as input various parameters, such as experimental design, contrasts of interest, numeric thresholds, and hidden batch effects.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.524.1" ; + schema1:isBasedOn "https://github.com/BiodataAnalysisGroup/CWL_HTS_pipelines/tree/main/RNA_Seq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL-based RNA-Seq workflow" ; + schema1:sdDatePublished "2024-08-05 10:30:01 +0100" ; + schema1:url "https://workflowhub.eu/workflows/524/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 24199 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-07-05T08:44:44Z" ; + schema1:dateModified "2023-07-05T09:16:36Z" ; + schema1:description """A CWL-based pipeline for processing RNA-Seq data (FASTQ format) and performing differential gene/transcript expression analysis. \r +\r +On the respective GitHub folder are available:\r +\r +- The CWL wrappers for the workflow\r +- A pre-configured YAML template, based on validation analysis of publicly available HTS data\r +- A table of metadata (``mrna_cll_subsets_phenotypes.csv``), based on the same validation analysis, to serve as an input example for the design of comparisons during differential expression analysis\r +\r +Briefly, the workflow performs the following steps:\r +\r +1. Quality control of Illumina reads (FastQC)\r +2. Trimming of the reads (e.g., removal of adapter and/or low quality sequences) (Trim galore)\r +3. (Optional) custom processing of the reads using FASTA/Q Trimmer (part of the FASTX-toolkit) \r +4. Mapping to reference genome (HISAT2)\r +5. 
Convertion of mapped reads from SAM (Sequence Alignment Map) to BAM (Binary Alignment Map) format (samtools)\r +6. Sorting mapped reads based on chromosomal coordinates (samtools)\r +\r +Subsequently, two independent workflows are implemented for differential expression analysis at the transcript and gene level. \r +\r +**First**, following the [reference protocol](https://doi.org/10.1038/nprot.2016.095) for HISAT, StringTie and Ballgown transcript expression analysis, StringTie along with a reference transcript annotation GTF (Gene Transfer Format) file (if one is available) is used to:\r +\r +- Assemble transcripts for each RNA-Seq sample using the previous read alignments (BAM files)\r +- Generate a global, non-redundant set of transcripts observed in any of the RNA-Seq samples\r +- Estimate transcript abundances and generate read coverage tables for each RNA-Seq sample, based on the global, merged set of transcripts (rather than the reference) which is observed across all samples\r +\r +Ballgown program is then used to load the coverage tables generated in the previous step and perform statistical analyses for differential expression at the transcript level. Notably, the StringTie - Ballgown protocol applied here was selected to include potentially novel transcripts in the analysis. \r +\r +**Second**, featureCounts is used to count reads that are mapped to selected genomic features, in this case genes by default, and generate a table of read counts per gene and sample. This table is passed as input to DESeq2 to perform differential expression analysis at the gene level. 
Both Ballgown and DESeq2 R scripts, along with their respective CWL wrappers, were designed to receive as input various parameters, such as experimental design, contrasts of interest, numeric thresholds, and hidden batch effects.\r +""" ; + schema1:image ; + schema1:keywords "RNASEQ, Transcriptomics, CWL, workflow" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "CWL-based RNA-Seq workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/524?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 168876 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/991?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/hlatyping" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/hlatyping" ; + schema1:sdDatePublished "2024-08-05 10:22:53 +0100" ; + schema1:url "https://workflowhub.eu/workflows/991/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3272 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:55Z" ; + schema1:dateModified "2024-06-11T12:54:55Z" ; + schema1:description "Precision HLA typing from next-generation sequencing data." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/991?version=9" ; + schema1:keywords "DNA, hla, hla-typing, immunology, optitype, personalized-medicine, rna" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/hlatyping" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/991?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1025?version=3" ; + schema1:isBasedOn "https://github.com/nf-core/taxprofiler" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/taxprofiler" ; + schema1:sdDatePublished "2024-08-05 10:23:08 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1025/ro_crate?version=3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 15361 ; + schema1:creator , + , + , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:19Z" ; + schema1:dateModified "2024-06-11T12:55:19Z" ; + schema1:description "Taxonomic classification and profiling of shotgun short- and long-read metagenomic data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1025?version=10" ; + schema1:keywords "Classification, illumina, long-reads, Metagenomics, microbiome, nanopore, pathogen, Profiling, shotgun, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/taxprofiler" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1025?version=3" ; + schema1:version 3 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.195.5" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_dna_helparms" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Structural DNA helical parameters tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/195/ro_crate?version=5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 73714 ; + schema1:creator , + ; + schema1:dateCreated "2024-03-04T15:36:51Z" ; + schema1:dateModified "2024-05-14T10:18:03Z" ; + schema1:description """# Structural DNA helical parameters from MD trajectory tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the [NAFlex](https://mmb.irbbarcelona.org/NAFlex) server and in particular in its [Nucleic Acids Analysis section](https://mmb.irbbarcelona.org/NAFlex/help.php?id=tutorialAnalysisNA).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **extracting structural and dynamical properties** from a **DNA MD trajectory helical parameters**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Drew Dickerson Dodecamer** sequence -CGCGAATTCGCG- (PDB code [1BNA](https://www.rcsb.org/structure/1BNA)). 
The trajectory used is a 500ns-long MD simulation taken from the [BigNASim](https://mmb.irbbarcelona.org/BIGNASim/) database ([NAFlex_DDD_II](https://mmb.irbbarcelona.org/BIGNASim/getStruc.php?idCode=NAFlex_DDD_II) entry).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2023 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2023 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/195?version=4" ; + schema1:isPartOf ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Structural DNA helical parameters tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_wf_dna_helparms/blob/main/biobb_wf_dna_helparms/notebooks/biobb_dna_helparms_tutorial.ipynb" ; + schema1:version 5 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/978?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/demultiplex" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/demultiplex" ; + schema1:sdDatePublished "2024-08-05 10:24:06 +0100" ; + schema1:url "https://workflowhub.eu/workflows/978/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7786 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:54:47Z" ; + schema1:dateModified "2024-06-11T12:54:47Z" ; + schema1:description "Demultiplexing pipeline for Illumina sequencing data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/978?version=7" ; + schema1:keywords "bases2fastq, bcl2fastq, demultiplexing, elementbiosciences, illumina" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/demultiplex" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/978?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=11" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-08-05 10:23:58 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=11" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 11079 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:50Z" ; + schema1:dateModified "2024-06-11T12:54:50Z" ; + schema1:description "A fully reproducible and state-of-the-art ancient DNA analysis pipeline" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=11" ; + schema1:version 11 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 83280 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:MediaObject ; + schema1:contentSize 6224 ; + schema1:dateModified "2020-09-21T10:34:27" ; + schema1:name "compss.txt" ; + schema1:sdDatePublished "2023-12-15T14:53:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2728 ; + schema1:dateModified "2023-12-15T14:53:16" ; + schema1:name "result.txt" ; + schema1:sdDatePublished "2023-12-15T14:53:21+00:00" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """Workflow for Metagenomics from raw reads to bins.\r +\r +**Steps:**\r +\r +* workflow_quality.cwl:\r +\r + * FastQC (control)\r +\r + * fastp (quality trimming)\r +\r + * bbmap contamination filter\r +\r +* SPAdes (Assembly)\r +\r +* QUAST (Assembly quality report)\r +\r +* BBmap (Read mapping to assembly)\r +\r +* MetaBat2 (binning)\r +\r +* CheckM (bin completeness and contamination)\r +\r +* GTDB-Tk (bin taxonomic classification)\r +\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/64?version=10" ; + schema1:isBasedOn "https://gitlab.com/m-unlock/cwl/-/blob/master/cwl/workflows/workflow_metagenomics_binning.cwl" ; + schema1:license "CC0-1.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Metagenomics Binning Workflow" ; + schema1:sdDatePublished "2024-08-05 10:31:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/64/ro_crate?version=10" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 60154 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13774 ; + schema1:creator , + ; + schema1:dateCreated "2021-06-07T17:34:04Z" ; + schema1:dateModified "2021-06-07T17:35:51Z" ; + schema1:description """Workflow for Metagenomics from raw reads to bins.\r +\r +**Steps:**\r +\r +* workflow_quality.cwl:\r +\r + * FastQC (control)\r +\r + * fastp (quality trimming)\r +\r + * bbmap contamination filter\r +\r +* SPAdes (Assembly)\r +\r +* QUAST (Assembly quality report)\r +\r +* BBmap (Read mapping to assembly)\r +\r +* MetaBat2 (binning)\r +\r +* CheckM (bin completeness and contamination)\r +\r +* GTDB-Tk (bin taxonomic classification)\r +\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/64?version=10" ; + schema1:keywords "Metagenomics, microbial, metagenome, binning" ; + schema1:license "https://spdx.org/licenses/CC0-1.0" ; + schema1:name "Metagenomics Binning Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/64?version=10" ; + schema1:version 10 ; + ns1:input , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=7" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-08-05 10:22:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=7" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7971 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:39Z" ; + schema1:dateModified "2024-06-11T12:54:39Z" ; + schema1:description "Amplicon sequencing analysis workflow using DADA2 and QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=7" ; + schema1:version 7 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description """# SLURM HPC Cromwell implementation of GATK4 germline variant calling pipeline\r +See the [GATK](https://gatk.broadinstitute.org/hc/en-us) website for more information on this toolset \r +## Assumptions\r +- Using hg38 human reference genome build\r +- Running using HPC/SLURM scheduling. This repo was specifically tested on Pawsey Zeus machine, primarily running in the `/scratch` partition. \r +- Starting from short-read Illumina paired-end fastq files as input\r +\r +### Dependencies\r +The following versions have been tested and work, but GATK and Cromwell are regularly updated and so one must consider whether they would like to use newer versions of these tools. \r +- BWA/0.7.15\r +- GATK v4.0.6.0\r +- SAMtools/1.5\r +- picard/2.9\r +- Python/2.7\r +- Cromwell v61\r +\r +## Quick start guide\r +### Installing and preparing environment for GATK4 with Cromwell\r +\r +1. 
Clone repository\r +```\r +git clone https://github.com/SarahBeecroft/slurmCromwellGATK4.git\r +cd slurmCromwellGATK4\r +chmod +x *.sh\r +```\r +\r +2. Install [Miniconda](https://docs.conda.io/en/latest/miniconda.html) if you haven’t already. This is best placed in your `/group` directory to avoid filling your small `/home` directory, or being purged is placed in the `/scratch` directory.\r +\r +3. Create Conda environment using the supplied conda environment file\r +\r +```\r +conda env create --file gatk4_pipeline.yml\r +```\r +\r +3. Download the necessary .jar files\r + - The Cromwell workfow orchestration engine can be downloaded from https://github.com/broadinstitute/cromwell/releases/ \r + - GATK can be downloaded from https://github.com/broadinstitute/gatk/releases. Unzip the file with `unzip` \r + - Picard can be downloaded from https://github.com/broadinstitute/picard/releases/\r +\r +\r +4. If you do not have the resource bundle files already, these need to be downloaded. In future they will be cached on Pawsey systems. The bundle data should be download from the [Google Cloud bucket](https://console.cloud.google.com/storage/browser/genomics-public-data/references/hg38/v0;tab=objects?_ga=2.98248159.1769807612.1582055494-233304531.1578854612&pli=1&prefix=&forceOnObjectsSortingFiltering=false) and not from the FTP site, which is missing various files. Refer to this handy [blog post](https://davetang.org/muse/2020/02/21/using-google-cloud-sdk-to-download-gatk-resource-bundle-files/) on how to download the resource files using Google Cloud SDK. There is a Slurm script (download_bundle.slurm) that can be used to download all hg38 files from the Google Cloud bucket. The files were downloaded in /scratch/pawsey0001/sbeecroft/hg38/v0, which needs to be moved before the data becomes purged after 30 days. 
Note that Homo_sapiens_assembly38.dbsnp138.vcf.gz was from the FTP bundle as this file could not be downloaded using the Conda version of Google Cloud SDK.\r +\r +Note that the `hg38_wgs_scattered_calling_intervals.txt` will need to be to generated using the following:\r +\r +```\r +cd \r +find `pwd` -name "scattered.interval_list" -print | sort > hg38_wgs_scattered_calling_intervals.txt\r +```\r +\r +These files are required for Multisample_Fastq_to_Gvcf_GATK4.\r +\r +```\r +Homo_sapiens_assembly38.dict\r +Homo_sapiens_assembly38.fasta\r +Homo_sapiens_assembly38.fasta.fai\r +Homo_sapiens_assembly38.fasta.64.alt\r +Homo_sapiens_assembly38.fasta.64.amb\r +Homo_sapiens_assembly38.fasta.64.ann\r +Homo_sapiens_assembly38.fasta.64.bwt\r +Homo_sapiens_assembly38.fasta.64.pac\r +Homo_sapiens_assembly38.fasta.64.sa\r +Homo_sapiens_assembly38.fasta.amb\r +Homo_sapiens_assembly38.fasta.ann\r +Homo_sapiens_assembly38.fasta.bwt\r +Homo_sapiens_assembly38.fasta.pac\r +Homo_sapiens_assembly38.fasta.sa\r +Homo_sapiens_assembly38.dbsnp138.vcf.gz (needs to be gunzipped)\r +Homo_sapiens_assembly38.dbsnp138.vcf.idx\r +Mills_and_1000G_gold_standard.indels.hg38.vcf.gz\r +Mills_and_1000G_gold_standard.indels.hg38.vcf.gz.tbi\r +Homo_sapiens_assembly38.dbsnp138.vcf\r +Homo_sapiens_assembly38.dbsnp138.vcf.idx\r +Homo_sapiens_assembly38.known_indels.vcf.gz\r +Homo_sapiens_assembly38.known_indels.vcf.gz.tbi\r +```\r +\r +These files are required for Multisample_jointgt_GATK4.\r +\r +```\r +wgs_evaluation_regions.hg38.interval_list\r +hg38.custom_100Mb.intervals\r +Homo_sapiens_assembly38.dbsnp138.vcf\r +Homo_sapiens_assembly38.dbsnp138.vcf.idx\r +1000G_phase1.snps.high_confidence.hg38.vcf.gz\r +1000G_phase1.snps.high_confidence.hg38.vcf.gz.tbi\r +1000G_omni2.5.hg38.vcf.gz\r +1000G_omni2.5.hg38.vcf.gz.tbi\r +Axiom_Exome_Plus.genotypes.all_populations.poly.hg38.vcf.gz\r +Axiom_Exome_Plus.genotypes.all_populations.poly.hg38.vcf.gz.tbi\r +hapmap_3.3.hg38.vcf.gz\r +hapmap_3.3.hg38.vcf.gz.tbi\r 
+```\r +\r +\r +5. Set up the config files. Files that you need to edit with the correct paths to your data/jar files or other specific configurations are:\r + - `Multisample_Fastq_to_Gvcf_GATK4_inputs_hg38.json`\r + - `Multisample_jointgt_GATK4_inputs_hg38.json`\r + - both json files will need the correct paths to your reference file locations, and the file specifying your inputs i.e. `samples.txt` or `gvcfs.txt`\r + - `samples.txt`\r + - `gvcfs.txt`\r + - These are the sample input files (tab seperated)\r + - The format for samples.txt is sampleID, sampleID_readgroup, path_to_fastq_R1_file, path_to_fastq_R2_file,\r + - The format for gvcfs.txt is sample ID, gvcf, gvcf .tbi index file\r + - Examples are included in this repo\r + - NOTE: Having tabs, not spaces, is vital for parsing the file. Visual studio code tends to introduce spaces, so if you are having issues, check the file with another text editor such as sublime. \r + - `launch_cromwell.sh`\r + - `launch_jointgt.sh`\r + - These are the scripts which launch the pipeline. \r + - `launch_cromwell.sh` launches the fastq to gvcf stage\r + - `launch_jointgt.sh` launched the gvcf joint genotyping to cohort vcf step. This is perfomed when you have run all samples through the fastq to gvcf stage.\r + - Check the paths and parameters make sense for your machine\r + - `slurm.conf`\r + - the main options here relate to the job scheduler. If you are running on Zeus at Pawsey, you should not need to alter these parameters.\r + - `cromwell.options`\r + - `cromwell.options` requires editing to provide the directory where you would like the final workflow outputs to be written\r + - `Multisample_Fastq_to_Gvcf_GATK4.wdl`\r + - `ruddle_fastq_to_gvcf_single_sample_gatk4.wdl`\r + - The paths to your jar files will need to be updated\r + - The path to your conda `activate` binary will need to be updated (e.g. `/group/projectID/userID/miniconda/bin/activate`)\r +\r +6. Launch the job using `sbatch launch_cromwell.sh`. 
When that has completed successfully, you can launch the second stage of the pipeline (joint calling) with `sbatch launch_jointgt.sh`.\r +\r +### Overview of the steps in `Multisample_Fastq_to_Gvcf_GATK4.wdl`\r +This part of the pipeline takes short-read, Illumina paired-end fastq files as the input. The outputs generated are sorted, duplicate marked bam files and their indices, duplicate metric information, and a GVCF file for each sample. The GVCF files are used as input for the second part of the pipeline (joint genotyping).\r +\r +```\r +FastqToUbam\r +GetBwaVersion\r +SamToFastqAndBwaMem\r +MergeBamAlignment\r +SortAndFixTags\r +MarkDuplicates\r +CreateSequenceGroupingTSV\r +BaseRecalibrator\r +GatherBqsrReports\r +ApplyBQSR\r +GatherBamFiles\r +HaplotypeCaller\r +MergeGVCFs\r +```\r +\r +### Overview of the steps in `Multisample_jointgt_GATK4.wdl`\r +This part of the pipeline takes GVCF files (one per sample), and performs joint genotyping across all of the provided samples. This means that old previously generated GVCFs can be joint-called with new GVCFs whenever you need to add new samples. The key output from this is a joint-genotyped, cohort-wide VCF file.\r +\r +```\r +GetNumberOfSamples\r +ImportGVCFs\r +GenotypeGVCFs\r +HardFilterAndMakeSitesOnlyVcf\r +IndelsVariantRecalibrator\r +SNPsVariantRecalibratorCreateModel\r +SNPsVariantRecalibrator\r +GatherTranches\r +ApplyRecalibration\r +GatherVcfs\r +CollectVariantCallingMetrics\r +GatherMetrics\r +DynamicallyCombineIntervals\r +```\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/144?version=1" ; + schema1:isBasedOn "https://github.com/SarahBeecroft/slurmCromwellGATK4" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for GATK4 Fastq to joint-called cohort VCF with Cromwell on SLURM" ; + schema1:sdDatePublished "2024-08-05 10:33:09 +0100" ; + schema1:url "https://workflowhub.eu/workflows/144/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 29099 ; + schema1:dateCreated "2021-08-17T04:42:40Z" ; + schema1:dateModified "2023-01-16T13:51:44Z" ; + schema1:description """# SLURM HPC Cromwell implementation of GATK4 germline variant calling pipeline\r +See the [GATK](https://gatk.broadinstitute.org/hc/en-us) website for more information on this toolset \r +## Assumptions\r +- Using hg38 human reference genome build\r +- Running using HPC/SLURM scheduling. This repo was specifically tested on Pawsey Zeus machine, primarily running in the `/scratch` partition. \r +- Starting from short-read Illumina paired-end fastq files as input\r +\r +### Dependencies\r +The following versions have been tested and work, but GATK and Cromwell are regularly updated and so one must consider whether they would like to use newer versions of these tools. \r +- BWA/0.7.15\r +- GATK v4.0.6.0\r +- SAMtools/1.5\r +- picard/2.9\r +- Python/2.7\r +- Cromwell v61\r +\r +## Quick start guide\r +### Installing and preparing environment for GATK4 with Cromwell\r +\r +1. Clone repository\r +```\r +git clone https://github.com/SarahBeecroft/slurmCromwellGATK4.git\r +cd slurmCromwellGATK4\r +chmod +x *.sh\r +```\r +\r +2. Install [Miniconda](https://docs.conda.io/en/latest/miniconda.html) if you haven’t already. This is best placed in your `/group` directory to avoid filling your small `/home` directory, or being purged is placed in the `/scratch` directory.\r +\r +3. Create Conda environment using the supplied conda environment file\r +\r +```\r +conda env create --file gatk4_pipeline.yml\r +```\r +\r +3. Download the necessary .jar files\r + - The Cromwell workfow orchestration engine can be downloaded from https://github.com/broadinstitute/cromwell/releases/ \r + - GATK can be downloaded from https://github.com/broadinstitute/gatk/releases. 
Unzip the file with `unzip` \r + - Picard can be downloaded from https://github.com/broadinstitute/picard/releases/\r +\r +\r +4. If you do not have the resource bundle files already, these need to be downloaded. In future they will be cached on Pawsey systems. The bundle data should be download from the [Google Cloud bucket](https://console.cloud.google.com/storage/browser/genomics-public-data/references/hg38/v0;tab=objects?_ga=2.98248159.1769807612.1582055494-233304531.1578854612&pli=1&prefix=&forceOnObjectsSortingFiltering=false) and not from the FTP site, which is missing various files. Refer to this handy [blog post](https://davetang.org/muse/2020/02/21/using-google-cloud-sdk-to-download-gatk-resource-bundle-files/) on how to download the resource files using Google Cloud SDK. There is a Slurm script (download_bundle.slurm) that can be used to download all hg38 files from the Google Cloud bucket. The files were downloaded in /scratch/pawsey0001/sbeecroft/hg38/v0, which needs to be moved before the data becomes purged after 30 days. 
Note that Homo_sapiens_assembly38.dbsnp138.vcf.gz was from the FTP bundle as this file could not be downloaded using the Conda version of Google Cloud SDK.\r +\r +Note that the `hg38_wgs_scattered_calling_intervals.txt` will need to be to generated using the following:\r +\r +```\r +cd \r +find `pwd` -name "scattered.interval_list" -print | sort > hg38_wgs_scattered_calling_intervals.txt\r +```\r +\r +These files are required for Multisample_Fastq_to_Gvcf_GATK4.\r +\r +```\r +Homo_sapiens_assembly38.dict\r +Homo_sapiens_assembly38.fasta\r +Homo_sapiens_assembly38.fasta.fai\r +Homo_sapiens_assembly38.fasta.64.alt\r +Homo_sapiens_assembly38.fasta.64.amb\r +Homo_sapiens_assembly38.fasta.64.ann\r +Homo_sapiens_assembly38.fasta.64.bwt\r +Homo_sapiens_assembly38.fasta.64.pac\r +Homo_sapiens_assembly38.fasta.64.sa\r +Homo_sapiens_assembly38.fasta.amb\r +Homo_sapiens_assembly38.fasta.ann\r +Homo_sapiens_assembly38.fasta.bwt\r +Homo_sapiens_assembly38.fasta.pac\r +Homo_sapiens_assembly38.fasta.sa\r +Homo_sapiens_assembly38.dbsnp138.vcf.gz (needs to be gunzipped)\r +Homo_sapiens_assembly38.dbsnp138.vcf.idx\r +Mills_and_1000G_gold_standard.indels.hg38.vcf.gz\r +Mills_and_1000G_gold_standard.indels.hg38.vcf.gz.tbi\r +Homo_sapiens_assembly38.dbsnp138.vcf\r +Homo_sapiens_assembly38.dbsnp138.vcf.idx\r +Homo_sapiens_assembly38.known_indels.vcf.gz\r +Homo_sapiens_assembly38.known_indels.vcf.gz.tbi\r +```\r +\r +These files are required for Multisample_jointgt_GATK4.\r +\r +```\r +wgs_evaluation_regions.hg38.interval_list\r +hg38.custom_100Mb.intervals\r +Homo_sapiens_assembly38.dbsnp138.vcf\r +Homo_sapiens_assembly38.dbsnp138.vcf.idx\r +1000G_phase1.snps.high_confidence.hg38.vcf.gz\r +1000G_phase1.snps.high_confidence.hg38.vcf.gz.tbi\r +1000G_omni2.5.hg38.vcf.gz\r +1000G_omni2.5.hg38.vcf.gz.tbi\r +Axiom_Exome_Plus.genotypes.all_populations.poly.hg38.vcf.gz\r +Axiom_Exome_Plus.genotypes.all_populations.poly.hg38.vcf.gz.tbi\r +hapmap_3.3.hg38.vcf.gz\r +hapmap_3.3.hg38.vcf.gz.tbi\r 
+```\r +\r +\r +5. Set up the config files. Files that you need to edit with the correct paths to your data/jar files or other specific configurations are:\r + - `Multisample_Fastq_to_Gvcf_GATK4_inputs_hg38.json`\r + - `Multisample_jointgt_GATK4_inputs_hg38.json`\r + - both json files will need the correct paths to your reference file locations, and the file specifying your inputs i.e. `samples.txt` or `gvcfs.txt`\r + - `samples.txt`\r + - `gvcfs.txt`\r + - These are the sample input files (tab seperated)\r + - The format for samples.txt is sampleID, sampleID_readgroup, path_to_fastq_R1_file, path_to_fastq_R2_file,\r + - The format for gvcfs.txt is sample ID, gvcf, gvcf .tbi index file\r + - Examples are included in this repo\r + - NOTE: Having tabs, not spaces, is vital for parsing the file. Visual studio code tends to introduce spaces, so if you are having issues, check the file with another text editor such as sublime. \r + - `launch_cromwell.sh`\r + - `launch_jointgt.sh`\r + - These are the scripts which launch the pipeline. \r + - `launch_cromwell.sh` launches the fastq to gvcf stage\r + - `launch_jointgt.sh` launched the gvcf joint genotyping to cohort vcf step. This is perfomed when you have run all samples through the fastq to gvcf stage.\r + - Check the paths and parameters make sense for your machine\r + - `slurm.conf`\r + - the main options here relate to the job scheduler. If you are running on Zeus at Pawsey, you should not need to alter these parameters.\r + - `cromwell.options`\r + - `cromwell.options` requires editing to provide the directory where you would like the final workflow outputs to be written\r + - `Multisample_Fastq_to_Gvcf_GATK4.wdl`\r + - `ruddle_fastq_to_gvcf_single_sample_gatk4.wdl`\r + - The paths to your jar files will need to be updated\r + - The path to your conda `activate` binary will need to be updated (e.g. `/group/projectID/userID/miniconda/bin/activate`)\r +\r +6. Launch the job using `sbatch launch_cromwell.sh`. 
When that has completed successfully, you can launch the second stage of the pipeline (joint calling) with `sbatch launch_jointgt.sh`.\r +\r +### Overview of the steps in `Multisample_Fastq_to_Gvcf_GATK4.wdl`\r +This part of the pipeline takes short-read, Illumina paired-end fastq files as the input. The outputs generated are sorted, duplicate marked bam files and their indices, duplicate metric information, and a GVCF file for each sample. The GVCF files are used as input for the second part of the pipeline (joint genotyping).\r +\r +```\r +FastqToUbam\r +GetBwaVersion\r +SamToFastqAndBwaMem\r +MergeBamAlignment\r +SortAndFixTags\r +MarkDuplicates\r +CreateSequenceGroupingTSV\r +BaseRecalibrator\r +GatherBqsrReports\r +ApplyBQSR\r +GatherBamFiles\r +HaplotypeCaller\r +MergeGVCFs\r +```\r +\r +### Overview of the steps in `Multisample_jointgt_GATK4.wdl`\r +This part of the pipeline takes GVCF files (one per sample), and performs joint genotyping across all of the provided samples. This means that old previously generated GVCFs can be joint-called with new GVCFs whenever you need to add new samples. The key output from this is a joint-genotyped, cohort-wide VCF file.\r +\r +```\r +GetNumberOfSamples\r +ImportGVCFs\r +GenotypeGVCFs\r +HardFilterAndMakeSitesOnlyVcf\r +IndelsVariantRecalibrator\r +SNPsVariantRecalibratorCreateModel\r +SNPsVariantRecalibrator\r +GatherTranches\r +ApplyRecalibration\r +GatherVcfs\r +CollectVariantCallingMetrics\r +GatherMetrics\r +DynamicallyCombineIntervals\r +```\r +""" ; + schema1:isPartOf ; + schema1:keywords "GATK4, Genomics, Alignment, variant_calling, SNPs, INDELs" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "GATK4 Fastq to joint-called cohort VCF with Cromwell on SLURM" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/144?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Pipeline for processing of 10xGenomics single cell rnaseq data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1021?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/scrnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/scrnaseq" ; + schema1:sdDatePublished "2024-08-05 10:23:13 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1021/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4707 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:55:17Z" ; + schema1:dateModified "2024-06-11T12:55:17Z" ; + schema1:description "Pipeline for processing of 10xGenomics single cell rnaseq data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1021?version=13" ; + schema1:keywords "10x-genomics, 10xgenomics, alevin, bustools, Cellranger, kallisto, rna-seq, single-cell, star-solo" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/scrnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1021?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/44?version=16" ; + schema1:isBasedOn "https://github.com/nf-core/rnaseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/rnaseq" ; + schema1:sdDatePublished "2024-08-05 10:24:26 +0100" ; + schema1:url "https://workflowhub.eu/workflows/44/ro_crate?version=16" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 9441 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-11T12:55:12Z" ; + schema1:dateModified "2024-06-11T12:55:12Z" ; + schema1:description "Nextflow RNA-Seq analysis pipeline, part of the nf-core community." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/44?version=28" ; + schema1:keywords "rna, rna-seq" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/rnaseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/44?version=16" ; + schema1:version 16 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Purge Phased assembly of duplications and overlaps. Include purge steps for Primary and Alternate assemblies." ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/321?version=1" ; + schema1:isBasedOn "https://github.com/Delphine-L/iwc/tree/VGP/workflows/VGP-assembly-v2" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for VGP purge assembly with purge_dups pipeline" ; + schema1:sdDatePublished "2024-08-05 10:32:20 +0100" ; + schema1:url "https://workflowhub.eu/workflows/321/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 11049 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 60777 ; + schema1:creator ; + schema1:dateCreated "2022-04-05T22:16:05Z" ; + schema1:dateModified "2023-01-16T13:59:31Z" ; + schema1:description "Purge Phased assembly of duplications and overlaps. 
Include purge steps for Primary and Alternate assemblies." ; + schema1:image ; + schema1:keywords "vgp, Assembly, Galaxy" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "VGP purge assembly with purge_dups pipeline" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/321?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 29874 . + + a schema1:Dataset ; + schema1:datePublished "2022-10-14T09:09:18.098730" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-variation-reporting" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "sars-cov-2-variation-reporting/COVID-19-VARIATION-REPORTING" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.11" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "call and score variants from WGS/WES of rare disease patients" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1014?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/raredisease" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/raredisease" ; + schema1:sdDatePublished "2024-08-05 10:23:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1014/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12171 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:08Z" ; + schema1:dateModified "2024-06-11T12:55:08Z" ; + schema1:description "call and score variants from WGS/WES of rare disease patients" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1014?version=5" ; + schema1:keywords "diagnostics, rare-disease, snv, structural-variants, variant-annotation, variant-calling, wes, WGS" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/raredisease" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1014?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 
2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.132.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_wf_amber_md_setup" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Jupyter Notebook Amber Constant pH MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:30:48 +0100" ; + schema1:url "https://workflowhub.eu/workflows/132/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 53577 ; + schema1:creator , + ; + schema1:dateCreated "2021-06-30T12:23:58Z" ; + schema1:dateModified "2022-09-15T12:33:02Z" ; + schema1:description """# AMBER Protein MD Setup tutorials using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)** wrapping the **Ambertools MD package**.\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache 
License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/132?version=4" ; + schema1:isPartOf , + ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Jupyter Notebook Amber Constant pH MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://raw.githubusercontent.com/bioexcel/biobb_wf_amber_md_setup/8bcda75405183a84476acd7ba733e4cb666ce397/biobb_wf_amber_md_setup/notebooks/mdsetup_ph/biobb_amber_CpHMD_notebook.ipynb" ; + schema1:version 1 . + + a schema1:Dataset ; + schema1:datePublished "2024-06-24T10:11:39.189641" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/quality-and-contamination-control" ; + schema1:license "GPL-3.0-or-later" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "quality-and-contamination-control/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.23" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """Workflow for Creating a large disease network from various datasets and databases for IBM, and applying the active subnetwork identification method MOGAMUN.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/681?version=4" ; + schema1:isBasedOn "https://github.com/jdwijnbergen/IBM_ASI_workflow.git" ; + schema1:license "notspecified" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Inclusion Body Myositis Active Subnetwork Identification Workflow" ; + schema1:sdDatePublished "2024-08-05 10:25:50 +0100" ; + schema1:url "https://workflowhub.eu/workflows/681/ro_crate?version=4" . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 27177 . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 6953 ; + schema1:creator , + ; + schema1:dateCreated "2023-11-27T16:00:47Z" ; + schema1:dateModified "2023-11-27T16:00:47Z" ; + schema1:description """Workflow for Creating a large disease network from various datasets and databases for IBM, and applying the active subnetwork identification method MOGAMUN.\r +""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/681?version=6" ; + schema1:keywords "Bioinformatics, CWL, Genomics, Transcriptomics, Protein-Protein Interaction" ; + schema1:license "https://choosealicense.com/no-permission/" ; + schema1:name "Inclusion Body Myositis Active Subnetwork Identification Workflow" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/681?version=4" ; + schema1:version 4 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description "Given a set of pathways generated by RetroPath2.0, this workflow informs the user as to the theoretically best performing ones based on four criteria: FBA, thermodynamic feasibility, length of the pathway, and reaction rule score." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/22?version=1" ; + schema1:isBasedOn "https://galaxy-synbiocad.org/u/mdulac/w/rpanalysis-3" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Pathway Analysis" ; + schema1:sdDatePublished "2024-08-05 10:33:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/22/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5365 ; + schema1:creator ; + schema1:dateCreated "2020-05-29T10:01:25Z" ; + schema1:dateModified "2023-01-16T13:41:28Z" ; + schema1:description "Given a set of pathways generated by RetroPath2.0, this workflow informs the user as to the theoretically best performing ones based on four criteria: FBA, thermodynamic feasibility, length of the pathway, and reaction rule score." ; + schema1:keywords "Retrosynthesis" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Pathway Analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/22?version=1" ; + schema1:version 1 ; + ns1:input , + , + ; + ns1:output , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Mutations Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.290.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_md_setup_mutations/python" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Python Protein MD Setup tutorial with mutations" ; + schema1:sdDatePublished "2024-08-05 10:30:42 +0100" ; + schema1:url "https://workflowhub.eu/workflows/290/ro_crate?version=2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7412 ; + schema1:creator , + ; + schema1:dateCreated "2022-11-23T08:34:35Z" ; + schema1:dateModified "2023-01-16T13:58:35Z" ; + schema1:description """# Mutations Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/290?version=2" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "Python Protein MD Setup tutorial with mutations" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url 
"https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_md_setup_mutations/python/workflow.py" ; + schema1:version 2 . + + a schema1:Dataset ; + schema1:datePublished "2023-11-13T14:30:43.588577" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Assembly-decontamination-VGP9" ; + schema1:license "BSD-3-Clause" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Assembly-decontamination-VGP9/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.17" . + + a schema1:Dataset ; + schema1:datePublished "2023-01-20T14:55:06.084507" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/cutandrun" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "cutandrun/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.3" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# skim2rrna\r +\r +**skim2rrna** is a snakemake pipeline for the batch assembly, annotation, and phylogenetic analysis of ribosomal genes from low coverage genome skims. The pipeline was designed to work with sequence data from museum collections. However, it should also work with genome skims from recently collected samples.\r +\r +## Contents\r + - [Setup](#setup)\r + - [Example data](#example-data)\r + - [Input](#input)\r + - [Output](#output)\r + - [Filtering contaminants](#filtering-contaminants)\r + - [Assembly and annotation only](#assembly-and-annotation-only)\r + - [Running your own data](#running-your-own-data)\r + - [Getting help](#getting-help)\r + - [Citations](#citations)\r +\r +## Setup\r +\r +The pipeline is written in Snakemake and uses conda and singularity to install the necessary tools.\r +\r +It is *strongly recommended* to install conda using Mambaforge. 
See details here https://snakemake.readthedocs.io/en/stable/getting_started/installation.html\r +\r +Once conda is installed, you can pull the github repo and set up the base conda environment.\r +\r +```\r +# get github repo\r +git clone https://github.com/o-william-white/skim2rrna\r +\r +# change dir\r +cd skim2rrna\r +\r +# setup conda env\r +conda env create -n snakemake -f workflow/envs/conda_env.yaml\r +```\r +\r +
\r +\r +
\r +\r +## Example data\r +\r +Before you run your own data, it is recommended to run the example datasets provided . This will confirm there are no user-specific issues with the setup and it also installs all the dependencies. The example data includes simulated ribosomal data from 25 different butterfly species. \r +\r +To run the example data, use the code below. **Note that you need to change the user email to your own address**. The email is required by the Bio Entrez package to fetch reference sequences. The first time you run the pipeline, it will take some time to install each of the conda environments, so it is a good time to take a tea break :).\r +```\r +conda activate snakemake\r +\r +snakemake \\\r + --cores 4 \\\r + --use-conda \\\r + --use-singularity \\ \r + --config user_email=user@example_email.com\r +```\r +\r +
\r +\r +
\r +\r +## Input\r +\r +Snakemake requires a `config.yaml` and `samples.csv` to define input parameters and sequence data for each sample. \r +\r +For the example data provided, the config file is located here `config/config.yaml` and it looks like this:\r +```\r +# path to sample sheet csv with columns for ID,forward,reverse,taxid,seed,gene\r +samples: config/samples.csv\r +\r +# user email\r +user_email: user@example_email.com\r +\r +# getorganelle reference (go_fetch, custom)\r +go_reference: go_fetch\r +\r +# forward adapter\r +forward_adapter: AGATCGGAAGAGCACACGTCTGAACTCCAGTCA\r +\r +# reverse adapter\r +reverse_adapter: AGATCGGAAGAGCGTCGTGTAGGGAAAGAGTGT\r +\r +# fastp deduplication (True/False)\r +fastp_dedup: True\r +\r +# barrnap kindgom (Bacteria:bac, Archaea:arc, Eukaryota:euk, None:NA)\r +barrnap_kingdom: euk\r +\r +# alignment trimming method to use (gblocks or clipkit)\r +alignment_trim: gblocks\r +\r +# alignment missing data threshold for alignment (0.0 - 1.0)\r +missing_threshold: 0.5\r +\r +# name of outgroup sample (optional)\r +# use "NA" if there is no obvious outgroup\r +# if more than one outgroup use a comma separated list i.e. 
"sampleA,sampleB"\r +outgroup: Eurema_blanda\r +\r +# plot dimensions (cm)\r +plot_height: 20\r +plot_width: 20\r +```\r +\r +The example samples.csv file is located here `config/samples.csv` and it looks like this (note that the seed and gene columns are only required if the custom getorganelle database option is specified in the config file):\r +\r +\r + ID | forward | reverse | taxid | seed | gene \r +----|---------|---------|-------|------|------\r +Adelpha_iphiclus | .test/reads/Adelpha_iphiclus_1.fq.gz | .test/reads/Adelpha_iphiclus_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Anartia_jatrophae_saturata | .test/reads/Anartia_jatrophae_saturata_1.fq.gz | .test/reads/Anartia_jatrophae_saturata_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Araschnia_levana | .test/reads/Araschnia_levana_1.fq.gz | .test/reads/Araschnia_levana_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Auzakia_danava | .test/reads/Auzakia_danava_1.fq.gz | .test/reads/Auzakia_danava_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Baeotus_beotus | .test/reads/Baeotus_beotus_1.fq.gz | .test/reads/Baeotus_beotus_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Catacroptera_cloanthe | .test/reads/Catacroptera_cloanthe_1.fq.gz | .test/reads/Catacroptera_cloanthe_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Chalinga_pratti | .test/reads/Chalinga_pratti_1.fq.gz | .test/reads/Chalinga_pratti_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Diaethria_gabaza_eupepla | .test/reads/Diaethria_gabaza_eupepla_1.fq.gz | .test/reads/Diaethria_gabaza_eupepla_2.fq.gz | 127268 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Doleschallia_melana | .test/reads/Doleschallia_melana_1.fq.gz | .test/reads/Doleschallia_melana_2.fq.gz | 
40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Eurema_blanda | .test/reads/Eurema_blanda_1.fq.gz | .test/reads/Eurema_blanda_2.fq.gz | 42450 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Hypolimnas_usambara | .test/reads/Hypolimnas_usambara_1.fq.gz | .test/reads/Hypolimnas_usambara_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Junonia_villida | .test/reads/Junonia_villida_1.fq.gz | .test/reads/Junonia_villida_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Kallima_paralekta | .test/reads/Kallima_paralekta_1.fq.gz | .test/reads/Kallima_paralekta_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Kallimoides_rumia | .test/reads/Kallimoides_rumia_1.fq.gz | .test/reads/Kallimoides_rumia_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Litinga_cottini | .test/reads/Litinga_cottini_1.fq.gz | .test/reads/Litinga_cottini_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Mallika_jacksoni | .test/reads/Mallika_jacksoni_1.fq.gz | .test/reads/Mallika_jacksoni_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Moduza_procris | .test/reads/Moduza_procris_1.fq.gz | .test/reads/Moduza_procris_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Parasarpa_zayla | .test/reads/Parasarpa_zayla_1.fq.gz | .test/reads/Parasarpa_zayla_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Phaedyma_columella | .test/reads/Phaedyma_columella_1.fq.gz | .test/reads/Phaedyma_columella_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Precis_pelarga | .test/reads/Precis_pelarga_1.fq.gz | .test/reads/Precis_pelarga_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Protogoniomorpha_temora | 
.test/reads/Protogoniomorpha_temora_1.fq.gz | .test/reads/Protogoniomorpha_temora_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Salamis_cacta | .test/reads/Salamis_cacta_1.fq.gz | .test/reads/Salamis_cacta_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Smyrna_blomfildia | .test/reads/Smyrna_blomfildia_1.fq.gz | .test/reads/Smyrna_blomfildia_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Tacola_larymna | .test/reads/Tacola_larymna_1.fq.gz | .test/reads/Tacola_larymna_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Yoma_algina | .test/reads/Yoma_algina_1.fq.gz | .test/reads/Yoma_algina_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +\r +\r +
\r +\r +
\r +\r +## Output\r +\r +All output files are saved to the `results` directory. Below is a table summarising all of the output files generated by the pipeline.\r +\r +| Directory | Description |\r +|-----------------------|---------------------------|\r +| fastqc_raw | Fastqc reports for raw input reads |\r +| fastp | Fastp reports from quality control of raw reads |\r +| fastqc_qc | Fastqc reports for quality controlled reads |\r +| go_fetch | Optional output containing reference databases used by GetOrganelle |\r +| getorganelle | GetOrganelle output with a directory for each sample |\r +| assembled_sequence | Assembled sequences selected from GetOrganelle output and renamed |\r +| seqkit | Seqkit summary of each assembly |\r +| blastn | Blastn output of each assembly |\r +| minimap | Mapping output of quality filtered reads against each assembly |\r +| blobtools | Blobtools assembly summary collating blastn and mapping output |\r +| annotations | Annotation outputs of mitos |\r +| summary | Summary per sample (seqkit stats), contig (GC content, length, coverage, taxonomy and annotations) and annotated gene counts |\r +| annotated_genes | Unaligned fasta files of annotated genes identified across all samples |\r +| mafft | Mafft aligned fasta files of annotated genes identified across all samples |\r +| mafft_filtered | Mafft aligned fasta files after the removal of sequences based on a missing data threshold |\r +| alignment_trim | Ambiguous parts of alignment removed using either gblocks or clipkit |\r +| iqtree | Iqtree phylogenetic analysis of annotated genes |\r +| plot_tree | Plots of phylogenetic trees |\r +\r +
\r +\r +
\r +\r +## Filtering contaminants\r +\r +If you are working with museum collections, it is possible that you may assemble and annotate sequences from contaminant/non-target species. *Contaminant sequences can be identified based on the blast search output or unusual placement in the phylogenetic trees* (see blobtools and plot_tree outputs). \r +\r +A supplementary python script `format_alignments.py` is provided to remove putative contaminants from alignments, and format the alignments for downstream phylogenetic analysis.\r +\r +For example, let's say we wanted to remove all sequences from the sample "Kallima_paralekta" and 5.8S ribosomal sequence, you could run the script as shown below. The script works by identifying and removing sequences that have names with `Kallima_paralekta` or `5_8S` in the sequence names. The filtered alignments are written to a new output directory `filter_alignments_output`.\r +\r +```\r +python workflow/scripts/format_alignments.py \\\r + --input results/mafft_filtered/ \\\r + --cont Kallima_paralekta 5_8S \\\r + --output filter_alignments_output\r +```\r +\r +*Note that the output fasta files have been reformatted so each alignment file is named after the gene and each sequence is named after the sample.* This is useful if you would like to run our related pipeline **gene2phylo** for further phylogenetic analyses.\r +\r +
\r +\r +
\r +\r +## Assembly and annotation only\r +\r +If you are only interested in the assembly of ribosomal sequences and annotation of genes without the phylogenetic analysis, you can stop the pipeline from running the gene alignment and phylogenetic analyses using the `--omit-from` parameter.\r +```\r +snakemake \\\r + --cores 4 \\\r + --use-conda \\\r + --use-singularity \\\r + --config user_email=user@example_email.com \\\r + --omit-from mafft \r +```\r +\r +
\r +\r +
\r +\r +## Running your own data\r +\r +The first thing you need to do is generate your own config.yaml and samples.csv files, using the files provided as a template.\r +\r +GetOrganelle requires reference data in the format of seed and gene reference fasta files. By default the pipeline uses a basic python script called go_fetch.py https://github.com/o-william-white/go_fetch to download and format reference data formatted for GetOrganelle. \r +\r +go_fetch.py works by searching NCBI based on the NCBI taxonomy specified by the taxid column in the samples.csv file. Note that the seed and gene columns in the samples.csv file are only required if you want to provide your own custom GetOrganelle seed and gene reference databases. \r +\r +You can use the default reference data for GetOrganelle, but I would recommend using custom reference databases where possible. See here for details of how to set up your own databases https://github.com/Kinggerm/GetOrganelle/wiki/FAQ#how-to-assemble-a-target-organelle-genome-using-my-own-reference\r +\r +## Getting help\r +\r +If you have any questions, please do get in touch in the issues or by email o.william.white@gmail.com\r +\r +
\r +\r +
\r +\r +## Citations\r +\r +If you use the pipeline, please cite our bioRxiv preprint: https://doi.org/10.1101/2023.08.11.552985\r +\r +Since the pipeline is a wrapper for several other bioinformatic tools we also ask that you cite the tools used by the pipeline:\r + - Fastqc https://github.com/s-andrews/FastQC\r + - Fastp https://doi.org/10.1093/bioinformatics/bty560\r + - GetOrganelle https://doi.org/10.1186/s13059-020-02154-5\r + - Blastn https://doi.org/10.1186/1471-2105-10-421\r + - Minimap2 https://doi.org/10.1093/bioinformatics/bty191\r + - Blobtools https://doi.org/10.12688/f1000research.12232.1\r + - Seqkit https://doi.org/10.1371/journal.pone.0163962\r + - MITOS2 https://doi.org/10.1016/j.ympev.2012.08.023\r + - Gblocks (default) https://doi.org/10.1093/oxfordjournals.molbev.a026334\r + - Clipkit (optional) https://doi.org/10.1371/journal.pbio.3001007\r + - Mafft https://doi.org/10.1093/molbev/mst010\r + - Iqtree https://doi.org/10.1093/molbev/msu300\r + - ete3 https://doi.org/10.1093/molbev/msw046\r + - ggtree https://doi.org/10.1111/2041-210X.12628\r +\r +
\r +\r +
\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/792?version=1" ; + schema1:isBasedOn "https://github.com/o-william-white/skim2rrna.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for skim2rrna" ; + schema1:sdDatePublished "2024-08-05 10:25:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/792/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 2816 ; + schema1:dateCreated "2024-03-12T15:06:39Z" ; + schema1:dateModified "2024-03-12T15:09:08Z" ; + schema1:description """# skim2rrna\r +\r +**skim2rrna** is a snakemake pipeline for the batch assembly, annotation, and phylogenetic analysis of ribosomal genes from low coverage genome skims. The pipeline was designed to work with sequence data from museum collections. However, it should also work with genome skims from recently collected samples.\r +\r +## Contents\r + - [Setup](#setup)\r + - [Example data](#example-data)\r + - [Input](#input)\r + - [Output](#output)\r + - [Filtering contaminants](#filtering-contaminants)\r + - [Assembly and annotation only](#assembly-and-annotation-only)\r + - [Running your own data](#running-your-own-data)\r + - [Getting help](#getting-help)\r + - [Citations](#citations)\r +\r +## Setup\r +\r +The pipeline is written in Snakemake and uses conda and singularity to install the necessary tools.\r +\r +It is *strongly recommended* to install conda using Mambaforge. 
See details here https://snakemake.readthedocs.io/en/stable/getting_started/installation.html\r +\r +Once conda is installed, you can pull the github repo and set up the base conda environment.\r +\r +```\r +# get github repo\r +git clone https://github.com/o-william-white/skim2rrna\r +\r +# change dir\r +cd skim2rrna\r +\r +# setup conda env\r +conda env create -n snakemake -f workflow/envs/conda_env.yaml\r +```\r +\r +
\r +\r +
\r +\r +## Example data\r +\r +Before you run your own data, it is recommended to run the example datasets provided. This will confirm there are no user-specific issues with the setup and it also installs all the dependencies. The example data includes simulated ribosomal data from 25 different butterfly species. \r +\r +To run the example data, use the code below. **Note that you need to change the user email to your own address**. The email is required by the Bio Entrez package to fetch reference sequences. The first time you run the pipeline, it will take some time to install each of the conda environments, so it is a good time to take a tea break :).\r +\r +```\r +conda activate snakemake\r +\r +snakemake \\\r + --cores 4 \\\r + --use-conda \\\r + --use-singularity \\\r + --config user_email=user@example_email.com\r +```\r +\r +
\r +\r +
\r +\r +## Input\r +\r +Snakemake requires a `config.yaml` and `samples.csv` to define input parameters and sequence data for each sample. \r +\r +For the example data provided, the config file is located here `config/config.yaml` and it looks like this:\r +```\r +# path to sample sheet csv with columns for ID,forward,reverse,taxid,seed,gene\r +samples: config/samples.csv\r +\r +# user email\r +user_email: user@example_email.com\r +\r +# getorganelle reference (go_fetch, custom)\r +go_reference: go_fetch\r +\r +# forward adapter\r +forward_adapter: AGATCGGAAGAGCACACGTCTGAACTCCAGTCA\r +\r +# reverse adapter\r +reverse_adapter: AGATCGGAAGAGCGTCGTGTAGGGAAAGAGTGT\r +\r +# fastp deduplication (True/False)\r +fastp_dedup: True\r +\r +# barrnap kingdom (Bacteria:bac, Archaea:arc, Eukaryota:euk, None:NA)\r +barrnap_kingdom: euk\r +\r +# alignment trimming method to use (gblocks or clipkit)\r +alignment_trim: gblocks\r +\r +# alignment missing data threshold for alignment (0.0 - 1.0)\r +missing_threshold: 0.5\r +\r +# name of outgroup sample (optional)\r +# use "NA" if there is no obvious outgroup\r +# if more than one outgroup use a comma separated list i.e. 
"sampleA,sampleB"\r +outgroup: Eurema_blanda\r +\r +# plot dimensions (cm)\r +plot_height: 20\r +plot_width: 20\r +```\r +\r +The example samples.csv file is located here `config/samples.csv` and it looks like this (note that the seed and gene columns are only required if the custom getorganelle database option is specified in the config file):\r +\r +\r + ID | forward | reverse | taxid | seed | gene \r +----|---------|---------|-------|------|------\r +Adelpha_iphiclus | .test/reads/Adelpha_iphiclus_1.fq.gz | .test/reads/Adelpha_iphiclus_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Anartia_jatrophae_saturata | .test/reads/Anartia_jatrophae_saturata_1.fq.gz | .test/reads/Anartia_jatrophae_saturata_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Araschnia_levana | .test/reads/Araschnia_levana_1.fq.gz | .test/reads/Araschnia_levana_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Auzakia_danava | .test/reads/Auzakia_danava_1.fq.gz | .test/reads/Auzakia_danava_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Baeotus_beotus | .test/reads/Baeotus_beotus_1.fq.gz | .test/reads/Baeotus_beotus_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Catacroptera_cloanthe | .test/reads/Catacroptera_cloanthe_1.fq.gz | .test/reads/Catacroptera_cloanthe_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Chalinga_pratti | .test/reads/Chalinga_pratti_1.fq.gz | .test/reads/Chalinga_pratti_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Diaethria_gabaza_eupepla | .test/reads/Diaethria_gabaza_eupepla_1.fq.gz | .test/reads/Diaethria_gabaza_eupepla_2.fq.gz | 127268 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Doleschallia_melana | .test/reads/Doleschallia_melana_1.fq.gz | .test/reads/Doleschallia_melana_2.fq.gz | 
40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Eurema_blanda | .test/reads/Eurema_blanda_1.fq.gz | .test/reads/Eurema_blanda_2.fq.gz | 42450 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Hypolimnas_usambara | .test/reads/Hypolimnas_usambara_1.fq.gz | .test/reads/Hypolimnas_usambara_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Junonia_villida | .test/reads/Junonia_villida_1.fq.gz | .test/reads/Junonia_villida_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Kallima_paralekta | .test/reads/Kallima_paralekta_1.fq.gz | .test/reads/Kallima_paralekta_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Kallimoides_rumia | .test/reads/Kallimoides_rumia_1.fq.gz | .test/reads/Kallimoides_rumia_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Litinga_cottini | .test/reads/Litinga_cottini_1.fq.gz | .test/reads/Litinga_cottini_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Mallika_jacksoni | .test/reads/Mallika_jacksoni_1.fq.gz | .test/reads/Mallika_jacksoni_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Moduza_procris | .test/reads/Moduza_procris_1.fq.gz | .test/reads/Moduza_procris_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Parasarpa_zayla | .test/reads/Parasarpa_zayla_1.fq.gz | .test/reads/Parasarpa_zayla_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Phaedyma_columella | .test/reads/Phaedyma_columella_1.fq.gz | .test/reads/Phaedyma_columella_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Precis_pelarga | .test/reads/Precis_pelarga_1.fq.gz | .test/reads/Precis_pelarga_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Protogoniomorpha_temora | 
.test/reads/Protogoniomorpha_temora_1.fq.gz | .test/reads/Protogoniomorpha_temora_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Salamis_cacta | .test/reads/Salamis_cacta_1.fq.gz | .test/reads/Salamis_cacta_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Smyrna_blomfildia | .test/reads/Smyrna_blomfildia_1.fq.gz | .test/reads/Smyrna_blomfildia_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Tacola_larymna | .test/reads/Tacola_larymna_1.fq.gz | .test/reads/Tacola_larymna_2.fq.gz | 100750 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +Yoma_algina | .test/reads/Yoma_algina_1.fq.gz | .test/reads/Yoma_algina_2.fq.gz | 40040 | .test/seed_mitochondrion.fasta | .test/gene_mitochondrion.fasta\r +\r +\r +
\r +\r +
\r +\r +## Output\r +\r +All output files are saved to the `results` directory. Below is a table summarising all of the output files generated by the pipeline.\r +\r +| Directory | Description |\r +|-----------------------|---------------------------|\r +| fastqc_raw | Fastqc reports for raw input reads |\r +| fastp | Fastp reports from quality control of raw reads |\r +| fastqc_qc | Fastqc reports for quality controlled reads |\r +| go_fetch | Optional output containing reference databases used by GetOrganelle |\r +| getorganelle | GetOrganelle output with a directory for each sample |\r +| assembled_sequence | Assembled sequences selected from GetOrganelle output and renamed |\r +| seqkit | Seqkit summary of each assembly |\r +| blastn | Blastn output of each assembly |\r +| minimap | Mapping output of quality filtered reads against each assembly |\r +| blobtools | Blobtools assembly summary collating blastn and mapping output |\r +| annotations | Annotation outputs of mitos |\r +| summary | Summary per sample (seqkit stats), contig (GC content, length, coverage, taxonomy and annotations) and annotated gene counts |\r +| annotated_genes | Unaligned fasta files of annotated genes identified across all samples |\r +| mafft | Mafft aligned fasta files of annotated genes identified across all samples |\r +| mafft_filtered | Mafft aligned fasta files after the removal of sequences based on a missing data threshold |\r +| alignment_trim | Ambiguous parts of alignment removed using either gblocks or clipkit |\r +| iqtree | Iqtree phylogenetic analysis of annotated genes |\r +| plot_tree | Plots of phylogenetic trees |\r +\r +
\r +\r +
\r +\r +## Filtering contaminants\r +\r +If you are working with museum collections, it is possible that you may assemble and annotate sequences from contaminant/non-target species. *Contaminant sequences can be identified based on the blast search output or unusual placement in the phylogenetic trees* (see blobtools and plot_tree outputs). \r +\r +A supplementary python script `format_alignments.py` is provided to remove putative contaminants from alignments, and format the alignments for downstream phylogenetic analysis.\r +\r +For example, let's say we wanted to remove all sequences from the sample "Kallima_paralekta" and 5.8S ribosomal sequence, you could run the script as shown below. The script works by identifying and removing sequences that have names with `Kallima_paralekta` or `5_8S` in the sequence names. The filtered alignments are written to a new output directory `filter_alignments_output`.\r +\r +```\r +python workflow/scripts/format_alignments.py \\\r + --input results/mafft_filtered/ \\\r + --cont Kallima_paralekta 5_8S \\\r + --output filter_alignments_output\r +```\r +\r +*Note that the output fasta files have been reformatted so each alignment file is named after the gene and each sequence is named after the sample.* This is useful if you would like to run our related pipeline **gene2phylo** for further phylogenetic analyses.\r +\r +
\r +\r +
\r +\r +## Assembly and annotation only\r +\r +If you are only interested in the assembly of ribosomal sequences and annotation of genes without the phylogenetic analysis, you can stop the pipeline from running the gene alignment and phylogenetic analyses using the `--omit-from` parameter.\r +```\r +snakemake \\\r + --cores 4 \\\r + --use-conda \\\r + --use-singularity \\\r + --config user_email=user@example_email.com \\\r + --omit-from mafft \r +```\r +\r +
\r +\r +
\r +\r +## Running your own data\r +\r +The first thing you need to do is generate your own config.yaml and samples.csv files, using the files provided as a template.\r +\r +GetOrganelle requires reference data in the format of seed and gene reference fasta files. By default the pipeline uses a basic python script called go_fetch.py https://github.com/o-william-white/go_fetch to download and format reference data formatted for GetOrganelle. \r +\r +go_fetch.py works by searching NCBI based on the NCBI taxonomy specified by the taxid column in the samples.csv file. Note that the seed and gene columns in the samples.csv file are only required if you want to provide your own custom GetOrganelle seed and gene reference databases. \r +\r +You can use the default reference data for GetOrganelle, but I would recommend using custom reference databases where possible. See here for details of how to set up your own databases https://github.com/Kinggerm/GetOrganelle/wiki/FAQ#how-to-assemble-a-target-organelle-genome-using-my-own-reference\r +\r +## Getting help\r +\r +If you have any questions, please do get in touch in the issues or by email o.william.white@gmail.com\r +\r +
\r +\r +
\r +\r +## Citations\r +\r +If you use the pipeline, please cite our bioRxiv preprint: https://doi.org/10.1101/2023.08.11.552985\r +\r +Since the pipeline is a wrapper for several other bioinformatic tools we also ask that you cite the tools used by the pipeline:\r + - Fastqc https://github.com/s-andrews/FastQC\r + - Fastp https://doi.org/10.1093/bioinformatics/bty560\r + - GetOrganelle https://doi.org/10.1186/s13059-020-02154-5\r + - Blastn https://doi.org/10.1186/1471-2105-10-421\r + - Minimap2 https://doi.org/10.1093/bioinformatics/bty191\r + - Blobtools https://doi.org/10.12688/f1000research.12232.1\r + - Seqkit https://doi.org/10.1371/journal.pone.0163962\r + - MITOS2 https://doi.org/10.1016/j.ympev.2012.08.023\r + - Gblocks (default) https://doi.org/10.1093/oxfordjournals.molbev.a026334\r + - Clipkit (optional) https://doi.org/10.1371/journal.pbio.3001007\r + - Mafft https://doi.org/10.1093/molbev/mst010\r + - Iqtree https://doi.org/10.1093/molbev/msu300\r + - ete3 https://doi.org/10.1093/molbev/msw046\r + - ggtree https://doi.org/10.1111/2041-210X.12628\r +\r +
\r +\r +
\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "skim2rrna" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/792?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "16S rRNA amplicon sequencing analysis workflow using QIIME2" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/964?version=1" ; + schema1:isBasedOn "https://github.com/nf-core/ampliseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/ampliseq" ; + schema1:sdDatePublished "2024-08-05 10:22:56 +0100" ; + schema1:url "https://workflowhub.eu/workflows/964/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3680 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:54:37Z" ; + schema1:dateModified "2024-06-11T12:54:37Z" ; + schema1:description "16S rRNA amplicon sequencing analysis workflow using QIIME2" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/964?version=22" ; + schema1:keywords "16S, 18S, amplicon-sequencing, eDNA, illumina, iontorrent, ITS, Metabarcoding, Metagenomics, microbiome, pacbio, qiime2, rrna, taxonomic-classification, taxonomic-profiling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/ampliseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/964?version=1" ; + schema1:version 1 . 
+ + a schema1:Dataset ; + schema1:datePublished "2024-03-26T19:01:13.539900" ; + schema1:hasPart , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Purge-duplicate-contigs-VGP6/main" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicate-contigs-VGP6/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicate-contigs-VGP6" ; + schema1:version "0.3.6" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.279.2" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_md_setup/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:32:35 +0100" ; + schema1:url "https://workflowhub.eu/workflows/279/ro_crate?version=2" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 144627 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 28526 ; + schema1:creator , + ; + schema1:dateCreated "2023-06-07T10:04:37Z" ; + schema1:dateModified "2023-06-07T10:16:23Z" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/279?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_md_setup/cwl/workflow.cwl" ; + 
schema1:version 2 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/995?version=9" ; + schema1:isBasedOn "https://github.com/nf-core/mag" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mag" ; + schema1:sdDatePublished "2024-08-05 10:23:41 +0100" ; + schema1:url "https://workflowhub.eu/workflows/995/ro_crate?version=9" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 13140 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:59Z" ; + schema1:dateModified "2024-06-11T12:54:59Z" ; + schema1:description "Assembly, binning and annotation of metagenomes" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/995?version=21" ; + schema1:keywords "Annotation, Assembly, binning, long-read-sequencing, metagenomes, Metagenomics, nanopore, nanopore-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mag" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/995?version=9" ; + schema1:version 9 . 
+ + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:about ; + schema1:contentSize 80077 ; + schema1:description "The graph diagram of the workflow, automatically generated by COMPSs runtime" ; + schema1:encodingFormat , + "image/svg+xml" ; + schema1:name "complete_graph.svg" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/1020?version=19" ; + schema1:isBasedOn "https://github.com/nf-core/sarek" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/sarek" ; + schema1:sdDatePublished "2024-08-05 10:23:19 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1020/ro_crate?version=19" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 18582 ; + schema1:creator , + ; + schema1:dateCreated "2024-06-11T12:55:16Z" ; + schema1:dateModified "2024-06-11T12:55:16Z" ; + schema1:description "An open-source analysis pipeline to detect germline or somatic variants from whole genome or targeted sequencing" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/1020?version=23" ; + schema1:keywords "Annotation, cancer, GATK4, Genomics, Germline, pre-processing, Somatic, target-panels, variant-calling, whole-exome-sequencing, whole-genome-sequencing" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/sarek" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1020?version=19" ; + schema1:version 19 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "ChIP-seq peak-calling and differential analysis pipeline." 
; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/971?version=5" ; + schema1:isBasedOn "https://github.com/nf-core/chipseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/chipseq" ; + schema1:sdDatePublished "2024-08-05 10:24:13 +0100" ; + schema1:url "https://workflowhub.eu/workflows/971/ro_crate?version=5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 5685 ; + schema1:creator , + , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:44Z" ; + schema1:dateModified "2024-06-11T12:54:44Z" ; + schema1:description "ChIP-seq peak-calling and differential analysis pipeline." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/971?version=5" ; + schema1:keywords "ChIP, ChIP-seq, chromatin-immunoprecipitation, macs2, peak-calling" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/chipseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/971?version=5" ; + schema1:version 5 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/999?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/mhcquant" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/mhcquant" ; + schema1:sdDatePublished "2024-08-05 10:23:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/999/ro_crate?version=8" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4227 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:55:03Z" ; + schema1:dateModified "2024-06-11T12:55:03Z" ; + schema1:description "Identify and quantify peptides from mass spectrometry raw data" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/999?version=22" ; + schema1:keywords "mass-spectrometry, mhc, openms, peptides, Proteomics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/mhcquant" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/999?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """Flashlite-Trinity contains two workflows that run Trinity on the [University of Queensland's HPC, Flashlite](https://rcc.uq.edu.au/flashlite). Trinity performs de novo transcriptome assembly of RNA-seq data by combining three independent software modules Inchworm, Chrysalis and Butterfly to process RNA-seq reads. The algorithm can detect isoforms, handle paired-end reads, multiple insert sizes and strandedness. Users can run Flashlite-Trinity on single samples, or smaller samples requiring <500Gb of memory or staged Trinity which is recommended for global assemblies with multiple sample inputs. Both implementations make use of Singularity containers to install software. 
\r +\r +Infrastructure\\_deployment\\_metadata: FlashLite (QRISCloud)""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.149.1" ; + schema1:isBasedOn "https://github.com/Sydney-Informatics-Hub/Flashlite-Trinity" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Flashlite-Trinity" ; + schema1:sdDatePublished "2024-08-05 10:33:07 +0100" ; + schema1:url "https://workflowhub.eu/workflows/149/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 3652 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2021-08-18T23:17:42Z" ; + schema1:dateModified "2023-01-16T13:51:45Z" ; + schema1:description """Flashlite-Trinity contains two workflows that run Trinity on the [University of Queensland's HPC, Flashlite](https://rcc.uq.edu.au/flashlite). Trinity performs de novo transcriptome assembly of RNA-seq data by combining three independent software modules Inchworm, Chrysalis and Butterfly to process RNA-seq reads. The algorithm can detect isoforms, handle paired-end reads, multiple insert sizes and strandedness. Users can run Flashlite-Trinity on single samples, or smaller samples requiring <500Gb of memory or staged Trinity which is recommended for global assemblies with multiple sample inputs. Both implementations make use of Singularity containers to install software. 
\r +\r +Infrastructure\\_deployment\\_metadata: FlashLite (QRISCloud)""" ; + schema1:isPartOf ; + schema1:keywords "trinity, Transcriptomics, Assembly, illumina, salmon, scalable, global assemblies, rna-seq, de novo, transcriptome, strandedness, rna, singularity, container, PBS" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "Flashlite-Trinity" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/149?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description """Workflow for Single-cell ATAC-seq standard processing with SnapATAC2.\r +This workflow takes a fragment file as input and performs the standard steps of scATAC-seq analysis: filtering, dimension reduction, embedding and visualization of marker genes with SnapATAC2. Finally, the clusters are manually annotated with the help of marker genes. \r +In an alternative step, the fragment file can also be generated from a BAM file. \r +* newer Version: Updated SnapATAC2 version from 2.5.3 to 2.6.4""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/1077?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Workflow - Standard processing of 10X single cell ATAC-seq data with SnapATAC2" ; + schema1:sdDatePublished "2024-08-05 10:22:11 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1077/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 83654 ; + schema1:creator ; + schema1:dateCreated "2024-07-17T15:56:28Z" ; + schema1:dateModified "2024-08-02T18:08:24Z" ; + schema1:description """Workflow for Single-cell ATAC-seq standard processing with SnapATAC2.\r +This workflow takes a fragment file as input and performs the standard steps of scATAC-seq analysis: filtering, dimension reduction, embedding and visualization of marker genes with SnapATAC2. Finally, the clusters are manually annotated with the help of marker genes. \r +In an alternative step, the fragment file can also be generated from a BAM file. \r +* newer Version: Updated SnapATAC2 version from 2.5.3 to 2.6.4""" ; + schema1:image ; + schema1:keywords "scATAC-seq, epigenetics" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Workflow - Standard processing of 10X single cell ATAC-seq data with SnapATAC2" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1077?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 821119 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "A fully reproducible ancient and modern DNA pipeline in Nextflow and with cloud support." ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/983?version=8" ; + schema1:isBasedOn "https://github.com/nf-core/eager" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/eager" ; + schema1:sdDatePublished "2024-08-05 10:23:57 +0100" ; + schema1:url "https://workflowhub.eu/workflows/983/ro_crate?version=8" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4247 ; + schema1:creator , + , + , + ; + schema1:dateCreated "2024-06-11T12:54:49Z" ; + schema1:dateModified "2024-06-11T12:54:49Z" ; + schema1:description "A fully reproducible ancient and modern DNA pipeline in Nextflow and with cloud support." ; + schema1:isBasedOn "https://workflowhub.eu/workflows/983?version=28" ; + schema1:keywords "adna, ancient-dna-analysis, ancientdna, genome, Metagenomics, pathogen-genomics, population-genetics" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/eager" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/983?version=8" ; + schema1:version 8 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# ont-assembly-snake\r +\r +A Snakemake wrapper for easily creating *de novo* bacterial genome assemblies from Oxford Nanopore (ONT) sequencing data, and optionally Illumina data,\r +using any combination of read filtering, assembly, long and short read polishing, and reference-based polishing.\r +\r +## Included programs\r +\r +| read filtering | assembly | long read polishing | short read polishing | reference-based polishing |\r +| --- | --- | --- | --- | --- |\r +| [Filtlong](https://github.com/rrwick/Filtlong)
[Rasusa](https://github.com/mbhall88/rasusa) | [Flye](https://github.com/fenderglass/Flye)
[raven](https://github.com/lbcb-sci/raven)
[miniasm](https://github.com/lh3/miniasm)
[Unicycler](https://github.com/rrwick/Unicycler)
[Canu](https://github.com/marbl/canu) | [racon](https://github.com/lbcb-sci/racon)
[medaka](https://github.com/nanoporetech/medaka) | [pilon](https://github.com/broadinstitute/pilon/wiki)
[Polypolish](https://github.com/rrwick/Polypolish)
[POLCA](https://github.com/alekseyzimin/masurca#polca) | [Homopolish](https://github.com/ythuang0522/homopolish)
[proovframe](https://github.com/thackl/proovframe) | \r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.787.1" ; + schema1:isBasedOn "https://github.com/pmenzel/ont-assembly-snake" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for ont-assembly-snake" ; + schema1:sdDatePublished "2024-08-05 10:25:15 +0100" ; + schema1:url "https://workflowhub.eu/workflows/787/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 23914 ; + schema1:creator ; + schema1:dateCreated "2024-03-06T10:25:52Z" ; + schema1:dateModified "2024-03-06T10:54:16Z" ; + schema1:description """# ont-assembly-snake\r +\r +A Snakemake wrapper for easily creating *de novo* bacterial genome assemblies from Oxford Nanopore (ONT) sequencing data, and optionally Illumina data,\r +using any combination of read filtering, assembly, long and short read polishing, and reference-based polishing.\r +\r +## Included programs\r +\r +| read filtering | assembly | long read polishing | short read polishing | reference-based polishing |\r +| --- | --- | --- | --- | --- |\r +| [Filtlong](https://github.com/rrwick/Filtlong)
[Rasusa](https://github.com/mbhall88/rasusa) | [Flye](https://github.com/fenderglass/Flye)
[raven](https://github.com/lbcb-sci/raven)
[miniasm](https://github.com/lh3/miniasm)
[Unicycler](https://github.com/rrwick/Unicycler)
[Canu](https://github.com/marbl/canu) | [racon](https://github.com/lbcb-sci/racon)
[medaka](https://github.com/nanoporetech/medaka) | [pilon](https://github.com/broadinstitute/pilon/wiki)
[Polypolish](https://github.com/rrwick/Polypolish)
[POLCA](https://github.com/alekseyzimin/masurca#polca) | [Homopolish](https://github.com/ythuang0522/homopolish)
[proovframe](https://github.com/thackl/proovframe) | \r +\r +""" ; + schema1:keywords "name:ONT, name:ILLUMINA, genome_assembly" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "ont-assembly-snake" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/787?version=1" ; + schema1:version 1 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Generic variation analysis reporting\r +\r +This workflow generates reports from a list of variants generated by [Variant Calling Workflow](https://workflowhub.eu/workflows/353). \r +\r +The workflow accepts a single input:\r +\r +- A collection of VCF files\r +\r +The workflow produces two outputs (format description below):\r +\r +1. A list of variants grouped by Sample\r +2. A list of variants grouped by Variant\r +\r +Here is example of output **by sample**. In this table all varinats in all samples are epxlicitrly listed:\r +\r +| Sample | POS | FILTER | REF | ALT | DP | AF | AFcaller | SB | DP4 | IMPACT | FUNCLASS | EFFECT | GENE | CODON | AA | TRID | min(AF) | max(AF) | countunique(change) | countunique(FUNCLASS) | change |\r +|----------|------|----------|---------|-----|-----|------|-----------|-----|-------|----------|---------------|-------------|--------|-------------| ---|--------|----------|-----------|-------------------------|------------------------------|------------|\r +| ERR3485786 | 11644 | PASS | A | G | 97 | 0.979381 | 0.907216 | 0 | 1,1,49,46 | LOW | SILENT | SYNONYMOUS_CODING | D7L | tgT/tgC | C512 | AKG51361.1 | 0.979381 | 1 | 1 | 1 | A>G |\r +| ERR3485786 | 11904 | PASS | T | C | 102 | 0.990196 | 0.95098 | 0 | 0,0,51,50 | MODERATE | MISSENSE | NON_SYNONYMOUS_CODING | D7L | Act/Gct | T426A | AKG51361.1 | 0.990196 | 1 | 1 | 1 | T>C |\r +\r +> **Note** the two alernative allele frequency fields: "AFcaller" ans "AF". 
LoFreq reports AF values listed in "AFcaller". They incorrect due to the known LoFreq [bug](https://github.com/CSB5/lofreq/issues/80). To correct for this we are recomputing AF values from DP4 and DP fields as follows: `AF == (DP4[2] + DP4[3]) / DP.`\r +\r +Here is an example of output **by variant**. In this table data is aggregated by variant across all samples in which this variant is present:\r +\r +| POS | REF | ALT | IMPACT | FUNCLASS | EFFECT | GENE | CODON | AA | TRID | countunique(Sample) | min(AF) | max(AF) | SAMPLES(above-thresholds) | SAMPLES(all) | AFs(all) | change |\r +|-----|-------|-----|-----------|----------------|------------|----------|-----------|------|--------|------------------------|----------|-----------|------------------------------------|------------------|----------|---------|\r +| 11644 | A | G | LOW | SILENT | SYNONYMOUS_CODING | D7L | tgT/tgC | C512 | AKG51361.1 | 11 | 0.979381 | 1 | ERR3485786,ERR3485787... | ERR3485786,ERR3485787,ERR3485789 ... | 0.979381,1.0... | A>G |\r +| 11904 | T | C | MODERATE | MISSENSE | NON_SYNONYMOUS_CODING | D7L | Act/Gct | T426A | AKG51361.1 | 12 | 0.990196 | 1 | ERR3485786,ERR3485787... | ERR3485786,ERR3485787,ERR3485789... | 0.990196,1.0,1.0... | T>C | \r +\r +The workflow can be accessed at [usegalaxy.org](https://usegalaxy.org/u/aun1/w/genetic-variation-analysis-reporting)\r +\r +The general idea of the workflow is:\r +\r +![](https://i.imgur.com/k2cIZK5.png)\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/354?version=1" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Generic variation analysis reporting" ; + schema1:sdDatePublished "2024-08-05 10:32:05 +0100" ; + schema1:url "https://workflowhub.eu/workflows/354/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 78824 ; + schema1:creator ; + schema1:dateCreated "2022-06-01T15:36:06Z" ; + schema1:dateModified "2023-01-16T14:00:24Z" ; + schema1:description """# Generic variation analysis reporting\r +\r +This workflow generates reports from a list of variants generated by [Variant Calling Workflow](https://workflowhub.eu/workflows/353). \r +\r +The workflow accepts a single input:\r +\r +- A collection of VCF files\r +\r +The workflow produces two outputs (format description below):\r +\r +1. A list of variants grouped by Sample\r +2. A list of variants grouped by Variant\r +\r +Here is example of output **by sample**. In this table all varinats in all samples are epxlicitrly listed:\r +\r +| Sample | POS | FILTER | REF | ALT | DP | AF | AFcaller | SB | DP4 | IMPACT | FUNCLASS | EFFECT | GENE | CODON | AA | TRID | min(AF) | max(AF) | countunique(change) | countunique(FUNCLASS) | change |\r +|----------|------|----------|---------|-----|-----|------|-----------|-----|-------|----------|---------------|-------------|--------|-------------| ---|--------|----------|-----------|-------------------------|------------------------------|------------|\r +| ERR3485786 | 11644 | PASS | A | G | 97 | 0.979381 | 0.907216 | 0 | 1,1,49,46 | LOW | SILENT | SYNONYMOUS_CODING | D7L | tgT/tgC | C512 | AKG51361.1 | 0.979381 | 1 | 1 | 1 | A>G |\r +| ERR3485786 | 11904 | PASS | T | C | 102 | 0.990196 | 0.95098 | 0 | 0,0,51,50 | MODERATE | MISSENSE | NON_SYNONYMOUS_CODING | D7L | Act/Gct | T426A | AKG51361.1 | 0.990196 | 1 | 1 | 1 | T>C |\r +\r +> **Note** the two alernative allele frequency fields: "AFcaller" ans "AF". LoFreq reports AF values listed in "AFcaller". They incorrect due to the known LoFreq [bug](https://github.com/CSB5/lofreq/issues/80). 
To correct for this we are recomputing AF values from DP4 and DP fields as follows: `AF == (DP4[2] + DP4[3]) / DP.`\r +\r +Here is an example of output **by variant**. In this table data is aggregated by variant across all samples in which this variant is present:\r +\r +| POS | REF | ALT | IMPACT | FUNCLASS | EFFECT | GENE | CODON | AA | TRID | countunique(Sample) | min(AF) | max(AF) | SAMPLES(above-thresholds) | SAMPLES(all) | AFs(all) | change |\r +|-----|-------|-----|-----------|----------------|------------|----------|-----------|------|--------|------------------------|----------|-----------|------------------------------------|------------------|----------|---------|\r +| 11644 | A | G | LOW | SILENT | SYNONYMOUS_CODING | D7L | tgT/tgC | C512 | AKG51361.1 | 11 | 0.979381 | 1 | ERR3485786,ERR3485787... | ERR3485786,ERR3485787,ERR3485789 ... | 0.979381,1.0... | A>G |\r +| 11904 | T | C | MODERATE | MISSENSE | NON_SYNONYMOUS_CODING | D7L | Act/Gct | T426A | AKG51361.1 | 12 | 0.990196 | 1 | ERR3485786,ERR3485787... | ERR3485786,ERR3485787,ERR3485789... | 0.990196,1.0,1.0... | T>C | \r +\r +The workflow can be accessed at [usegalaxy.org](https://usegalaxy.org/u/aun1/w/genetic-variation-analysis-reporting)\r +\r +The general idea of the workflow is:\r +\r +![](https://i.imgur.com/k2cIZK5.png)\r +\r +""" ; + schema1:keywords "mpvx, generic" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Generic variation analysis reporting" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/354?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Work-in-progress" ; + schema1:description "Create Meryl Database used for the estimation of assembly parameters and quality control with Merqury. Part of the VGP pipeline." 
; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/309?version=1" ; + schema1:isBasedOn "https://github.com/Delphine-L/iwc/tree/VGP/workflows/VGP-assembly-v2" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for VGP genome profile analysis" ; + schema1:sdDatePublished "2024-08-05 10:32:23 +0100" ; + schema1:url "https://workflowhub.eu/workflows/309/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 2887 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 17406 ; + schema1:creator ; + schema1:dateCreated "2022-04-05T11:22:34Z" ; + schema1:dateModified "2023-01-16T13:59:21Z" ; + schema1:description "Create Meryl Database used for the estimation of assembly parameters and quality control with Merqury. Part of the VGP pipeline." ; + schema1:keywords "vgp, Galaxy, Assembly" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "VGP genome profile analysis" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/309?version=1" ; + schema1:version 1 ; + ns1:input . + + a schema1:Dataset ; + schema1:datePublished "2023-08-31T07:11:57.476431" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "chipseq-pe/main" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.75.9" . 
+ + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:description "Genes and transcripts annotation with Isoseq using uLTRA and TAMA" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/993?version=2" ; + schema1:isBasedOn "https://github.com/nf-core/isoseq" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for nf-core/isoseq" ; + schema1:sdDatePublished "2024-08-05 10:23:45 +0100" ; + schema1:url "https://workflowhub.eu/workflows/993/ro_crate?version=2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 7742 ; + schema1:creator ; + schema1:dateCreated "2024-06-11T12:54:57Z" ; + schema1:dateModified "2024-06-11T12:54:57Z" ; + schema1:description "Genes and transcripts annotation with Isoseq using uLTRA and TAMA" ; + schema1:isBasedOn "https://workflowhub.eu/workflows/993?version=6" ; + schema1:keywords "isoseq, isoseq-3, rna, tama, ultra" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "nf-core/isoseq" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/993?version=2" ; + schema1:version 2 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:description """# Scaffolding using HiC data with YAHS\r +\r +This workflow has been created from a Vertebrate Genomes Project (VGP) scaffolding workflow. \r +\r +* For more information about the VGP project see https://galaxyproject.org/projects/vgp/. \r +* The scaffolding workflow is at https://dockstore.org/workflows/github.com/iwc-workflows/Scaffolding-HiC-VGP8/main:main?tab=info\r +* Please see that link for the workflow diagram. 
\r +\r +Some minor changes have been made to better fit with TSI project data: \r +\r +* optional inputs of SAK info and sequence graph have been removed\r +* the required input format for the genome is changed from gfa to fasta\r +* the estimated genome size now requires user input rather than being extracted from output of a previous workflow. \r +\r +Inputs: \r +\r +* assembly.fasta [note - scaffolding is done only one haplotype at a time. eg hap1 or primary]\r +* Concatenated HiC forward reads in fastqsanger.gz\r +* Concatenated HiC reverse reads in fastqsanger.gz\r +* Restriction enzyme sequence\r +* Estimated genome size (enter as integer)\r +* Lineage for busco \r +\r +Outputs: the main outputs are: \r +\r +* scaffolded_assmbly.fasta\r +* comparison of pre- post- scaffolding contact maps\r +\r +\r +\r +\r +""" ; + schema1:hasPart ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.1054.1" ; + schema1:license "GPL-3.0+" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for TSI-Scaffolding-with-HiC (based on VGP-HiC-scaffolding)" ; + schema1:sdDatePublished "2024-08-05 10:22:38 +0100" ; + schema1:url "https://workflowhub.eu/workflows/1054/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 83089 ; + schema1:creator , + , + ; + schema1:dateCreated "2024-06-21T00:48:07Z" ; + schema1:dateModified "2024-06-21T01:10:22Z" ; + schema1:description """# Scaffolding using HiC data with YAHS\r +\r +This workflow has been created from a Vertebrate Genomes Project (VGP) scaffolding workflow. \r +\r +* For more information about the VGP project see https://galaxyproject.org/projects/vgp/. \r +* The scaffolding workflow is at https://dockstore.org/workflows/github.com/iwc-workflows/Scaffolding-HiC-VGP8/main:main?tab=info\r +* Please see that link for the workflow diagram. 
\r +\r +Some minor changes have been made to better fit with TSI project data: \r +\r +* optional inputs of SAK info and sequence graph have been removed\r +* the required input format for the genome is changed from gfa to fasta\r +* the estimated genome size now requires user input rather than being extracted from output of a previous workflow. \r +\r +Inputs: \r +\r +* assembly.fasta [note - scaffolding is done only one haplotype at a time. eg hap1 or primary]\r +* Concatenated HiC forward reads in fastqsanger.gz\r +* Concatenated HiC reverse reads in fastqsanger.gz\r +* Restriction enzyme sequence\r +* Estimated genome size (enter as integer)\r +* Lineage for busco \r +\r +Outputs: the main outputs are: \r +\r +* scaffolded_assmbly.fasta\r +* comparison of pre- post- scaffolding contact maps\r +\r +\r +\r +\r +""" ; + schema1:keywords "TSI" ; + schema1:license "https://spdx.org/licenses/GPL-3.0+" ; + schema1:name "TSI-Scaffolding-with-HiC (based on VGP-HiC-scaffolding)" ; + schema1:producer , + ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/1054?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:description "Performs Long Read assembly using PacBio data and Hifiasm. Part of VGP assembly pipeline. This workflow generate a phased assembly." ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/325?version=1" ; + schema1:isBasedOn "https://github.com/Delphine-L/iwc/tree/VGP/workflows/VGP-assembly-v2" ; + schema1:license "GPL-3.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for VGP HiFi phased assembly with hifiasm and HiC data" ; + schema1:sdDatePublished "2024-08-05 10:32:16 +0100" ; + schema1:url "https://workflowhub.eu/workflows/325/ro_crate?version=1" . 
+ + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 11731 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 50590 ; + schema1:creator ; + schema1:dateCreated "2022-04-06T01:23:56Z" ; + schema1:dateModified "2023-01-16T13:59:39Z" ; + schema1:description "Performs Long Read assembly using PacBio data and Hifiasm. Part of VGP assembly pipeline. This workflow generate a phased assembly." ; + schema1:keywords "vgp, Assembly, Galaxy" ; + schema1:license "https://spdx.org/licenses/GPL-3.0" ; + schema1:name "VGP HiFi phased assembly with hifiasm and HiC data" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/325?version=1" ; + schema1:version 1 ; + ns1:input , + , + , + . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. 
The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.48546/workflowhub.workflow.279.1" ; + schema1:isBasedOn "https://github.com/bioexcel/biobb_workflows/tree/master/biobb_wf_md_setup/cwl" ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for CWL Protein MD Setup tutorial" ; + schema1:sdDatePublished "2024-08-05 10:32:34 +0100" ; + schema1:url "https://workflowhub.eu/workflows/279/ro_crate?version=1" . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 154478 . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 24688 ; + schema1:creator , + ; + schema1:dateCreated "2022-03-17T09:18:45Z" ; + schema1:dateModified "2023-01-16T13:58:29Z" ; + schema1:description """# Protein MD Setup tutorial using BioExcel Building Blocks (biobb)\r +\r +**Based on the official [GROMACS tutorial](http://www.mdtutorials.com/gmx/lysozyme/index.html).**\r +\r +***\r +\r +This tutorial aims to illustrate the process of **setting up a simulation** system containing a **protein**, step by step, using the **BioExcel Building Blocks library (biobb)**. The particular example used is the **Lysozyme** protein (PDB code 1AKI).\r +\r +***\r +\r +## Copyright & Licensing\r +This software has been developed in the [MMB group](http://mmb.irbbarcelona.org) at the [BSC](http://www.bsc.es/) & [IRB](https://www.irbbarcelona.org/) for the [European BioExcel](http://bioexcel.eu/), funded by the European Commission (EU H2020 [823830](http://cordis.europa.eu/projects/823830), EU H2020 [675728](http://cordis.europa.eu/projects/675728)).\r +\r +* (c) 2015-2022 [Barcelona Supercomputing Center](https://www.bsc.es/)\r +* (c) 2015-2022 [Institute for Research in Biomedicine](https://www.irbbarcelona.org/)\r +\r +Licensed under the\r +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0), see the file LICENSE for details.\r +\r +![](https://bioexcel.eu/wp-content/uploads/2019/04/Bioexcell_logo_1080px_transp.png "Bioexcel")""" ; + schema1:image ; + schema1:isBasedOn "https://workflowhub.eu/workflows/279?version=1" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/Apache-2.0" ; + schema1:name "CWL Protein MD Setup tutorial" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://github.com/bioexcel/biobb_workflows/blob/master/biobb_wf_md_setup/cwl/workflow.cwl" ; + 
schema1:version 1 ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:Dataset ; + schema1:datePublished "2021-07-26T10:22:23.305126" ; + schema1:hasPart , + , + ; + schema1:isBasedOn "https://github.com/iwc-workflows/sars-cov-2-consensus-from-variation" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:sdDatePublished "2021-10-27 15:45:11 +0100" ; + schema1:softwareVersion "v0.2.1" . + + a schema1:MediaObject, + ns2:TestDefinition ; + dct:conformsTo ns2:PlanemoEngine ; + ns2:engineVersion ">=0.74.6" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + , + , + , + , + , + , + ; + schema1:description "This workflow combines SDF files from all fragments into a single dataset and filters to include only the lowest (best) scoring pose for each compound. This file of optimal poses for all ligands is used to compare to a database of Enamine and Chemspace compounds to select the best scoring 500 matches. More info can be found at https://covid19.galaxyproject.org/cheminformatics/" ; + schema1:hasPart , + , + ; + schema1:identifier "https://workflowhub.eu/workflows/17?version=1" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Cheminformatics - Filter results" ; + schema1:sdDatePublished "2024-08-05 10:33:39 +0100" ; + schema1:url "https://workflowhub.eu/workflows/17/ro_crate?version=1" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 5540 ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 21903 ; + schema1:creator , + , + , + , + , + , + , + ; + schema1:dateCreated "2020-04-10T14:56:44Z" ; + schema1:dateModified "2023-01-16T13:41:12Z" ; + schema1:description "This workflow combines SDF files from all fragments into a single dataset and filters to include only the lowest (best) scoring pose for each compound. This file of optimal poses for all ligands is used to compare to a database of Enamine and Chemspace compounds to select the best scoring 500 matches. More info can be found at https://covid19.galaxyproject.org/cheminformatics/" ; + schema1:image ; + schema1:keywords "covid-19" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "Cheminformatics - Filter results" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:subjectOf ; + schema1:url "https://workflowhub.eu/workflows/17?version=1" ; + schema1:version 1 ; + ns1:input , + , + . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 12188 . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author ; + schema1:creativeWorkStatus "Stable" ; + schema1:description """# pod5_by_pore\r +\r +A Snakemake workflow to take the POD5 files produced by an Oxford Nanopore sequencing run and\r +re-batch them by pore (ie. 
by channel).\r +\r +This is useful if you want to run duplex basecalling because you can meaningfully run\r +"dorado duplex" on a single (or a subset of) the POD5 files.\r +\r +## Know issues\r +\r +It is assumed all POD5 input files are from the same sequencing run, but this is not checked.\r +""" ; + schema1:hasPart , + ; + schema1:identifier "https://workflowhub.eu/workflows/889?version=1" ; + schema1:isBasedOn "https://github.com/tbooth/pod5_by_pore.git" ; + schema1:license "MIT" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for POD5 by pore" ; + schema1:sdDatePublished "2024-08-05 10:24:36 +0100" ; + schema1:url "https://workflowhub.eu/workflows/889/ro_crate?version=1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 4613 ; + schema1:creator ; + schema1:dateCreated "2024-05-24T14:28:49Z" ; + schema1:dateModified "2024-05-24T14:32:41Z" ; + schema1:description """# pod5_by_pore\r +\r +A Snakemake workflow to take the POD5 files produced by an Oxford Nanopore sequencing run and\r +re-batch them by pore (ie. by channel).\r +\r +This is useful if you want to run duplex basecalling because you can meaningfully run\r +"dorado duplex" on a single (or a subset of) the POD5 files.\r +\r +## Know issues\r +\r +It is assumed all POD5 input files are from the same sequencing run, but this is not checked.\r +""" ; + schema1:image ; + schema1:keywords "nanopore, pod5" ; + schema1:license "https://spdx.org/licenses/MIT" ; + schema1:name "POD5 by pore" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/889?version=1" ; + schema1:version 1 . + + a , + schema1:ImageObject, + schema1:MediaObject ; + schema1:contentSize 1573 . 
+ + a schema1:Dataset ; + schema1:datePublished "2022-09-15T00:06:28+00:00" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:isBasedOn "https://github.com/wombat-p/WOMBAT-Pipelines/tree/dev/" ; + schema1:mainEntity . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:codeRepository "https://github.com/wombat-p/WOMBAT-Pipelines/tree/dev/" ; + schema1:name "main" ; + schema1:programmingLanguage ; + schema1:url "https://raw.githubusercontent.com/wombat-p/WOMBAT-Pipelines/44442336ce5110fb9b68d305517e0795ec61d434/main.nf" . + + a schema1:Dataset ; + dct:conformsTo , + ; + schema1:author , + ; + schema1:description """\r +\r +Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, related to the Galaxy training tutorial "Antarctic sea ecoregionalization" .\r +\r +This workflow allows to analyze marine benthic biodiversity data to compute ecoregions regarding environmental data.\r +""" ; + schema1:hasPart ; + schema1:identifier "https://workflowhub.eu/workflows/658?version=1" ; + schema1:isBasedOn "https://ecology.usegalaxy.eu/u/ylebras/w/workflow-constructed-from-history-test-ecoregionalization" ; + schema1:license "CC-BY-4.0" ; + schema1:mainEntity ; + schema1:name "Research Object Crate for Ecoregionalization on Antarctic sea" ; + schema1:sdDatePublished "2024-08-05 10:27:17 +0100" ; + schema1:url "https://workflowhub.eu/workflows/658/ro_crate?version=1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + dct:conformsTo "https://bioschemas.org/profiles/ComputationalWorkflow/1.0-RELEASE/" ; + schema1:contentSize 12973 ; + schema1:creator , + , + ; + schema1:dateCreated "2023-11-09T20:55:03Z" ; + schema1:dateModified "2023-11-09T21:01:01Z" ; + schema1:description """\r +\r +Galaxy Workflow created on Galaxy-E european instance, ecology.usegalaxy.eu, related to the Galaxy training tutorial "Antarctic sea ecoregionalization" .\r +\r +This workflow allows to analyze marine benthic biodiversity data to compute ecoregions regarding environmental data.\r +""" ; + schema1:keywords "" ; + schema1:license "https://spdx.org/licenses/CC-BY-4.0" ; + schema1:name "Ecoregionalization on Antarctic sea" ; + schema1:producer ; + schema1:programmingLanguage ; + schema1:sdPublisher ; + schema1:url "https://workflowhub.eu/workflows/658?version=1" ; + schema1:version 1 ; + ns1:input , + . + + a schema1:MediaObject ; + schema1:contentSize 3954107 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "0.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954344 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "1.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954590 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "10.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953773 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "11.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953870 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "12.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3954234 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "13.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954008 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "14.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954432 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "15.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954068 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "16.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954256 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "17.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953419 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "18.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954109 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "19.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953610 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "2.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954118 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "20.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954451 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "21.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3954298 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "22.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953962 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "23.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954459 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "24.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953457 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "25.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953859 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "26.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953543 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "27.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953815 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "28.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954113 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "29.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954237 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "3.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953766 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "30.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3954029 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "31.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954023 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "32.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953996 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "33.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953653 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "34.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953861 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "35.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954184 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "36.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953467 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "37.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953928 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "38.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953695 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "39.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954368 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "4.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3953917 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "40.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953894 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "41.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954012 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "42.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953858 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "43.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953932 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "44.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953856 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "45.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953828 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "46.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954216 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "47.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954061 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "48.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954117 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "49.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3954654 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "5.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953977 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "50.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954034 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "51.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953859 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "52.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954004 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "53.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953829 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "54.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953981 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "55.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953660 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "56.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953761 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "57.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953900 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "58.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3954381 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "59.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953701 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "6.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953827 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "60.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954330 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "61.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953584 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "62.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954005 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "63.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954094 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "64.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953866 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "65.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953953 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "66.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953762 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "67.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3953640 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "68.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953847 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "69.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953880 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "7.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953717 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "70.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953606 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "71.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953953 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "72.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953858 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "73.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954285 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "74.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954304 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "75.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953619 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "76.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3953761 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "77.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953469 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "78.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953864 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "79.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953434 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "8.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954254 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "80.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953675 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "81.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953777 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "82.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953973 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "83.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954439 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "84.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954185 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "85.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3954354 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "86.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953921 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "87.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954006 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "88.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954238 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "89.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954268 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "9.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953599 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "90.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954093 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "91.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953864 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "92.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3954424 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "93.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953990 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "94.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 3953414 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "95.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953431 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "96.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953921 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "97.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953795 ; + schema1:dateModified "2024-06-06T10:12:48" ; + schema1:name "98.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3953786 ; + schema1:dateModified "2024-06-06T10:12:49" ; + schema1:name "99.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 24 ; + schema1:dateModified "2024-06-06T10:12:43" ; + schema1:name "Result.txt" ; + schema1:sdDatePublished "2024-06-06T10:12:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3 ; + schema1:dateModified "2024-04-26T13:10:25+00:00" ; + schema1:name "energy.selection" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3 ; + schema1:dateModified "2024-04-26T13:10:25+00:00" ; + schema1:name "genion.group" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1044 ; + schema1:dateModified "2024-04-26T13:10:25+00:00" ; + schema1:name "ions.mdp" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 971 ; + schema1:dateModified "2024-04-26T13:10:25+00:00" ; + schema1:name "minim.mdp" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 109917 ; + schema1:dateModified "2024-04-26T13:10:27+00:00" ; + schema1:name "1aki.pdb" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2998215 ; + schema1:dateModified "2024-04-26T13:10:27+00:00" ; + schema1:name "1u3m.pdb" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2918025 ; + schema1:dateModified "2024-04-26T13:10:27+00:00" ; + schema1:name "1xyw.pdb" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88246 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1aki.gro" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577599 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1aki.top" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1280044 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1aki_em.tpr" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1aki_em_energy.edr" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1279304 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1aki_ions.tpr" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88246 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1aki_newbox.gro" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 7939 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1aki_potential.png" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2166 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1aki_potential.xvg" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1525186 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1aki_solv.gro" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1524475 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1aki_solv_ions.gro" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 82046 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1u3m.gro" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 537933 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1u3m.top" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1416272 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1u3m_em.tpr" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1u3m_em_energy.edr" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1415196 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1u3m_ions.tpr" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 82046 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1u3m_newbox.gro" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 7464 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1u3m_potential.png" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1u3m_potential.xvg" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1837046 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1u3m_solv.gro" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1836965 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1u3m_solv_ions.gro" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 80112 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1xyw.gro" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 524009 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1xyw.top" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1408872 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1xyw_em.tpr" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1xyw_em_energy.edr" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1407844 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1xyw_ions.tpr" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 80112 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1xyw_newbox.gro" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8525 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1xyw_potential.png" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2144 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1xyw_potential.xvg" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1845237 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1xyw_solv.gro" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1845066 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "1xyw_solv_ions.gro" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4308 ; + schema1:dateModified "2024-07-12T10:15:01+00:00" ; + schema1:name "POTENTIAL_RESULTS.png" ; + schema1:sdDatePublished "2024-07-12T10:15:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "A.0.0" ; + schema1:sdDatePublished "2024-04-30T13:04:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "A.0.1" ; + schema1:sdDatePublished "2024-04-30T13:04:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "A.1.0" ; + schema1:sdDatePublished "2024-04-30T13:04:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "A.1.1" ; + schema1:sdDatePublished "2024-04-30T13:04:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "B.0.0" ; + schema1:sdDatePublished "2024-04-30T13:04:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "B.0.1" ; + schema1:sdDatePublished "2024-04-30T13:04:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "B.1.0" ; + schema1:sdDatePublished "2024-04-30T13:04:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256 ; + schema1:dateModified "2024-04-30T10:11:40+00:00" ; + schema1:name "B.1.1" ; + schema1:sdDatePublished "2024-04-30T13:04:29+00:00" . + + a schema1:Dataset ; + schema1:dateModified "2024-03-29T14:48:57+00:00" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:name "Files1" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:14+00:00" ; + schema1:name "results_0_1.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:08+00:00" ; + schema1:name "results_0_10.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:09+00:00" ; + schema1:name "results_0_11.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:09+00:00" ; + schema1:name "results_0_12.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:10+00:00" ; + schema1:name "results_0_13.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:11+00:00" ; + schema1:name "results_0_14.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:12+00:00" ; + schema1:name "results_0_15.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:12+00:00" ; + schema1:name "results_0_16.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:12+00:00" ; + schema1:name "results_0_17.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:13+00:00" ; + schema1:name "results_0_18.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:13+00:00" ; + schema1:name "results_0_19.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:18+00:00" ; + schema1:name "results_0_2.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:14+00:00" ; + schema1:name "results_0_20.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:15+00:00" ; + schema1:name "results_0_21.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:15+00:00" ; + schema1:name "results_0_22.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:16+00:00" ; + schema1:name "results_0_23.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:16+00:00" ; + schema1:name "results_0_24.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:17+00:00" ; + schema1:name "results_0_25.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:17+00:00" ; + schema1:name "results_0_26.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:19+00:00" ; + schema1:name "results_0_3.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:19+00:00" ; + schema1:name "results_0_4.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:20+00:00" ; + schema1:name "results_0_5.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:20+00:00" ; + schema1:name "results_0_6.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:21+00:00" ; + schema1:name "results_0_7.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:21+00:00" ; + schema1:name "results_0_8.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:22+00:00" ; + schema1:name "results_0_9.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:28+00:00" ; + schema1:name "results_1_1.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:23+00:00" ; + schema1:name "results_1_10.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:24+00:00" ; + schema1:name "results_1_11.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:24+00:00" ; + schema1:name "results_1_12.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:25+00:00" ; + schema1:name "results_1_13.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:25+00:00" ; + schema1:name "results_1_14.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:26+00:00" ; + schema1:name "results_1_15.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:26+00:00" ; + schema1:name "results_1_16.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:27+00:00" ; + schema1:name "results_1_17.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:27+00:00" ; + schema1:name "results_1_18.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:28+00:00" ; + schema1:name "results_1_19.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:33+00:00" ; + schema1:name "results_1_2.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:29+00:00" ; + schema1:name "results_1_20.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:29+00:00" ; + schema1:name "results_1_21.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:30+00:00" ; + schema1:name "results_1_22.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:31+00:00" ; + schema1:name "results_1_23.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:31+00:00" ; + schema1:name "results_1_24.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:32+00:00" ; + schema1:name "results_1_25.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:33+00:00" ; + schema1:name "results_1_26.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:34+00:00" ; + schema1:name "results_1_3.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:34+00:00" ; + schema1:name "results_1_4.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:35+00:00" ; + schema1:name "results_1_5.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:36+00:00" ; + schema1:name "results_1_6.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:36+00:00" ; + schema1:name "results_1_7.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:37+00:00" ; + schema1:name "results_1_8.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:37+00:00" ; + schema1:name "results_1_9.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:44+00:00" ; + schema1:name "results_2_1.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:38+00:00" ; + schema1:name "results_2_10.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:39+00:00" ; + schema1:name "results_2_11.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:39+00:00" ; + schema1:name "results_2_12.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:40+00:00" ; + schema1:name "results_2_13.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:40+00:00" ; + schema1:name "results_2_14.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:41+00:00" ; + schema1:name "results_2_15.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:41+00:00" ; + schema1:name "results_2_16.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:42+00:00" ; + schema1:name "results_2_17.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:43+00:00" ; + schema1:name "results_2_18.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:44+00:00" ; + schema1:name "results_2_19.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:49+00:00" ; + schema1:name "results_2_2.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:45+00:00" ; + schema1:name "results_2_20.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:45+00:00" ; + schema1:name "results_2_21.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:46+00:00" ; + schema1:name "results_2_22.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:47+00:00" ; + schema1:name "results_2_23.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:48+00:00" ; + schema1:name "results_2_24.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:48+00:00" ; + schema1:name "results_2_25.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:49+00:00" ; + schema1:name "results_2_26.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:50+00:00" ; + schema1:name "results_2_3.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:51+00:00" ; + schema1:name "results_2_4.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:51+00:00" ; + schema1:name "results_2_5.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:52+00:00" ; + schema1:name "results_2_6.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:53+00:00" ; + schema1:name "results_2_7.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:53+00:00" ; + schema1:name "results_2_8.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:54+00:00" ; + schema1:name "results_2_9.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:00+00:00" ; + schema1:name "results_3_1.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:55+00:00" ; + schema1:name "results_3_10.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:55+00:00" ; + schema1:name "results_3_11.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:56+00:00" ; + schema1:name "results_3_12.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:56+00:00" ; + schema1:name "results_3_13.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:57+00:00" ; + schema1:name "results_3_14.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:57+00:00" ; + schema1:name "results_3_15.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:58+00:00" ; + schema1:name "results_3_16.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:58+00:00" ; + schema1:name "results_3_17.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:40:59+00:00" ; + schema1:name "results_3_18.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:00+00:00" ; + schema1:name "results_3_19.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:05+00:00" ; + schema1:name "results_3_2.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:01+00:00" ; + schema1:name "results_3_20.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:01+00:00" ; + schema1:name "results_3_21.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:02+00:00" ; + schema1:name "results_3_22.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:03+00:00" ; + schema1:name "results_3_23.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:03+00:00" ; + schema1:name "results_3_24.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:04+00:00" ; + schema1:name "results_3_25.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:04+00:00" ; + schema1:name "results_3_26.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:05+00:00" ; + schema1:name "results_3_3.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:06+00:00" ; + schema1:name "results_3_4.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:07+00:00" ; + schema1:name "results_3_5.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:07+00:00" ; + schema1:name "results_3_6.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:08+00:00" ; + schema1:name "results_3_7.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:08+00:00" ; + schema1:name "results_3_8.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:09+00:00" ; + schema1:name "results_3_9.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:15+00:00" ; + schema1:name "results_4_1.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:10+00:00" ; + schema1:name "results_4_10.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:10+00:00" ; + schema1:name "results_4_11.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:11+00:00" ; + schema1:name "results_4_12.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:11+00:00" ; + schema1:name "results_4_13.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:12+00:00" ; + schema1:name "results_4_14.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:12+00:00" ; + schema1:name "results_4_15.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:13+00:00" ; + schema1:name "results_4_16.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:13+00:00" ; + schema1:name "results_4_17.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:14+00:00" ; + schema1:name "results_4_18.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:14+00:00" ; + schema1:name "results_4_19.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:19+00:00" ; + schema1:name "results_4_2.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:15+00:00" ; + schema1:name "results_4_20.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:16+00:00" ; + schema1:name "results_4_21.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:16+00:00" ; + schema1:name "results_4_22.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:17+00:00" ; + schema1:name "results_4_23.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:17+00:00" ; + schema1:name "results_4_24.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:18+00:00" ; + schema1:name "results_4_25.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:18+00:00" ; + schema1:name "results_4_26.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:19+00:00" ; + schema1:name "results_4_3.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:20+00:00" ; + schema1:name "results_4_4.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:20+00:00" ; + schema1:name "results_4_5.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:21+00:00" ; + schema1:name "results_4_6.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:22+00:00" ; + schema1:name "results_4_7.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:22+00:00" ; + schema1:name "results_4_8.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:23+00:00" ; + schema1:name "results_4_9.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:30+00:00" ; + schema1:name "results_5_1.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:24+00:00" ; + schema1:name "results_5_10.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:25+00:00" ; + schema1:name "results_5_11.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:25+00:00" ; + schema1:name "results_5_12.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:26+00:00" ; + schema1:name "results_5_13.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:26+00:00" ; + schema1:name "results_5_14.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:27+00:00" ; + schema1:name "results_5_15.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:27+00:00" ; + schema1:name "results_5_16.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:28+00:00" ; + schema1:name "results_5_17.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:29+00:00" ; + schema1:name "results_5_18.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:29+00:00" ; + schema1:name "results_5_19.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:34+00:00" ; + schema1:name "results_5_2.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:30+00:00" ; + schema1:name "results_5_20.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:31+00:00" ; + schema1:name "results_5_21.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:31+00:00" ; + schema1:name "results_5_22.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:32+00:00" ; + schema1:name "results_5_23.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:32+00:00" ; + schema1:name "results_5_24.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:33+00:00" ; + schema1:name "results_5_25.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:33+00:00" ; + schema1:name "results_5_26.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:34+00:00" ; + schema1:name "results_5_3.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:34+00:00" ; + schema1:name "results_5_4.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:35+00:00" ; + schema1:name "results_5_5.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:35+00:00" ; + schema1:name "results_5_6.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:36+00:00" ; + schema1:name "results_5_7.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:37+00:00" ; + schema1:name "results_5_8.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:37+00:00" ; + schema1:name "results_5_9.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:44+00:00" ; + schema1:name "results_6_1.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:38+00:00" ; + schema1:name "results_6_10.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:39+00:00" ; + schema1:name "results_6_11.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:39+00:00" ; + schema1:name "results_6_12.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:40+00:00" ; + schema1:name "results_6_13.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:41+00:00" ; + schema1:name "results_6_14.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:41+00:00" ; + schema1:name "results_6_15.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:42+00:00" ; + schema1:name "results_6_16.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:42+00:00" ; + schema1:name "results_6_17.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:43+00:00" ; + schema1:name "results_6_18.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:43+00:00" ; + schema1:name "results_6_19.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:49+00:00" ; + schema1:name "results_6_2.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:45+00:00" ; + schema1:name "results_6_20.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:45+00:00" ; + schema1:name "results_6_21.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:46+00:00" ; + schema1:name "results_6_22.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:46+00:00" ; + schema1:name "results_6_23.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:47+00:00" ; + schema1:name "results_6_24.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:47+00:00" ; + schema1:name "results_6_25.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:48+00:00" ; + schema1:name "results_6_26.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:49+00:00" ; + schema1:name "results_6_3.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:50+00:00" ; + schema1:name "results_6_4.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:50+00:00" ; + schema1:name "results_6_5.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:51+00:00" ; + schema1:name "results_6_6.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:51+00:00" ; + schema1:name "results_6_7.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:52+00:00" ; + schema1:name "results_6_8.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:52+00:00" ; + schema1:name "results_6_9.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:59+00:00" ; + schema1:name "results_7_1.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:53+00:00" ; + schema1:name "results_7_10.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:54+00:00" ; + schema1:name "results_7_11.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:55+00:00" ; + schema1:name "results_7_12.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:55+00:00" ; + schema1:name "results_7_13.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:56+00:00" ; + schema1:name "results_7_14.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:56+00:00" ; + schema1:name "results_7_15.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:57+00:00" ; + schema1:name "results_7_16.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:57+00:00" ; + schema1:name "results_7_17.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:58+00:00" ; + schema1:name "results_7_18.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:41:58+00:00" ; + schema1:name "results_7_19.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:04+00:00" ; + schema1:name "results_7_2.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:00+00:00" ; + schema1:name "results_7_20.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:01+00:00" ; + schema1:name "results_7_21.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:01+00:00" ; + schema1:name "results_7_22.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:02+00:00" ; + schema1:name "results_7_23.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:02+00:00" ; + schema1:name "results_7_24.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:03+00:00" ; + schema1:name "results_7_25.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:04+00:00" ; + schema1:name "results_7_26.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:05+00:00" ; + schema1:name "results_7_3.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:05+00:00" ; + schema1:name "results_7_4.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:06+00:00" ; + schema1:name "results_7_5.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:07+00:00" ; + schema1:name "results_7_6.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:07+00:00" ; + schema1:name "results_7_7.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:08+00:00" ; + schema1:name "results_7_8.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:08+00:00" ; + schema1:name "results_7_9.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:14+00:00" ; + schema1:name "results_8_1.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:09+00:00" ; + schema1:name "results_8_10.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:10+00:00" ; + schema1:name "results_8_11.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:11+00:00" ; + schema1:name "results_8_12.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:11+00:00" ; + schema1:name "results_8_13.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:11+00:00" ; + schema1:name "results_8_14.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:12+00:00" ; + schema1:name "results_8_15.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:12+00:00" ; + schema1:name "results_8_16.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:13+00:00" ; + schema1:name "results_8_17.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:13+00:00" ; + schema1:name "results_8_18.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:14+00:00" ; + schema1:name "results_8_19.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:18+00:00" ; + schema1:name "results_8_2.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:15+00:00" ; + schema1:name "results_8_20.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:15+00:00" ; + schema1:name "results_8_21.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:16+00:00" ; + schema1:name "results_8_22.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:16+00:00" ; + schema1:name "results_8_23.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:17+00:00" ; + schema1:name "results_8_24.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:17+00:00" ; + schema1:name "results_8_25.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:18+00:00" ; + schema1:name "results_8_26.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:19+00:00" ; + schema1:name "results_8_3.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:20+00:00" ; + schema1:name "results_8_4.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:20+00:00" ; + schema1:name "results_8_5.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:21+00:00" ; + schema1:name "results_8_6.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:21+00:00" ; + schema1:name "results_8_7.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:22+00:00" ; + schema1:name "results_8_8.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 616006144 ; + schema1:dateModified "2024-03-29T10:42:22+00:00" ; + schema1:name "results_8_9.npy" ; + schema1:sdDatePublished "2024-07-04T13:38:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.0.0" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.0.1" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.0.2" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.0.3" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.0.4" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.0.5" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.1.0" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.1.1" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.1.2" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.1.3" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.1.4" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.1.5" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.2.0" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.2.1" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.2.2" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.2.3" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.2.4" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.2.5" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.3.0" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.3.1" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.3.2" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.3.3" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.3.4" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.3.5" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.4.0" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.4.1" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.4.2" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.4.3" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.4.4" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.4.5" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.5.0" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.5.1" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.5.2" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.5.3" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.5.4" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "A.5.5" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.0.0" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.0.1" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.0.2" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.0.3" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.0.4" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.0.5" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.1.0" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.1.1" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.1.2" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.1.3" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.1.4" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.1.5" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.2.0" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.2.1" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.2.2" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.2.3" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.2.4" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.2.5" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.3.0" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.3.1" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.3.2" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.3.3" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.3.4" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.3.5" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.4.0" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.4.1" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.4.2" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.4.3" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.4.4" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.4.5" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.5.0" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.5.1" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.5.2" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.5.3" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.5.4" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 265 ; + schema1:dateModified "2024-08-01T15:31:35+00:00" ; + schema1:name "B.5.5" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "energy.selection" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "genion.group" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1044 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "ions.mdp" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 971 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "minim.mdp" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116154 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "133l.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 114615 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "134l.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116316 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1aki.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 117369 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1bhz.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 115425 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1bvx.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115668 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1bwh.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115506 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1bwi.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116802 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1bwj.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 132678 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1c46.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 126279 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1ckh.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 121338 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1f0w.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119961 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1f10.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110322 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1fly.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 212058 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1gxv.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 213030 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1gxx.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123687 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1hel.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 121662 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1hem.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 121986 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1hen.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123282 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1heo.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 124254 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1hep.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123201 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1heq.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 124902 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1her.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119880 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1hsw.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120042 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1hsx.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 118422 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1i20.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115506 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1ioq.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 114210 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1ior.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113400 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1ios.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111051 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1iot.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111942 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1ir7.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 114858 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1ir8.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112428 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1ir9.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 178848 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1iy3.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 178605 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1iy4.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 95661 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1ja2.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 95499 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1ja4.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 95013 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1ja6.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120690 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1jis.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 121419 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1jit.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119880 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1jiy.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 117369 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1jj1.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110241 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1jpo.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 133974 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1jwr.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111537 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1kxw.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 113238 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1kxx.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115101 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1kxy.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119070 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1laa.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 118989 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lhh.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120285 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lhi.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123444 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lhj.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119718 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lhk.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119394 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lhl.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115992 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lhm.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 109755 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1loz.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 165483 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lsa.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 162162 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lsb.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 157707 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lsc.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 158355 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lsd.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 157059 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lse.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 162567 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lsf.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123120 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lsm.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 127332 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lsn.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 122472 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lsy.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111618 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lyo.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 117612 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lyy.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 145881 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lyz.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 118665 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lz1.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116478 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lza.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115668 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lzd.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 124254 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1lzt.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 114129 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1rex.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 114372 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1rfp.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113724 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1tay.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115587 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1tby.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 114291 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1tcy.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 114615 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1tdy.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110889 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1uia.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 109998 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1uic.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113562 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1uid.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111294 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1uie.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111051 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1uif.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111699 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1uig.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 141507 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1vds.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 145314 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1vdt.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 113967 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1ved.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 118908 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1xei.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116964 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1xej.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120042 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1xek.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 94284 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2a6u.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116397 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2aub.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 125793 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2bqg.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 125550 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2bqh.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 124740 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2bqi.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 129114 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2bqk.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 124821 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2bqm.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 173745 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2c8o.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 173826 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2c8p.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 118989 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2epe.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101412 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2hs7.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103113 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2hs9.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99711 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2hso.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 121824 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2lhm.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123525 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2lym.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 133488 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2lyz.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 122472 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2yvb.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116640 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "2zq4.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 117288 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "3a3r.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 218214 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "3exd.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115182 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "3iju.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115992 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "3ijv.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 109755 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "3j4g.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106029 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "3j6k.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 125469 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "3lym.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119394 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "3lyz.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 116397 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "3qy4.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 211977 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "3wmk.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119475 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "3wpj.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 122553 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "3wvx.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 206955 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4axt.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 209790 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4b0d.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115425 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4hv1.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112914 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4hv2.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 117693 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4i8s.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113967 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4ias.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 128385 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4lym.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110727 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4lyo.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 133812 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4lyz.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 210762 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4nhi.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 208089 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4qeq.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 30294 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4r0p.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113724 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4rlm.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112347 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4rln.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 190107 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "4wmg.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 186624 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5a3e.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 125064 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5amy.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112995 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5e4p.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116235 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5hnc.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105138 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5hnl.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 125388 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5i4w.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123282 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5k2k.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 218052 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5k2n.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 222183 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5k2p.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 217890 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5k2q.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 121743 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5k2r.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 123201 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5k2s.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 141345 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5lyt.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 133893 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5lyz.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 277182 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5njm.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 122391 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "5yin.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112023 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "6gf0.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111537 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "6h0k.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112428 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "6h0l.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 127170 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "6lyt.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 134379 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "6lyz.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 111456 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "6s2n.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116883 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "7byo.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 118098 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "7byp.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 203391 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "7c09.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115992 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "7d01.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116964 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "7d02.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 114372 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "7d04.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 117855 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "7d05.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103275 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "7lyz.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120771 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "8lyz.pdb" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 100402 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "133l.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 593157 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "133l.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1244624 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "133l_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "133l_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1243836 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "133l_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100402 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "133l_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4196 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "133l_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2148 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "133l_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1435957 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "133l_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1435336 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "133l_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 100177 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "134l.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592390 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "134l.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1377720 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "134l_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "134l_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1376884 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "134l_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100177 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "134l_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4430 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "134l_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2154 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "134l_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1686427 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "134l_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1685896 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "134l_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 98776 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1aki.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577611 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1aki.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1388044 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1aki_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1aki_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1387304 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1aki_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98776 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1aki_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4711 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1aki_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2121 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1aki_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1727686 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1aki_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1726975 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1aki_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 88471 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1bhz.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578224 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1bhz.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1331016 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1bhz_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1bhz_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1330276 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1bhz_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88471 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1bhz_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4384 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1bhz_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2154 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1bhz_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1617481 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1bhz_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1616680 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1bhz_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 101081 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bvx.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577618 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bvx.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1315836 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bvx_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bvx_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1315096 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bvx_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101081 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bvx_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4192 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bvx_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2154 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bvx_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1592291 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bvx_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1591580 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bvx_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 100676 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1bwh.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577615 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1bwh.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1447164 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1bwh_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1bwh_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1446424 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1bwh_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100676 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1bwh_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4706 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1bwh_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2160 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1bwh_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1838531 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1bwh_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1837820 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1bwh_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 100676 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1bwi.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577615 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1bwi.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1300500 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1bwi_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1bwi_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1299760 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1bwi_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100676 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1bwi_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4038 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1bwi_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2132 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1bwi_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1563536 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1bwi_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1562825 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1bwi_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 102296 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bwj.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577618 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bwj.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1325988 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bwj_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bwj_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1325248 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bwj_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102296 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bwj_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4094 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bwj_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2121 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bwj_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1611326 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bwj_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1610615 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1bwj_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 123706 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1c46.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 596803 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1c46.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1619344 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1c46_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1c46_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1618604 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1c46_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123706 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1c46_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4161 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1c46_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1c46_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2133316 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1c46_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2132605 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1c46_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 123221 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ckh.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 595515 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ckh.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1463304 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ckh_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ckh_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1462564 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ckh_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123221 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ckh_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4710 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ckh_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ckh_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1842311 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ckh_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1841600 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ckh_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 106741 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1f0w.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577605 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1f0w.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1386532 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1f0w_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1f0w_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1385792 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1f0w_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106741 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1f0w_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4573 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1f0w_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2124 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1f0w_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1724851 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1f0w_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1724140 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1f0w_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 105301 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1f10.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577982 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1f10.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1330940 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1f10_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1f10_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1330224 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1f10_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105301 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1f10_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4227 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1f10_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2169 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1f10_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1619326 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1f10_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1618525 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1f10_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 97291 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1fly.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578505 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1fly.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1289252 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1fly_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1fly_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1288512 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1fly_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97291 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1fly_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3991 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1fly_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2157 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1fly_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1540981 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1fly_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1540270 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1fly_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 88968 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1gxv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577990 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1gxv.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1243100 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1gxv_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1gxv_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1242384 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1gxv_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88968 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1gxv_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4095 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1gxv_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2177 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1gxv_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1454628 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1gxv_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1453827 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1gxv_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 88968 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1gxx.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577986 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1gxx.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1234244 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1gxx_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1gxx_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1233528 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1gxx_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88968 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1gxx_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4439 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1gxx_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2139 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1gxx_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1438023 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1gxx_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1437222 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1gxx_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 113235 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1hel.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577619 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1hel.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1615216 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1hel_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1hel_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1614476 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1hel_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113235 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1hel_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4144 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1hel_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2152 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1hel_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2153625 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1hel_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2152914 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1hel_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 113370 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1hem.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578521 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1hem.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1613872 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1hem_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1hem_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1613132 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1hem_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113370 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1hem_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4283 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1hem_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2160 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1hem_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2149710 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1hem_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2148999 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1hem_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 113235 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1hen.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577615 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1hen.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1620400 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1hen_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1hen_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1619660 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1hen_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113235 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1hen_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4062 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1hen_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2148 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1hen_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2163345 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1hen_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2162634 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1hen_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 113100 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1heo.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576719 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1heo.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1614040 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1heo_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1heo_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1613300 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1heo_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113100 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1heo_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4251 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1heo_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2160 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1heo_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2152815 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1heo_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2152104 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1heo_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 113100 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1hep.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576722 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1hep.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1636144 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1hep_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1hep_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1635404 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1hep_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113100 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1hep_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4291 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1hep_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1hep_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2194260 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1hep_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2193549 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1hep_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 113235 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1heq.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577625 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1heq.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1649488 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1heq_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1heq_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1648748 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1heq_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113235 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1heq_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4369 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1heq_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2157 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1heq_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2217885 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1heq_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2217174 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1heq_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 113100 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1her.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576720 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1her.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1620304 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1her_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1her_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1619564 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1her_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113100 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1her_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4086 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1her_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2175 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1her_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2164560 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1her_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2163849 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1her_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 106643 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1hsw.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577642 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1hsw.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1299040 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1hsw_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1hsw_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1298300 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1hsw_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106643 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1hsw_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4033 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1hsw_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1hsw_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1560863 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1hsw_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1560106 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1hsw_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 108919 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1hsx.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577623 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1hsx.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1371328 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1hsx_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1hsx_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1370588 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1hsx_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108919 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1hsx_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4492 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1hsx_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1hsx_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1696384 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1hsx_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1695646 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1hsx_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 102153 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1i20.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 595550 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1i20.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1302028 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1i20_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1i20_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1301240 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1i20_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102153 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1i20_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3445 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1i20_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2162 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1i20_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1540038 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1i20_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1539417 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1i20_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 101928 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1ioq.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 579472 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1ioq.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1355956 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1ioq_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1ioq_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1355216 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1ioq_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101928 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1ioq_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4208 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1ioq_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1ioq_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1665498 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1ioq_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1664787 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1ioq_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 100308 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1ior.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 579375 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1ior.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1307236 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1ior_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1ior_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1306520 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1ior_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100308 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1ior_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4250 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1ior_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2124 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1ior_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1573158 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1ior_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1572357 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1ior_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 99048 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ios.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578998 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ios.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1296156 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ios_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ios_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1295416 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ios_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99048 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ios_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4052 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ios_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2153 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ios_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1553673 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ios_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1552962 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ios_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 98103 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1iot.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578904 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1iot.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1349028 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1iot_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1iot_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1348312 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1iot_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98103 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1iot_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4245 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1iot_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2154 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1iot_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1651818 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1iot_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1651017 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1iot_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 96661 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1ir7.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576647 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1ir7.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1293636 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1ir7_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1ir7_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1292896 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1ir7_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96661 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1ir7_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4312 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1ir7_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2147 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1ir7_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1552096 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1ir7_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1551385 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1ir7_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 101521 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1ir8.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576641 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1ir8.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1305300 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1ir8_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1ir8_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1304560 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1ir8_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101521 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1ir8_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4230 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1ir8_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2175 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1ir8_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1573966 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1ir8_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1573255 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1ir8_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 97516 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1ir9.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577021 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1ir9.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1295356 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1ir9_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1ir9_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1294640 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1ir9_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97516 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1ir9_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4040 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1ir9_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2153 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1ir9_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1554031 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1ir9_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1553230 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1ir9_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 90991 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1iy3.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594848 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1iy3.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1236936 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1iy3_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1iy3_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1236196 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1iy3_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 90991 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1iy3_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4188 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1iy3_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2175 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1iy3_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1418851 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1iy3_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1418140 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1iy3_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 90991 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1iy4.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594849 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1iy4.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1309944 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1iy4_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1iy4_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1309204 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1iy4_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 90991 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1iy4_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4066 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1iy4_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2167 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1iy4_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1555741 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1iy4_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1555030 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1iy4_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 88258 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ja2.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577595 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ja2.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1287376 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ja2_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ja2_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1286636 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ja2_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88258 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ja2_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4042 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ja2_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ja2_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1538968 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ja2_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1538236 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1ja2_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 88258 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1ja4.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577601 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1ja4.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1294144 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1ja4_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1ja4_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1293404 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1ja4_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88258 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1ja4_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4055 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1ja4_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2161 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1ja4_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1551658 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1ja4_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1550926 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1ja4_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 88258 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ja6.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577598 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ja6.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1270384 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ja6_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ja6_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1269644 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ja6_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88258 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ja6_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4055 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ja6_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2162 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ja6_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1507108 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ja6_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1506376 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1ja6_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 107461 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1jis.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577988 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1jis.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1390844 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1jis_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1jis_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1390128 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1jis_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107461 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1jis_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4653 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1jis_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2147 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1jis_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1731646 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1jis_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1730845 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1jis_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 108541 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1jit.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577982 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1jit.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1332740 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1jit_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1jit_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1332024 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1jit_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108541 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1jit_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4022 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1jit_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2167 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1jit_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1622701 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1jit_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1621900 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1jit_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 108631 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1jiy.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577608 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1jiy.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1413028 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1jiy_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1jiy_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1412288 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1jiy_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108631 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1jiy_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4615 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1jiy_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2133 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1jiy_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1774531 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1jiy_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1773820 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1jiy_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 106336 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jj1.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577605 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jj1.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1448524 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jj1_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jj1_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1447784 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jj1_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106336 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jj1_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4555 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jj1_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jj1_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1841086 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jj1_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1840375 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jj1_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 97021 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1jpo.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577608 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1jpo.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1278316 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1jpo_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1jpo_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1277576 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1jpo_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97021 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1jpo_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4046 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1jpo_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2169 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1jpo_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1521946 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1jpo_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1521235 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1jpo_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 132436 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jwr.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594871 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jwr.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1414416 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jwr_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jwr_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1413676 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jwr_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 132436 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jwr_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4552 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jwr_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2130 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jwr_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1751626 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jwr_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1750915 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1jwr_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 96121 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1kxw.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577058 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1kxw.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1355304 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1kxw_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1kxw_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1354516 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1kxw_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96121 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1kxw_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4325 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1kxw_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2158 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1kxw_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1666981 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1kxw_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1666360 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1kxw_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 96346 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1kxx.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577608 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1kxx.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1269820 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1kxx_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1kxx_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1269080 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1kxx_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96346 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1kxx_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4022 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1kxx_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1kxx_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1506016 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1kxx_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1505305 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1kxx_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 99406 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1kxy.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578153 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1kxy.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1286792 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1kxy_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1kxy_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1286100 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1kxy_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99406 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1kxy_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4019 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1kxy_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2144 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1kxy_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1537156 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1kxy_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1536355 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1kxy_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 106252 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1laa.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 595780 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1laa.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1330060 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1laa_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1laa_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1329320 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1laa_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106252 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1laa_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4224 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1laa_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1laa_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1592062 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1laa_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1591351 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1laa_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 104542 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lhh.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594816 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lhh.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1291572 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lhh_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lhh_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1290832 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lhh_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104542 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lhh_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4111 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lhh_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2144 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lhh_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1521367 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lhh_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1520656 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lhh_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 107287 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lhi.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592144 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lhi.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1386532 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lhi_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lhi_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1385792 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lhi_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107287 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lhi_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4481 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lhi_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2160 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lhi_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1703527 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lhi_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1702816 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lhi_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 107962 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lhj.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592147 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lhj.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1258732 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lhj_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lhj_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1257992 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lhj_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107962 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lhj_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4173 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lhj_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2155 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lhj_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1463902 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lhj_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1463191 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lhj_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 106612 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lhk.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 596095 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lhk.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1300688 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lhk_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lhk_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1299996 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lhk_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106612 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lhk_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4058 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lhk_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2156 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lhk_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1536532 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lhk_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1535731 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lhk_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 106702 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1lhl.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 596710 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1lhl.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1296228 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1lhl_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1lhl_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1295488 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1lhl_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106702 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1lhl_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4202 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1lhl_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2175 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1lhl_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1527307 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1lhl_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1526596 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1lhl_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 104632 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lhm.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594361 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lhm.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1270792 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lhm_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lhm_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1270052 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lhm_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104632 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lhm_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4106 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lhm_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2167 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lhm_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1482982 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lhm_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1482271 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lhm_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 96841 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1loz.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 593733 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1loz.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1316392 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1loz_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1loz_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1315628 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1loz_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96841 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1loz_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4287 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1loz_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2157 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1loz_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1568071 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1loz_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1567360 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1loz_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 105405 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lsa.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577622 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lsa.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1447456 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lsa_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lsa_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1446716 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lsa_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105405 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lsa_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4640 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lsa_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lsa_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1839075 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lsa_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1838364 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1lsa_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 104730 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1lsb.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577625 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1lsb.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1392808 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1lsb_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1lsb_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1392068 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1lsb_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104730 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1lsb_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4637 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1lsb_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2150 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1lsb_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1736610 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1lsb_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1735899 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1lsb_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 102435 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsc.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577619 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsc.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1353784 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsc_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsc_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1353044 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsc_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102435 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsc_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4111 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsc_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2152 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsc_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1663440 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsc_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1662729 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsc_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 101220 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lsd.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577619 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lsd.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1919056 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lsd_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lsd_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1918316 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lsd_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101220 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lsd_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4385 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lsd_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2157 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lsd_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2723325 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lsd_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2722614 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lsd_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 101625 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1lse.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577622 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1lse.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1584688 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1lse_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1lse_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1583948 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1lse_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101625 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1lse_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4146 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1lse_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2149 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1lse_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2096385 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1lse_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2095674 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1lse_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 104325 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lsf.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577625 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lsf.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1400224 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "1lsf_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "1lsf_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1399484 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "1lsf_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104325 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lsf_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4614 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "1lsf_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2148 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "1lsf_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1750515 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "1lsf_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1749804 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "1lsf_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 111300 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsm.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578165 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsm.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1660268 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsm_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsm_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1659576 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsm_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111300 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsm_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4340 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsm_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsm_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2237145 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsm_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2236344 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1lsm_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 112200 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lsn.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577806 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lsn.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1581756 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lsn_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lsn_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1581064 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lsn_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112200 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lsn_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4132 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lsn_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2155 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lsn_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2090625 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lsn_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2089824 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1lsn_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 107025 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lsy.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577747 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lsy.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1369716 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lsy_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lsy_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1369048 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lsy_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107025 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lsy_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4130 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lsy_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2158 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lsy_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1692465 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lsy_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1691574 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lsy_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 98101 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lyo.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577607 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lyo.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1227412 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lyo_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lyo_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1226672 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lyo_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98101 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lyo_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4319 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lyo_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2152 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lyo_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1426501 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lyo_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1425790 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1lyo_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 105946 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lyy.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 597112 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lyy.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1415876 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lyy_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lyy_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1415184 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lyy_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105886 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lyy_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4568 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lyy_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2145 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lyy_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1751401 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lyy_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1750600 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lyy_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 102165 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lyz.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578012 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lyz.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1288896 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lyz_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lyz_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1288132 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lyz_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102165 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lyz_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3867 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lyz_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2145 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lyz_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1539645 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lyz_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1538934 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lyz_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 90997 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lz1.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594854 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lz1.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1240180 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lz1_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lz1_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1239440 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lz1_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 90997 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lz1_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4202 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lz1_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lz1_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1424932 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lz1_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1424221 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1lz1_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 102300 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lza.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577625 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lza.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1292224 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lza_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lza_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1291484 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lza_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102300 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lza_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4028 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lza_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2147 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lza_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1548015 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lza_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1547304 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "1lza_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 98790 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1lzd.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576403 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lzd.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1262000 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lzd_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lzd_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1261260 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lzd_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98790 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lzd_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4093 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lzd_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2149 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lzd_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1493205 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lzd_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1492494 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1lzd_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 118020 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lzt.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577622 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lzt.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1383664 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lzt_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 940 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lzt_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1382924 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lzt_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 117960 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lzt_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2905 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lzt_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 964 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lzt_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1719465 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lzt_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1718754 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1lzt_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 105301 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1rex.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594877 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1rex.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1268112 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1rex_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1rex_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1267372 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1rex_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105301 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1rex_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4367 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1rex_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2163 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1rex_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1477306 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1rex_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1476595 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "1rex_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 101476 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1rfp.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577608 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1rfp.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1304092 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1rfp_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1rfp_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1303352 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1rfp_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101476 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1rfp_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4259 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1rfp_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2150 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1rfp_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1570276 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1rfp_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1569565 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1rfp_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 100492 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tay.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 591433 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tay.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1333676 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tay_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tay_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1332936 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tay_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100492 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tay_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4242 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tay_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2126 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tay_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1605067 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tay_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1604356 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tay_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 101302 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1tby.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594177 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1tby.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1328348 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1tby_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1tby_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1327608 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1tby_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101302 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1tby_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4185 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1tby_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2124 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1tby_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1590892 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1tby_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1590181 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1tby_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 101077 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1tcy.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594654 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1tcy.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1299300 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1tcy_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1tcy_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1298560 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1tcy_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101077 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1tcy_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4058 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1tcy_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2155 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1tcy_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1536127 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1tcy_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1535416 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1tcy_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 101122 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tdy.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 596102 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tdy.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1329372 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tdy_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tdy_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1328632 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tdy_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101122 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tdy_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4124 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tdy_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2160 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tdy_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1590307 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tdy_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1589596 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "1tdy_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 96391 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1uia.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 565547 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1uia.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1336384 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1uia_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1uia_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1335596 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1uia_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96391 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1uia_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4252 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "1uia_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2157 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1uia_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1649296 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1uia_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1648675 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1uia_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 96706 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1uic.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 575266 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1uic.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1285608 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1uic_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1uic_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1284868 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1uic_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96706 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1uic_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3999 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1uic_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2169 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1uic_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1540261 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1uic_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1539550 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1uic_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 97831 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1uid.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578487 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1uid.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1309984 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1uid_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1uid_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1309244 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1uid_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97831 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1uid_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4269 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1uid_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1uid_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1581481 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1uid_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1580770 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "1uid_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 96841 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1uie.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 574369 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1uie.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1293608 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1uie_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1uie_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1292868 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1uie_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96841 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1uie_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4011 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1uie_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2162 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1uie_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1556731 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1uie_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1556020 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "1uie_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 96706 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1uif.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577157 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1uif.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1294008 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1uif_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1uif_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1293268 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1uif_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96706 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1uif_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4020 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1uif_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1uif_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1553221 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1uif_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1552510 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1uif_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 97291 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1uig.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577605 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1uig.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1298476 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1uig_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1uig_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1297736 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1uig_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97291 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1uig_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4364 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1uig_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2147 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1uig_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1559746 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1uig_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1559035 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "1uig_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 104055 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1vds.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577625 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1vds.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1316828 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1vds_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1vds_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1316088 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1vds_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104055 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1vds_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4161 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1vds_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2177 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1vds_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1594185 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1vds_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1593453 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1vds_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 103785 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1vdt.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577619 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1vdt.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1330076 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1vdt_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1vdt_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1329336 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1vdt_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103785 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1vdt_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4258 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1vdt_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2115 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1vdt_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1619025 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1vdt_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1618293 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "1vdt_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 98790 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1ved.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577625 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1ved.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1335260 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1ved_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1ved_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1334520 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1ved_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98790 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1ved_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4077 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1ved_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1ved_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1628745 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1ved_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1628013 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1ved_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 102508 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1xei.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577632 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1xei.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1448080 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1xei_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1xei_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1447340 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1xei_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102448 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1xei_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4569 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1xei_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2144 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1xei_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1840303 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1xei_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1839556 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "1xei_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 98863 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1xej.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577635 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1xej.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1393504 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1xej_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1xej_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1392764 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1xej_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98803 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1xej_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3513 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1xej_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1xej_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1737973 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1xej_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1737226 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "1xej_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 94543 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1xek.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577638 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1xek.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1390264 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1xek_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1xek_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1389524 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1xek_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 94483 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1xek_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3433 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1xek_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2148 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1xek_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1731898 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1xek_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1731151 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "1xek_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 88248 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2a6u.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577591 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2a6u.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1307908 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2a6u_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2a6u_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1307168 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2a6u_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88248 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2a6u_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4285 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2a6u_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2132 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2a6u_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1577433 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2a6u_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1576722 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2a6u_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 95820 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2aub.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577625 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2aub.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1351460 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2aub_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2aub_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1350720 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2aub_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 95820 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2aub_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4145 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2aub_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2131 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2aub_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1659120 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2aub_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1658388 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2aub_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 115291 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2bqg.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592458 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2bqg.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1458228 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2bqg_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2bqg_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1457488 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2bqg_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115291 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2bqg_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4582 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2bqg_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2bqg_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1837216 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2bqg_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1836505 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2bqg_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 112456 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2bqh.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592461 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2bqh.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2198604 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2bqh_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2bqh_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2197864 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2bqh_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112456 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2bqh_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3754 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2bqh_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2190 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2bqh_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3225421 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2bqh_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3224710 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2bqh_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 110701 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "2bqi.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592464 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "2bqi.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1369092 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "2bqi_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "2bqi_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1368352 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "2bqi_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110701 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "2bqi_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4137 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "2bqi_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2162 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "2bqi_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1670086 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "2bqi_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1669375 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "2bqi_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 120151 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2bqk.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592454 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2bqk.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2200836 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2bqk_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2bqk_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2200096 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2bqk_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120151 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2bqk_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4395 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2bqk_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2196 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2bqk_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3229606 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2bqk_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3228895 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2bqk_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 113536 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2bqm.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592458 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2bqm.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1459236 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2bqm_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2bqm_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1458496 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2bqm_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113536 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2bqm_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4696 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2bqm_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2130 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2bqm_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1839106 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2bqm_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1838395 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2bqm_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 106216 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2c8o.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577623 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2c8o.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1316324 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2c8o_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2c8o_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1315584 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2c8o_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106216 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2c8o_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4245 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2c8o_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2133 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2c8o_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1593241 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2c8o_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1592508 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2c8o_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 106351 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2c8p.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577626 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2c8p.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1294076 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2c8p_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2c8p_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1293336 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2c8p_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106351 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2c8p_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4073 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2c8p_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2161 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2c8p_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1551526 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2c8p_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1550793 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "2c8p_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 94773 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2epe.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577981 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2epe.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1292780 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2epe_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2epe_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1292064 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2epe_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 94773 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2epe_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4029 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2epe_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2130 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2epe_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1547778 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2epe_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1546977 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2epe_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 101355 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "2hs7.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577619 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "2hs7.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1382996 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "2hs7_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "2hs7_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1382256 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "2hs7_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101355 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "2hs7_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4148 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "2hs7_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2124 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "2hs7_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1718250 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "2hs7_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1717518 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "2hs7_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 100815 ; + schema1:dateModified "2024-03-05T11:04:23" ; + schema1:name "2hs9.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577616 ; + schema1:dateModified "2024-03-05T11:04:23" ; + schema1:name "2hs9.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1399916 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2hs9_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2hs9_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1399176 ; + schema1:dateModified "2024-03-05T11:04:23" ; + schema1:name "2hs9_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100815 ; + schema1:dateModified "2024-03-05T11:04:23" ; + schema1:name "2hs9_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4771 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2hs9_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2148 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2hs9_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1749975 ; + schema1:dateModified "2024-03-05T11:04:23" ; + schema1:name "2hs9_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1749243 ; + schema1:dateModified "2024-03-05T11:04:23" ; + schema1:name "2hs9_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 100950 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2hso.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577619 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2hso.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1383572 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2hso_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2hso_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1382832 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2hso_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100950 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2hso_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4452 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2hso_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2hso_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1719330 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2hso_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1718598 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "2hso_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 110032 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2lhm.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594097 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2lhm.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1241404 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2lhm_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2lhm_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1240568 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2lhm_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110032 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2lhm_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4290 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2lhm_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2lhm_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1428442 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2lhm_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1427911 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "2lhm_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 108645 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2lym.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577622 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2lym.top" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1448896 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2lym_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2lym_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1448156 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2lym_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108645 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2lym_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4584 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2lym_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2lym_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1841775 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2lym_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1841064 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "2lym_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2lyz.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577622 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2lyz.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1342048 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2lyz_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2lyz_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1341308 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2lyz_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2lyz_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3645 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2lyz_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2131 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2lyz_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1641435 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2lyz_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1640724 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "2lyz_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 99183 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "2yvb.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "2yvb.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1296676 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2yvb_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2yvb_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1295936 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2yvb_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99183 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "2yvb_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4086 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2yvb_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2162 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2yvb_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1556373 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "2yvb_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1555662 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "2yvb_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 98913 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "2zq4.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "2zq4.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1380268 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "2zq4_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "2zq4_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1379528 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "2zq4_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98913 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "2zq4_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4191 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "2zq4_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2160 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "2zq4_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1713108 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "2zq4_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1712397 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "2zq4_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 100983 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3a3r.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577063 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3a3r.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1370784 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3a3r_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3a3r_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1369996 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3a3r_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100983 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3a3r_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4381 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3a3r_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2155 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3a3r_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1696008 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3a3r_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1695387 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3a3r_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 119703 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3exd.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3exd.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1351252 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3exd_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3exd_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1350512 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3exd_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119703 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3exd_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4186 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3exd_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2131 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3exd_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1658703 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3exd_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1657992 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3exd_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 99183 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3iju.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577607 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3iju.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1341172 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3iju_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3iju_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1340432 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3iju_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99183 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3iju_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4267 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3iju_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2154 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3iju_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1639803 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3iju_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1639092 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3iju_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 99183 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3ijv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3ijv.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1292428 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3ijv_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3ijv_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1291688 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3ijv_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99183 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3ijv_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4031 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3ijv_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2161 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3ijv_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1548408 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3ijv_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1547697 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "3ijv_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 93198 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "3j4g.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578129 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "3j4g.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1307368 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "3j4g_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "3j4g_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1306628 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "3j4g_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 93198 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "3j4g_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4126 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "3j4g_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2163 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "3j4g_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1575768 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "3j4g_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1575057 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "3j4g_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 91353 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "3j6k.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577613 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "3j6k.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1329292 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "3j6k_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "3j6k_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1328552 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "3j6k_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 91353 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "3j6k_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4240 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "3j6k_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2162 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "3j6k_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1617528 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "3j6k_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1616817 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "3j6k_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 110265 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3lym.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577619 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3lym.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1445368 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3lym_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3lym_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1444628 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3lym_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110265 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3lym_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4558 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3lym_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2145 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3lym_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1835160 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3lym_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1834449 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "3lym_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3lyz.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577619 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3lyz.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1324336 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3lyz_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3lyz_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1323596 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3lyz_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3lyz_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3648 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3lyz_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2152 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3lyz_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1608225 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3lyz_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1607514 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3lyz_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 99813 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "3qy4.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 571160 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "3qy4.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1300228 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "3qy4_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "3qy4_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1299440 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "3qy4_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99813 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "3qy4_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4240 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "3qy4_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2119 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "3qy4_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1572663 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "3qy4_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1572042 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "3qy4_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 103098 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3wmk.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577607 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3wmk.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1342756 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3wmk_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3wmk_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1342016 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3wmk_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103098 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3wmk_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4299 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3wmk_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3wmk_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1642773 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3wmk_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1642062 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "3wmk_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 97428 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3wpj.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3wpj.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1357732 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3wpj_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3wpj_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1356992 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3wpj_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97428 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3wpj_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4128 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3wpj_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3wpj_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1670853 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3wpj_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1670142 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "3wpj_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 104223 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "3wvx.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576940 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "3wvx.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1287888 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "3wvx_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "3wvx_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1287196 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "3wvx_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104223 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "3wvx_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4064 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "3wvx_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2147 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "3wvx_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1540758 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "3wvx_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1539957 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "3wvx_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 109308 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4axt.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577607 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4axt.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1334044 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4axt_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4axt_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1333304 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4axt_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 109308 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4axt_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4087 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4axt_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2145 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4axt_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1626438 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4axt_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1625727 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4axt_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 110793 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4b0d.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577607 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4b0d.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1355428 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4b0d_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4b0d_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1354688 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4b0d_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110793 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4b0d_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4150 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4b0d_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4b0d_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1666533 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4b0d_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1665822 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4b0d_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 98778 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4hv1.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4hv1.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1339660 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4hv1_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4hv1_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1338920 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4hv1_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98778 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4hv1_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4334 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4hv1_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2171 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4hv1_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1636968 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4hv1_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1636257 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4hv1_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 94593 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4hv2.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577613 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4hv2.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1346284 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4hv2_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4hv2_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1345544 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4hv2_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 94593 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4hv2_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4272 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4hv2_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2162 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4hv2_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1649388 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4hv2_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1648677 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4hv2_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 103233 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4i8s.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577613 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4i8s.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1343908 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4i8s_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4i8s_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1343168 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4i8s_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103233 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4i8s_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4241 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4i8s_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2150 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4i8s_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1644933 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4i8s_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1644222 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4i8s_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 95943 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4ias.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577607 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4ias.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1345492 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4ias_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4ias_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1344752 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4ias_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 95943 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4ias_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4232 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4ias_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2148 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4ias_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1647903 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4ias_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1647192 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "4ias_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 109455 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4lym.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577625 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4lym.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1375384 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4lym_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4lym_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1374644 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4lym_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 109455 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4lym_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4178 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4lym_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2161 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4lym_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1703940 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4lym_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1703229 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4lym_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 96256 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "4lyo.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577985 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "4lyo.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1286444 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "4lyo_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "4lyo_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1285728 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "4lyo_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96256 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "4lyo_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4065 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "4lyo_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2165 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "4lyo_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1535896 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "4lyo_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1535095 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "4lyo_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "4lyz.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577622 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "4lyz.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1287256 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "4lyz_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "4lyz_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1286516 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "4lyz_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "4lyz_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4058 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "4lyz_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2145 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "4lyz_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1538700 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "4lyz_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1537989 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "4lyz_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 105258 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4nhi.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577607 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4nhi.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1384156 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4nhi_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4nhi_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1383416 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4nhi_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105258 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4nhi_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4554 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4nhi_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2155 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4nhi_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1720398 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4nhi_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1719687 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "4nhi_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 106968 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4qeq.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577742 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4qeq.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1360228 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4qeq_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4qeq_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1359464 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4qeq_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106968 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4qeq_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4419 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4qeq_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2146 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4qeq_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1673778 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4qeq_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1673067 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4qeq_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 4953 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4r0p.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 32024 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4r0p.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 229932 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4r0p_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4r0p_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 229932 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4r0p_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4953 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4r0p_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4039 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4r0p_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2153 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4r0p_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 342858 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4r0p_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 342867 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "4r0p_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 93378 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4rlm.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4rlm.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1301644 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4rlm_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4rlm_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1300904 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4rlm_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 93378 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4rlm_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4063 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4rlm_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2150 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4rlm_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1565688 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4rlm_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1564977 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "4rlm_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 93648 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4rln.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577613 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4rln.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1306180 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4rln_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4rln_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1305440 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4rln_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 93648 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4rln_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4280 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4rln_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2148 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4rln_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1574193 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4rln_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1573482 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "4rln_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 88653 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "4wmg.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "4wmg.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1353772 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "4wmg_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "4wmg_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1353032 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "4wmg_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88653 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "4wmg_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4142 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "4wmg_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2146 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "4wmg_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1663428 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "4wmg_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1662717 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "4wmg_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 88743 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5a3e.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 572027 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5a3e.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1295536 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5a3e_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5a3e_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1294796 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5a3e_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88743 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5a3e_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4161 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5a3e_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5a3e_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1562403 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5a3e_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1561692 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5a3e_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 103638 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "5amy.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "5amy.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1296892 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "5amy_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "5amy_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1296152 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "5amy_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103638 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "5amy_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4257 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "5amy_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2146 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "5amy_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1556778 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "5amy_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1556067 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "5amy_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 99453 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5e4p.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577607 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5e4p.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1355428 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5e4p_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5e4p_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1354688 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5e4p_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99453 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5e4p_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4218 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5e4p_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2153 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5e4p_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1666533 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5e4p_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1665822 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5e4p_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 102288 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5hnc.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5hnc.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1295164 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5hnc_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5hnc_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1294424 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5hnc_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102288 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5hnc_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4093 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5hnc_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2163 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5hnc_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1553538 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5hnc_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1552827 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5hnc_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 91398 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5hnl.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577984 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5hnl.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1328996 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5hnl_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5hnl_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1328280 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5hnl_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 91398 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5hnl_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4168 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5hnl_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2162 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5hnl_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1615683 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5hnl_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1614882 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5hnl_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 115113 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5i4w.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5i4w.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1356436 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5i4w_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5i4w_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1355696 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5i4w_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115113 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5i4w_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4175 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5i4w_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5i4w_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1668423 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5i4w_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1667712 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5i4w_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 104043 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "5k2k.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577613 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "5k2k.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1272700 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "5k2k_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "5k2k_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1271960 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "5k2k_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104043 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "5k2k_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4078 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "5k2k_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2148 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "5k2k_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1511418 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "5k2k_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1510707 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "5k2k_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 108723 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "5k2n.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577742 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "5k2n.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1339564 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "5k2n_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "5k2n_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1338800 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "5k2n_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108723 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "5k2n_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4143 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "5k2n_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2102 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "5k2n_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1635033 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "5k2n_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1634322 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "5k2n_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 111063 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5k2p.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577607 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5k2p.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1329580 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5k2p_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5k2p_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1328840 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5k2p_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111063 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5k2p_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4347 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5k2p_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2144 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5k2p_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1618068 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5k2p_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1617357 ; + schema1:dateModified "2024-03-05T11:04:27" ; + schema1:name "5k2p_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 107283 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2q.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2q.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1325188 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2q_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2q_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1324448 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2q_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107283 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2q_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4333 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2q_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2q_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1609833 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2q_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1609122 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2q_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 104718 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "5k2r.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577604 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "5k2r.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1299052 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "5k2r_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "5k2r_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1298312 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "5k2r_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104718 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "5k2r_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4048 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "5k2r_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2159 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "5k2r_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1560828 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "5k2r_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1560117 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "5k2r_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 106203 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2s.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2s.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1290916 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2s_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2s_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1290176 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2s_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106203 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2s_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4084 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2s_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2164 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2s_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1545573 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2s_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1544862 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "5k2s_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 120525 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5lyt.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578006 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5lyt.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1424328 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5lyt_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5lyt_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1423564 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5lyt_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120525 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5lyt_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4317 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5lyt_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2165 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5lyt_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1793580 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5lyt_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1792869 ; + schema1:dateModified "2024-03-05T11:04:28" ; + schema1:name "5lyt_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5lyz.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577619 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5lyz.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1287256 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5lyz_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5lyz_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1286516 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5lyz_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5lyz_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4051 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5lyz_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2149 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5lyz_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1538700 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5lyz_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1537989 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "5lyz_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 97563 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "5njm.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577607 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "5njm.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1314172 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "5njm_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "5njm_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1313432 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "5njm_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97563 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "5njm_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4147 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "5njm_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2130 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "5njm_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1589178 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "5njm_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1588467 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "5njm_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 107283 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "5yin.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577609 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "5yin.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1226764 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "5yin_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "5yin_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1226024 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "5yin_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107283 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "5yin_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4345 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "5yin_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2155 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "5yin_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1425288 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "5yin_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1424577 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "5yin_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 93198 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6gf0.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 572030 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6gf0.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1350040 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6gf0_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6gf0_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1349300 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6gf0_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 93198 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6gf0_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4386 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6gf0_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2148 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6gf0_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1664598 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6gf0_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1663887 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6gf0_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 97248 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "6h0k.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 572027 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "6h0k.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1350616 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "6h0k_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "6h0k_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1349876 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "6h0k_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97248 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "6h0k_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4394 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "6h0k_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2130 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "6h0k_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1665678 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "6h0k_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1664967 ; + schema1:dateModified "2024-03-05T11:04:29" ; + schema1:name "6h0k_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 98193 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "6h0l.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 572030 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "6h0l.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1391728 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "6h0l_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "6h0l_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1390988 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "6h0l_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98193 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "6h0l_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4631 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "6h0l_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2163 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "6h0l_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1742763 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "6h0l_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1742052 ; + schema1:dateModified "2024-03-05T11:04:33" ; + schema1:name "6h0l_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 101760 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "6lyt.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577625 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "6lyt.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1333336 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "6lyt_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "6lyt_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1332596 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "6lyt_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101760 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "6lyt_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4257 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "6lyt_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2134 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "6lyt_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1625100 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "6lyt_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1624389 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "6lyt_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6lyz.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577624 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6lyz.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1214896 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6lyz_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6lyz_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1214156 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6lyz_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6lyz_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3758 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6lyz_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2146 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6lyz_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1403025 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6lyz_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1402314 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "6lyz_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 96348 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "6s2n.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577607 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "6s2n.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1348012 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "6s2n_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "6s2n_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1347272 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "6s2n_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96348 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "6s2n_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4224 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "6s2n_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2100 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "6s2n_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1652628 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "6s2n_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1651917 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "6s2n_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 102018 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7byo.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577607 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7byo.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1318996 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7byo_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7byo_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1318256 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7byo_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102018 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7byo_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4186 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7byo_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2167 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7byo_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1598223 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7byo_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1597512 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7byo_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 101613 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "7byp.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577607 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "7byp.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1328716 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "7byp_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "7byp_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1327976 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "7byp_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101613 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "7byp_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4173 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "7byp_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2157 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "7byp_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1616448 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "7byp_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1615737 ; + schema1:dateModified "2024-03-05T11:04:26" ; + schema1:name "7byp_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 96888 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "7c09.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "7c09.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1351612 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "7c09_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "7c09_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1350872 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "7c09_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96888 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "7c09_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4181 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "7c09_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2132 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "7c09_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1659378 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "7c09_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1658667 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "7c09_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 100803 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "7d01.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "7d01.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1325332 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "7d01_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "7d01_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1324592 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "7d01_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100803 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "7d01_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4087 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "7d01_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2117 ; + schema1:dateModified "2024-03-05T11:04:32" ; + schema1:name "7d01_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1610103 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "7d01_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1609392 ; + schema1:dateModified "2024-03-05T11:04:31" ; + schema1:name "7d01_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 99723 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7d02.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577604 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7d02.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1331452 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7d02_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7d02_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1330712 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7d02_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99723 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7d02_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4244 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7d02_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2126 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7d02_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1621578 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7d02_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1620867 ; + schema1:dateModified "2024-03-05T11:04:24" ; + schema1:name "7d02_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 97968 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "7d04.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577610 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "7d04.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1326196 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "7d04_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "7d04_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1325456 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "7d04_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97968 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "7d04_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4199 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "7d04_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2119 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "7d04_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1611723 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "7d04_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1611012 ; + schema1:dateModified "2024-03-05T11:04:30" ; + schema1:name "7d04_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 98913 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "7d05.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577613 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "7d05.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1333684 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "7d05_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "7d05_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1332944 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "7d05_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98913 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "7d05_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4289 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "7d05_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2126 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "7d05_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1625763 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "7d05_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1625052 ; + schema1:dateModified "2024-03-05T11:04:34" ; + schema1:name "7d05_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 88320 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "7lyz.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577596 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "7lyz.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1187464 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "7lyz_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "7lyz_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1186724 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "7lyz_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88260 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "7lyz_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4065 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "7lyz_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2167 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "7lyz_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1351590 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "7lyz_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1350879 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "7lyz_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 88260 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "8lyz.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577597 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "8lyz.top" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1343272 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "8lyz_em.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "8lyz_em_energy.edr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1342532 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "8lyz_ions.tpr" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88260 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "8lyz_newbox.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3467 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "8lyz_potential.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2162 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "8lyz_potential.xvg" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1643730 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "8lyz_solv.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1643019 ; + schema1:dateModified "2024-03-05T11:04:25" ; + schema1:name "8lyz_solv_ions.gro" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 231510 ; + schema1:dateModified "2024-03-05T11:04:35" ; + schema1:name "POTENTIAL_RESULTS.png" ; + schema1:sdDatePublished "2024-03-05T11:04:52+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "energy.selection" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "genion.group" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1044 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "ions.mdp" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 971 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "minim.mdp" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 109917 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1aki.pdb" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2998215 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1u3m.pdb" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2918025 ; + schema1:dateModified "2024-01-23T13:30:41" ; + schema1:name "1xyw.pdb" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88246 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1aki.gro" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577586 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1aki.top" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1280044 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1aki_em.tpr" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1aki_em_energy.edr" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1279304 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1aki_ions.tpr" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88246 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1aki_newbox.gro" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2142 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1aki_potential.xvg" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1525186 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1aki_solv.gro" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1524475 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1aki_solv_ions.gro" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 82046 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1u3m.gro" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 537920 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1u3m.top" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1416272 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1u3m_em.tpr" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1u3m_em_energy.edr" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1415196 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1u3m_ions.tpr" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 82046 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1u3m_newbox.gro" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2158 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1u3m_potential.xvg" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1837046 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1u3m_solv.gro" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1836965 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1u3m_solv_ions.gro" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 80112 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1xyw.gro" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 523996 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1xyw.top" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1408872 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1xyw_em.tpr" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1xyw_em_energy.edr" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1407844 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1xyw_ions.tpr" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 80112 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1xyw_newbox.gro" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2099 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1xyw_potential.xvg" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1845237 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1xyw_solv.gro" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1845066 ; + schema1:dateModified "2024-01-24T15:00:13" ; + schema1:name "1xyw_solv_ions.gro" ; + schema1:sdDatePublished "2024-01-24T15:00:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3 ; + schema1:name "energy.selection" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3 ; + schema1:name "genion.group" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1044 ; + schema1:name "ions.mdp" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 971 ; + schema1:name "minim.mdp" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116154 ; + schema1:name "133l.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 114615 ; + schema1:name "134l.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116316 ; + schema1:name "1aki.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 117369 ; + schema1:name "1bhz.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115425 ; + schema1:name "1bvx.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115668 ; + schema1:name "1bwh.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115506 ; + schema1:name "1bwi.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116802 ; + schema1:name "1bwj.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 132678 ; + schema1:name "1c46.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 126279 ; + schema1:name "1ckh.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 121338 ; + schema1:name "1f0w.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119961 ; + schema1:name "1f10.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110322 ; + schema1:name "1fly.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 212058 ; + schema1:name "1gxv.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 213030 ; + schema1:name "1gxx.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123687 ; + schema1:name "1hel.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 121662 ; + schema1:name "1hem.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 121986 ; + schema1:name "1hen.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123282 ; + schema1:name "1heo.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 124254 ; + schema1:name "1hep.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123201 ; + schema1:name "1heq.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 124902 ; + schema1:name "1her.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119880 ; + schema1:name "1hsw.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120042 ; + schema1:name "1hsx.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 118422 ; + schema1:name "1i20.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115506 ; + schema1:name "1ioq.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 114210 ; + schema1:name "1ior.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113400 ; + schema1:name "1ios.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111051 ; + schema1:name "1iot.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 111942 ; + schema1:name "1ir7.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 114858 ; + schema1:name "1ir8.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112428 ; + schema1:name "1ir9.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 178848 ; + schema1:name "1iy3.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 178605 ; + schema1:name "1iy4.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 95661 ; + schema1:name "1ja2.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 95499 ; + schema1:name "1ja4.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 95013 ; + schema1:name "1ja6.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120690 ; + schema1:name "1jis.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 121419 ; + schema1:name "1jit.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119880 ; + schema1:name "1jiy.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 117369 ; + schema1:name "1jj1.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110241 ; + schema1:name "1jpo.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 133974 ; + schema1:name "1jwr.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 111537 ; + schema1:name "1kxw.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113238 ; + schema1:name "1kxx.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115101 ; + schema1:name "1kxy.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119070 ; + schema1:name "1laa.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 118989 ; + schema1:name "1lhh.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120285 ; + schema1:name "1lhi.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123444 ; + schema1:name "1lhj.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119718 ; + schema1:name "1lhk.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119394 ; + schema1:name "1lhl.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115992 ; + schema1:name "1lhm.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 109755 ; + schema1:name "1loz.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 165483 ; + schema1:name "1lsa.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 162162 ; + schema1:name "1lsb.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 157707 ; + schema1:name "1lsc.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 158355 ; + schema1:name "1lsd.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 157059 ; + schema1:name "1lse.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 162567 ; + schema1:name "1lsf.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123120 ; + schema1:name "1lsm.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 127332 ; + schema1:name "1lsn.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 122472 ; + schema1:name "1lsy.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111618 ; + schema1:name "1lyo.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 117612 ; + schema1:name "1lyy.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 145881 ; + schema1:name "1lyz.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 118665 ; + schema1:name "1lz1.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116478 ; + schema1:name "1lza.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115668 ; + schema1:name "1lzd.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 124254 ; + schema1:name "1lzt.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 114129 ; + schema1:name "1rex.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 114372 ; + schema1:name "1rfp.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113724 ; + schema1:name "1tay.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115587 ; + schema1:name "1tby.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 114291 ; + schema1:name "1tcy.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 114615 ; + schema1:name "1tdy.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110889 ; + schema1:name "1uia.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 109998 ; + schema1:name "1uic.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113562 ; + schema1:name "1uid.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111294 ; + schema1:name "1uie.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111051 ; + schema1:name "1uif.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111699 ; + schema1:name "1uig.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 115425 ; + schema1:name "1vdq.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 141507 ; + schema1:name "1vds.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 145314 ; + schema1:name "1vdt.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113967 ; + schema1:name "1ved.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 118908 ; + schema1:name "1xei.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116964 ; + schema1:name "1xej.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120042 ; + schema1:name "1xek.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 94284 ; + schema1:name "2a6u.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116397 ; + schema1:name "2aub.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 125793 ; + schema1:name "2bqg.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 125550 ; + schema1:name "2bqh.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 124740 ; + schema1:name "2bqi.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 129114 ; + schema1:name "2bqk.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 124821 ; + schema1:name "2bqm.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 173745 ; + schema1:name "2c8o.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 173826 ; + schema1:name "2c8p.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112833 ; + schema1:name "2cds.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 118989 ; + schema1:name "2epe.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101412 ; + schema1:name "2hs7.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103113 ; + schema1:name "2hs9.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99711 ; + schema1:name "2hso.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 121824 ; + schema1:name "2lhm.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123525 ; + schema1:name "2lym.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 133488 ; + schema1:name "2lyz.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 122472 ; + schema1:name "2yvb.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116640 ; + schema1:name "2zq4.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 117288 ; + schema1:name "3a3r.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 218214 ; + schema1:name "3exd.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115182 ; + schema1:name "3iju.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115992 ; + schema1:name "3ijv.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 109755 ; + schema1:name "3j4g.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106029 ; + schema1:name "3j6k.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 125469 ; + schema1:name "3lym.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119394 ; + schema1:name "3lyz.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116397 ; + schema1:name "3qy4.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 211977 ; + schema1:name "3wmk.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119475 ; + schema1:name "3wpj.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 122553 ; + schema1:name "3wvx.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 206955 ; + schema1:name "4axt.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 209790 ; + schema1:name "4b0d.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115425 ; + schema1:name "4hv1.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112914 ; + schema1:name "4hv2.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 117693 ; + schema1:name "4i8s.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113967 ; + schema1:name "4ias.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123849 ; + schema1:name "4iat.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 128385 ; + schema1:name "4lym.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110727 ; + schema1:name "4lyo.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 133812 ; + schema1:name "4lyz.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 210762 ; + schema1:name "4nhi.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 208089 ; + schema1:name "4qeq.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 30294 ; + schema1:name "4r0p.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113724 ; + schema1:name "4rlm.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 112347 ; + schema1:name "4rln.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 190107 ; + schema1:name "4wmg.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 186624 ; + schema1:name "5a3e.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 125064 ; + schema1:name "5amy.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:58+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112995 ; + schema1:name "5e4p.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116235 ; + schema1:name "5hnc.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105138 ; + schema1:name "5hnl.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 125388 ; + schema1:name "5i4w.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123282 ; + schema1:name "5k2k.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 218052 ; + schema1:name "5k2n.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 222183 ; + schema1:name "5k2p.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 217890 ; + schema1:name "5k2q.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 121743 ; + schema1:name "5k2r.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 123201 ; + schema1:name "5k2s.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 141345 ; + schema1:name "5lyt.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 133893 ; + schema1:name "5lyz.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 277182 ; + schema1:name "5njm.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 122391 ; + schema1:name "5yin.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112023 ; + schema1:name "6gf0.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111537 ; + schema1:name "6h0k.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112428 ; + schema1:name "6h0l.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 127170 ; + schema1:name "6lyt.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 134379 ; + schema1:name "6lyz.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111456 ; + schema1:name "6s2n.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116883 ; + schema1:name "7byo.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 118098 ; + schema1:name "7byp.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 203391 ; + schema1:name "7c09.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115992 ; + schema1:name "7d01.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 116964 ; + schema1:name "7d02.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 114372 ; + schema1:name "7d04.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 117855 ; + schema1:name "7d05.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103275 ; + schema1:name "7lyz.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120771 ; + schema1:name "8lyz.pdb" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100402 ; + schema1:name "133l.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 593089 ; + schema1:name "133l.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1244624 ; + schema1:name "133l_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "133l_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1243836 ; + schema1:name "133l_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100402 ; + schema1:name "133l_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:name "133l_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1435957 ; + schema1:name "133l_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1435336 ; + schema1:name "133l_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100177 ; + schema1:name "134l.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592325 ; + schema1:name "134l.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1377720 ; + schema1:name "134l_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "134l_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1376884 ; + schema1:name "134l_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100177 ; + schema1:name "134l_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2115 ; + schema1:name "134l_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1686427 ; + schema1:name "134l_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1685896 ; + schema1:name "134l_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98776 ; + schema1:name "1aki.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 577534 ; + schema1:name "1aki.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1388044 ; + schema1:name "1aki_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "1aki_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1387304 ; + schema1:name "1aki_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98776 ; + schema1:name "1aki_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2098 ; + schema1:name "1aki_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1727686 ; + schema1:name "1aki_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1726975 ; + schema1:name "1aki_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88471 ; + schema1:name "1bhz.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578150 ; + schema1:name "1bhz.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1331016 ; + schema1:name "1bhz_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1bhz_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1330276 ; + schema1:name "1bhz_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 88471 ; + schema1:name "1bhz_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2143 ; + schema1:name "1bhz_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1617481 ; + schema1:name "1bhz_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1616680 ; + schema1:name "1bhz_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101081 ; + schema1:name "1bvx.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577547 ; + schema1:name "1bvx.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1315836 ; + schema1:name "1bvx_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1bvx_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1315096 ; + schema1:name "1bvx_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101081 ; + schema1:name "1bvx_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2127 ; + schema1:name "1bvx_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1592291 ; + schema1:name "1bvx_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1591580 ; + schema1:name "1bvx_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 100676 ; + schema1:name "1bwh.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577547 ; + schema1:name "1bwh.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1447164 ; + schema1:name "1bwh_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1bwh_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1446424 ; + schema1:name "1bwh_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100676 ; + schema1:name "1bwh_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2115 ; + schema1:name "1bwh_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1838531 ; + schema1:name "1bwh_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1837820 ; + schema1:name "1bwh_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100676 ; + schema1:name "1bwi.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577550 ; + schema1:name "1bwi.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1300500 ; + schema1:name "1bwi_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "1bwi_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1299760 ; + schema1:name "1bwi_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100676 ; + schema1:name "1bwi_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2097 ; + schema1:name "1bwi_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1563536 ; + schema1:name "1bwi_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1562825 ; + schema1:name "1bwi_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102296 ; + schema1:name "1bwj.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577550 ; + schema1:name "1bwj.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1325988 ; + schema1:name "1bwj_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "1bwj_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1325248 ; + schema1:name "1bwj_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102296 ; + schema1:name "1bwj_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2092 ; + schema1:name "1bwj_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1611326 ; + schema1:name "1bwj_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1610615 ; + schema1:name "1bwj_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123706 ; + schema1:name "1c46.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 596738 ; + schema1:name "1c46.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1619344 ; + schema1:name "1c46_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1c46_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1618604 ; + schema1:name "1c46_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123706 ; + schema1:name "1c46_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2112 ; + schema1:name "1c46_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2133316 ; + schema1:name "1c46_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2132605 ; + schema1:name "1c46_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123221 ; + schema1:name "1ckh.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 595447 ; + schema1:name "1ckh.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1463304 ; + schema1:name "1ckh_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1ckh_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1462564 ; + schema1:name "1ckh_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 123221 ; + schema1:name "1ckh_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2098 ; + schema1:name "1ckh_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1842311 ; + schema1:name "1ckh_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1841600 ; + schema1:name "1ckh_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106741 ; + schema1:name "1f0w.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577537 ; + schema1:name "1f0w.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1386532 ; + schema1:name "1f0w_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "1f0w_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1385792 ; + schema1:name "1f0w_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106741 ; + schema1:name "1f0w_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2093 ; + schema1:name "1f0w_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1724851 ; + schema1:name "1f0w_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1724140 ; + schema1:name "1f0w_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105301 ; + schema1:name "1f10.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577917 ; + schema1:name "1f10.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1330940 ; + schema1:name "1f10_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1f10_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1330224 ; + schema1:name "1f10_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105301 ; + schema1:name "1f10_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2133 ; + schema1:name "1f10_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1619326 ; + schema1:name "1f10_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1618525 ; + schema1:name "1f10_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97291 ; + schema1:name "1fly.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578440 ; + schema1:name "1fly.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1289252 ; + schema1:name "1fly_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1fly_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1288512 ; + schema1:name "1fly_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97291 ; + schema1:name "1fly_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2120 ; + schema1:name "1fly_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1540981 ; + schema1:name "1fly_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1540270 ; + schema1:name "1fly_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88968 ; + schema1:name "1gxv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577913 ; + schema1:name "1gxv.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1243100 ; + schema1:name "1gxv_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1gxv_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1242384 ; + schema1:name "1gxv_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88968 ; + schema1:name "1gxv_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2133 ; + schema1:name "1gxv_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1454628 ; + schema1:name "1gxv_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1453827 ; + schema1:name "1gxv_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88968 ; + schema1:name "1gxx.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577918 ; + schema1:name "1gxx.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1234244 ; + schema1:name "1gxx_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1gxx_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1233528 ; + schema1:name "1gxx_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88968 ; + schema1:name "1gxx_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2116 ; + schema1:name "1gxx_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1438023 ; + schema1:name "1gxx_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1437222 ; + schema1:name "1gxx_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113235 ; + schema1:name "1hel.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 577554 ; + schema1:name "1hel.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1615216 ; + schema1:name "1hel_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1hel_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1614476 ; + schema1:name "1hel_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113235 ; + schema1:name "1hel_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2122 ; + schema1:name "1hel_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2153625 ; + schema1:name "1hel_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2152914 ; + schema1:name "1hel_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113370 ; + schema1:name "1hem.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578453 ; + schema1:name "1hem.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1613872 ; + schema1:name "1hem_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1hem_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1613132 ; + schema1:name "1hem_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 113370 ; + schema1:name "1hem_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2133 ; + schema1:name "1hem_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2149710 ; + schema1:name "1hem_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2148999 ; + schema1:name "1hem_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113235 ; + schema1:name "1hen.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577550 ; + schema1:name "1hen.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1620400 ; + schema1:name "1hen_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1hen_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1619660 ; + schema1:name "1hen_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113235 ; + schema1:name "1hen_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2128 ; + schema1:name "1hen_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2163345 ; + schema1:name "1hen_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2162634 ; + schema1:name "1hen_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 113100 ; + schema1:name "1heo.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576651 ; + schema1:name "1heo.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1614040 ; + schema1:name "1heo_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1heo_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1613300 ; + schema1:name "1heo_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113100 ; + schema1:name "1heo_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2143 ; + schema1:name "1heo_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2152815 ; + schema1:name "1heo_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2152104 ; + schema1:name "1heo_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113100 ; + schema1:name "1hep.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576654 ; + schema1:name "1hep.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1636144 ; + schema1:name "1hep_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1hep_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1635404 ; + schema1:name "1hep_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113100 ; + schema1:name "1hep_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2133 ; + schema1:name "1hep_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2194260 ; + schema1:name "1hep_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2193549 ; + schema1:name "1hep_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113235 ; + schema1:name "1heq.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "1heq.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1649488 ; + schema1:name "1heq_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1heq_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1648748 ; + schema1:name "1heq_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113235 ; + schema1:name "1heq_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2116 ; + schema1:name "1heq_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2217885 ; + schema1:name "1heq_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2217174 ; + schema1:name "1heq_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113100 ; + schema1:name "1her.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576652 ; + schema1:name "1her.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1620304 ; + schema1:name "1her_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1her_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1619564 ; + schema1:name "1her_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113100 ; + schema1:name "1her_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2114 ; + schema1:name "1her_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2164560 ; + schema1:name "1her_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2163849 ; + schema1:name "1her_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106643 ; + schema1:name "1hsw.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577574 ; + schema1:name "1hsw.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1299040 ; + schema1:name "1hsw_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1hsw_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1298300 ; + schema1:name "1hsw_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106643 ; + schema1:name "1hsw_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2126 ; + schema1:name "1hsw_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1560863 ; + schema1:name "1hsw_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1560106 ; + schema1:name "1hsw_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108919 ; + schema1:name "1hsx.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577558 ; + schema1:name "1hsx.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1371328 ; + schema1:name "1hsx_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1hsx_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1370588 ; + schema1:name "1hsx_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108919 ; + schema1:name "1hsx_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2128 ; + schema1:name "1hsx_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1696384 ; + schema1:name "1hsx_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1695646 ; + schema1:name "1hsx_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102153 ; + schema1:name "1i20.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 595476 ; + schema1:name "1i20.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1302028 ; + schema1:name "1i20_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1i20_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1301240 ; + schema1:name "1i20_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102153 ; + schema1:name "1i20_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2099 ; + schema1:name "1i20_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1540038 ; + schema1:name "1i20_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1539417 ; + schema1:name "1i20_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101928 ; + schema1:name "1ioq.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 579407 ; + schema1:name "1ioq.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1355956 ; + schema1:name "1ioq_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1ioq_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1355216 ; + schema1:name "1ioq_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101928 ; + schema1:name "1ioq_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2128 ; + schema1:name "1ioq_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1665498 ; + schema1:name "1ioq_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1664787 ; + schema1:name "1ioq_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100308 ; + schema1:name "1ior.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 579307 ; + schema1:name "1ior.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1307236 ; + schema1:name "1ior_em.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "1ior_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1306520 ; + schema1:name "1ior_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100308 ; + schema1:name "1ior_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2085 ; + schema1:name "1ior_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1573158 ; + schema1:name "1ior_solv.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1572357 ; + schema1:name "1ior_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99048 ; + schema1:name "1ios.gro" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578933 ; + schema1:name "1ios.top" ; + schema1:sdDatePublished "2022-07-20T08:59:59+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1296156 ; + schema1:name "1ios_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1ios_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1295416 ; + schema1:name "1ios_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99048 ; + schema1:name "1ios_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2121 ; + schema1:name "1ios_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1553673 ; + schema1:name "1ios_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1552962 ; + schema1:name "1ios_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98103 ; + schema1:name "1iot.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 578836 ; + schema1:name "1iot.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1349028 ; + schema1:name "1iot_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1iot_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1348312 ; + schema1:name "1iot_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98103 ; + schema1:name "1iot_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2112 ; + schema1:name "1iot_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1651818 ; + schema1:name "1iot_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1651017 ; + schema1:name "1iot_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96661 ; + schema1:name "1ir7.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576573 ; + schema1:name "1ir7.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1293636 ; + schema1:name "1ir7_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1ir7_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1292896 ; + schema1:name "1ir7_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 96661 ; + schema1:name "1ir7_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2113 ; + schema1:name "1ir7_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1552096 ; + schema1:name "1ir7_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1551385 ; + schema1:name "1ir7_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101521 ; + schema1:name "1ir8.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576573 ; + schema1:name "1ir8.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1305300 ; + schema1:name "1ir8_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1ir8_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1304560 ; + schema1:name "1ir8_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101521 ; + schema1:name "1ir8_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2121 ; + schema1:name "1ir8_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1573966 ; + schema1:name "1ir8_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1573255 ; + schema1:name "1ir8_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 97516 ; + schema1:name "1ir9.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576953 ; + schema1:name "1ir9.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1295356 ; + schema1:name "1ir9_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1ir9_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1294640 ; + schema1:name "1ir9_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97516 ; + schema1:name "1ir9_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:name "1ir9_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1554031 ; + schema1:name "1ir9_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1553230 ; + schema1:name "1ir9_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 90991 ; + schema1:name "1iy3.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594780 ; + schema1:name "1iy3.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1236936 ; + schema1:name "1iy3_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1iy3_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1236196 ; + schema1:name "1iy3_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 90991 ; + schema1:name "1iy3_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2121 ; + schema1:name "1iy3_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1418851 ; + schema1:name "1iy3_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1418140 ; + schema1:name "1iy3_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 90991 ; + schema1:name "1iy4.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594784 ; + schema1:name "1iy4.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1309944 ; + schema1:name "1iy4_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1iy4_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1309204 ; + schema1:name "1iy4_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 90991 ; + schema1:name "1iy4_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2122 ; + schema1:name "1iy4_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1555741 ; + schema1:name "1iy4_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1555030 ; + schema1:name "1iy4_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88258 ; + schema1:name "1ja2.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577530 ; + schema1:name "1ja2.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1287376 ; + schema1:name "1ja2_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1ja2_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1286636 ; + schema1:name "1ja2_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88258 ; + schema1:name "1ja2_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2121 ; + schema1:name "1ja2_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1538968 ; + schema1:name "1ja2_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1538236 ; + schema1:name "1ja2_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88258 ; + schema1:name "1ja4.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577524 ; + schema1:name "1ja4.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1294144 ; + schema1:name "1ja4_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1ja4_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1293404 ; + schema1:name "1ja4_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88258 ; + schema1:name "1ja4_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2116 ; + schema1:name "1ja4_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1551658 ; + schema1:name "1ja4_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1550926 ; + schema1:name "1ja4_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88258 ; + schema1:name "1ja6.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577530 ; + schema1:name "1ja6.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1270384 ; + schema1:name "1ja6_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1ja6_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1269644 ; + schema1:name "1ja6_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88258 ; + schema1:name "1ja6_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2123 ; + schema1:name "1ja6_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1507108 ; + schema1:name "1ja6_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1506376 ; + schema1:name "1ja6_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107461 ; + schema1:name "1jis.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577914 ; + schema1:name "1jis.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1390844 ; + schema1:name "1jis_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1jis_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1390128 ; + schema1:name "1jis_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107461 ; + schema1:name "1jis_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2127 ; + schema1:name "1jis_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1731646 ; + schema1:name "1jis_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1730845 ; + schema1:name "1jis_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108541 ; + schema1:name "1jit.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577914 ; + schema1:name "1jit.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1332740 ; + schema1:name "1jit_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1jit_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1332024 ; + schema1:name "1jit_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108541 ; + schema1:name "1jit_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2133 ; + schema1:name "1jit_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1622701 ; + schema1:name "1jit_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1621900 ; + schema1:name "1jit_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108631 ; + schema1:name "1jiy.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577540 ; + schema1:name "1jiy.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1413028 ; + schema1:name "1jiy_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "1jiy_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1412288 ; + schema1:name "1jiy_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108631 ; + schema1:name "1jiy_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2104 ; + schema1:name "1jiy_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1774531 ; + schema1:name "1jiy_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1773820 ; + schema1:name "1jiy_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106336 ; + schema1:name "1jj1.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577537 ; + schema1:name "1jj1.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1448524 ; + schema1:name "1jj1_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1jj1_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1447784 ; + schema1:name "1jj1_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106336 ; + schema1:name "1jj1_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2121 ; + schema1:name "1jj1_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1841086 ; + schema1:name "1jj1_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1840375 ; + schema1:name "1jj1_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97021 ; + schema1:name "1jpo.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 577537 ; + schema1:name "1jpo.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1278316 ; + schema1:name "1jpo_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1jpo_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1277576 ; + schema1:name "1jpo_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97021 ; + schema1:name "1jpo_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2115 ; + schema1:name "1jpo_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1521946 ; + schema1:name "1jpo_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1521235 ; + schema1:name "1jpo_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 132436 ; + schema1:name "1jwr.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594803 ; + schema1:name "1jwr.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1414416 ; + schema1:name "1jwr_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1jwr_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1413676 ; + schema1:name "1jwr_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 132436 ; + schema1:name "1jwr_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2097 ; + schema1:name "1jwr_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1751626 ; + schema1:name "1jwr_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1750915 ; + schema1:name "1jwr_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96121 ; + schema1:name "1kxw.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576993 ; + schema1:name "1kxw.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1355304 ; + schema1:name "1kxw_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1kxw_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1354516 ; + schema1:name "1kxw_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96121 ; + schema1:name "1kxw_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2114 ; + schema1:name "1kxw_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1666981 ; + schema1:name "1kxw_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1666360 ; + schema1:name "1kxw_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 96346 ; + schema1:name "1kxx.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577537 ; + schema1:name "1kxx.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1269820 ; + schema1:name "1kxx_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1kxx_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1269080 ; + schema1:name "1kxx_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96346 ; + schema1:name "1kxx_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2126 ; + schema1:name "1kxx_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1506016 ; + schema1:name "1kxx_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1505305 ; + schema1:name "1kxx_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99406 ; + schema1:name "1kxy.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578088 ; + schema1:name "1kxy.top" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1286792 ; + schema1:name "1kxy_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1kxy_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1286100 ; + schema1:name "1kxy_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99406 ; + schema1:name "1kxy_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2116 ; + schema1:name "1kxy_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1537156 ; + schema1:name "1kxy_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1536355 ; + schema1:name "1kxy_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106252 ; + schema1:name "1laa.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 595712 ; + schema1:name "1laa.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1330060 ; + schema1:name "1laa_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1laa_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1329320 ; + schema1:name "1laa_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106252 ; + schema1:name "1laa_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2128 ; + schema1:name "1laa_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1592062 ; + schema1:name "1laa_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1591351 ; + schema1:name "1laa_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104542 ; + schema1:name "1lhh.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594748 ; + schema1:name "1lhh.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1291572 ; + schema1:name "1lhh_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lhh_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1290832 ; + schema1:name "1lhh_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104542 ; + schema1:name "1lhh_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2133 ; + schema1:name "1lhh_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1521367 ; + schema1:name "1lhh_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1520656 ; + schema1:name "1lhh_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107287 ; + schema1:name "1lhi.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592076 ; + schema1:name "1lhi.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1386532 ; + schema1:name "1lhi_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lhi_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1385792 ; + schema1:name "1lhi_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107287 ; + schema1:name "1lhi_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:name "1lhi_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1703527 ; + schema1:name "1lhi_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1702816 ; + schema1:name "1lhi_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107962 ; + schema1:name "1lhj.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592079 ; + schema1:name "1lhj.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1258732 ; + schema1:name "1lhj_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lhj_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1257992 ; + schema1:name "1lhj_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107962 ; + schema1:name "1lhj_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2126 ; + schema1:name "1lhj_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1463902 ; + schema1:name "1lhj_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1463191 ; + schema1:name "1lhj_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106612 ; + schema1:name "1lhk.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 596021 ; + schema1:name "1lhk.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1300688 ; + schema1:name "1lhk_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lhk_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1299996 ; + schema1:name "1lhk_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106612 ; + schema1:name "1lhk_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2126 ; + schema1:name "1lhk_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1536532 ; + schema1:name "1lhk_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1535731 ; + schema1:name "1lhk_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106702 ; + schema1:name "1lhl.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 596642 ; + schema1:name "1lhl.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1296228 ; + schema1:name "1lhl_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lhl_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1295488 ; + schema1:name "1lhl_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106702 ; + schema1:name "1lhl_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2135 ; + schema1:name "1lhl_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1527307 ; + schema1:name "1lhl_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1526596 ; + schema1:name "1lhl_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104632 ; + schema1:name "1lhm.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594293 ; + schema1:name "1lhm.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1270792 ; + schema1:name "1lhm_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lhm_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1270052 ; + schema1:name "1lhm_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104632 ; + schema1:name "1lhm_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2120 ; + schema1:name "1lhm_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1482982 ; + schema1:name "1lhm_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1482271 ; + schema1:name "1lhm_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96841 ; + schema1:name "1loz.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 593659 ; + schema1:name "1loz.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1316392 ; + schema1:name "1loz_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1loz_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1315628 ; + schema1:name "1loz_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96841 ; + schema1:name "1loz_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2097 ; + schema1:name "1loz_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1568071 ; + schema1:name "1loz_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1567360 ; + schema1:name "1loz_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105405 ; + schema1:name "1lsa.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 577554 ; + schema1:name "1lsa.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1447456 ; + schema1:name "1lsa_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lsa_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1446716 ; + schema1:name "1lsa_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105405 ; + schema1:name "1lsa_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2113 ; + schema1:name "1lsa_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1839075 ; + schema1:name "1lsa_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1838364 ; + schema1:name "1lsa_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104730 ; + schema1:name "1lsb.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "1lsb.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1392808 ; + schema1:name "1lsb_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lsb_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1392068 ; + schema1:name "1lsb_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 104730 ; + schema1:name "1lsb_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2121 ; + schema1:name "1lsb_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1736610 ; + schema1:name "1lsb_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1735899 ; + schema1:name "1lsb_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102435 ; + schema1:name "1lsc.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "1lsc.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1353784 ; + schema1:name "1lsc_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lsc_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1353044 ; + schema1:name "1lsc_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102435 ; + schema1:name "1lsc_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2120 ; + schema1:name "1lsc_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1663440 ; + schema1:name "1lsc_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1662729 ; + schema1:name "1lsc_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 101220 ; + schema1:name "1lsd.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577554 ; + schema1:name "1lsd.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1919056 ; + schema1:name "1lsd_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lsd_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1918316 ; + schema1:name "1lsd_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101220 ; + schema1:name "1lsd_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2126 ; + schema1:name "1lsd_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2723325 ; + schema1:name "1lsd_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2722614 ; + schema1:name "1lsd_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101625 ; + schema1:name "1lse.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577554 ; + schema1:name "1lse.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1584688 ; + schema1:name "1lse_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lse_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1583948 ; + schema1:name "1lse_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101625 ; + schema1:name "1lse_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2127 ; + schema1:name "1lse_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2096385 ; + schema1:name "1lse_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2095674 ; + schema1:name "1lse_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104325 ; + schema1:name "1lsf.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577548 ; + schema1:name "1lsf.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1400224 ; + schema1:name "1lsf_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lsf_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1399484 ; + schema1:name "1lsf_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104325 ; + schema1:name "1lsf_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2120 ; + schema1:name "1lsf_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1750515 ; + schema1:name "1lsf_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1749804 ; + schema1:name "1lsf_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111300 ; + schema1:name "1lsm.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578100 ; + schema1:name "1lsm.top" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1660268 ; + schema1:name "1lsm_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lsm_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1659576 ; + schema1:name "1lsm_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111300 ; + schema1:name "1lsm_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2114 ; + schema1:name "1lsm_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:01+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2237145 ; + schema1:name "1lsm_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2236344 ; + schema1:name "1lsm_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112200 ; + schema1:name "1lsn.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577738 ; + schema1:name "1lsn.top" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1581756 ; + schema1:name "1lsn_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lsn_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1581064 ; + schema1:name "1lsn_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112200 ; + schema1:name "1lsn_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2121 ; + schema1:name "1lsn_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2090625 ; + schema1:name "1lsn_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2089824 ; + schema1:name "1lsn_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107025 ; + schema1:name "1lsy.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577679 ; + schema1:name "1lsy.top" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1369716 ; + schema1:name "1lsy_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lsy_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1369048 ; + schema1:name "1lsy_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107025 ; + schema1:name "1lsy_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2113 ; + schema1:name "1lsy_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1692465 ; + schema1:name "1lsy_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1691574 ; + schema1:name "1lsy_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98101 ; + schema1:name "1lyo.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "1lyo.top" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1227412 ; + schema1:name "1lyo_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lyo_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1226672 ; + schema1:name "1lyo_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98101 ; + schema1:name "1lyo_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2135 ; + schema1:name "1lyo_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1426501 ; + schema1:name "1lyo_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1425790 ; + schema1:name "1lyo_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105946 ; + schema1:name "1lyy.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 597044 ; + schema1:name "1lyy.top" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1415876 ; + schema1:name "1lyy_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lyy_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1415184 ; + schema1:name "1lyy_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105886 ; + schema1:name "1lyy_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2125 ; + schema1:name "1lyy_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1751401 ; + schema1:name "1lyy_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1750600 ; + schema1:name "1lyy_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102165 ; + schema1:name "1lyz.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577938 ; + schema1:name "1lyz.top" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1288896 ; + schema1:name "1lyz_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lyz_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1288132 ; + schema1:name "1lyz_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102165 ; + schema1:name "1lyz_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2103 ; + schema1:name "1lyz_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1539645 ; + schema1:name "1lyz_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1538934 ; + schema1:name "1lyz_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 90997 ; + schema1:name "1lz1.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594789 ; + schema1:name "1lz1.top" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1240180 ; + schema1:name "1lz1_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lz1_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1239440 ; + schema1:name "1lz1_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 90997 ; + schema1:name "1lz1_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2126 ; + schema1:name "1lz1_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1424932 ; + schema1:name "1lz1_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1424221 ; + schema1:name "1lz1_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102300 ; + schema1:name "1lza.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "1lza.top" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1292224 ; + schema1:name "1lza_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lza_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1291484 ; + schema1:name "1lza_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102300 ; + schema1:name "1lza_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2128 ; + schema1:name "1lza_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1548015 ; + schema1:name "1lza_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1547304 ; + schema1:name "1lza_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98790 ; + schema1:name "1lzd.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576329 ; + schema1:name "1lzd.top" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1262000 ; + schema1:name "1lzd_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1lzd_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1261260 ; + schema1:name "1lzd_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 98790 ; + schema1:name "1lzd_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2133 ; + schema1:name "1lzd_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1493205 ; + schema1:name "1lzd_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1492494 ; + schema1:name "1lzd_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 118020 ; + schema1:name "1lzt.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577554 ; + schema1:name "1lzt.top" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1383664 ; + schema1:name "1lzt_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 940 ; + schema1:name "1lzt_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1382924 ; + schema1:name "1lzt_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 117960 ; + schema1:name "1lzt_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 947 ; + schema1:name "1lzt_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1719465 ; + schema1:name "1lzt_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1718754 ; + schema1:name "1lzt_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 105301 ; + schema1:name "1rex.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594803 ; + schema1:name "1rex.top" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1268112 ; + schema1:name "1rex_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1rex_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1267372 ; + schema1:name "1rex_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105301 ; + schema1:name "1rex_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2122 ; + schema1:name "1rex_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1477306 ; + schema1:name "1rex_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1476595 ; + schema1:name "1rex_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101476 ; + schema1:name "1rfp.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577540 ; + schema1:name "1rfp.top" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1304092 ; + schema1:name "1rfp_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1rfp_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1303352 ; + schema1:name "1rfp_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101476 ; + schema1:name "1rfp_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2122 ; + schema1:name "1rfp_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1570276 ; + schema1:name "1rfp_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1569565 ; + schema1:name "1rfp_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100492 ; + schema1:name "1tay.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 591365 ; + schema1:name "1tay.top" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1333676 ; + schema1:name "1tay_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "1tay_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1332936 ; + schema1:name "1tay_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100492 ; + schema1:name "1tay_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2106 ; + schema1:name "1tay_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1605067 ; + schema1:name "1tay_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1604356 ; + schema1:name "1tay_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101302 ; + schema1:name "1tby.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594109 ; + schema1:name "1tby.top" ; + schema1:sdDatePublished "2022-07-20T09:00:02+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1328348 ; + schema1:name "1tby_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "1tby_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1327608 ; + schema1:name "1tby_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101302 ; + schema1:name "1tby_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2099 ; + schema1:name "1tby_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1590892 ; + schema1:name "1tby_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1590181 ; + schema1:name "1tby_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101077 ; + schema1:name "1tcy.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594586 ; + schema1:name "1tcy.top" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1299300 ; + schema1:name "1tcy_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1tcy_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1298560 ; + schema1:name "1tcy_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101077 ; + schema1:name "1tcy_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2128 ; + schema1:name "1tcy_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1536127 ; + schema1:name "1tcy_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1535416 ; + schema1:name "1tcy_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101122 ; + schema1:name "1tdy.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 596034 ; + schema1:name "1tdy.top" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1329372 ; + schema1:name "1tdy_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1tdy_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1328632 ; + schema1:name "1tdy_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101122 ; + schema1:name "1tdy_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2143 ; + schema1:name "1tdy_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1590307 ; + schema1:name "1tdy_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1589596 ; + schema1:name "1tdy_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96391 ; + schema1:name "1uia.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 565479 ; + schema1:name "1uia.top" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1336384 ; + schema1:name "1uia_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1uia_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1335596 ; + schema1:name "1uia_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96391 ; + schema1:name "1uia_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2121 ; + schema1:name "1uia_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1649296 ; + schema1:name "1uia_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1648675 ; + schema1:name "1uia_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96706 ; + schema1:name "1uic.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 575198 ; + schema1:name "1uic.top" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1285608 ; + schema1:name "1uic_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1uic_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1284868 ; + schema1:name "1uic_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96706 ; + schema1:name "1uic_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2128 ; + schema1:name "1uic_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1540261 ; + schema1:name "1uic_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1539550 ; + schema1:name "1uic_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97831 ; + schema1:name "1uid.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578422 ; + schema1:name "1uid.top" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1309984 ; + schema1:name "1uid_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1uid_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1309244 ; + schema1:name "1uid_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97831 ; + schema1:name "1uid_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2137 ; + schema1:name "1uid_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1581481 ; + schema1:name "1uid_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1580770 ; + schema1:name "1uid_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96841 ; + schema1:name "1uie.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 574301 ; + schema1:name "1uie.top" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1293608 ; + schema1:name "1uie_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1uie_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1292868 ; + schema1:name "1uie_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96841 ; + schema1:name "1uie_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2127 ; + schema1:name "1uie_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1556731 ; + schema1:name "1uie_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1556020 ; + schema1:name "1uie_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96706 ; + schema1:name "1uif.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 577092 ; + schema1:name "1uif.top" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1294008 ; + schema1:name "1uif_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1uif_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1293268 ; + schema1:name "1uif_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96706 ; + schema1:name "1uif_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2121 ; + schema1:name "1uif_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1553221 ; + schema1:name "1uif_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1552510 ; + schema1:name "1uif_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97291 ; + schema1:name "1uig.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577537 ; + schema1:name "1uig.top" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1298476 ; + schema1:name "1uig_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1uig_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1297736 ; + schema1:name "1uig_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 97291 ; + schema1:name "1uig_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2116 ; + schema1:name "1uig_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1559746 ; + schema1:name "1uig_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1559035 ; + schema1:name "1uig_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98250 ; + schema1:name "1vdq.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "1vdq.top" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1358804 ; + schema1:name "1vdq_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1vdq_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1358064 ; + schema1:name "1vdq_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98250 ; + schema1:name "1vdq_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2125 ; + schema1:name "1vdq_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1672890 ; + schema1:name "1vdq_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1672158 ; + schema1:name "1vdq_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 104055 ; + schema1:name "1vds.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "1vds.top" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1316828 ; + schema1:name "1vds_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1vds_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1316088 ; + schema1:name "1vds_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104055 ; + schema1:name "1vds_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2098 ; + schema1:name "1vds_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1594185 ; + schema1:name "1vds_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1593453 ; + schema1:name "1vds_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103785 ; + schema1:name "1vdt.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "1vdt.top" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1330076 ; + schema1:name "1vdt_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:03+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "1vdt_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1329336 ; + schema1:name "1vdt_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103785 ; + schema1:name "1vdt_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2093 ; + schema1:name "1vdt_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1619025 ; + schema1:name "1vdt_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1618293 ; + schema1:name "1vdt_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98790 ; + schema1:name "1ved.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "1ved.top" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1335260 ; + schema1:name "1ved_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1ved_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1334520 ; + schema1:name "1ved_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98790 ; + schema1:name "1ved_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2123 ; + schema1:name "1ved_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1628745 ; + schema1:name "1ved_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1628013 ; + schema1:name "1ved_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102508 ; + schema1:name "1xei.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577564 ; + schema1:name "1xei.top" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1448080 ; + schema1:name "1xei_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1xei_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1447340 ; + schema1:name "1xei_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102448 ; + schema1:name "1xei_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2130 ; + schema1:name "1xei_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1840303 ; + schema1:name "1xei_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1839556 ; + schema1:name "1xei_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98863 ; + schema1:name "1xej.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577567 ; + schema1:name "1xej.top" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1393504 ; + schema1:name "1xej_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1xej_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1392764 ; + schema1:name "1xej_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98803 ; + schema1:name "1xej_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2128 ; + schema1:name "1xej_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1737973 ; + schema1:name "1xej_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1737226 ; + schema1:name "1xej_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 94543 ; + schema1:name "1xek.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577564 ; + schema1:name "1xek.top" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1390264 ; + schema1:name "1xek_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "1xek_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1389524 ; + schema1:name "1xek_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 94483 ; + schema1:name "1xek_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2122 ; + schema1:name "1xek_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1731898 ; + schema1:name "1xek_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1731151 ; + schema1:name "1xek_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88248 ; + schema1:name "2a6u.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577517 ; + schema1:name "2a6u.top" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1307908 ; + schema1:name "2a6u_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "2a6u_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1307168 ; + schema1:name "2a6u_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88248 ; + schema1:name "2a6u_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2087 ; + schema1:name "2a6u_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1577433 ; + schema1:name "2a6u_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1576722 ; + schema1:name "2a6u_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 95820 ; + schema1:name "2aub.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "2aub.top" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1351460 ; + schema1:name "2aub_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2aub_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1350720 ; + schema1:name "2aub_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 95820 ; + schema1:name "2aub_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2122 ; + schema1:name "2aub_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1659120 ; + schema1:name "2aub_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1658388 ; + schema1:name "2aub_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115291 ; + schema1:name "2bqg.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592390 ; + schema1:name "2bqg.top" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1458228 ; + schema1:name "2bqg_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2bqg_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1457488 ; + schema1:name "2bqg_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115291 ; + schema1:name "2bqg_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2116 ; + schema1:name "2bqg_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1837216 ; + schema1:name "2bqg_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1836505 ; + schema1:name "2bqg_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112456 ; + schema1:name "2bqh.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592393 ; + schema1:name "2bqh.top" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2198604 ; + schema1:name "2bqh_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2bqh_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2197864 ; + schema1:name "2bqh_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 112456 ; + schema1:name "2bqh_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2150 ; + schema1:name "2bqh_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3225421 ; + schema1:name "2bqh_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3224710 ; + schema1:name "2bqh_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110701 ; + schema1:name "2bqi.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 592390 ; + schema1:name "2bqi.top" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1369092 ; + schema1:name "2bqi_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2bqi_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1368352 ; + schema1:name "2bqi_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110701 ; + schema1:name "2bqi_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2127 ; + schema1:name "2bqi_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1670086 ; + schema1:name "2bqi_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1669375 ; + schema1:name "2bqi_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120151 ; + schema1:name "2bqk.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592389 ; + schema1:name "2bqk.top" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2200836 ; + schema1:name "2bqk_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:04+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2bqk_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2200096 ; + schema1:name "2bqk_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 120151 ; + schema1:name "2bqk_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2170 ; + schema1:name "2bqk_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3229606 ; + schema1:name "2bqk_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 3228895 ; + schema1:name "2bqk_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113536 ; + schema1:name "2bqm.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 592390 ; + schema1:name "2bqm.top" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1459236 ; + schema1:name "2bqm_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2bqm_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1458496 ; + schema1:name "2bqm_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 113536 ; + schema1:name "2bqm_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2098 ; + schema1:name "2bqm_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1839106 ; + schema1:name "2bqm_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1838395 ; + schema1:name "2bqm_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 106216 ; + schema1:name "2c8o.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577555 ; + schema1:name "2c8o.top" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1316324 ; + schema1:name "2c8o_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "2c8o_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1315584 ; + schema1:name "2c8o_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106216 ; + schema1:name "2c8o_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2100 ; + schema1:name "2c8o_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1593241 ; + schema1:name "2c8o_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1592508 ; + schema1:name "2c8o_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106351 ; + schema1:name "2c8p.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577552 ; + schema1:name "2c8p.top" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1294076 ; + schema1:name "2c8p_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2c8p_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1293336 ; + schema1:name "2c8p_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106351 ; + schema1:name "2c8p_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2125 ; + schema1:name "2c8p_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1551526 ; + schema1:name "2c8p_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1550793 ; + schema1:name "2c8p_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97586 ; + schema1:name "2cds.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577562 ; + schema1:name "2cds.top" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1313332 ; + schema1:name "2cds_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2cds_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1312592 ; + schema1:name "2cds_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97586 ; + schema1:name "2cds_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2127 ; + schema1:name "2cds_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1587581 ; + schema1:name "2cds_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1586870 ; + schema1:name "2cds_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 94773 ; + schema1:name "2epe.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577916 ; + schema1:name "2epe.top" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1292780 ; + schema1:name "2epe_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "2epe_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1292064 ; + schema1:name "2epe_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 94773 ; + schema1:name "2epe_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2106 ; + schema1:name "2epe_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1547778 ; + schema1:name "2epe_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1546977 ; + schema1:name "2epe_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101355 ; + schema1:name "2hs7.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577554 ; + schema1:name "2hs7.top" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1382996 ; + schema1:name "2hs7_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "2hs7_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1382256 ; + schema1:name "2hs7_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101355 ; + schema1:name "2hs7_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2069 ; + schema1:name "2hs7_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1718250 ; + schema1:name "2hs7_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1717518 ; + schema1:name "2hs7_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100815 ; + schema1:name "2hs9.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577548 ; + schema1:name "2hs9.top" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1399916 ; + schema1:name "2hs9_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2hs9_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1399176 ; + schema1:name "2hs9_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100815 ; + schema1:name "2hs9_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2128 ; + schema1:name "2hs9_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1749975 ; + schema1:name "2hs9_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1749243 ; + schema1:name "2hs9_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100950 ; + schema1:name "2hso.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "2hso.top" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1383572 ; + schema1:name "2hso_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2hso_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1382832 ; + schema1:name "2hso_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100950 ; + schema1:name "2hso_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2107 ; + schema1:name "2hso_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1719330 ; + schema1:name "2hso_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1718598 ; + schema1:name "2hso_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110032 ; + schema1:name "2lhm.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 594032 ; + schema1:name "2lhm.top" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1241404 ; + schema1:name "2lhm_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2lhm_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1240568 ; + schema1:name "2lhm_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110032 ; + schema1:name "2lhm_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2115 ; + schema1:name "2lhm_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1428442 ; + schema1:name "2lhm_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1427911 ; + schema1:name "2lhm_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108645 ; + schema1:name "2lym.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577554 ; + schema1:name "2lym.top" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1448896 ; + schema1:name "2lym_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2lym_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1448156 ; + schema1:name "2lym_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108645 ; + schema1:name "2lym_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2133 ; + schema1:name "2lym_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1841775 ; + schema1:name "2lym_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1841064 ; + schema1:name "2lym_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:name "2lyz.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "2lyz.top" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1342048 ; + schema1:name "2lyz_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2lyz_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1341308 ; + schema1:name "2lyz_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:name "2lyz_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2124 ; + schema1:name "2lyz_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1641435 ; + schema1:name "2lyz_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1640724 ; + schema1:name "2lyz_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99183 ; + schema1:name "2yvb.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "2yvb.top" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1296676 ; + schema1:name "2yvb_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2yvb_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1295936 ; + schema1:name "2yvb_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99183 ; + schema1:name "2yvb_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2122 ; + schema1:name "2yvb_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1556373 ; + schema1:name "2yvb_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1555662 ; + schema1:name "2yvb_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98913 ; + schema1:name "2zq4.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "2zq4.top" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1380268 ; + schema1:name "2zq4_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "2zq4_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1379528 ; + schema1:name "2zq4_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 98913 ; + schema1:name "2zq4_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2098 ; + schema1:name "2zq4_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1713108 ; + schema1:name "2zq4_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1712397 ; + schema1:name "2zq4_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100983 ; + schema1:name "3a3r.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 576995 ; + schema1:name "3a3r.top" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1370784 ; + schema1:name "3a3r_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "3a3r_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1369996 ; + schema1:name "3a3r_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100983 ; + schema1:name "3a3r_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2128 ; + schema1:name "3a3r_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1696008 ; + schema1:name "3a3r_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1695387 ; + schema1:name "3a3r_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 119703 ; + schema1:name "3exd.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "3exd.top" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1351252 ; + schema1:name "3exd_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "3exd_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1350512 ; + schema1:name "3exd_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 119703 ; + schema1:name "3exd_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2107 ; + schema1:name "3exd_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1658703 ; + schema1:name "3exd_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1657992 ; + schema1:name "3exd_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99183 ; + schema1:name "3iju.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "3iju.top" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1341172 ; + schema1:name "3iju_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "3iju_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1340432 ; + schema1:name "3iju_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99183 ; + schema1:name "3iju_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2114 ; + schema1:name "3iju_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1639803 ; + schema1:name "3iju_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1639092 ; + schema1:name "3iju_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99183 ; + schema1:name "3ijv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "3ijv.top" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1292428 ; + schema1:name "3ijv_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "3ijv_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1291688 ; + schema1:name "3ijv_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99183 ; + schema1:name "3ijv_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2133 ; + schema1:name "3ijv_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1548408 ; + schema1:name "3ijv_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1547697 ; + schema1:name "3ijv_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 93198 ; + schema1:name "3j4g.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:06+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 578055 ; + schema1:name "3j4g.top" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1307368 ; + schema1:name "3j4g_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "3j4g_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1306628 ; + schema1:name "3j4g_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 93198 ; + schema1:name "3j4g_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2130 ; + schema1:name "3j4g_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1575768 ; + schema1:name "3j4g_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1575057 ; + schema1:name "3j4g_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 91353 ; + schema1:name "3j6k.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577536 ; + schema1:name "3j6k.top" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1329292 ; + schema1:name "3j6k_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "3j6k_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1328552 ; + schema1:name "3j6k_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 91353 ; + schema1:name "3j6k_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2113 ; + schema1:name "3j6k_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1617528 ; + schema1:name "3j6k_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1616817 ; + schema1:name "3j6k_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110265 ; + schema1:name "3lym.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577554 ; + schema1:name "3lym.top" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1445368 ; + schema1:name "3lym_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "3lym_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1444628 ; + schema1:name "3lym_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110265 ; + schema1:name "3lym_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2127 ; + schema1:name "3lym_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1835160 ; + schema1:name "3lym_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1834449 ; + schema1:name "3lym_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:name "3lyz.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "3lyz.top" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1324336 ; + schema1:name "3lyz_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "3lyz_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1323596 ; + schema1:name "3lyz_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:name "3lyz_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2119 ; + schema1:name "3lyz_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1608225 ; + schema1:name "3lyz_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1607514 ; + schema1:name "3lyz_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99813 ; + schema1:name "3qy4.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 571095 ; + schema1:name "3qy4.top" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1300228 ; + schema1:name "3qy4_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "3qy4_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1299440 ; + schema1:name "3qy4_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99813 ; + schema1:name "3qy4_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2097 ; + schema1:name "3qy4_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1572663 ; + schema1:name "3qy4_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1572042 ; + schema1:name "3qy4_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103098 ; + schema1:name "3wmk.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "3wmk.top" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1342756 ; + schema1:name "3wmk_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "3wmk_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1342016 ; + schema1:name "3wmk_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103098 ; + schema1:name "3wmk_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2098 ; + schema1:name "3wmk_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1642773 ; + schema1:name "3wmk_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1642062 ; + schema1:name "3wmk_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97428 ; + schema1:name "3wpj.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "3wpj.top" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1357732 ; + schema1:name "3wpj_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "3wpj_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1356992 ; + schema1:name "3wpj_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97428 ; + schema1:name "3wpj_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2116 ; + schema1:name "3wpj_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1670853 ; + schema1:name "3wpj_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1670142 ; + schema1:name "3wpj_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104223 ; + schema1:name "3wvx.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 576872 ; + schema1:name "3wvx.top" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1287888 ; + schema1:name "3wvx_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "3wvx_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1287196 ; + schema1:name "3wvx_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104223 ; + schema1:name "3wvx_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2125 ; + schema1:name "3wvx_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1540758 ; + schema1:name "3wvx_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1539957 ; + schema1:name "3wvx_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 109308 ; + schema1:name "4axt.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:07+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "4axt.top" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1334044 ; + schema1:name "4axt_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4axt_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1333304 ; + schema1:name "4axt_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 109308 ; + schema1:name "4axt_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:name "4axt_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1626438 ; + schema1:name "4axt_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1625727 ; + schema1:name "4axt_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110793 ; + schema1:name "4b0d.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "4b0d.top" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1355428 ; + schema1:name "4b0d_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4b0d_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1354688 ; + schema1:name "4b0d_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 110793 ; + schema1:name "4b0d_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2122 ; + schema1:name "4b0d_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1666533 ; + schema1:name "4b0d_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1665822 ; + schema1:name "4b0d_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 98778 ; + schema1:name "4hv1.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "4hv1.top" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1339660 ; + schema1:name "4hv1_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4hv1_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1338920 ; + schema1:name "4hv1_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98778 ; + schema1:name "4hv1_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2137 ; + schema1:name "4hv1_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1636968 ; + schema1:name "4hv1_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1636257 ; + schema1:name "4hv1_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 94593 ; + schema1:name "4hv2.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "4hv2.top" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1346284 ; + schema1:name "4hv2_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4hv2_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1345544 ; + schema1:name "4hv2_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 94593 ; + schema1:name "4hv2_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2122 ; + schema1:name "4hv2_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1649388 ; + schema1:name "4hv2_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1648677 ; + schema1:name "4hv2_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103233 ; + schema1:name "4i8s.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "4i8s.top" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1343908 ; + schema1:name "4i8s_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4i8s_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1343168 ; + schema1:name "4i8s_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103233 ; + schema1:name "4i8s_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2127 ; + schema1:name "4i8s_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1644933 ; + schema1:name "4i8s_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1644222 ; + schema1:name "4i8s_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 95943 ; + schema1:name "4ias.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "4ias.top" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1345492 ; + schema1:name "4ias_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4ias_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1344752 ; + schema1:name "4ias_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 95943 ; + schema1:name "4ias_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2121 ; + schema1:name "4ias_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1647903 ; + schema1:name "4ias_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1647192 ; + schema1:name "4ias_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105798 ; + schema1:name "4iat.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "4iat.top" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1335412 ; + schema1:name "4iat_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4iat_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1334672 ; + schema1:name "4iat_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105798 ; + schema1:name "4iat_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:name "4iat_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1629003 ; + schema1:name "4iat_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1628292 ; + schema1:name "4iat_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 109455 ; + schema1:name "4lym.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "4lym.top" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1375384 ; + schema1:name "4lym_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4lym_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1374644 ; + schema1:name "4lym_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 109455 ; + schema1:name "4lym_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:name "4lym_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:08+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1703940 ; + schema1:name "4lym_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1703229 ; + schema1:name "4lym_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96256 ; + schema1:name "4lyo.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577917 ; + schema1:name "4lyo.top" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1286444 ; + schema1:name "4lyo_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4lyo_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1285728 ; + schema1:name "4lyo_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96256 ; + schema1:name "4lyo_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2143 ; + schema1:name "4lyo_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1535896 ; + schema1:name "4lyo_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1535095 ; + schema1:name "4lyo_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:name "4lyz.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "4lyz.top" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1287256 ; + schema1:name "4lyz_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4lyz_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1286516 ; + schema1:name "4lyz_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:name "4lyz_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2104 ; + schema1:name "4lyz_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1538700 ; + schema1:name "4lyz_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1537989 ; + schema1:name "4lyz_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105258 ; + schema1:name "4nhi.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "4nhi.top" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1384156 ; + schema1:name "4nhi_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4nhi_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1383416 ; + schema1:name "4nhi_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 105258 ; + schema1:name "4nhi_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2116 ; + schema1:name "4nhi_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1720398 ; + schema1:name "4nhi_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1719687 ; + schema1:name "4nhi_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106968 ; + schema1:name "4qeq.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577668 ; + schema1:name "4qeq.top" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1360228 ; + schema1:name "4qeq_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4qeq_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1359464 ; + schema1:name "4qeq_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106968 ; + schema1:name "4qeq_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2107 ; + schema1:name "4qeq_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1673778 ; + schema1:name "4qeq_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1673067 ; + schema1:name "4qeq_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4953 ; + schema1:name "4r0p.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 31947 ; + schema1:name "4r0p.top" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 229932 ; + schema1:name "4r0p_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4r0p_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 229932 ; + schema1:name "4r0p_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 4953 ; + schema1:name "4r0p_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2126 ; + schema1:name "4r0p_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 342858 ; + schema1:name "4r0p_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 342867 ; + schema1:name "4r0p_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 93378 ; + schema1:name "4rlm.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "4rlm.top" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1301644 ; + schema1:name "4rlm_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4rlm_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1300904 ; + schema1:name "4rlm_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 93378 ; + schema1:name "4rlm_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:name "4rlm_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1565688 ; + schema1:name "4rlm_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1564977 ; + schema1:name "4rlm_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 93648 ; + schema1:name "4rln.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "4rln.top" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1306180 ; + schema1:name "4rln_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4rln_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1305440 ; + schema1:name "4rln_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 93648 ; + schema1:name "4rln_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2125 ; + schema1:name "4rln_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1574193 ; + schema1:name "4rln_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1573482 ; + schema1:name "4rln_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 88653 ; + schema1:name "4wmg.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:09+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "4wmg.top" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1353772 ; + schema1:name "4wmg_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "4wmg_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1353032 ; + schema1:name "4wmg_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88653 ; + schema1:name "4wmg_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2122 ; + schema1:name "4wmg_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1663428 ; + schema1:name "4wmg_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1662717 ; + schema1:name "4wmg_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88743 ; + schema1:name "5a3e.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 571959 ; + schema1:name "5a3e.top" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1295536 ; + schema1:name "5a3e_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "5a3e_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1294796 ; + schema1:name "5a3e_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88743 ; + schema1:name "5a3e_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2116 ; + schema1:name "5a3e_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1562403 ; + schema1:name "5a3e_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1561692 ; + schema1:name "5a3e_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103638 ; + schema1:name "5amy.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "5amy.top" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1296892 ; + schema1:name "5amy_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "5amy_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1296152 ; + schema1:name "5amy_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 103638 ; + schema1:name "5amy_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2128 ; + schema1:name "5amy_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1556778 ; + schema1:name "5amy_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1556067 ; + schema1:name "5amy_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99453 ; + schema1:name "5e4p.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "5e4p.top" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1355428 ; + schema1:name "5e4p_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "5e4p_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1354688 ; + schema1:name "5e4p_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99453 ; + schema1:name "5e4p_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2123 ; + schema1:name "5e4p_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1666533 ; + schema1:name "5e4p_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1665822 ; + schema1:name "5e4p_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102288 ; + schema1:name "5hnc.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "5hnc.top" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1295164 ; + schema1:name "5hnc_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "5hnc_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1294424 ; + schema1:name "5hnc_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102288 ; + schema1:name "5hnc_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2098 ; + schema1:name "5hnc_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1553538 ; + schema1:name "5hnc_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1552827 ; + schema1:name "5hnc_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 91398 ; + schema1:name "5hnl.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577919 ; + schema1:name "5hnl.top" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1328996 ; + schema1:name "5hnl_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "5hnl_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1328280 ; + schema1:name "5hnl_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 91398 ; + schema1:name "5hnl_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2122 ; + schema1:name "5hnl_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1615683 ; + schema1:name "5hnl_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1614882 ; + schema1:name "5hnl_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115113 ; + schema1:name "5i4w.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "5i4w.top" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1356436 ; + schema1:name "5i4w_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "5i4w_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1355696 ; + schema1:name "5i4w_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 115113 ; + schema1:name "5i4w_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2078 ; + schema1:name "5i4w_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1668423 ; + schema1:name "5i4w_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1667712 ; + schema1:name "5i4w_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104043 ; + schema1:name "5k2k.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577536 ; + schema1:name "5k2k.top" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1272700 ; + schema1:name "5k2k_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "5k2k_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1271960 ; + schema1:name "5k2k_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104043 ; + schema1:name "5k2k_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2114 ; + schema1:name "5k2k_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1511418 ; + schema1:name "5k2k_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1510707 ; + schema1:name "5k2k_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108723 ; + schema1:name "5k2n.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577668 ; + schema1:name "5k2n.top" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1339564 ; + schema1:name "5k2n_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "5k2n_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1338800 ; + schema1:name "5k2n_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 108723 ; + schema1:name "5k2n_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2087 ; + schema1:name "5k2n_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1635033 ; + schema1:name "5k2n_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1634322 ; + schema1:name "5k2n_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111063 ; + schema1:name "5k2p.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "5k2p.top" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1329580 ; + schema1:name "5k2p_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "5k2p_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1328840 ; + schema1:name "5k2p_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 111063 ; + schema1:name "5k2p_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2115 ; + schema1:name "5k2p_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1618068 ; + schema1:name "5k2p_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1617357 ; + schema1:name "5k2p_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107283 ; + schema1:name "5k2q.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "5k2q.top" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1325188 ; + schema1:name "5k2q_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "5k2q_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1324448 ; + schema1:name "5k2q_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107283 ; + schema1:name "5k2q_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2125 ; + schema1:name "5k2q_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1609833 ; + schema1:name "5k2q_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1609122 ; + schema1:name "5k2q_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 104718 ; + schema1:name "5k2r.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "5k2r.top" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1299052 ; + schema1:name "5k2r_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "5k2r_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1298312 ; + schema1:name "5k2r_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 104718 ; + schema1:name "5k2r_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2127 ; + schema1:name "5k2r_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1560828 ; + schema1:name "5k2r_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1560117 ; + schema1:name "5k2r_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106203 ; + schema1:name "5k2s.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "5k2s.top" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1290916 ; + schema1:name "5k2s_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "5k2s_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1290176 ; + schema1:name "5k2s_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 106203 ; + schema1:name "5k2s_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2143 ; + schema1:name "5k2s_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1545573 ; + schema1:name "5k2s_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1544862 ; + schema1:name "5k2s_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 120525 ; + schema1:name "5lyt.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577941 ; + schema1:name "5lyt.top" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1424328 ; + schema1:name "5lyt_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "5lyt_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1423564 ; + schema1:name "5lyt_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 120525 ; + schema1:name "5lyt_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2135 ; + schema1:name "5lyt_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1793580 ; + schema1:name "5lyt_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:11+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1792869 ; + schema1:name "5lyt_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:name "5lyz.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577551 ; + schema1:name "5lyz.top" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1287256 ; + schema1:name "5lyz_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "5lyz_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1286516 ; + schema1:name "5lyz_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:name "5lyz_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2125 ; + schema1:name "5lyz_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1538700 ; + schema1:name "5lyz_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1537989 ; + schema1:name "5lyz_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97563 ; + schema1:name "5njm.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "5njm.top" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1314172 ; + schema1:name "5njm_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "5njm_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1313432 ; + schema1:name "5njm_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97563 ; + schema1:name "5njm_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2097 ; + schema1:name "5njm_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1589178 ; + schema1:name "5njm_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1588467 ; + schema1:name "5njm_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107283 ; + schema1:name "5yin.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577541 ; + schema1:name "5yin.top" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1226764 ; + schema1:name "5yin_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "5yin_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1226024 ; + schema1:name "5yin_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 107283 ; + schema1:name "5yin_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2143 ; + schema1:name "5yin_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1425288 ; + schema1:name "5yin_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1424577 ; + schema1:name "5yin_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 93198 ; + schema1:name "6gf0.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 571956 ; + schema1:name "6gf0.top" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1350040 ; + schema1:name "6gf0_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "6gf0_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1349300 ; + schema1:name "6gf0_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 93198 ; + schema1:name "6gf0_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2091 ; + schema1:name "6gf0_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1664598 ; + schema1:name "6gf0_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1663887 ; + schema1:name "6gf0_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97248 ; + schema1:name "6h0k.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 571959 ; + schema1:name "6h0k.top" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1350616 ; + schema1:name "6h0k_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "6h0k_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1349876 ; + schema1:name "6h0k_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97248 ; + schema1:name "6h0k_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2115 ; + schema1:name "6h0k_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1665678 ; + schema1:name "6h0k_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1664967 ; + schema1:name "6h0k_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98193 ; + schema1:name "6h0l.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 571956 ; + schema1:name "6h0l.top" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1391728 ; + schema1:name "6h0l_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "6h0l_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1390988 ; + schema1:name "6h0l_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98193 ; + schema1:name "6h0l_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2116 ; + schema1:name "6h0l_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1742763 ; + schema1:name "6h0l_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1742052 ; + schema1:name "6h0l_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101760 ; + schema1:name "6lyt.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577548 ; + schema1:name "6lyt.top" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1333336 ; + schema1:name "6lyt_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:12+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "6lyt_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1332596 ; + schema1:name "6lyt_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101760 ; + schema1:name "6lyt_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2068 ; + schema1:name "6lyt_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1625100 ; + schema1:name "6lyt_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1624389 ; + schema1:name "6lyt_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:name "6lyz.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577550 ; + schema1:name "6lyz.top" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1214896 ; + schema1:name "6lyz_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "6lyz_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1214156 ; + schema1:name "6lyz_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101895 ; + schema1:name "6lyz_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2110 ; + schema1:name "6lyz_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1403025 ; + schema1:name "6lyz_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1402314 ; + schema1:name "6lyz_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96348 ; + schema1:name "6s2n.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "6s2n.top" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1348012 ; + schema1:name "6s2n_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "6s2n_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1347272 ; + schema1:name "6s2n_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96348 ; + schema1:name "6s2n_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2114 ; + schema1:name "6s2n_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1652628 ; + schema1:name "6s2n_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1651917 ; + schema1:name "6s2n_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102018 ; + schema1:name "7byo.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "7byo.top" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1318996 ; + schema1:name "7byo_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "7byo_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1318256 ; + schema1:name "7byo_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 102018 ; + schema1:name "7byo_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2116 ; + schema1:name "7byo_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1598223 ; + schema1:name "7byo_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1597512 ; + schema1:name "7byo_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 101613 ; + schema1:name "7byp.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "7byp.top" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1328716 ; + schema1:name "7byp_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "7byp_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1327976 ; + schema1:name "7byp_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 101613 ; + schema1:name "7byp_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2123 ; + schema1:name "7byp_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1616448 ; + schema1:name "7byp_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1615737 ; + schema1:name "7byp_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96888 ; + schema1:name "7c09.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "7c09.top" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1351612 ; + schema1:name "7c09_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "7c09_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1350872 ; + schema1:name "7c09_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 96888 ; + schema1:name "7c09_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2123 ; + schema1:name "7c09_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1659378 ; + schema1:name "7c09_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1658667 ; + schema1:name "7c09_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 100803 ; + schema1:name "7d01.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "7d01.top" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1325332 ; + schema1:name "7d01_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "7d01_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1324592 ; + schema1:name "7d01_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:13+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 100803 ; + schema1:name "7d01_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2100 ; + schema1:name "7d01_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1610103 ; + schema1:name "7d01_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1609392 ; + schema1:name "7d01_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99723 ; + schema1:name "7d02.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "7d02.top" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1331452 ; + schema1:name "7d02_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "7d02_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1330712 ; + schema1:name "7d02_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 99723 ; + schema1:name "7d02_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2098 ; + schema1:name "7d02_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1621578 ; + schema1:name "7d02_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1620867 ; + schema1:name "7d02_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97968 ; + schema1:name "7d04.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577542 ; + schema1:name "7d04.top" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1326196 ; + schema1:name "7d04_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "7d04_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1325456 ; + schema1:name "7d04_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 97968 ; + schema1:name "7d04_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2108 ; + schema1:name "7d04_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1611723 ; + schema1:name "7d04_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1611012 ; + schema1:name "7d04_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98913 ; + schema1:name "7d05.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577539 ; + schema1:name "7d05.top" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1333684 ; + schema1:name "7d05_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8780 ; + schema1:name "7d05_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1332944 ; + schema1:name "7d05_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 98913 ; + schema1:name "7d05_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2096 ; + schema1:name "7d05_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1625763 ; + schema1:name "7d05_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1625052 ; + schema1:name "7d05_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88320 ; + schema1:name "7lyz.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577528 ; + schema1:name "7lyz.top" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1187464 ; + schema1:name "7lyz_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "7lyz_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1186724 ; + schema1:name "7lyz_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88260 ; + schema1:name "7lyz_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2129 ; + schema1:name "7lyz_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1351590 ; + schema1:name "7lyz_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1350879 ; + schema1:name "7lyz_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88260 ; + schema1:name "8lyz.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 577529 ; + schema1:name "8lyz.top" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1343272 ; + schema1:name "8lyz_em.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 8976 ; + schema1:name "8lyz_em_energy.edr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1342532 ; + schema1:name "8lyz_ions.tpr" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 88260 ; + schema1:name "8lyz_newbox.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2126 ; + schema1:name "8lyz_potential.xvg" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1643730 ; + schema1:name "8lyz_solv.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1643019 ; + schema1:name "8lyz_solv_ions.gro" ; + schema1:sdDatePublished "2022-07-20T09:00:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 127 ; + schema1:dateModified "2023-07-20T15:48:27" ; + schema1:name "analytic.out" ; + schema1:sdDatePublished "2023-07-20T15:48:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1 ; + schema1:dateModified "2023-07-20T15:48:27" ; + schema1:name "file_1" ; + schema1:sdDatePublished "2023-07-20T15:48:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 39 ; + schema1:dateModified "2023-07-20T15:48:27" ; + schema1:name "file_1.out" ; + schema1:sdDatePublished "2023-07-20T15:48:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1 ; + schema1:dateModified "2023-07-20T15:48:27" ; + schema1:name "file_2" ; + schema1:sdDatePublished "2023-07-20T15:48:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 39 ; + schema1:dateModified "2023-07-20T15:48:27" ; + schema1:name "file_2.out" ; + schema1:sdDatePublished "2023-07-20T15:48:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1 ; + schema1:dateModified "2023-07-20T15:48:27" ; + schema1:name "file_3" ; + schema1:sdDatePublished "2023-07-20T15:48:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 39 ; + schema1:dateModified "2023-07-20T15:48:27" ; + schema1:name "file_3.out" ; + schema1:sdDatePublished "2023-07-20T15:48:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777334 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file0.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777273 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file1.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16777631 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file10.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777365 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file11.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777823 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file12.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777530 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file13.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777823 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file14.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777615 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file15.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777819 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file16.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777771 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file17.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777684 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file18.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777600 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file19.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16777443 ; + schema1:dateModified "2024-01-22T13:48:21" ; + schema1:name "file2.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777653 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file20.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777454 ; + schema1:dateModified "2024-01-22T13:48:21" ; + schema1:name "file21.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777851 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file22.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777584 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file23.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777580 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file24.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777605 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file25.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777806 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file26.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777360 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file27.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777505 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file28.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16777513 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file29.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777723 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file3.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777327 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file30.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777312 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file31.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16778157 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file32.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777453 ; + schema1:dateModified "2024-01-22T13:48:21" ; + schema1:name "file33.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777676 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file34.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777547 ; + schema1:dateModified "2024-01-22T13:48:21" ; + schema1:name "file35.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777985 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file36.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777330 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file37.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16777368 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file38.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777520 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file39.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777456 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file4.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777740 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file40.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777473 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file41.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777533 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file42.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777612 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file43.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777761 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file44.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777252 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file45.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777684 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file46.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16777316 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file47.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777747 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file48.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777238 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file49.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777494 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file5.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777282 ; + schema1:dateModified "2024-01-22T13:48:21" ; + schema1:name "file50.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777662 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file51.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777583 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file52.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777519 ; + schema1:dateModified "2024-01-22T13:48:21" ; + schema1:name "file53.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777329 ; + schema1:dateModified "2024-01-22T13:48:21" ; + schema1:name "file54.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777557 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file55.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16777223 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file56.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777622 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file57.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777407 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file58.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777409 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file59.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777574 ; + schema1:dateModified "2024-01-22T13:48:21" ; + schema1:name "file6.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777376 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file60.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777222 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file61.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777495 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file62.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777656 ; + schema1:dateModified "2024-01-22T13:48:23" ; + schema1:name "file63.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777516 ; + schema1:dateModified "2024-01-22T13:48:22" ; + schema1:name "file7.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16777769 ; + schema1:dateModified "2024-01-22T13:48:21" ; + schema1:name "file8.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777430 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file9.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:Dataset ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:name "dataset_64f_16mb" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777334 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file0.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777273 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file1.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777631 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file10.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777365 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file11.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777823 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file12.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777530 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file13.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16777823 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file14.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777615 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file15.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777819 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file16.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777771 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file17.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777684 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file18.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777600 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file19.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777443 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file2.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777653 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file20.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777454 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file21.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777851 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file22.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16777584 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file23.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777580 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file24.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777605 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file25.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777806 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file26.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777360 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file27.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777505 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file28.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777513 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file29.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777723 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file3.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777327 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file30.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777312 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file31.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16778157 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file32.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777453 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file33.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777676 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file34.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777547 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file35.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777985 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file36.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777330 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file37.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777368 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file38.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777520 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file39.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777456 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file4.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777740 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file40.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16777473 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file41.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777533 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file42.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777612 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file43.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777761 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file44.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777252 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file45.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777684 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file46.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777316 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file47.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777747 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file48.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777238 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file49.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777494 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file5.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16777282 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file50.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777662 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file51.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777583 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file52.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777519 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file53.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777329 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file54.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777557 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file55.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777223 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file56.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777622 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file57.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777407 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file58.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777409 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file59.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 16777574 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file6.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777376 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file60.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777222 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file61.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777495 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file62.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777656 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file63.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777516 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file7.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777769 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file8.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 16777430 ; + schema1:dateModified "2024-01-22T13:48:24" ; + schema1:name "file9.txt" ; + schema1:sdDatePublished "2024-01-22T16:32:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 687939 ; + schema1:name "21123123_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:name "21123123_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 256196 ; + schema1:name "22010100_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 11277 ; + schema1:name "22010100_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 255094 ; + schema1:name "22010101_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 11272 ; + schema1:name "22010101_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 692232 ; + schema1:name "22010102_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:name "22010102_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256476 ; + schema1:name "22010103_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 11278 ; + schema1:name "22010103_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 255353 ; + schema1:name "22010104_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 11280 ; + schema1:name "22010104_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 693555 ; + schema1:name "22010105_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:name "22010105_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 260775 ; + schema1:name "22010106_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 26313 ; + schema1:name "22010106_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 684795 ; + schema1:name "22010107_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:name "22010107_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256807 ; + schema1:name "22010108_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 11267 ; + schema1:name "22010108_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 275764 ; + schema1:name "22010109_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 52616 ; + schema1:name "22010109_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 690921 ; + schema1:name "22010110_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:name "22010110_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 691522 ; + schema1:name "22010111_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:name "22010111_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 696702 ; + schema1:name "22010112_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:name "22010112_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256656 ; + schema1:name "22010113_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 11267 ; + schema1:name "22010113_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 253888 ; + schema1:name "22010114_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 11271 ; + schema1:name "22010114_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 691096 ; + schema1:name "22010115_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:name "22010115_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 691623 ; + schema1:name "22010116_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:name "22010116_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 698358 ; + schema1:name "22010117_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:name "22010117_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:50+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 696109 ; + schema1:name "22010118_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:name "22010118_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 256064 ; + schema1:name "22010119_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 11277 ; + schema1:name "22010119_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 260455 ; + schema1:name "22010120_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 22551 ; + schema1:name "22010120_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 693734 ; + schema1:name "22010121_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 0 ; + schema1:name "22010121_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 255243 ; + schema1:name "22010122_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_FIG2.png" ; + schema1:sdDatePublished "2022-07-14T17:40:51+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 11268 ; + schema1:name "22010122_50fq1.0_49.0hz_0.525.00.18.0_kurtosis_UD_P_trig0.9_OUT2.dat" ; + schema1:sdDatePublished "2022-07-14T17:40:51+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:14+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:15+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:16+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:17+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:18+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:19+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:20+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:21+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:22+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:23+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:24+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:25+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:26+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:27+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:28+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:30+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:32+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:35+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:36+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:38+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:39+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:41+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:42+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:43+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:44+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:45+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:46+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST001.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST002.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST003.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST004.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST005.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST006.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST007.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST008.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST009.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST010.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST011.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST012.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST013.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST014.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST015.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST016.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST017.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST018.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST019.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST020.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST021.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST022.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST023.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:47+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST024.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST025.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST026.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST027.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST028.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST029.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST030.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST031.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST032.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST033.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST034.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST035.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST036.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST037.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST038.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST039.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST040.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST041.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST042.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST043.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST044.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST045.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST046.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST047.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST048.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST049.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST050.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST051.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST052.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST053.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST054.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST055.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST056.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST057.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST058.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST059.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST060.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST061.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST062.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST063.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:48+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST064.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST065.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST066.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST067.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST068.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST069.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST070.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST071.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST072.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST073.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST074.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST075.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST076.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST077.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST078.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST079.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST080.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST081.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST082.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST083.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST084.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST085.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST086.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST087.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST088.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST089.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST090.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST091.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST092.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST093.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST094.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST095.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST096.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST097.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST098.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST099.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2924544 ; + schema1:name "IM.ST100.00.UD" ; + schema1:sdDatePublished "2022-07-14T17:40:49+00:00" . + + a schema1:SoftwareApplication ; + schema1:citation ; + schema1:downloadUrl "https://manual.gromacs.org/documentation/2019.1/download.html" ; + schema1:installUrl "https://anaconda.org/bioconda/gromacs" ; + schema1:license ; + schema1:name "GROMACS" ; + schema1:softwareHelp "https://doi.org/10.5281/zenodo.2564761" ; + schema1:url "https://manual.gromacs.org/documentation/2019.1/" ; + schema1:version "2019.1" . + + a schema1:MediaObject . + + a schema1:MediaObject . 
+ + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . 
+ + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-5691-7000" ; + schema1:name "Elliott J. Price" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Jacopo Selva" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-6409-8009" ; + schema1:name "Jessica Gomez-Garrido" . + + a schema1:Person ; + schema1:name "Simon Rolph" . + + a schema1:Person ; + schema1:name "Viktoria Isabel Schwarz" . + + a schema1:Person ; + schema1:name "Christoph Steinbeck" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-7522-4007" ; + schema1:name "Julian Uszkoreit" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Roberto Tonini" . + + a schema1:Person ; + schema1:name "Job van Riet" . + + a schema1:Person ; + schema1:name "Maria Sorokina" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-0324-8728" ; + schema1:name "Nandan Deshpande" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Alessandro D'Anca" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-0399-8713" ; + schema1:name "Marie-Dominique Devignes" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Fabrizio Bernardi" . + + a schema1:Person ; + schema1:name "Fabrice Touzain" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-1235-9462" ; + schema1:name "Valentine Murigneux" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Stefano Lorito" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-1480-3563" ; + schema1:name "Mike Thang" . 
+ + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-2021-6883" ; + schema1:name "Cristiane Taniguti" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-2151-7418" ; + schema1:name "Kary Ocaña" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Jorge Macías" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-3503-6383" ; + schema1:name "Varsha Kale" . + + a schema1:Person ; + schema1:name "Adam Tofilski" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-4032-5331" ; + schema1:name "Ziad Al-Bkhetan" . + + a schema1:Person ; + schema1:name "Daniel Seeliger" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-4131-4458" ; + schema1:name "Diego Garrido-Martín" . + + a schema1:Person ; + schema1:name "Kristian Peters" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-5192-126X" ; + schema1:name "Pavankumar Videm" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Carlos Sánchez Linares" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-5738-4477" ; + schema1:name "Roderic Guigó" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-7025-2241" ; + schema1:name "Hrishikesh Dhondge" . + + a schema1:Person ; + schema1:name "Isaure Chauvot de Beauchêne" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-7139-4073" ; + schema1:name "Cyril Noel" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-7429-1846" ; + schema1:name "Marco De La Pierre" . + + a schema1:Person ; + schema1:name "Daan Hazelaar", + "Daan Hazelaar" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Steven J. Gibbons" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-8122-9522" ; + schema1:name "Luiz Gadelha" . 
+ + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-8330-4071" ; + schema1:name "Mahnoor Zulfiqar" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Finn Løvholt" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Louise Cordrie" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Sonia Scardigno" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-2556-2125" ; + schema1:name "Jasper Ouwerkerk" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Fabrizio Romano" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-2949-6719" ; + schema1:name "Damon-Lee Pointon" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Marc de la Asunción" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Manuel J. Castro" . + + a schema1:Person ; + schema1:name "Bert L. de Groot" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-3683-6208" ; + schema1:name "Michael Hall" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Manuela Volpe" . + + a schema1:Person ; + schema1:name "Ying Sims" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0009-0006-9956-0404" ; + schema1:name "Will Eagles", + "William Eagles" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Archit Dabral" . + + a schema1:SoftwareApplication ; + schema1:license ; + schema1:name "numpy" ; + schema1:version "1.21.1" . + + a schema1:Organization ; + schema1:name "Indian Institute of Technology, BHU" . + + a schema1:CollegeOrUniversity, + schema1:Organization ; + schema1:alternateName "UNIMAN" ; + schema1:name "The University of Manchester" ; + schema1:url "https://www.manchester.ac.uk/" . 
+ + a schema1:Thing ; + schema1:alternateName "GÖG" ; + schema1:name "Gesundheit Österreich" ; + schema1:url "https://goeg.at/" . + + a schema1:Organization ; + schema1:name "Virginia Tech" . + + a schema1:Organization ; + schema1:name "International Center for Numerical Methods in Engineering" . + + a schema1:Organization ; + schema1:name "Sciensano" ; + schema1:url "https://www.sciensano.be/" . + + a schema1:CreativeWork ; + schema1:identifier "BSD-3-Clause" ; + schema1:name "BSD 3-Clause \"New\" or \"Revised\" License" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Nextflow" ; + schema1:url . + + a schema1:Collection ; + schema1:name "TronFlow" . + + a schema1:Collection ; + schema1:name "Vertebrate Genomes Pipelines (VGP) workflows" . + + a schema1:Person ; + schema1:name "Marco De La Pierre" . + + a schema1:Person ; + schema1:name "Julian Uszkoreit" . + + a schema1:Person ; + schema1:name "Valentine Murigneux" . + + a schema1:Person ; + schema1:name "Alexandre Cormier" . + + a schema1:Person ; + schema1:name "Cyril Noel" . + + a schema1:Person ; + schema1:name "Pierre Cuzin" . + + a schema1:Person ; + schema1:name "Mike Thang" . + + a schema1:Person ; + schema1:name "Hrishikesh Dhondge" . + + a schema1:Person ; + schema1:name "Jasper Ouwerkerk" . + + a schema1:Person ; + schema1:name "Kary Ocaña" . + + a schema1:Person ; + schema1:name "Marie-Dominique Devignes" . + + a schema1:Person ; + schema1:name "Varsha Kale" . + + a schema1:Person ; + schema1:name "Tatiana Gurbich" . + + a schema1:Person ; + schema1:name "Cristiane Taniguti" . + + a schema1:Person ; + schema1:name "Peter Menzel" . + + a schema1:Person ; + schema1:name "Diego Garrido-Martín" . + + a schema1:Person ; + schema1:name "Roderic Guigó" . + + a schema1:Person ; + schema1:name "Mahnoor Zulfiqar" . + + a schema1:Person ; + schema1:name "Luiz Gadelha" . + + a schema1:Person ; + schema1:name "Anne Fouilloux" . 
+ + a schema1:Person ; + schema1:name "Jessica Gomez-Garrido" . + + a schema1:Person ; + schema1:name "Michael Hall" . + + a schema1:Person ; + schema1:name "Ziad Al-Bkhetan" . + + a schema1:Person ; + schema1:name "Elliott J. Price" . + + a schema1:Person ; + schema1:name "Pavankumar Videm" . + + a schema1:Person ; + schema1:name "Paul Brack" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Air Quality Prediction" . + + a schema1:Organization, + schema1:Project ; + schema1:name "CAPSID" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Read2Map" . + + a schema1:Organization, + schema1:Project ; + schema1:name "ParslRNA-Seq: an efficient and scalable RNAseq analysis workflow for studies of differentiated gene expression" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Apis-wings" . + + a schema1:Organization, + schema1:Project ; + schema1:name "BioDT Use Case 4.1.1.2 Ecosystem services" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Metabolomics-Reproducibility" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Workflows for geographic science" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Katdetectr" . + + a schema1:Organization, + schema1:Project ; + schema1:name "ANSES-Ploufragan" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Tree of Life Genome Assembly" . + + a schema1:Organization, + schema1:Project ; + schema1:name "CholGen" . + + a schema1:Organization, + schema1:Project ; + schema1:name "BY-COVID (general)" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Pangenome database project" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Peter Menzel's Team" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Genome Data Compression Team" . + + a schema1:Organization, + schema1:Project ; + schema1:name "UX trial team" . 
+ + a schema1:Organization, + schema1:Project ; + schema1:name "EOSC-Life-WP6-Demos" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Pawsey Supercomputing Research Centre" . + + a schema1:Organization, + schema1:Project ; + schema1:name "SeBiMER" . + + a schema1:Organization, + schema1:Project ; + schema1:name "nf-core viralrecon" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Applied Computational Biology at IEG/HMGU" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Single Cell Unit" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Workflows Australia" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Medizinisches Proteom-Center, Medical Bioinformatics" . + + a schema1:Organization, + schema1:Project ; + schema1:name "usegalaxy.be workflows" . + + a schema1:WebSite ; + schema1:name "Acrobat PDF 1.7 - Portable Document Format" . + + a schema1:MediaObject ; + schema1:name "randomsvd_matrix_cfg.tar" . + + a schema1:ContactPoint ; + schema1:contactType "Author" ; + schema1:email "ashishbhawvl@gmail.com" ; + schema1:identifier "ashishbhawvl@gmail.com" ; + schema1:url "https://orcid.org/0000-0003-0606-2512", + "https://orcid.org/0009-0007-1637-4487" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/params.yml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/QuantifiedPeptides.tsv" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/lfq_param_file.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/Normalyzer_design.tsv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/mzRange.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1.txt" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/modificationSpecificPeptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1.pep.interact.pep.prot_stpeter.prot_pep.csv" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/all_pep_quant_merged.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/evidence.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3_proteins.txt" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/msms.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1_proteins.txt" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2.pep.interact.pep.xml" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3.pep.interact.pep.prot_stpeter.prot_pep.csv" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3.txt" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/import_file_list.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/searchgui.par" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3.pep.interact.pep.prot_stpeter.prot_prot.csv" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/changed_params.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3_filtered.txt" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/versions.yml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2.pep.interact.pep.xml" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/quant_exp_design.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/allPeptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/sdrf_merge.version.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/stand_pep_quant_merged.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2.raw" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/prepare_files.version.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2.pep.xml" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/yeast_UPS_concatenated_target_decoy.fasta" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2.mzML" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3.raw" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/params_out.yml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3.pep.interact.pep.prot_stpeter.prot.xml" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/proline_results_2686ac07-85e9-4b13-a22e-186821fd3e84.xlsx" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/all_prot_quant_merged.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1.t.xml" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/mqpar.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1.t.xml" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1.txt" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3.psdb" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/exp_design.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/parameters.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2.txt" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/Oxidation (M)Sites.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/Normalyzer_comparisons.txt" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/QuantifiedProteins.tsv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2_proteins.txt" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R3.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/summary.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2_peptides.txt" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1.pep.interact.pep.prot_stpeter.prot_prot.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/stand_prot_quant_merged.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R3.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1.mzDB" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/msmsScans.txt" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R3_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/comet.params" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/sdrf_temp.tsv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R3_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R1.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R2.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R2_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R2.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/msScans.txt" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R2.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R3.pep.interact.pep.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/ms3Scans.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/QuantifiedPeaks.tsv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/sdrf_local.tsv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1.pep.interact.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R1.pep.interact.pep.prot.xml" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3.zip" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R1.pep.interact.pep.prot_stpeter.prot.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1.mgf" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_12500amol_R2.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R1.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R3.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R2.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50000amol_R2.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3.t.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_25000amol_R1_filtered.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/proteinGroups.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_125amol_R1_proteins.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_2500amol_R3.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1.pep.xml" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R1.psdb" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1_peptides.txt" . 
+ + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_500amol_R1_peptides.txt" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R2.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_5000amol_R3.pep.interact.pep.prot_stpeter.prot_pep.csv" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_50amol_R1.mzML" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R3.raw" . + + a schema1:MediaObject ; + schema1:isPartOf ; + schema1:name "outdir::/UPS1_250amol_R2_proteins.txt" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-Trio-phasing-VGP5/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5" ; + schema1:version "0.1.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "fragment-based-docking-scoring/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/fragment-based-docking-scoring" ; + schema1:version "0.1.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-only-VGP3/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:version "0.1.8" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-12-04T14:19:45+00:00" ; + schema1:description """**Name:** Java Wordcount +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs + +# Description +Wordcount application. There are two versions of Wordcount, depending on how the input data is given. 
+ +## Version 1 +''Single input file'', where all the text is given in the same file and the chunks are calculated with a BLOCK_SIZE parameter. + +## Version 2 +''Multiple input files'', where the text fragments are already in different files under the same directory + +# Execution instructions +Usage: +``` +runcompss --classpath=application_sources/jar/wordcount.jar wordcount.multipleFiles.Wordcount DATA_FOLDER +runcompss --classpath=application_sources/jar/wordcount.jar wordcount.uniqueFile.Wordcount DATA_FILE BLOCK_SIZE +``` + +where: +* DATA_FOLDER: Absolute path to the base folder of the dataset files +* DATA_FILE: Absolute path to the dabase file +* BLOCK_SIZE: Number of bytes of each block + +# Execution Examples +``` +runcompss --classpath=application_sources/jar/wordcount.jar wordcount.multipleFiles.Wordcount dataset/data-set/ +runcompss --classpath=application_sources/jar/wordcount.jar wordcount.uniqueFile.Wordcount dataset/data-set/file_small.txt 650 +runcompss --classpath=application_sources/jar/wordcount.jar wordcount.uniqueFile.Wordcount dataset/data-set/file_long.txt 250000 + +``` + +# Build + +## Option 1: Native java +``` +cd application_sources/; javac src/main/java/wordcount/*.java +cd src/main/java/; jar cf wordcount.jar wordcount/ +cd ../../../; mv src/main/java/wordcount.jar jar/ +``` + +## Option 2: Maven +``` +cd application_sources/ +mvn clean package +``` +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Java COMPSs wordcount example (files used as inputs)" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:version "0.5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Scaffolding-HiC-VGP8/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:version "0.2.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "rnaseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:version "0.5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:hasPart ; + schema1:name "conf/metadata/experiment_data" ; + schema1:programmingLanguage ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-only-VGP3/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:version "0.1.6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "VGP-meryldb-creation-trio/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/VGP-meryldb-creation-trio" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "dada2/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/dada2" ; + schema1:version "0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "allele-based-pathogen-identification/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/allele-based-pathogen-identification" ; + schema1:version "0.1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "baredsc/baredSC-1d-logNorm" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/baredsc" ; + schema1:version "0.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "Assembly-decontamination-VGP9/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-decontamination-VGP9" ; + schema1:version "0.1.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "protein-ligand-complex-parameterization/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/protein-ligand-complex-parameterization" ; + schema1:version "0.1.1" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.6" . 
+ + a schema1:Dataset ; + schema1:creator , + ; + schema1:datePublished "2022-07-20T08:59:58+00:00" ; + schema1:description "Lysozyme in Water simplest version, from COMPSs Tutorial" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:name "Lysozyme in Water COMPSs workflow" ; + schema1:publisher , + . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 7912 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "lysozyme_in_water.py" ; + schema1:programmingLanguage ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-consensus-from-variation/COVID-19-CONSENSUS-CONSTRUCTION" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-consensus-from-variation" ; + schema1:version "0.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "rnaseq-sr/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/rnaseq-sr" ; + schema1:version "0.4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "qiime2-II-denoising/IIa-denoising-se" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/qiime2-II-denoising" ; + schema1:version "0.2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-sr/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:version "0.9" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + , + ; + schema1:name "fastq-to-matrix-10x/scrna-seq-fastq-to-matrix-10x-cellplex" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/fastq-to-matrix-10x" ; + schema1:version "0.4" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-05-30T08:48:00+00:00" ; + schema1:description """**Name:** Matrix multiplication with Files +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs + +# Description +Matrix multiplication is a binary operation that takes a pair of matrices and produces another matrix. + +If A is an n×m matrix and B is an m×p matrix, the result AB of their multiplication is an n×p matrix defined only if the number of columns m in A is equal to the number of rows m in B. When multiplying A and B, the elements of the rows in A are multiplied with corresponding columns in B. + +In this implementation, A and B are square matrices (same number of rows and columns), and so it is the result matrix C. Each matrix is divided in N blocks of M doubles. The multiplication of two blocks is done by a multiply task method with a simple three-nested-loop implementation. When executed with COMPSs, the main program generates N^3^ tasks arranged as N^2^ chains of N tasks in the dependency graph. 
+ +# Execution instructions +Usage: +``` +runcompss --lang=python src/matmul_files.py numberOfBlocks blockSize +``` + +where: +* numberOfBlocks: Number of blocks inside each matrix +* blockSize: Size of each block + + +# Execution Examples +``` +runcompss --lang=python src/matmul_files.py 4 4 +runcompss src/matmul_files.py 4 4 +python -m pycompss src/matmul_files.py 4 4 +``` + +# Build +No build is required +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "PyCOMPSs Matrix Multiplication, out-of-core using files" ; + schema1:publisher . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.0.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.0.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.0.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.0.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.0.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.0.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.0.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.0.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.1.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.1.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.1.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.1.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.1.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.1.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.1.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.1.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.2.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.2.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.2.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.2.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.2.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.2.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.2.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.2.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.3.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.3.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.3.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.3.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.3.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.3.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.3.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.3.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.4.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.4.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.4.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.4.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.4.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.4.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.4.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.4.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.5.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.5.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.5.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.5.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.5.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.5.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.5.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.5.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.6.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.6.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.6.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.6.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.6.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.6.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.6.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.6.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.7.0" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.7.1" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.7.2" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.7.3" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.7.4" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.7.5" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.7.6" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 28672 ; + schema1:dateModified "2023-05-30T08:47:57" ; + schema1:name "C.7.7" ; + schema1:sdDatePublished "2023-05-30T08:48:00+00:00" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "assembly-with-flye/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/assembly-with-flye" ; + schema1:version "0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:version "0.9" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-03-22T11:45:33+00:00" ; + schema1:description """**Name:** Matmul GPU +**Contact Person**: cristian.tatu@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs +**Machine**: Minotauro-MN4 + +Matmul running on the GPU leveraging COMPSs GPU Cache for deserialization speedup. +Launched using 32 GPUs (16 nodes). +C = A @ B +Where A: shape (320, 56_900_000) block_size (10, 11_380_000) + B: shape (56_900_000, 10) block_size (11_380_000, 10) + C: shape (320, 10) block_size (10, 10) +Total dataset size 291 GB. +Version dislib-0.9 (https://github.com/bsc-wdc/dislib/tree/release-0.9) +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "COMPSs GPU Cache Matrix Multiplication" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "repeatmasking/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/repeatmasking" ; + schema1:version "0.1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "Assembly-decontamination-VGP9/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-decontamination-VGP9" ; + schema1:version "0.1.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:version "0.11" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-only-VGP3/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:version "0.1.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "dada2/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/dada2" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + , + ; + schema1:name "fastq-to-matrix-10x/scrna-seq-fastq-to-matrix-10x-cellplex" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/fastq-to-matrix-10x" ; + schema1:version "0.2" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-07-05T11:38:51+00:00" ; + schema1:description """**Name:** KMeans +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs +**Machine**: MareNostrum5 + +KMEans for clustering the housing.csv dataset (https://github.com/sonarsushant/California-House-Price-Prediction/blob/master/housing.csv). 
+This application used [dislib-0.9.0](https://github.com/bsc-wdc/dislib/tree/release-0.9) +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "KMeans housing" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-Trio-phasing-VGP5/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5" ; + schema1:version "0.1.7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "lcms-preprocessing/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/lcms-preprocessing" ; + schema1:version "1.0" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-sr/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:version "0.1" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator , + , + , + ; + schema1:datePublished "2023-10-20T11:05:06+00:00" ; + schema1:description "A demonstration workflow for Reduced Order Modeling (ROM) within the eFlows4HPC project, implemented using Kratos Multiphysics, EZyRB, COMPSs, and Dislib." ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "https://spdx.org/licenses/BSD-4-Clause.html" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "eFlows4HPC Demo ROM Workflow" ; + schema1:publisher , + , + . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Scaffolding-HiC-VGP8/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:version "0.2.4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "atacseq/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/atacseq" ; + schema1:version "0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "Assembly-decontamination-VGP9/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-decontamination-VGP9" ; + schema1:version "0.1.6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "pathogen-detection-pathogfair-samples-aggregation-and-visualisation/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/pathogen-detection-pathogfair-samples-aggregation-and-visualisation" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-pe-illumina-artic-variant-calling/COVID-19-PE-ARTIC-ILLUMINA" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling" ; + schema1:version "0.4.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "gcms-metams/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/gcms-metams" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:version "0.10" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "amr_gene_detection/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/amr_gene_detection" ; + schema1:version "1.1.1" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-05-30T07:15:34+00:00" ; + schema1:description """**Name:** Matrix Multiplication +**Contact Person:** support-compss@bsc.es +**Access Level:** public +**License Agreement:** Apache2 +**Platform:** COMPSs + +# Description +Matrix multiplication is a binary operation that takes a pair of matrices and produces another matrix. + +If A is an n×m matrix and B is an m×p matrix, the result AB of their multiplication is an n×p matrix defined only if the number of columns m in A is equal to the number of rows m in B. When multiplying A and B, the elements of the rows in A are multiplied with corresponding columns in B. + +In this implementation, A and B are square matrices (same number of rows and columns), and so it is the result matrix C. Each matrix is divided in N blocks of M doubles. The multiplication of two blocks is done by a multiply task method with a simple three-nested-loop implementation. When executed with COMPSs, the main program generates N^3^ tasks arranged as N^2^ chains of N tasks in the dependency graph. + +# Versions +There are three versions of Matrix Multiplication, depending on the data types used to store the blocks. +## Version 1 +''files'', where the matrix blocks are stored in files. +## Version 2 +''objects'', where the matrix blocks are represented by objects. +## Version 3 +''arrays'', where the matrix blocks are stored in arrays. 
+ +# Execution instructions +Usage: +``` +runcompss matmul.files.Matmul numberOfBlocks blockSize +runcompss matmul.objects.Matmul numberOfBlocks blockSize +runcompss matmul.arrays.Matmul numberOfBlocks blockSize +``` + +where: + * numberOfBlocks: Number of blocks inside each matrix + * blockSize: Size of each block + + +# Execution Example +``` +runcompss matmul.objects.Matmul 16 4 +runcompss matmul.files.Matmul 16 4 +runcompss matmul.arrays.Matmul 16 4 +``` + +# Build +## Option 1: Native java +``` +cd ~/tutorial_apps/java/matmul/; javac src/main/java/matmul/*/*.java +cd src/main/java/; jar cf matmul.jar matmul/ +cd ../../../; mv src/main/java/matmul.jar jar/ +``` + +## Option 2: Maven +``` +cd ~/tutorial_apps/java/matmul/ +mvn clean package +``` +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "COMPSs Matrix Multiplication, out-of-core using files" ; + schema1:publisher . + + a schema1:MediaObject ; + schema1:contentSize 295 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.0.0" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.0.1" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 301 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.0.2" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 301 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.0.3" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 296 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.1.0" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 296 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.1.1" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.1.2" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 296 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.1.3" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.2.0" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 298 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.2.1" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 299 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.2.2" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 294 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.2.3" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 299 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.3.0" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 297 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.3.1" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 299 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.3.2" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 297 ; + schema1:dateModified "2023-05-30T07:15:32" ; + schema1:name "C.3.3" ; + schema1:sdDatePublished "2023-05-30T07:15:34+00:00" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-pe-illumina-artic-ivar-analysis/SARS-COV-2-ILLUMINA-AMPLICON-IVAR-PANGOLIN-NEXTCLADE" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-ivar-analysis" ; + schema1:version "0.2.2" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-07-12T11:16:08+00:00" ; + schema1:description "Lysozyme in water full COMPSs application" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Lysozyme in water full" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Scaffolding-HiC-VGP8/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:version "0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "qiime2-I-import/Ia-import-multiplexed-se" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/qiime2-I-import" ; + schema1:version "0.2" . 
+ + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-03-05T11:04:51+00:00" ; + schema1:description "Lysozyme in water full COMPSs application run at MareNostrum IV, using dataset_small with two workers" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Lysozyme in water full version, using dataset_small, two workers, data_persistence True" ; + schema1:publisher . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "gene-based-pathogen-identification/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/gene-based-pathogen-identification" ; + schema1:version "0.1" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-11-02T10:54:22+00:00" ; + schema1:description """**Name:** TruncatedSVD (Randomized SVD) +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs +**Machine**: MareNostrum4 + +TruncatedSVD (Randomized SVD) for computing just 456 singular values out of a (3.6M x 1200) size matrix. +The input matrix represents a CFD transient simulation of aire moving past a cylinder. +This application used dislib-0.9.0 (https://github.com/bsc-wdc/dislib/tree/release-0.9) +""" ; + schema1:hasPart , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Randomized SVD" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicates-one-haplotype-VGP6b/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b" ; + schema1:version "0.6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "parallel-accession-download/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:version "0.1.13" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-variation-reporting/COVID-19-VARIATION-REPORTING" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-variation-reporting" ; + schema1:version "0.1.3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:version "0.1.10" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Mitogenome-assembly-VGP0/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Mitogenome-assembly-VGP0" ; + schema1:version "0.1" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "1.0" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "COVID-19: variation analysis on WGS PE data" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-wgs-variant-calling" ; + schema1:version "0.2.1" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-12-19T11:21:53+00:00" ; + schema1:description """**Name:** Lanczos SVD +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs +**Machine**: MareNostrum4 + +Lanczos SVD for computing singular values needed to reach an epsilon of 1e-3 on a matrix of (150000, 150). +The input matrix is generated synthetically. +This application used [dislib-0.9.0](https://github.com/bsc-wdc/dislib/tree/release-0.9) +""" ; + schema1:hasPart , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Lanczos SVD" ; + schema1:publisher . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicates-one-haplotype-VGP6b/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b" ; + schema1:version "0.5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "parallel-accession-download/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:version "0.1.6" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:version "0.6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "fragment-based-docking-scoring/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/fragment-based-docking-scoring" ; + schema1:version "0.1.4" . 
+ + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-11-10T13:57:34+00:00" ; + schema1:description """**Name:** K-means +**Contact Person**: support-compss@bsc.es +**Access Level**: Public +**License Agreement**: Apache2 +**Platform**: COMPSs + +# Description +K-means clustering is a method of cluster analysis that aims to partition ''n'' points into ''k'' clusters in which each point belongs to the cluster with the nearest mean. It follows an iterative refinement strategy to find the centers of natural clusters in the data. + +When executed with COMPSs, K-means first generates the input points by means of initialization tasks. For parallelism purposes, the points are split in a number of fragments received as parameter, each fragment being created by an initialization task and filled with random points. + +After the initialization, the algorithm goes through a set of iterations. In every iteration, a computation task is created for each fragment; then, there is a reduction phase where the results of each computation are accumulated two at a time by merge tasks; finally, at the end of the iteration the main program post-processes the merged result, generating the current clusters that will be used in the next iteration. Consequently, if ''F'' is the total number of fragments, K-means generates ''F'' computation tasks and ''F-1'' merge tasks per iteration. 
+ +# Execution instructions +Usage: +``` +runcompss --classpath=application_sources/jar/kmeans.jar kmeans.KMeans <...> +``` + +where ''<...>'': +* -c Number of clusters +* -i Number of iterations +* -n Number of points +* -d Number of dimensions +* -f Number of fragments + +# Execution Examples +``` +runcompss --classpath=application_sources/jar/kmeans.jar kmeans.KMeans +runcompss --classpath=application_sources/jar/kmeans.jar kmeans.KMeans -c 4 -i 10 -n 2000 -d 2 -f 2 +``` + +# Build +## Option 1: Native java +``` +cd application_sources/; javac src/main/java/kmeans/*.java +cd src/main/java/; jar cf kmeans.jar kmeans/ +cd ../../../; mv src/main/java/kmeans.jar jar/ +``` + +## Option 2: Maven +``` +cd application_sources/ +mvn clean package +``` +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Java COMPSs K-means clustering example (executed at Marenostrum IV supercomputer, inputs generated by the code)" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "velocyto/Velocyto-on10X-from-bundled" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/velocyto" ; + schema1:version "0.2" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-04-30T10:49:33+00:00" ; + schema1:description """**Name:** Matrix multiplication with Files, reproducibility example +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs + +# Description +Matrix multiplication is a binary operation that takes a pair of matrices and produces another matrix. + +If A is an n×m matrix and B is an m×p matrix, the result AB of their multiplication is an n×p matrix defined only if the number of columns m in A is equal to the number of rows m in B. 
When multiplying A and B, the elements of the rows in A are multiplied with corresponding columns in B. + +In this implementation, A and B are square matrices (same number of rows and columns), and so it is the result matrix C. Each matrix is divided in N blocks of M doubles (N hardcoded to 2, and M hardcoded to 8). The multiplication of two blocks is done by a multiply task method with a simple three-nested-loop implementation. When executed with COMPSs, the main program generates N^3^ tasks arranged as N^2^ chains of N tasks in the dependency graph. + +# Reproducibility +To reproduce the exact results of this example, follow the instructions at the [Workflow Provenance section at COMPSs User Manual](https://compss-doc.readthedocs.io/en/stable/Sections/05_Tools/04_Workflow_Provenance.html) + +# Execution instructions +Usage: +``` +runcompss --lang=python src/matmul_files.py inputs_folder/ outputs_folder/ +``` + +where: +* inputs_folder/: Folder where A and B matrices are located +* outputs_folder/: Folder with the resulting C matrix + + +# Execution Examples +``` +runcompss --lang=python src/matmul_files.py inputs/ outputs/ +runcompss src/matmul_files.py inputs/ outputs/ +python -m pycompss src/matmul_files.py inputs/ outputs/ +``` + +# Build +No build is required +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "PyCOMPSs Matrix Multiplication, out-of-core using files, reproducibility example" ; + schema1:publisher . + + a schema1:MediaObject ; + schema1:contentSize 320 ; + schema1:dateModified "2024-04-30T10:49:29+00:00" ; + schema1:name "C.0.0" ; + schema1:sdDatePublished "2024-04-30T10:49:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 320 ; + schema1:dateModified "2024-04-30T10:49:29+00:00" ; + schema1:name "C.0.1" ; + schema1:sdDatePublished "2024-04-30T10:49:33+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 320 ; + schema1:dateModified "2024-04-30T10:49:29+00:00" ; + schema1:name "C.1.0" ; + schema1:sdDatePublished "2024-04-30T10:49:33+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 320 ; + schema1:dateModified "2024-04-30T10:49:29+00:00" ; + schema1:name "C.1.1" ; + schema1:sdDatePublished "2024-04-30T10:49:33+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-11-02T10:55:02+00:00" ; + schema1:description """**Name:** Word Count +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs + +# Description +Wordcount is an application that counts the number of words for a given set of files. + +To allow parallelism every file is treated separately and merged afterwards. + +# Execution instructions +Usage: +``` +runcompss --lang=python src/wordcount.py datasetPath +``` + +where: +* datasetPath: Absolute path of the file to parse (e.g. /home/compss/tutorial_apps/python/wordcount/data/) + +# Execution Examples +``` +runcompss --lang=python src/wordcount.py $(pwd)/data/ +runcompss src/wordcount.py $(pwd)/data/ +python -m pycompss src/wordcount.py $(pwd)/data/ +``` + +# Build +No build is required +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "PyCOMPSs Wordcount test, using files" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:version "0.2.1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "cutandrun/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/cutandrun" ; + schema1:version "0.4" . + + a schema1:HowTo, + schema1:MediaObject, + schema1:SoftwareSourceCode ; + schema1:contentSize 21805 ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + ; + schema1:name "bacterial_genome_annotation/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/bacterial_genome_annotation" ; + schema1:version "1.0" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "COVID-19: variation analysis reporting" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-variation-reporting" ; + schema1:version "0.1.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:version "0.1.6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "gromacs-mmgbsa/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/gromacs-mmgbsa" ; + schema1:version "0.1.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "cutandrun/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/cutandrun" ; + schema1:version "0.12" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "protein-ligand-complex-parameterization/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/protein-ligand-complex-parameterization" ; + schema1:version "0.1.2" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-08-02T08:27:40+00:00" ; + schema1:description """**Name:** Matrix Multiplication +**Contact Person:** support-compss@bsc.es +**Access Level:** public +**License Agreement:** Apache2 +**Platform:** COMPSs + +# Description +Matrix multiplication is a binary operation that takes a pair of matrices and produces another matrix. + +If A is an n×m matrix and B is an m×p matrix, the result AB of their multiplication is an n×p matrix defined only if the number of columns m in A is equal to the number of rows m in B. When multiplying A and B, the elements of the rows in A are multiplied with corresponding columns in B. + +In this implementation, A and B are square matrices (same number of rows and columns), and so it is the result matrix C. Each matrix is divided in N blocks of M doubles. The multiplication of two blocks is done by a multiply task method with a simple three-nested-loop implementation. When executed with COMPSs, the main program generates N^3^ tasks arranged as N^2^ chains of N tasks in the dependency graph. + +N and M have been hardcoded to 6 and 8 respectively. 
+ +# Execution instructions +Usage: +``` +runcompss --classpath=application_sources/jar/matmul.jar matmul.files.Matmul inputFolder/ outputFolder/ +``` + +where: + * inputFolder: folder where input files are located + * outputFolder: folder where output files are located + + +# Execution Example +``` +runcompss matmul.files.Matmul dataset/inputs/ dataset/outputs/ +``` + +# Build +## Option 1: Native java +``` +javac src/main/java/matmul/*/*.java +cd src/main/java/; jar cf matmul.jar matmul/ +cd ../../../; mv src/main/java/matmul.jar jar/ +``` + +## Option 2: Maven +``` +mvn clean package +``` +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Java COMPSs Matrix Multiplication, out-of-core using files, reproducible example, data persistence False, MareNostrum V" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + ; + schema1:name "bacterial-genome-assembly/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/bacterial-genome-assembly" ; + schema1:version "1.1.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "ChIPseq_PE/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/ChIPseq_PE" ; + schema1:version "0.1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-pe-illumina-artic-variant-calling/COVID-19-PE-ARTIC-ILLUMINA" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling" ; + schema1:version "0.5.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-Trio-phasing-VGP5/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5" ; + schema1:version "0.1.4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "generic-variant-calling-wgs-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/generic-variant-calling-wgs-pe" ; + schema1:version "0.1" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-06-23T13:59:37+00:00" ; + schema1:description """**Name:** SparseLU +**Contact Person:** support-compss@bsc.es +**Access Level:** public +**License Agreement:** Apache2 +**Platform:** COMPSs + +# Description +The Sparse LU application computes an LU matrix factorization on a sparse blocked matrix. The matrix size (number of blocks) and the block size are parameters of the application. + +As the algorithm progresses, the area of the matrix that is accessed is smaller; concretely, at each iteration, the 0th row and column of the current matrix are discarded. On the other hand, due to the sparseness of the matrix, some of its blocks might not be allocated and, therefore, no work is generated for them. + +When executed with COMPSs, Sparse LU produces several types of task with different granularity and numerous dependencies between them. + +# Versions +There are three versions of Sparse LU, depending on the data types used to store the blocks. 
+## Version 1 +''files'', where the matrix blocks are stored in files. +## Version 2 +''objects'', where the matrix blocks are represented by objects. +## Version 3 +''arrays'', where the matrix blocks are stored in arrays. + + +# Execution instructions +Usage: +``` +runcompss sparseLU.files.SparseLU numberOfBlocks blockSize +runcompss sparseLU.objects.SparseLU numberOfBlocks blockSize +runcompss sparseLU.arrays.SparseLU numberOfBlocks blockSize +``` + +where: + * numberOfBlocks: Number of blocks inside each matrix + * blockSize: Size of each block + + +# Execution Example +``` +runcompss sparseLU.objects.SparseLU 16 4 +runcompss sparseLU.files.SparseLU 16 4 +runcompss sparseLU.arrays.SparseLU 16 4 +``` + + +# Build +## Option 1: Native java +``` +cd application_sources/; javac src/main/java/sparseLU/*/*.java +cd src/main/java/; jar cf sparseLU.jar sparseLU/ +cd ../../../; mv src/main/java/sparseLU.jar jar/ +``` + +## Option 2: Maven +``` +cd application_sources/ +mvn clean package +``` +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Java COMPSs LU Factorization for Sparse Matrices" ; + schema1:publisher . + + a schema1:MediaObject ; + schema1:contentSize 1234 ; + schema1:dateModified "2023-06-23T13:59:35" ; + schema1:name "A.0.0" ; + schema1:sdDatePublished "2023-06-23T13:59:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1182 ; + schema1:dateModified "2023-06-23T13:59:35" ; + schema1:name "A.0.1" ; + schema1:sdDatePublished "2023-06-23T13:59:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1217 ; + schema1:dateModified "2023-06-23T13:59:35" ; + schema1:name "A.0.2" ; + schema1:sdDatePublished "2023-06-23T13:59:37+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 1244 ; + schema1:dateModified "2023-06-23T13:59:35" ; + schema1:name "A.1.0" ; + schema1:sdDatePublished "2023-06-23T13:59:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1238 ; + schema1:dateModified "2023-06-23T13:59:35" ; + schema1:name "A.1.1" ; + schema1:sdDatePublished "2023-06-23T13:59:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1233 ; + schema1:dateModified "2023-06-23T13:59:35" ; + schema1:name "A.1.2" ; + schema1:sdDatePublished "2023-06-23T13:59:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1214 ; + schema1:dateModified "2023-06-23T13:59:35" ; + schema1:name "A.2.0" ; + schema1:sdDatePublished "2023-06-23T13:59:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1255 ; + schema1:dateModified "2023-06-23T13:59:35" ; + schema1:name "A.2.1" ; + schema1:sdDatePublished "2023-06-23T13:59:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1251 ; + schema1:dateModified "2023-06-23T13:59:35" ; + schema1:name "A.2.2" ; + schema1:sdDatePublished "2023-06-23T13:59:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1219 ; + schema1:dateModified "2023-06-23T13:59:35" ; + schema1:name "A.2.3" ; + schema1:sdDatePublished "2023-06-23T13:59:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1290 ; + schema1:dateModified "2023-06-23T13:59:35" ; + schema1:name "A.3.2" ; + schema1:sdDatePublished "2023-06-23T13:59:37+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 1250 ; + schema1:dateModified "2023-06-23T13:59:35" ; + schema1:name "A.3.3" ; + schema1:sdDatePublished "2023-06-23T13:59:37+00:00" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-Trio-phasing-VGP5/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5" ; + schema1:version "0.1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-sr/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:version "0.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + ; + schema1:name "quality-and-contamination-control/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/quality-and-contamination-control" ; + schema1:version "1.0" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "atacseq/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/atacseq" ; + schema1:version "0.8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-pe-illumina-artic-ivar-analysis/SARS-COV-2-ILLUMINA-AMPLICON-IVAR-PANGOLIN-NEXTCLADE" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-ivar-analysis" ; + schema1:version "0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-ont-artic-variant-calling/COVID-19-ARTIC-ONT" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-ont-artic-variant-calling" ; + schema1:version "0.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "assembly-with-flye/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/assembly-with-flye" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "gromacs-dctmd/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/gromacs-dctmd" ; + schema1:version "0.1.1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "cutandrun/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/cutandrun" ; + schema1:version "0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-pe-illumina-wgs-variant-calling/COVID-19-PE-WGS-ILLUMINA" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-wgs-variant-calling" ; + schema1:version "0.2.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "atacseq/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/atacseq" ; + schema1:version "0.15" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "atacseq/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/atacseq" ; + schema1:version "0.6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-variation-reporting/COVID-19-VARIATION-REPORTING" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-variation-reporting" ; + schema1:version "0.1.2" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-07-05T11:38:51+00:00" ; + schema1:description """**Name:** KMeans +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs +**Machine**: MareNostrum5 + +KMEans for clustering the housing.csv dataset (https://github.com/sonarsushant/California-House-Price-Prediction/blob/master/housing.csv). 
+This application used [dislib-0.9.0](https://github.com/bsc-wdc/dislib/tree/release-0.9) +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "KMeans housing" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Scaffolding-HiC-VGP8/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:version "0.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + ; + schema1:name "bacterial_genome_annotation/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/bacterial_genome_annotation" ; + schema1:version "1.1.2" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-06-18T13:22:31+00:00" ; + schema1:description """**Name:** SparseLU +**Contact Person:** support-compss@bsc.es +**Access Level:** public +**License Agreement:** Apache2 +**Platform:** COMPSs + +# Description +The Sparse LU application computes an LU matrix factorization on a sparse blocked matrix. The matrix size (number of blocks) and the block size are parameters of the application. + +As the algorithm progresses, the area of the matrix that is accessed is smaller; concretely, at each iteration, the 0th row and column of the current matrix are discarded. On the other hand, due to the sparseness of the matrix, some of its blocks might not be allocated and, therefore, no work is generated for them. 
+ +When executed with COMPSs, Sparse LU produces several types of task with different granularity and numerous dependencies between them. + +# Versions +There are three versions of Sparse LU, depending on the data types used to store the blocks. +## Version 1 +''files'', where the matrix blocks are stored in files. +## Version 2 +''objects'', where the matrix blocks are represented by objects. +## Version 3 +''arrays'', where the matrix blocks are stored in arrays. + + +# Execution instructions +Usage: +``` +runcompss sparseLU.files.SparseLU numberOfBlocks blockSize +runcompss sparseLU.objects.SparseLU numberOfBlocks blockSize +runcompss sparseLU.arrays.SparseLU numberOfBlocks blockSize +``` + +where: + * numberOfBlocks: Number of blocks inside each matrix + * blockSize: Size of each block + + +# Execution Example +``` +runcompss sparseLU.objects.SparseLU 16 4 +runcompss sparseLU.files.SparseLU 16 4 +runcompss sparseLU.arrays.SparseLU 16 4 +``` + + +# Build +## Option 1: Native java +``` +cd application_sources/; javac src/main/java/sparseLU/*/*.java +cd src/main/java/; jar cf sparseLU.jar sparseLU/ +cd ../../../; mv src/main/java/sparseLU.jar jar/ +``` + +## Option 2: Maven +``` +cd application_sources/ +mvn clean package +``` +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Java COMPSs LU Factorization for Sparse Matrices, MareNostrum V, 3 nodes, no data persistence" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + , + ; + schema1:name "fastq-to-matrix-10x/scrna-seq-fastq-to-matrix-10x-cellplex" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/fastq-to-matrix-10x" ; + schema1:version "0.3" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "cutandrun/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/cutandrun" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + , + ; + schema1:name "fastq-to-matrix-10x/scrna-seq-fastq-to-matrix-10x-cellplex" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/fastq-to-matrix-10x" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "fragment-based-docking-scoring/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/fragment-based-docking-scoring" ; + schema1:version "0.1.1" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-01-24T14:28:04+00:00" ; + schema1:description "Lysozyme in water full COMPSs application, using dataset_small" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Lysozyme in water full version, using dataset_small" ; + schema1:publisher . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator , + ; + schema1:datePublished "2024-01-22T16:06:45+00:00" ; + schema1:description "Cluster Comparison COMPSs application" ; + schema1:hasPart , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Cluster Comparison" ; + schema1:publisher , + . 
+ + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-08-02T07:02:05+00:00" ; + schema1:description """**Name:** Matrix Multiplication +**Contact Person:** support-compss@bsc.es +**Access Level:** public +**License Agreement:** Apache2 +**Platform:** COMPSs + +# Description +Matrix multiplication is a binary operation that takes a pair of matrices and produces another matrix. + +If A is an n×m matrix and B is an m×p matrix, the result AB of their multiplication is an n×p matrix defined only if the number of columns m in A is equal to the number of rows m in B. When multiplying A and B, the elements of the rows in A are multiplied with corresponding columns in B. + +In this implementation, A and B are square matrices (same number of rows and columns), and so it is the result matrix C. Each matrix is divided in N blocks of M doubles. The multiplication of two blocks is done by a multiply task method with a simple three-nested-loop implementation. When executed with COMPSs, the main program generates N^3^ tasks arranged as N^2^ chains of N tasks in the dependency graph. + +N and M have been hardcoded to 6 and 8 respectively. 
+ +# Execution instructions +Usage: +``` +runcompss --classpath=application_sources/jar/matmul.jar matmul.files.Matmul inputFolder/ outputFolder/ +``` + +where: + * inputFolder: folder where input files are located + * outputFolder: folder where output files are located + + +# Execution Example +``` +runcompss matmul.files.Matmul dataset/inputs/ dataset/outputs/ +``` + +# Build +## Option 1: Native java +``` +javac src/main/java/matmul/*/*.java +cd src/main/java/; jar cf matmul.jar matmul/ +cd ../../../; mv src/main/java/matmul.jar jar/ +``` + +## Option 2: Maven +``` +mvn clean package +``` +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Java COMPSs Matrix Multiplication, out-of-core using files, reproducible example, data persistence True" ; + schema1:publisher . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.0.0" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.0.1" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.0.2" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.0.3" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.0.4" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.0.5" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.1.0" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.1.1" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.1.2" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.1.3" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.1.4" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.1.5" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.2.0" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.2.1" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.2.2" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.2.3" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.2.4" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.2.5" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.3.0" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.3.1" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.3.2" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.3.3" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.3.4" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.3.5" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.4.0" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.4.1" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.4.2" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.4.3" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.4.4" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.4.5" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.5.0" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.5.1" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.5.2" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.5.3" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.5.4" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T07:02:02+00:00" ; + schema1:name "C.5.5" ; + schema1:sdDatePublished "2024-08-02T07:02:05+00:00" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-06-18T13:37:29+00:00" ; + schema1:description "COMPSs Matrix Multiplication, out-of-core using files. Hypermatrix size used 2x2 blocks (MSIZE=2), block size used 2x2 elements (BSIZE=2)" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "PyCOMPSs Matrix Multiplication, out-of-core using files. Example using DIRECTORY parameters executed at laptop, data persistence True." ; + schema1:publisher . + + a schema1:Dataset ; + schema1:dateModified "2024-06-18T13:37:24+00:00" ; + schema1:hasPart , + , + , + ; + schema1:name "C" ; + schema1:sdDatePublished "2024-06-18T13:37:29+00:00" . + + a schema1:Dataset ; + schema1:creator , + , + ; + schema1:datePublished "2022-07-14T17:40:14+00:00" ; + schema1:description "Multi-band array detection and location of seismic sources. BackTrackBB is a program for detection and space-time location of seismic sources based on multi-scale, frequency-selective statistical coherence of the wave field recorded by dense large-scale seismic networks and local antennas. The method is designed to enhance coherence of the signal statistical features across the array of sensors and consists of three steps. 
They are signal processing, space-time imaging and detection and location." ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + 
, + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "CECILL-2.1" ; + schema1:mainEntity ; + schema1:name "BackTrackBB" ; + schema1:publisher , + . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 20476 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "btbb_continuous.py" ; + schema1:programmingLanguage ; + ns1:input , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , 
+ , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , 
+ , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , 
+ , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , 
+ , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , 
+ , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + ns1:output , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-Trio-phasing-VGP5/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5" ; + schema1:version "0.1.2" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "kmer-profiling-hifi-trio-VGP2/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/kmer-profiling-hifi-trio-VGP2" ; + schema1:version "0.1.2" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "1.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "rnaseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:version "0.2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:version "0.7" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-03-22T17:53:30+00:00" ; + schema1:description """**Name:** K-Means GPU +**Contact Person**: cristian.tatu@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs +**Machine**: Minotauro-MN4 + +K-Means running on the GPU leveraging COMPSs GPU Cache for deserialization speedup. +Launched using 32 GPUs (16 nodes). Parameters used: K=40 and 32 blocks of size (1_000_000, 1200). +It creates a block for each GPU. Total dataset shape is (32_000_000, 1200). +Version dislib-0.9.0 (https://github.com/bsc-wdc/dislib/tree/release-0.9) +""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "COMPSs GPU Cache K-Means" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "atacseq/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/atacseq" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "parallel-accession-download/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:version "0.1.4" . 
+ + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-07-18T11:10:51+00:00" ; + schema1:description "Lysozyme in water full COMPSs application" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Lysozyme in water full no MPI" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "rnaseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:version "0.4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:version "0.4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-se-illumina-wgs-variant-calling/COVID-19-SE-WGS-ILLUMINA" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-se-illumina-wgs-variant-calling" ; + schema1:version "0.1.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "kmer-profiling-hifi-trio-VGP2/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/kmer-profiling-hifi-trio-VGP2" ; + schema1:version "0.1.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-Trio-phasing-VGP5/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5" ; + schema1:version "0.1.6" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Scaffolding-HiC-VGP8/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:version "0.1.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "nanopore-pre-processing/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/nanopore-pre-processing" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "Assembly-decontamination-VGP9/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-decontamination-VGP9" ; + schema1:version "0.1.4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "kmer-profiling-hifi-trio-VGP2/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/kmer-profiling-hifi-trio-VGP2" ; + schema1:version "0.1.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-only-VGP3/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:version "0.1.7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "velocyto/Velocyto-on10X-from-bundled" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/velocyto" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "cutandrun/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/cutandrun" ; + schema1:version "0.6.1" . 
+ + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-03-22T11:39:26+00:00" ; + schema1:description """**Name:** Matmul GPU +**Contact Person**: cristian.tatu@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs +**Machine**: Minotauro-MN4 + +Matmul running on the GPU leveraging COMPSs GPU Cache for deserialization speedup. +Launched using 32 GPUs (16 nodes). +C = A @ B +Where A: shape (320, 56_900_000) block_size (10, 11_380_000) + B: shape (56_900_000, 10) block_size (11_380_000, 10) + C: shape (320, 10) block_size (10, 10) +Total dataset size 291 GB. +Version dislib-0.9 (https://github.com/bsc-wdc/dislib/tree/release-0.9) +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "COMPSs GPU Cache Matrix Multiplication" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-only-VGP3/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:version "0.1.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + ; + schema1:name "bacterial-genome-assembly/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/bacterial-genome-assembly" ; + schema1:version "1.1" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-03-25T11:02:59+00:00" ; + schema1:description """**Name:** PyTorch CNN - Imagenet +**Contact Person**: cristian.tatu@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs +**Machine**: Minotauro-MN4 + +PyTorch distributed training of CNN on GPU and leveraging COMPSs GPU Cache for deserialization speedup. +Launched using 32 GPUs (16 nodes). 
+Version dislib-0.9 +Version PyTorch 1.7.1+cu101 +""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "COMPSs GPU Cache PyTorch CNN Distributed Training" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "allele-based-pathogen-identification/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/allele-based-pathogen-identification" ; + schema1:version "0.1.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "parallel-accession-download/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:version "0.1.5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "brew3r/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/brew3r" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Scaffolding-Bionano-VGP7/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Scaffolding-Bionano-VGP7" ; + schema1:version "0.1.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + ; + schema1:name "bacterial-genome-assembly/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/bacterial-genome-assembly" ; + schema1:version "1.0" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "atacseq/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/atacseq" ; + schema1:version "0.7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:version "0.2.2" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-03-25T10:49:09+00:00" ; + schema1:description """**Name:** PyTorch CNN - Imagenet +**Contact Person**: cristian.tatu@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs +**Machine**: Minotauro-MN4 + +PyTorch distributed training of CNN on GPU and leveraging COMPSs GPU Cache for deserialization speedup. +Launched using 32 GPUs (16 nodes). +Version dislib-0.9 +Version PyTorch 1.7.1+cu101 +""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "COMPSs GPU Cache PyTorch CNN Distributed Training" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "Assembly-decontamination-VGP9/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-decontamination-VGP9" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "taxonomy-profiling-and-visualization-with-krona/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/taxonomy-profiling-and-visualization-with-krona" ; + schema1:version "0.1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "cutandrun/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/cutandrun" ; + schema1:version "0.6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-pe-illumina-artic-variant-calling/COVID-19-PE-ARTIC-ILLUMINA" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling" ; + schema1:version "0.5.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "parallel-accession-download/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:version "0.1.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "rnaseq-sr/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/rnaseq-sr" ; + schema1:version "0.8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "rnaseq-sr/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/rnaseq-sr" ; + schema1:version "0.7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Scaffolding-HiC-VGP8/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:version "0.1.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + ; + schema1:name "quality-and-contamination-control/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/quality-and-contamination-control" ; + schema1:version "1.1.1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "gromacs-dctmd/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/gromacs-dctmd" ; + schema1:version "0.1.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:version "0.1.7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-sr/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:version "0.10" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "atacseq/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/atacseq" ; + schema1:version "0.16" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Scaffolding-Bionano-VGP7/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Scaffolding-Bionano-VGP7" ; + schema1:version "0.1.1" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator , + ; + schema1:datePublished "2023-07-20T15:48:28+00:00" ; + schema1:description "Sample workflow that combines simulations with data analytics. It is not a real workflow, but mimics this type of workflows." ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "My Workflow Multiple" ; + schema1:publisher . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "rnaseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:version "0.7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "rnaseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:version "0.8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:version "0.1.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "atacseq/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/atacseq" ; + schema1:version "0.5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "atacseq/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/atacseq" ; + schema1:version "0.9" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-sr/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:version "0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-variation-reporting/COVID-19-VARIATION-REPORTING" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-variation-reporting" ; + schema1:version "0.2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-consensus-from-variation/COVID-19-CONSENSUS-CONSTRUCTION" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-consensus-from-variation" ; + schema1:version "0.2.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "cutandrun/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/cutandrun" ; + schema1:version "0.11" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "atacseq/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/atacseq" ; + schema1:version "0.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "hic-hicup-cooler/hic-fastq-to-cool-hicup-cooler" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:version "0.1.11" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Scaffolding-Bionano-VGP7/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Scaffolding-Bionano-VGP7" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "gromacs-mmgbsa/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/gromacs-mmgbsa" ; + schema1:version "0.1.2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "average-bigwig-between-replicates/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/average-bigwig-between-replicates" ; + schema1:version "0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-only-VGP3/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:version "0.1.2" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-07-12T10:15:07+00:00" ; + schema1:description "Lysozyme in water full COMPSs application" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Lysozyme in water full" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "hic-hicup-cooler/hic-fastq-to-cool-hicup-cooler" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:version "0.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "atacseq/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/atacseq" ; + schema1:version "0.4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:version "0.1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "rnaseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:version "0.4.1" . + + a schema1:Dataset ; + dct:conformsTo ; + schema1:assesses "Research Question: How effective have the SARS-CoV-2 vaccination programmes been in preventing SARS-CoV-2 infections?" ; + schema1:author , + , + , + , + ; + schema1:codeRepository "https://github.com/by-covid/BY-COVID_WP5_T5.2_baseline-use-case" ; + schema1:dateModified "2023-10-05" ; + schema1:datePublished "2023-04-19" ; + schema1:description "This publication corresponds to the Research Objects (RO) of the Baseline Use Case proposed in T.5.2 (WP5) in the BY-COVID project on “COVID-19 Vaccine(s) effectiveness in preventing SARS-CoV-2 infection”." ; + schema1:distribution ; + schema1:funder ; + schema1:funding ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.6913045" ; + schema1:isBasedOn , + , + , + ; + schema1:keywords "COVID-19, vaccines, comparative effectiveness, causal inference, international comparison, SARS-CoV-2, common data model, directed acyclic graph, synthetic data" ; + schema1:license ; + schema1:material "Cohort definition: All individuals (from 5 to 115 years old, included) vaccinated with at least one dose of the SARS-CoV-2 vaccine (any of the available brands) and all individuals eligible to be vaccinated with a documented positive diagnosis (irrespective of the type of test) for a SARS-CoV-2 infection during the data extraction period." ; + schema1:materialExtent """Inclusion criteria: + All people vaccinated with at least one dose of the COVID-19 vaccine (any of the available brands) in an area of residence. 
Any person eligible to be vaccinated (from 5 to 115 years old, included) with a positive diagnosis (irrespective of the type of test) for SARS-CoV-2 infection (COVID-19) during the period of data extraction. + + Exclusion criteria: + People not eligible for the vaccine (from 0 to 4 years old, included)""" ; + schema1:mentions , + , + , + , + , + ; + schema1:name "BY-COVID WP5 T5.2 Baseline Use Case" ; + schema1:publisher ; + schema1:publishingPrinciples """Study Design: An observational retrospective longitudinal study to assess the effectiveness of the SARS-CoV-2 vaccines in preventing SARS-CoV-2 infections using routinely collected social, health and care data from several countries. + +A causal model was established using Directed Acyclic Graphs (DAGs) to map domain knowledge, theories and assumptions about the causal relationship between exposure and outcome. """ ; + schema1:releaseNotes """- Updated Causal model to eliminate the consideration of 'vaccination_schedule_cd' as a mediator +- Adjusted the study period to be consistent with the Study Protocol +- Updated 'sex_cd' as a required variable +- Added 'chronic_liver_disease_bl' as a comorbidity at the individual level +- Updated 'socecon_lvl_cd' at the area level as a recommended variable + -Added crosswalks for the definition of 'chronic_liver_disease_bl' in a separate sheet + -Updated the 'vaccination_schedule_cd' reference to the 'Vaccine' node in the updated DAG + -Updated the description of the 'confirmed_case_dt' and 'previous_infection_dt' variables to clarify the definition and the need for a single registry per person""" ; + schema1:temporalCoverage "Study Period: From the date of the first documented SARS-CoV-2 infection in each country to the most recent date in which data is available at the time of analysis. Roughly from 01-03-2020 to 30-06-2022, depending on the country." 
; + schema1:url "https://by-covid.github.io/BY-COVID_WP5_T5.2_baseline-use-case/" ; + schema1:usageInfo "The scripts (software) included in the publication are offered \"as-is\", without warranty, and disclaiming liability for damages resulting from using it. The software is released under the CC-BY-4.0 licence, which gives you permission to use the content for almost any purpose (but does not grant you any trademark permissions), so long as you note the license and give credit." ; + schema1:version "1.2.0" ; + rel:cite-as . + + a schema1:Dataset ; + schema1:datePublished "2023-01-26" ; + schema1:description "following the causal model" ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.6913045" ; + schema1:name "Common data model specification" ; + schema1:url "https://zenodo.org/record/7572373" ; + schema1:version "1.1.0" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "atacseq/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/atacseq" ; + schema1:version "0.14" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Scaffolding-HiC-VGP8/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:version "0.2.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-ont-artic-variant-calling/COVID-19-ARTIC-ONT" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-ont-artic-variant-calling" ; + schema1:version "0.3.1" . 
+ + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-04-30T13:04:29+00:00" ; + schema1:description """**Name:** Matrix multiplication with Files, reproducibility example, without data persistence +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs + +# Description +Matrix multiplication is a binary operation that takes a pair of matrices and produces another matrix. + +If A is an n×m matrix and B is an m×p matrix, the result AB of their multiplication is an n×p matrix defined only if the number of columns m in A is equal to the number of rows m in B. When multiplying A and B, the elements of the rows in A are multiplied with corresponding columns in B. + +In this implementation, A and B are square matrices (same number of rows and columns), and so it is the result matrix C. Each matrix is divided in N blocks of M doubles (N hardcoded to 2, and M hardcoded to 8). The multiplication of two blocks is done by a multiply task method with a simple three-nested-loop implementation. When executed with COMPSs, the main program generates N^3^ tasks arranged as N^2^ chains of N tasks in the dependency graph. + +# Reproducibility +To reproduce the exact results of this example, follow the instructions at the [Workflow Provenance section at COMPSs User Manual](https://compss-doc.readthedocs.io/en/stable/Sections/05_Tools/04_Workflow_Provenance.html), WITHOUT data persistence, PyCOMPSs application. 
+ +# Execution instructions +Usage: +``` +runcompss --lang=python src/matmul_files.py inputs_folder/ outputs_folder/ +``` + +where: +* inputs_folder/: Folder where A and B matrices are located +* outputs_folder/: Folder with the resulting C matrix + + +# Execution Examples +``` +runcompss --lang=python src/matmul_files.py inputs/ outputs/ +runcompss src/matmul_files.py inputs/ outputs/ +python -m pycompss src/matmul_files.py inputs/ outputs/ +``` + +# Build +No build is required +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "PyCOMPSs Matrix Multiplication, out-of-core using files, MareNostrum V, reproducibility example, without data persistence" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "cutandrun/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/cutandrun" ; + schema1:version "0.10" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-07-04T13:38:20+00:00" ; + schema1:description """**Name:** TruncatedSVD (Randomized SVD) +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs +**Machine**: MareNostrum5 + +TruncatedSVD (Randomized SVD) for computing just 456 singular values out of a (4.5M x 850) size matrix. +The input matrix represents a CFD transient simulation of air moving past a cylinder. 
+This application used [dislib-0.9.0](https://github.com/bsc-wdc/dislib/tree/release-0.9) +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Randomized SVD" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 13899 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:name "random_svd_compss.py" ; + schema1:programmingLanguage ; + schema1:softwareRequirements . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "amr_gene_detection/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/amr_gene_detection" ; + schema1:version "1.1" . + + a schema1:Dataset ; + dct:conformsTo ; + schema1:assesses "Research Question: How effective have the SARS-CoV-2 vaccination programmes been in preventing SARS-CoV-2 infections?" 
; + schema1:author , + , + , + , + ; + schema1:codeRepository "https://github.com/by-covid/BY-COVID_WP5_T5.2_baseline-use-case" ; + schema1:datePublished "2023-04-19" ; + schema1:description "This publication corresponds to the Research Objects (RO) of the Baseline Use Case proposed in T.5.2 (WP5) in the BY-COVID project on “COVID-19 Vaccine(s) effectiveness in preventing SARS-CoV-2 infection”." ; + schema1:funder ; + schema1:funding ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.6913045" ; + schema1:isBasedOn , + , + , + ; + schema1:keywords "COVID-19, vaccines, comparative effectiveness, causal inference, international comparison, SARS-CoV-2, common data model, directed acyclic graph, synthetic data" ; + schema1:license ; + schema1:material "Cohort definition: All individuals (from 5 to 115 years old, included) vaccinated with at least one dose of the SARS-CoV-2 vaccine (any of the available brands) and all individuals eligible to be vaccinated with a documented positive diagnosis (irrespective of the type of test) for a SARS-CoV-2 infection during the data extraction period." ; + schema1:materialExtent """Inclusion criteria: + All people vaccinated with at least one dose of the COVID-19 vaccine (any of the available brands) in an area of residence. Any person eligible to be vaccinated (from 5 to 115 years old, included) with a positive diagnosis (irrespective of the type of test) for SARS-CoV-2 infection (COVID-19) during the period of data extraction. 
+ + Exclusion criteria: + People not eligible for the vaccine (from 0 to 4 years old, included)""" ; + schema1:mentions , + , + , + , + , + ; + schema1:name "BY-COVID WP5 T5.2 Baseline Use Case" ; + schema1:publisher ; + schema1:publishingPrinciples """Study Design: An observational retrospective longitudinal study to assess the effectiveness of the SARS-CoV-2 vaccines in preventing SARS-CoV-2 infections using routinely collected social, health and care data from several countries. + +A causal model was established using Directed Acyclic Graphs (DAGs) to map domain knowledge, theories and assumptions about the causal relationship between exposure and outcome. """ ; + schema1:releaseNotes """- Updated Causal model to eliminate the consideration of 'vaccination_schedule_cd' as a mediator +- Adjusted the study period to be consistent with the Study Protocol +- Updated 'sex_cd' as a required variable +- Added 'chronic_liver_disease_bl' as a comorbidity at the individual level +- Updated 'socecon_lvl_cd' at the area level as a recommended variable + -Added crosswalks for the definition of 'chronic_liver_disease_bl' in a separate sheet + -Updated the 'vaccination_schedule_cd' reference to the 'Vaccine' node in the updated DAG + -Updated the description of the 'confirmed_case_dt' and 'previous_infection_dt' variables to clarify the definition and the need for a single registry per person""" ; + schema1:temporalCoverage "Study Period: From the date of the first documented SARS-CoV-2 infection in each country to the most recent date in which data is available at the time of analysis. Roughly from 01-03-2020 to 30-06-2022, depending on the country." ; + schema1:url "https://by-covid.github.io/BY-COVID_WP5_T5.2_baseline-use-case/" ; + schema1:usageInfo "The scripts (software) included in the publication are offered \"as-is\", without warranty, and disclaiming liability for damages resulting from using it. 
The software is released under the CC-BY-4.0 licence, which gives you permission to use the content for almost any purpose (but does not grant you any trademark permissions), so long as you note the license and give credit." ; + schema1:version "1.2.0" . + + a schema1:Dataset ; + schema1:datePublished "2023-01-26" ; + schema1:description "following the causal model" ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.6913045" ; + schema1:name "Common data model specification" ; + schema1:url "https://zenodo.org/record/7572373" ; + schema1:version "1.1.0" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-11-16T14:25:14+00:00" ; + schema1:description """**Contact Person:** support-compss@bsc.es +**Access Level:** public +**License Agreement:** Apache2 +**Platform:** COMPSs + +# Description + +Simple is an application that takes one value and increases it by five units. The purpose of this application is to show how tasks are managed by COMPSs. + +# Execution instructions +Usage: +``` +runcompss --lang=python src/simple.py initValue +``` + +where: +* initValue: Initial value for counter + +# Execution Examples +``` +runcompss --lang=python src/simple.py 1 +runcompss src/simple.py 1 +python -m pycompss src/simple.py 1 +``` + +# Build +No build is required +""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "PyCOMPSs simple example (ran on macOS laptop, input generated by the code, INOUT file example)" ; + schema1:publisher . + + a schema1:MediaObject ; + schema1:contentSize 1 ; + schema1:dateModified "2023-11-16T14:25:10" ; + schema1:name "counter" ; + schema1:sdDatePublished "2023-11-16T14:25:14+00:00" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:version "0.2.4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "openms-metaprosip/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/openms-metaprosip" ; + schema1:version "0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "openms-metaprosip/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/openms-metaprosip" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "parallel-accession-download/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:version "0.1.3" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-03-28T09:14:42+00:00" ; + schema1:description "Calculates the Fibonacci series up to a specified length." ; + schema1:hasPart , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "My COMPSs Fibonacci Series" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicates-one-haplotype-VGP6b/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b" ; + schema1:version "0.4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "polish-with-long-reads/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/polish-with-long-reads" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "gromacs-mmgbsa/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/gromacs-mmgbsa" ; + schema1:version "0.1.4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicates-one-haplotype-VGP6b/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-only-VGP3/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:version "0.1.4" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.4" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:assesses "Research Question: How effective have the SARS-CoV-2 vaccination programmes been in preventing SARS-CoV-2 infections?" 
; + schema1:author , + , + , + , + ; + schema1:codeRepository "https://github.com/by-covid/BY-COVID_WP5_T5.2_baseline-use-case" ; + schema1:dateModified "2023-10-05" ; + schema1:datePublished "2023-04-19" ; + schema1:description "This publication corresponds to the Research Objects (RO) of the Baseline Use Case proposed in T.5.2 (WP5) in the BY-COVID project on “COVID-19 Vaccine(s) effectiveness in preventing SARS-CoV-2 infection”." ; + schema1:distribution ; + schema1:funder ; + schema1:funding ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.6913045" ; + schema1:isBasedOn , + , + , + ; + schema1:keywords "COVID-19, vaccines, comparative effectiveness, causal inference, international comparison, SARS-CoV-2, common data model, directed acyclic graph, synthetic data" ; + schema1:license ; + schema1:material "Cohort definition: All individuals (from 5 to 115 years old, included) vaccinated with at least one dose of the SARS-CoV-2 vaccine (any of the available brands) and all individuals eligible to be vaccinated with a documented positive diagnosis (irrespective of the type of test) for a SARS-CoV-2 infection during the data extraction period." ; + schema1:materialExtent """Inclusion criteria: + All people vaccinated with at least one dose of the COVID-19 vaccine (any of the available brands) in an area of residence. Any person eligible to be vaccinated (from 5 to 115 years old, included) with a positive diagnosis (irrespective of the type of test) for SARS-CoV-2 infection (COVID-19) during the period of data extraction. 
+ + Exclusion criteria: + People not eligible for the vaccine (from 0 to 4 years old, included)""" ; + schema1:mentions , + , + , + , + , + ; + schema1:name "BY-COVID WP5 T5.2 Baseline Use Case" ; + schema1:publisher ; + schema1:publishingPrinciples """Study Design: An observational retrospective longitudinal study to assess the effectiveness of the SARS-CoV-2 vaccines in preventing SARS-CoV-2 infections using routinely collected social, health and care data from several countries. + +A causal model was established using Directed Acyclic Graphs (DAGs) to map domain knowledge, theories and assumptions about the causal relationship between exposure and outcome. """ ; + schema1:releaseNotes """- Updated Causal model to eliminate the consideration of 'vaccination_schedule_cd' as a mediator +- Adjusted the study period to be consistent with the Study Protocol +- Updated 'sex_cd' as a required variable +- Added 'chronic_liver_disease_bl' as a comorbidity at the individual level +- Updated 'socecon_lvl_cd' at the area level as a recommended variable + -Added crosswalks for the definition of 'chronic_liver_disease_bl' in a separate sheet + -Updated the 'vaccination_schedule_cd' reference to the 'Vaccine' node in the updated DAG + -Updated the description of the 'confirmed_case_dt' and 'previous_infection_dt' variables to clarify the definition and the need for a single registry per person""" ; + schema1:temporalCoverage "Study Period: From the date of the first documented SARS-CoV-2 infection in each country to the most recent date in which data is available at the time of analysis. Roughly from 01-03-2020 to 30-06-2022, depending on the country." ; + schema1:url "https://by-covid.github.io/BY-COVID_WP5_T5.2_baseline-use-case/" ; + schema1:usageInfo "The scripts (software) included in the publication are offered \"as-is\", without warranty, and disclaiming liability for damages resulting from using it. 
The software is released under the CC-BY-4.0 licence, which gives you permission to use the content for almost any purpose (but does not grant you any trademark permissions), so long as you note the license and give credit." ; + schema1:version "1.2.0" ; + rel:cite-as . + + a schema1:Dataset ; + schema1:datePublished "2023-01-26" ; + schema1:description "following the causal model" ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.6913045" ; + schema1:name "Common data model specification" ; + schema1:url "https://zenodo.org/record/7572373" ; + schema1:version "1.1.0" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-only-VGP3/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:version "0.1.5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Scaffolding-Bionano-VGP7/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Scaffolding-Bionano-VGP7" ; + schema1:version "0.1.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:version "0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:version "0.1.9" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.5" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-02-14T13:47:39+00:00" ; + schema1:description """**Name:** Random Forest +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs +**Machine**: MareNostrum4 +This is an example of Random Forest algorithm from dislib. To show the usage, the code generates a synthetical input matrix. +The results are printed by screen. +This application used [dislib-0.9.0](https://github.com/bsc-wdc/dislib/tree/release-0.9) +""" ; + schema1:hasPart , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Random Forest executed in 3 worker nodes, with a generated dataset, using 1 Million rows x 100 features" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "rnaseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:version "0.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "average-bigwig-between-replicates/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/average-bigwig-between-replicates" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "baredsc/baredSC-1d-logNorm" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/baredsc" ; + schema1:version "0.2" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:version "0.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:version "0.2" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-04-19T08:05:56+00:00" ; + schema1:description """**Name:** Incrementation and Fibonacci +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs + +# Description +**Brief Overview:** Demonstrates COMPSs task parallelism with increment and Fibonacci computations. + +**Detailed Description:** + Performs multiple increments of input values in parallel using COMPSs. + Concurrently calculates Fibonacci numbers using recursive COMPSs tasks. + Demonstrates task synchronization via `compss_wait_on`. + +# Execution instructions +Usage: +``` +runcompss src/increment_fibonacci.py value1 Value2 Value3 +#add more values if you want + +``` + +# Execution Examples +``` +runcompss src/increment_fibonacci.py 1 4 3 9 6 9 + +``` + +# Build +No build is required +""" ; + schema1:hasPart , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "COMPSs Incrementation and Fibonacci series example" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "baredsc/baredSC-1d-logNorm" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/baredsc" ; + schema1:version "0.4" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:image ; + schema1:name "" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/crs4/fair-crcc-send-data" ; + schema1:version "1.0" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicates-one-haplotype-VGP6b/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b" ; + schema1:version "0.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-only-VGP3/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:version "0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-Trio-phasing-VGP5/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5" ; + schema1:version "0.1.8" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + ; + schema1:name "bacterial_genome_annotation/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/bacterial_genome_annotation" ; + schema1:version "1.1.1" . + + a schema1:Dataset ; + dct:conformsTo ; + schema1:assesses "Research Question: How effective have the SARS-CoV-2 vaccination programmes been in preventing SARS-CoV-2 infections?" ; + schema1:author , + , + , + , + ; + schema1:codeRepository "https://github.com/by-covid/BY-COVID_WP5_T5.2_baseline-use-case" ; + schema1:dateModified "2023-10-05" ; + schema1:datePublished "2023-04-19" ; + schema1:description "This publication corresponds to the Research Objects (RO) of the Baseline Use Case proposed in T.5.2 (WP5) in the BY-COVID project on “COVID-19 Vaccine(s) effectiveness in preventing SARS-CoV-2 infection”." 
; + schema1:distribution ; + schema1:funder ; + schema1:funding ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.6913045" ; + schema1:isBasedOn , + , + , + ; + schema1:keywords "COVID-19, vaccines, comparative effectiveness, causal inference, international comparison, SARS-CoV-2, common data model, directed acyclic graph, synthetic data" ; + schema1:license ; + schema1:material "Cohort definition: All individuals (from 5 to 115 years old, included) vaccinated with at least one dose of the SARS-CoV-2 vaccine (any of the available brands) and all individuals eligible to be vaccinated with a documented positive diagnosis (irrespective of the type of test) for a SARS-CoV-2 infection during the data extraction period." ; + schema1:materialExtent """Inclusion criteria: + All people vaccinated with at least one dose of the COVID-19 vaccine (any of the available brands) in an area of residence. Any person eligible to be vaccinated (from 5 to 115 years old, included) with a positive diagnosis (irrespective of the type of test) for SARS-CoV-2 infection (COVID-19) during the period of data extraction. + + Exclusion criteria: + People not eligible for the vaccine (from 0 to 4 years old, included)""" ; + schema1:mentions , + , + , + , + , + ; + schema1:name "BY-COVID WP5 T5.2 Baseline Use Case" ; + schema1:publisher ; + schema1:publishingPrinciples """Study Design: An observational retrospective longitudinal study to assess the effectiveness of the SARS-CoV-2 vaccines in preventing SARS-CoV-2 infections using routinely collected social, health and care data from several countries. + +A causal model was established using Directed Acyclic Graphs (DAGs) to map domain knowledge, theories and assumptions about the causal relationship between exposure and outcome. 
""" ; + schema1:releaseNotes """- Updated Causal model to eliminate the consideration of 'vaccination_schedule_cd' as a mediator +- Adjusted the study period to be consistent with the Study Protocol +- Updated 'sex_cd' as a required variable +- Added 'chronic_liver_disease_bl' as a comorbidity at the individual level +- Updated 'socecon_lvl_cd' at the area level as a recommended variable + -Added crosswalks for the definition of 'chronic_liver_disease_bl' in a separate sheet + -Updated the 'vaccination_schedule_cd' reference to the 'Vaccine' node in the updated DAG + -Updated the description of the 'confirmed_case_dt' and 'previous_infection_dt' variables to clarify the definition and the need for a single registry per person""" ; + schema1:temporalCoverage "Study Period: From the date of the first documented SARS-CoV-2 infection in each country to the most recent date in which data is available at the time of analysis. Roughly from 01-03-2020 to 30-06-2022, depending on the country." ; + schema1:url "https://by-covid.github.io/BY-COVID_WP5_T5.2_baseline-use-case/" ; + schema1:usageInfo "The scripts (software) included in the publication are offered \"as-is\", without warranty, and disclaiming liability for damages resulting from using it. The software is released under the CC-BY-4.0 licence, which gives you permission to use the content for almost any purpose (but does not grant you any trademark permissions), so long as you note the license and give credit." ; + schema1:version "1.2.0" ; + rel:cite-as . + + a schema1:Dataset ; + schema1:datePublished "2023-01-26" ; + schema1:description "following the causal model" ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.6913045" ; + schema1:name "Common data model specification" ; + schema1:url "https://zenodo.org/record/7572373" ; + schema1:version "1.1.0" . 
+ + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-11-24T08:24:54+00:00" ; + schema1:description """**Name:** Increment +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs + +# Description +Increment is an application that takes three different values and increases them a number of given times. + +The purpose of this application is to show parallelism between the different increments. + +# Execution instructions +Usage: +``` +runcompss --lang=python src/increment.py N initValue1 initValue2 initValue3 +``` + +where: +* N: Number of times to increase the counters +* initValue1: Initial value for counter 1 +* initValue2: Initial value for counter 2 +* initValue3: Initial value for counter 3 + +# Execution Examples +``` +runcompss --lang=python src/increment.py 10 1 2 3 +runcompss src/wordcount.py src/increment.py 10 1 2 3 +python -m pycompss src/wordcount.py src/increment.py 10 1 2 3 +``` + +# Build +No build is required +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "PyCOMPSs Increment example, ran at Marenostrum IV supercomputer, example of INOUT file and compss_open usage" ; + schema1:publisher . + + a schema1:MediaObject ; + schema1:contentSize 2 ; + schema1:dateModified "2023-11-24T08:24:50" ; + schema1:name "file1" ; + schema1:sdDatePublished "2023-11-24T08:24:54+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2 ; + schema1:dateModified "2023-11-24T08:24:50" ; + schema1:name "file2" ; + schema1:sdDatePublished "2023-11-24T08:24:54+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 2 ; + schema1:dateModified "2023-11-24T08:24:50" ; + schema1:name "file3" ; + schema1:sdDatePublished "2023-11-24T08:24:54+00:00" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-sr/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:version "0.11" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "fragment-based-docking-scoring/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/fragment-based-docking-scoring" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-only-VGP3/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-only-VGP3" ; + schema1:version "0.1" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator , + ; + schema1:datePublished "2024-01-22T15:53:19+00:00" ; + schema1:description "K-means COMPSs application" ; + schema1:hasPart , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "K-means" ; + schema1:publisher , + . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Assembly-Hifi-Trio-phasing-VGP5/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-Trio-phasing-VGP5" ; + schema1:version "0.1.3" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-06-06T10:12:50+00:00" ; + schema1:description "Small program using PyCOMPSs to perform a Monte Carlo simulation to estimate the value of Pi. The idea is to randomly generate points in a unit square and count how many fall inside the unit circle. The ratio of the points inside the circle to the total number of points gives an approximation of Pi/4." 
; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Monte Carlo Simulation for Pi Estimation" ; + schema1:publisher . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-01-25T16:09:26+00:00" ; + schema1:description "Lysozyme in water sample COMPSs application" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Lysozyme in water sample" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Scaffolding-HiC-VGP8/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:version "0.2.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Purge-duplicates-one-haplotype-VGP6b/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Purge-duplicates-one-haplotype-VGP6b" ; + schema1:version "0.7" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "cutandrun/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/cutandrun" ; + schema1:version "0.5" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "rnaseq-sr/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/rnaseq-sr" ; + schema1:version "0.5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:version "0.2" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-03-22T17:53:30+00:00" ; + schema1:description """**Name:** K-Means GPU +**Contact Person**: cristian.tatu@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs +**Machine**: Minotauro-MN4 + +K-Means running on the GPU leveraging COMPSs GPU Cache for deserialization speedup. +Launched using 32 GPUs (16 nodes). Parameters used: K=40 and 32 blocks of size (1_000_000, 1200). +It creates a block for each GPU. Total dataset shape is (32_000_000, 1200). +Version dislib-0.9.0 (https://github.com/bsc-wdc/dislib/tree/release-0.9) +""" ; + schema1:hasPart , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "COMPSs GPU Cache K-Means" ; + schema1:publisher . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator , + ; + schema1:datePublished "2024-01-22T16:19:50+00:00" ; + schema1:description "Wordcount COMPSs application" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Wordcount" ; + schema1:publisher , + . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "parallel-accession-download/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:version "0.1.14" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "baredsc/baredSC-1d-logNorm" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/baredsc" ; + schema1:version "0.5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "kmer-profiling-hifi-VGP1/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/kmer-profiling-hifi-VGP1" ; + schema1:version "0.1.5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "COVID-19: variation analysis on ARTIC PE data" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling" ; + schema1:version "0.4.1" . + + a schema1:Dataset ; + schema1:author , + ; + schema1:citation ; + schema1:contactPoint ; + schema1:datePublished "2021-09-28T17:27:00+0100" ; + schema1:description "This PyCOMPSs workflow tutorial aims to illustrate the process of setting up a simulation system containing a protein, step by step, using the BioExcel Building Blocks library (biobb) in PyCOMPSs for execution on HPC. Three variants of the MD Setup workflows are included, supporting a list of structures, a list of mutations, or a cumulative set of mutations. " ; + schema1:hasPart , + , + ; + schema1:license ; + schema1:mainEntity ; + schema1:name "Protein MD Setup HPC tutorial using BioExcel Building Blocks (biobb) in PyCOMPSs" ; + schema1:publisher ; + schema1:url "https://github.com/bioexcel/biobb_hpc_workflows" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "amr_gene_detection/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/amr_gene_detection" ; + schema1:version "1.0" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "hic-hicup-cooler/hic-fastq-to-cool-hicup-cooler" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/hic-hicup-cooler" ; + schema1:version "0.2.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-sr/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:version "0.5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Plot-Nx-Size/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Plot-Nx-Size" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "COVID-19: variation analysis on WGS SE data" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-se-illumina-wgs-variant-calling" ; + schema1:version "0.1.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:version "0.5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "baredsc/baredSC-1d-logNorm" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/baredsc" ; + schema1:version "0.1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "fluorescence-nuclei-segmentation-and-counting/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/fluorescence-nuclei-segmentation-and-counting" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "rnaseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/rnaseq-pe" ; + schema1:version "0.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "kmer-profiling-hifi-VGP1/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/kmer-profiling-hifi-VGP1" ; + schema1:version "0.1.6" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-pe-illumina-artic-variant-calling/COVID-19-PE-ARTIC-ILLUMINA" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-variant-calling" ; + schema1:version "0.5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-consensus-from-variation/COVID-19-CONSENSUS-CONSTRUCTION" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-consensus-from-variation" ; + schema1:version "0.4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "rnaseq-sr/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/rnaseq-sr" ; + schema1:version "0.2" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "rnaseq-sr/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/rnaseq-sr" ; + schema1:version "0.1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Scaffolding-HiC-VGP8/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Scaffolding-HiC-VGP8" ; + schema1:version "0.2.5" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator , + ; + schema1:datePublished "2024-01-22T15:55:23+00:00" ; + schema1:description "Cholesky COMPSs application" ; + schema1:hasPart , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Cholesky" ; + schema1:publisher , + . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-11-03T08:10:10+00:00" ; + schema1:description """**Name:** TruncatedSVD (Randomized SVD) +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs +**Machine**: MareNostrum4 + +TruncatedSVD (Randomized SVD) for computing just 456 singular values out of a (3.6M x 1200) size matrix. +The input matrix represents a CFD transient simulation of air moving past a cylinder. +This application used dislib-0.9.0 (https://github.com/bsc-wdc/dislib/tree/release-0.9) +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Randomized SVD" ; + schema1:publisher . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Parallel Accession Download" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/parallel-accession-download" ; + schema1:version "0.1.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:version "0.1.8" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator , + ; + schema1:datePublished "2024-01-22T16:32:29+00:00" ; + schema1:description "Wordcount COMPSs application" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Wordcount" ; + schema1:publisher , + . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-sr/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-sr" ; + schema1:version "0.4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + ; + schema1:name "bacterial_genome_annotation/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/bacterial_genome_annotation" ; + schema1:version "1.1" . 
+ + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.2" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "consensus-peaks/consensus-peaks-chip-sr" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/consensus-peaks" ; + schema1:version "0.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "Assembly-Hifi-HiC-phasing-VGP4/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-Hifi-HiC-phasing-VGP4" ; + schema1:version "0.1.4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:version "0.2.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "gromacs-mmgbsa/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/gromacs-mmgbsa" ; + schema1:version "0.1.5" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "Plot-Nx-Size/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Plot-Nx-Size" ; + schema1:version "0.1.1" . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "atacseq/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/atacseq" ; + schema1:version "0.5.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + ; + schema1:name "sra-manifest-to-concatenated-fastqs/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sra-manifest-to-concatenated-fastqs" ; + schema1:version "0.2.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + ; + schema1:name "VGP-meryldb-creation/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/VGP-meryldb-creation" ; + schema1:version "0.1" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2024-01-24T15:00:23+00:00" ; + schema1:description "Lysozyme in water full COMPSs application, using dataset_small" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Lysozyme in water full version, using dataset_small, data_persistence False" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-pe-illumina-artic-ivar-analysis/SARS-COV-2-ILLUMINA-AMPLICON-IVAR-PANGOLIN-NEXTCLADE" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-pe-illumina-artic-ivar-analysis" ; + schema1:version "0.2.1" . 
+ + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-10-27T13:17:52+00:00" ; + schema1:description """**Name:** Matrix multiplication with Objects +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs + +# Description +Matrix multiplication is a binary operation that takes a pair of matrices and produces another matrix. + +If A is an n×m matrix and B is an m×p matrix, the result AB of their multiplication is an n×p matrix defined only if the number of columns m in A is equal to the number of rows m in B. When multiplying A and B, the elements of the rows in A are multiplied with corresponding columns in B. + +In this implementation, A and B are square matrices (same number of rows and columns), and so it is the result matrix C. Each matrix is divided in N blocks of M doubles. The multiplication of two blocks is done by a multiply task method with a simple three-nested-loop implementation. When executed with COMPSs, the main program generates N^3^ tasks arranged as N^2^ chains of N tasks in the dependency graph. + +# Execution instructions +Usage: +``` +runcompss --lang=python src/matmul_objects.py numberOfBlocks blockSize +``` + +where: +* numberOfBlocks: Number of blocks inside each matrix +* blockSize: Size of each block + + +# Execution Examples +``` +runcompss --lang=python src/matmul_objects.py 16 4 +runcompss src/matmul_objects.py 16 4 +python -m pycompss src/matmul_objects.py 16 4 +``` + +# Build +No build is required +""" ; + schema1:hasPart , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "PyCOMPSs Matrix Multiplication with Objects (inputs generated by the code)" ; + schema1:publisher . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "rnaseq-sr/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/rnaseq-sr" ; + schema1:version "0.4.1" . + + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-12-15T14:53:21+00:00" ; + schema1:description """**Name:** Word Count +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs + +# Description +Wordcount is an application that counts the number of words for a given set of files. + +To allow parallelism the file is divided in blocks that are treated separately and merged afterwards. + +Results are printed to a Pickle binary file, so they can be checked using: python -mpickle result.txt + +This example also shows how to manually add input or output datasets to the workflow provenance recording (using the 'input' and 'output' terms in the ro-crate-info.yaml file). + +# Execution instructions +Usage: +``` +runcompss --lang=python $(pwd)/application_sources/src/wordcount_blocks.py filePath resultPath blockSize +``` + +where: +* filePath: Absolute path of the file to parse +* resultPath: Absolute path to the result file +* blockSize: Size of each block. 
The lower the number, the more tasks will be generated in the workflow + +# Execution Examples +``` +runcompss --lang=python $(pwd)/application_sources/src/wordcount_blocks.py $(pwd)/dataset/data/compss.txt result.txt 300 +runcompss $(pwd)/application_sources/src/wordcount_blocks.py $(pwd)/dataset/data/compss.txt result.txt 300 +python -m pycompss $(pwd)/application_sources/src/wordcount.py $(pwd)/dataset/data/compss.txt result.txt 300 +``` + +# Build +No build is required +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "PyCOMPSs Wordcount test, dividing input file in blocks, only Python dictionaries used as task parameters (run at MareNostrum IV)" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "sars-cov-2-variation-reporting/COVID-19-VARIATION-REPORTING" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-variation-reporting" ; + schema1:version "0.3" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator , + , + , + ; + schema1:name "quality-and-contamination-control/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/quality-and-contamination-control" ; + schema1:version "1.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "Assembly-decontamination-VGP9/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/Assembly-decontamination-VGP9" ; + schema1:version "0.1.1" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "cutandrun/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/cutandrun" ; + schema1:version "0.3" . 
+ + a schema1:Dataset ; + dct:conformsTo , + , + ; + schema1:creator ; + schema1:datePublished "2023-11-03T08:10:10+00:00" ; + schema1:description """**Name:** TruncatedSVD (Randomized SVD) +**Contact Person**: support-compss@bsc.es +**Access Level**: public +**License Agreement**: Apache2 +**Platform**: COMPSs +**Machine**: MareNostrum4 + +TruncatedSVD (Randomized SVD) for computing just 456 singular values out of a (3.6M x 1200) size matrix. +The input matrix represents a CFD transient simulation of air moving past a cylinder. +This application used dislib-0.9.0 (https://github.com/bsc-wdc/dislib/tree/release-0.9) +""" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:license "Apache-2.0" ; + schema1:mainEntity ; + schema1:mentions ; + schema1:name "Randomized SVD" ; + schema1:publisher . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "chipseq-pe/main" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/chipseq-pe" ; + schema1:version "0.4" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:creator ; + schema1:name "COVID-19: consensus construction" ; + schema1:programmingLanguage ; + schema1:url "https://github.com/iwc-workflows/sars-cov-2-consensus-from-variation" ; + schema1:version "0.2.1" . + + a schema1:MediaObject ; + schema1:contentSize 304 ; + schema1:dateModified "2024-06-18T13:22:30+00:00" ; + schema1:name "A.0.0" ; + schema1:sdDatePublished "2024-06-18T13:22:31+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 303 ; + schema1:dateModified "2024-06-18T13:22:30+00:00" ; + schema1:name "A.0.1" ; + schema1:sdDatePublished "2024-06-18T13:22:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 306 ; + schema1:dateModified "2024-06-18T13:22:30+00:00" ; + schema1:name "A.0.2" ; + schema1:sdDatePublished "2024-06-18T13:22:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 311 ; + schema1:dateModified "2024-06-18T13:22:30+00:00" ; + schema1:name "A.1.0" ; + schema1:sdDatePublished "2024-06-18T13:22:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 320 ; + schema1:dateModified "2024-06-18T13:22:30+00:00" ; + schema1:name "A.1.1" ; + schema1:sdDatePublished "2024-06-18T13:22:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 312 ; + schema1:dateModified "2024-06-18T13:22:30+00:00" ; + schema1:name "A.1.2" ; + schema1:sdDatePublished "2024-06-18T13:22:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 319 ; + schema1:dateModified "2024-06-18T13:22:30+00:00" ; + schema1:name "A.2.0" ; + schema1:sdDatePublished "2024-06-18T13:22:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 323 ; + schema1:dateModified "2024-06-18T13:22:30+00:00" ; + schema1:name "A.2.1" ; + schema1:sdDatePublished "2024-06-18T13:22:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 311 ; + schema1:dateModified "2024-06-18T13:22:30+00:00" ; + schema1:name "A.2.2" ; + schema1:sdDatePublished "2024-06-18T13:22:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 303 ; + schema1:dateModified "2024-06-18T13:22:30+00:00" ; + schema1:name "A.2.3" ; + schema1:sdDatePublished "2024-06-18T13:22:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 320 ; + schema1:dateModified "2024-06-18T13:22:30+00:00" ; + schema1:name "A.3.2" ; + schema1:sdDatePublished "2024-06-18T13:22:31+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 310 ; + schema1:dateModified "2024-06-18T13:22:30+00:00" ; + schema1:name "A.3.3" ; + schema1:sdDatePublished "2024-06-18T13:22:31+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 320 ; + schema1:dateModified "2024-04-30T13:04:24+00:00" ; + schema1:name "C.0.0" ; + schema1:sdDatePublished "2024-04-30T13:04:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 320 ; + schema1:dateModified "2024-04-30T13:04:24+00:00" ; + schema1:name "C.0.1" ; + schema1:sdDatePublished "2024-04-30T13:04:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 320 ; + schema1:dateModified "2024-04-30T13:04:24+00:00" ; + schema1:name "C.1.0" ; + schema1:sdDatePublished "2024-04-30T13:04:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 320 ; + schema1:dateModified "2024-04-30T13:04:24+00:00" ; + schema1:name "C.1.1" ; + schema1:sdDatePublished "2024-04-30T13:04:29+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.0.0" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.0.1" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.0.2" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.0.3" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.0.4" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.0.5" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.1.0" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.1.1" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.1.2" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.1.3" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.1.4" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.1.5" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.2.0" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.2.1" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.2.2" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.2.3" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.2.4" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.2.5" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.3.0" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.3.1" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.3.2" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.3.3" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.3.4" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.3.5" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.4.0" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.4.1" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.4.2" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.4.3" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.4.4" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.4.5" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.5.0" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.5.1" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.5.2" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.5.3" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.5.4" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 392 ; + schema1:dateModified "2024-08-02T08:27:39+00:00" ; + schema1:name "C.5.5" ; + schema1:sdDatePublished "2024-08-02T08:27:40+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 29586272 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshot_matrix_at_final_time.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_10.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_100.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1000.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1010.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1020.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1030.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1040.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1050.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1060.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1070.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1080.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1090.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_110.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1100.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1110.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1120.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1130.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1140.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1150.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1160.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1170.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1180.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1190.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_120.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_1200.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_130.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_140.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_150.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_160.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_170.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_180.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_190.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_20.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_200.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_210.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_220.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_230.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_240.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_250.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_260.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_270.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_280.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_290.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_30.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_300.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_310.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_320.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_330.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_340.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_350.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_360.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_370.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_380.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_390.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_40.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_400.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_410.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_420.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_430.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_440.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_450.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_460.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_470.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_480.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_490.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_50.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_500.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_510.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_520.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_530.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_540.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_550.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_560.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_570.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_580.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_590.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_60.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_600.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_610.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_620.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_630.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_640.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_650.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_660.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_670.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_680.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_690.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_70.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_700.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_710.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_720.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_730.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_740.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_750.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_760.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_770.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_780.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_790.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_80.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_800.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_810.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_820.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_830.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_840.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_850.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_860.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_870.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_880.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_890.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_90.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_900.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_910.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_920.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_930.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_940.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . 
+ + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_950.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_960.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_970.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_980.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject ; + schema1:contentSize 295861568 ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:name "snapshots_at_step_990.npy" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . 
+ + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:SoftwareApplication ; + schema1:name "dislib" ; + schema1:url "https://github.com/bsc-wdc/dislib" ; + schema1:version "0.9.0" . + + a schema1:DataDownload ; + schema1:contentSize "10978259" ; + schema1:contentUrl "https://github.com/by-covid/BY-COVID_WP5_T5.2_baseline-use-case/archive/refs/heads/main.zip" ; + schema1:encodingFormat "application/zip" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-5980-8002" ; + schema1:name "Ozan Ozisik" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-6353-0808" ; + schema1:name "Volodymyr Savchenko" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-6744-996X" ; + schema1:name "Helge Hecht" . 
+ + a schema1:Person ; + schema1:affiliation ; + schema1:name "Bruno P. Kinoshita" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-9091-257X" ; + schema1:name "Jordi Rambla" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-9818-9320" ; + schema1:name "Johannes Köster" . + + a schema1:Person ; + schema1:name "Bugra Oezdemir" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-0613-3651" ; + schema1:name "Cenna Doornbos" . + + a schema1:Person ; + schema1:name "Simon Bray" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-0637-9950" ; + schema1:name "Juma Bayjan" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-2853-6742" ; + schema1:name "Arnau Soler Costa" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-2961-9670" ; + schema1:name "Michael R. Crusoe" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-4405-6802" ; + schema1:name "Haris Zafeiropoulos" . + + a schema1:Person ; + schema1:name "Andrii Neronov" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-8080-9170" ; + schema1:name "Jeanette Reinshagen" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-9009-7453" ; + schema1:name "Katherine Farquharson" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-1086-2483" ; + schema1:name "Amy Curwin" . + + a schema1:Person ; + schema1:contactPoint ; + schema1:name "Ashish Bhawel" . 
+ + a schema1:SoftwareApplication ; + schema1:description "Bio Building Blocks to analyse Molecular Dynamics trajectories" ; + schema1:downloadUrl "https://quay.io/biocontainers/biobb_analysis:3.6.0--pyhdfd78af_0" ; + schema1:installUrl "https://anaconda.org/bioconda/biobb_analysis" ; + schema1:isPartOf "https://pypi.org/project/biobb/" ; + schema1:license ; + schema1:name "biobb_analysis" ; + schema1:publisher ; + schema1:softwareHelp "https://biobb-analysis.readthedocs.io/en/latest/" ; + schema1:softwareRequirements , + ; + schema1:url "https://github.com/bioexcel/biobb_analysis" ; + schema1:version "3.6.0" . + + a schema1:SoftwareApplication ; + schema1:description "Bio Building Blocks to setup and run Molecular Dynamics simulations." ; + schema1:downloadUrl "https://quay.io/biocontainers/biobb_md:3.6.0--pyhdfd78af_0" ; + schema1:installUrl "https://anaconda.org/bioconda/biobb_md" ; + schema1:isPartOf "https://pypi.org/project/biobb/" ; + schema1:license ; + schema1:name "biobb_md" ; + schema1:publisher ; + schema1:softwareHelp "https://biobb-md.readthedocs.io/en/latest/" ; + schema1:softwareRequirements ; + schema1:url "https://github.com/bioexcel/biobb_md" ; + schema1:version "3.6.0" . + + a schema1:SoftwareApplication ; + schema1:description "Bio Building Blocks to setup and run Alchemical Free Energy calculations" ; + schema1:downloadUrl "https://quay.io/biocontainers/biobb_pmx:3.6.0--pyhdfd78af_0" ; + schema1:installUrl "https://anaconda.org/bioconda/biobb_pmx" ; + schema1:isPartOf "https://pypi.org/project/biobb/" ; + schema1:license ; + schema1:name "biobb_pmx" ; + schema1:publisher ; + schema1:softwareHelp "https://pypi.org/project/biobb-pmx/3.6.0/en/latest/" ; + schema1:softwareRequirements ; + schema1:url "https://github.com/bioexcel/biobb_pmx" ; + schema1:version "3.6.0" . 
+ + a schema1:SoftwareApplication ; + schema1:description "Bio Building Blocks to modify or extract information from a PDB structure file" ; + schema1:downloadUrl "https://quay.io/biocontainers/biobb_structure_utils:3.6.1--pyhdfd78af_0" ; + schema1:installUrl "https://anaconda.org/bioconda/biobb_structure_utils" ; + schema1:isPartOf "https://pypi.org/project/biobb/" ; + schema1:license ; + schema1:name "biobb_structure_utils" ; + schema1:publisher ; + schema1:softwareHelp "https://biobb-structure-utils.readthedocs.io/en/latest/" ; + schema1:softwareRequirements , + ; + schema1:url "https://github.com/bioexcel/biobb_structure_utils" ; + schema1:version "3.6.1" . + + a schema1:ComputerLanguage, + schema1:SoftwareApplication ; + schema1:description "Python Binding for COMP Superscalar Runtime" ; + schema1:name "PyCOMPSs" ; + schema1:publisher ; + schema1:softwareRequirements ; + schema1:url "http://compss.bsc.es/" ; + schema1:version "2.9" . + + a schema1:Organization ; + schema1:name "Institut de Physique du Globe de Paris (IPGP)" . + + a schema1:Organization ; + schema1:name "Universitat Politècnica de Catalunya" . + + a schema1:Organization ; + schema1:name "University of Naples Federico II" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url . + + a schema1:Person ; + schema1:name "Juma Bayjan" . + + a schema1:Person ; + schema1:name "Ozan Ozisik" . + + a schema1:Person ; + schema1:name "Patrick Durand" . + + a schema1:Person ; + schema1:name "Laura Leroi" . + + a schema1:Person ; + schema1:name "Volodymyr Savchenko" . + + a schema1:Person ; + schema1:name "Cenna Doornbos" . + + a schema1:Person ; + schema1:name "Haris Zafeiropoulos" . + + a schema1:Person ; + schema1:name "Ambarish Kumar" . + + a schema1:Person ; + schema1:name "Marvin Martens" . + + a schema1:Person ; + schema1:name "Jeanette Reinshagen" . + + a schema1:Person ; + schema1:name "Katherine Farquharson" . 
+ + a schema1:Person ; + schema1:name "Vasiliki Panagi" . + + a schema1:Person ; + schema1:name "Konstantinos Kyritsis" . + + a schema1:Person ; + schema1:name "Denys Savchenko" . + + a schema1:Person ; + schema1:name "Michael R. Crusoe" . + + a schema1:Person ; + schema1:name "Amy Curwin" . + + a schema1:Person ; + schema1:name "Arnau Soler Costa" . + + a schema1:Person ; + schema1:name "Helge Hecht" . + + a schema1:Person ; + schema1:name "Zargham Ahmad" . + + a schema1:Person ; + schema1:name "Jordi Rambla" . + + a schema1:Organization, + schema1:Project ; + schema1:name "TRON gGmbH" . + + a schema1:Organization, + schema1:Project ; + schema1:name "NGFF Tools" . + + a schema1:Organization, + schema1:Project ; + schema1:name "NHM Clark group" . + + a schema1:Organization, + schema1:Project ; + schema1:name "RECETOX SpecDatRI" . + + a schema1:Organization, + schema1:Project ; + schema1:name "EGA QC" . + + a schema1:Organization, + schema1:Project ; + schema1:name "SARS-CoV-2 Data Hubs" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Snakemake-Workflows" . + + a schema1:Organization, + schema1:Project ; + schema1:name "emo-bon" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Toxicology community" . + + a schema1:Organization, + schema1:Project ; + schema1:name "EU-Openscreen" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 9085 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "ptf_workflow.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 9085 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "ptf_workflow.py" ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 3683 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "Wordcount.java" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 1948 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "matmul_files.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 5243 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "cch_matmul_test.py" ; + schema1:programmingLanguage ; + schema1:softwareRequirements . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 2271 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "main.py" ; + schema1:programmingLanguage ; + schema1:softwareRequirements . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 46258 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "Workflow.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 4503 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "Matmul.java" ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 8973 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "lysozyme_in_water_full.py" ; + schema1:programmingLanguage . + + a schema1:Person ; + schema1:name "Debjyoti Ghosh" . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 8973 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "lysozyme_in_water_full.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 9298 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "random_svd_compss.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 1254 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "lanczos_dislib_version.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 7564 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "KMeans.java" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 1959 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "matmul_files.py" ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 2897 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "wordcount.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 4697 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "Matmul.java" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 6602 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "SparseLU.java" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 2271 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "main.py" ; + schema1:programmingLanguage ; + schema1:softwareRequirements . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 6602 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "SparseLU.java" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 7601 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "lysozyme_in_water.py" ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 5344 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "cc.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 4697 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "Matmul.java" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 2312 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "matmul_directory.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 1101 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "cch_kmeans_test.py" ; + schema1:programmingLanguage ; + schema1:softwareRequirements . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 8868 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "lysozyme_in_water_full_no_mpi.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 5243 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "cch_matmul_test.py" ; + schema1:programmingLanguage ; + schema1:softwareRequirements . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 2107 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "main_pytorch_sync_5_nodes.py" ; + schema1:programmingLanguage ; + schema1:softwareRequirements , + . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 2107 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "main_pytorch_sync_5_nodes.py" ; + schema1:programmingLanguage ; + schema1:softwareRequirements , + . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 2415 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "my_workflow_multiple.py" ; + schema1:programmingLanguage . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 8973 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "lysozyme_in_water_full.py" ; + schema1:programmingLanguage . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 1959 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "matmul_files.py" ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 1765 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "simple.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 480 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "fibonacci.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 1276 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "main_rf.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 1380 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "increment_fibonacci.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 2836 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "increment.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 6421 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "kmeans.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 2277 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "monte_carlo_pi.py" ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 7601 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "lysozyme_in_water.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 1101 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "cch_kmeans_test.py" ; + schema1:programmingLanguage ; + schema1:softwareRequirements . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 2168 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "wc_reduce.py" ; + schema1:programmingLanguage . + + a schema1:ComputerLanguage ; + schema1:identifier ; + schema1:name "Galaxy" ; + schema1:url ; + schema1:version "v19_09" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 4293 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "cholesky.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 9301 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "random_svd_compss.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 2782 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "wc_merge.py" ; + schema1:programmingLanguage . 
+ + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 7601 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "lysozyme_in_water.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 1955 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "matmul_objects.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 2285 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "wordcount_blocks.py" ; + schema1:programmingLanguage . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:contentSize 9301 ; + schema1:description "Main file of the COMPSs workflow source files" ; + schema1:encodingFormat "text/plain" ; + schema1:image ; + schema1:name "random_svd_compss.py" ; + schema1:programmingLanguage . + + a schema1:Dataset ; + schema1:dateModified "2023-09-06T13:18:09" ; + schema1:hasPart , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + schema1:name "MakeTrainingMatricesSerial" ; + schema1:sdDatePublished "2023-11-03T08:10:10+00:00" . 
+ + a schema1:Project ; + schema1:description "BeYond-COVID, make data from COVID-19 and other infectious diseases open and accessible to everyone" ; + schema1:funding ; + schema1:name "BY-COVID" ; + schema1:url "https://by-covid.eu/" . + + a schema1:WebPage ; + schema1:about ; + schema1:name "dataspice CSV template" . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:SoftwareApplication ; + schema1:name "pandas-profiling v3.1.0" ; + schema1:url "https://github.com/pandas-profiling/pandas-profiling" ; + schema1:version "3.1.0" . + + a schema1:SoftwareApplication ; + schema1:name "Quarto" ; + schema1:url "https://quarto.org/" ; + schema1:version "1.0.8" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-6867-2039" ; + schema1:name "Ekaterina Sakharova" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-8035-341X" ; + schema1:name "Konstantinos Kyritsis" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-8222-008X" ; + schema1:name "Samuel Lambert" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-8449-1502" ; + schema1:name "Cali Willet" . + + a schema1:Person ; + schema1:name "Fotis Psomopoulos" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-2913-0779" ; + schema1:name "Benjamin Wingfield" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-4159-6096" ; + schema1:name "Asier Gonzalez-Uriarte" . 
+ + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-6454-2680" ; + schema1:name "Bryan Raubenolt" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-7341-1842" ; + schema1:name "Laurence Livermore" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-7664-8490" ; + schema1:name "Denys Savchenko" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-7770-620X" ; + schema1:name "Friederike Ehrhart" . + + a schema1:Person ; + schema1:name "Matthieu Muffato" . + + a schema1:Person ; + schema1:name "Laurent Gil" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-9984-4689" ; + schema1:name "Melchior du Lac" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-0914-2470" ; + schema1:name "Anthony Bretaudeau" . + + a schema1:Person ; + schema1:name "Nikolaos Pechlivanis" . + + a schema1:WebPage ; + schema1:name "Quarto Markdown" . + + a schema1:MediaObject ; + schema1:name "housing.csv" . + + a schema1:Organization ; + schema1:name "Euro-Mediterranean Center for Climate Change" . + + a schema1:Organization ; + schema1:name "Norwegian Geotechnical Institute" . + + a schema1:Organization ; + schema1:alternateName "IACS" ; + schema1:legalName "Instituto Aragonés de Ciencias de la Salud" ; + schema1:name "Instute for Health Science in Aragon (IACS)" ; + schema1:url "https://www.iacs.es/" . + + a schema1:Collection ; + schema1:name "CEITEC CryoEM Facility Workflows" . + + a schema1:Person ; + schema1:name "Friederike Ehrhart" . + + a schema1:Person ; + schema1:name "Melchior du Lac" . + + a schema1:Person ; + schema1:name "Asier Gonzalez-Uriarte" . + + a schema1:Person ; + schema1:name "Laurence Livermore" . + + a schema1:Person ; + schema1:name "Cali Willet" . + + a schema1:Person ; + schema1:name "Benjamin Wingfield" . + + a schema1:Person ; + schema1:name "Samuel Lambert" . + + a schema1:Person ; + schema1:name "Ekaterina Sakharova" . 
+ + a schema1:Person ; + schema1:name "Bryan Raubenolt" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Seq4AMR" . + + a schema1:Organization, + schema1:Project ; + schema1:name "NanoGalaxy" . + + a schema1:Organization, + schema1:Project ; + schema1:name "SANBI Pathogen Bioinformatics" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Biodata Analysis Group" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Polygenic Score Catalog" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Scipion CNB" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Tree of Life Genome Analysis" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Generalized Open-Source Workflows for Atomistic Molecular Dynamics Simulations of Viral Helicases" . + + a schema1:Organization, + schema1:Project ; + schema1:name "EOSC-Life - Demonstrator 7: Rare Diseases" . + + a schema1:Organization, + schema1:Project ; + schema1:name "ODA" . + + a schema1:Organization, + schema1:Project ; + schema1:name "OpenEBench" . + + a schema1:ComputerLanguage, + schema1:SoftwareApplication ; + schema1:name "Python 3.8.0" ; + schema1:url "https://www.python.org/downloads/release/python-380/" ; + schema1:version "3.8.0" . + + a schema1:MediaObject, + schema1:SoftwareSourceCode, + ; + schema1:author , + ; + schema1:dateModified "2021-09-03T00:00:00.000Z" ; + schema1:description "Performs a system setup and runs a molecular dynamics simulation on each one of the structures listed in the YAML properties file." ; + schema1:hasPart , + , + , + ; + schema1:isBasedOn ; + schema1:name "md_list.py" ; + schema1:programmingLanguage ; + schema1:runtimePlatform . + + a schema1:SoftwareApplication ; + schema1:name "dataspice" ; + schema1:url "https://docs.ropensci.org/dataspice/" ; + schema1:version "1.1.0" . 
+ + a schema1:Grant ; + schema1:funder ; + schema1:identifier "https://doi.org/10.3030/101046203" ; + schema1:name "HORIZON-INFRA-2021-EMERGENCY-01 101046203" . + + a schema1:MediaObject . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-3877-4170" ; + schema1:name "Sergi Sayols" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-4166-4343" ; + schema1:name "Kim Philipp Jablonski" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-4319-260X" ; + schema1:name "Jorrit Mesman" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-5516-8391" ; + schema1:name "Germán Royval" . + + a schema1:Person ; + schema1:name " Priyanka Surana", + "Priyanka Surana" . + + a schema1:Person ; + schema1:name "Petr Walczysko" . + + a schema1:Person ; + schema1:name "Pau Andrio" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-4308-5617" ; + schema1:name "Coline Royaux" . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Organization ; + schema1:name "European Commission" . + + a schema1:Person ; + schema1:name "Oliver Woolland" . + + a schema1:Person ; + schema1:name "Germán Royval" . + + a schema1:Person ; + schema1:name "Kim Philipp Jablonski" . + + a schema1:Person ; + schema1:name "Jorrit Mesman" . + + a schema1:Person ; + schema1:name "Coline Royaux" . + + a schema1:Person ; + schema1:name "Sergi Sayols" . + + a schema1:Organization, + schema1:Project ; + schema1:name "TRE-FX" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Euro-BioImaging" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Lake Erken modelling setup" . 
+ + a schema1:Organization, + schema1:Project ; + schema1:name "IMBforge" . + + a schema1:Organization, + schema1:Project ; + schema1:name "iPC: individualizedPaediatricCure" . + + a schema1:Organization, + schema1:Project ; + schema1:name "CO2MICS Lab" . + + a schema1:Organization ; + schema1:name "Nadolina Brajuka" . + + a schema1:MediaObject . + + a schema1:Organization ; + schema1:memberOf , + , + , + , + ; + schema1:name "Molecular Modeling and Bioinformatics unit" ; + schema1:url "https://mmb.irbbarcelona.org/" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-8293-4816" ; + schema1:name "Tom Brown" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-7561-0810" ; + schema1:name "Ivan Topolsky" . + + a schema1:Organization ; + schema1:name "Universidad de Málaga" . + + a schema1:CreativeWork ; + schema1:description """Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.""" ; + schema1:identifier "https://www.apache.org/licenses/LICENSE-2.0" ; + schema1:name "Apache License 2.0" . + + a schema1:CreativeWork ; + schema1:name "Process Run Crate" ; + schema1:version "0.5" . + + a schema1:CreativeWork ; + schema1:name "Workflow Run Crate" ; + schema1:version "0.5" . + + a schema1:Collection ; + schema1:name "ERGA Assembly Galaxy HiFi & HiC Pipelines (Hifiasm-HiC + Purge_Dups + YaHS)" . + + a schema1:Collection ; + schema1:name "16S Microbial Analysis with mothur (on Galaxy Australia)" . 
+ + a schema1:Collection ; + schema1:name "ERGA Assembly Galaxy ONT+Illumina & HiC Pipelines (NextDenovo-HyPo + Purge_Dups + YaHS)" . + + a schema1:Collection ; + schema1:name "ERGA Assembly Galaxy ONT+Illumina & HiC Pipelines (Flye-HyPo + Purge_Dups + YaHS)" . + + a schema1:Collection ; + schema1:name "ERGA Assembly Snakemake HiFi & HiC Pipelines" . + + a schema1:Collection ; + schema1:name "IDR" . + + a schema1:Person ; + schema1:name "Ivan Topolsky" . + + a schema1:Person ; + schema1:name "Delphine Lariviere" . + + a schema1:Person ; + schema1:name "Tom Brown" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Integrated and Urban Plant Pathology Laboratory" . + + a schema1:Organization, + schema1:Project ; + schema1:name "CWL workflow SARS-CoV-2" . + + a schema1:Organization, + schema1:Project ; + schema1:name "V-Pipe" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Specimen Data Refinery" . + + a schema1:Dataset ; + schema1:contributor , + , + , + ; + schema1:creator , + , + , + , + ; + schema1:datePublished "2023-01-26" ; + schema1:description "Synthetic data set to test the causal model proposed by the baseline use case in BY-COVID WP5 assessing the effectiveness of the COVID-19 vaccine(s)" ; + schema1:distribution ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.6913045" ; + schema1:keywords "causal model", + "covid-19", + "dataset", + "effectiveness", + "synthetic data", + "vaccines" ; + schema1:license ; + schema1:name "COVID-19 vaccine(s) effectiveness assessment (synthetic dataset)" ; + schema1:spatialCoverage _:b0 ; + schema1:subjectOf ; + schema1:temporalCoverage "01-01-2020/31-12-2024" ; + schema1:url "https://zenodo.org/record/7572373" ; + schema1:variableMeasured _:b1, + _:b10, + _:b11, + _:b12, + _:b13, + _:b14, + _:b15, + _:b16, + _:b17, + _:b18, + _:b19, + _:b2, + _:b20, + _:b21, + _:b22, + _:b23, + _:b24, + _:b25, + _:b26, + _:b27, + _:b28, + _:b29, + _:b3, + _:b30, + _:b31, 
+ _:b32, + _:b33, + _:b34, + _:b35, + _:b36, + _:b37, + _:b38, + _:b39, + _:b4, + _:b40, + _:b41, + _:b5, + _:b6, + _:b7, + _:b8, + _:b9 ; + schema1:version "1.1.0" . + + a schema1:Dataset ; + schema1:contributor , + , + , + ; + schema1:creator , + , + , + , + ; + schema1:datePublished "2023-01-26" ; + schema1:description "Synthetic data set to test the causal model proposed by the baseline use case in BY-COVID WP5 assessing the effectiveness of the COVID-19 vaccine(s)" ; + schema1:distribution ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.6913045" ; + schema1:keywords "causal model", + "covid-19", + "dataset", + "effectiveness", + "synthetic data", + "vaccines" ; + schema1:name "COVID-19 vaccine(s) effectiveness assessment (synthetic dataset)" ; + schema1:spatialCoverage _:b0 ; + schema1:subjectOf ; + schema1:temporalCoverage "01-01-2020/31-12-2024" ; + schema1:url "https://zenodo.org/record/7572373" ; + schema1:variableMeasured _:b1, + _:b10, + _:b11, + _:b12, + _:b13, + _:b14, + _:b15, + _:b16, + _:b17, + _:b18, + _:b19, + _:b2, + _:b20, + _:b21, + _:b22, + _:b23, + _:b24, + _:b25, + _:b26, + _:b27, + _:b28, + _:b29, + _:b3, + _:b30, + _:b31, + _:b32, + _:b33, + _:b34, + _:b35, + _:b36, + _:b37, + _:b38, + _:b39, + _:b4, + _:b40, + _:b41, + _:b5, + _:b6, + _:b7, + _:b8, + _:b9 ; + schema1:version "1.1.0" . 
+ + a schema1:Dataset ; + schema1:contributor , + , + , + ; + schema1:creator , + , + , + , + ; + schema1:datePublished "2023-01-26" ; + schema1:description "Synthetic data set to test the causal model proposed by the baseline use case in BY-COVID WP5 assessing the effectiveness of the COVID-19 vaccine(s)" ; + schema1:distribution ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.6913045" ; + schema1:keywords "causal model", + "covid-19", + "dataset", + "effectiveness", + "synthetic data", + "vaccines" ; + schema1:license ; + schema1:mainEntity ; + schema1:name "COVID-19 vaccine(s) effectiveness assessment (synthetic dataset)" ; + schema1:spatialCoverage _:b0 ; + schema1:subjectOf ; + schema1:temporalCoverage "01-01-2020/31-12-2024" ; + schema1:url "https://zenodo.org/record/7572373" ; + schema1:variableMeasured _:b1, + _:b10, + _:b11, + _:b12, + _:b13, + _:b14, + _:b15, + _:b16, + _:b17, + _:b18, + _:b19, + _:b2, + _:b20, + _:b21, + _:b22, + _:b23, + _:b24, + _:b25, + _:b26, + _:b27, + _:b28, + _:b29, + _:b3, + _:b30, + _:b31, + _:b32, + _:b33, + _:b34, + _:b35, + _:b36, + _:b37, + _:b38, + _:b39, + _:b4, + _:b40, + _:b41, + _:b5, + _:b6, + _:b7, + _:b8, + _:b9 ; + schema1:version "1.1.0" . 
+ + a schema1:Dataset ; + schema1:contributor , + , + , + ; + schema1:creator , + , + , + , + ; + schema1:datePublished "2023-01-26" ; + schema1:description "Synthetic data set to test the causal model proposed by the baseline use case in BY-COVID WP5 assessing the effectiveness of the COVID-19 vaccine(s)" ; + schema1:distribution ; + schema1:hasPart , + , + , + ; + schema1:identifier "https://doi.org/10.5281/zenodo.6913045" ; + schema1:keywords "causal model", + "covid-19", + "dataset", + "effectiveness", + "synthetic data", + "vaccines" ; + schema1:license ; + schema1:name "COVID-19 vaccine(s) effectiveness assessment (synthetic dataset)" ; + schema1:spatialCoverage _:b0 ; + schema1:subjectOf ; + schema1:temporalCoverage "01-01-2020/31-12-2024" ; + schema1:url "https://zenodo.org/record/7572373" ; + schema1:variableMeasured _:b1, + _:b10, + _:b11, + _:b12, + _:b13, + _:b14, + _:b15, + _:b16, + _:b17, + _:b18, + _:b19, + _:b2, + _:b20, + _:b21, + _:b22, + _:b23, + _:b24, + _:b25, + _:b26, + _:b27, + _:b28, + _:b29, + _:b3, + _:b30, + _:b31, + _:b32, + _:b33, + _:b34, + _:b35, + _:b36, + _:b37, + _:b38, + _:b39, + _:b4, + _:b40, + _:b41, + _:b5, + _:b6, + _:b7, + _:b8, + _:b9 ; + schema1:version "1.1.0" . + + a schema1:Person ; + schema1:name "Mridul Johari" . + + a schema1:Person ; + schema1:name "Yasmmin Martins" . + + a schema1:Person ; + schema1:name "Daphne Wijnbergen" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Jorge Ejarque" . + + a schema1:Organization ; + schema1:name "IRB Barcelona", + "Institute for Research in Biomedicine" ; + schema1:url "https://www.irbbarcelona.org/" . + + a schema1:Person ; + schema1:name "Saskia Hiltemann" . + + a schema1:Organization, + schema1:Project ; + schema1:name "IBISBA Workflows" . + + a schema1:Organization, + schema1:Project ; + schema1:name "yPublish - Bioinfo tools" . + + a schema1:MediaObject . + + a schema1:MediaObject . + + a schema1:Organization ; + schema1:name "IWC" . 
+ + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Fernando Vázquez-Novoa" . + + a schema1:Person ; + schema1:affiliation ; + schema1:name "Alexander Degelsegger-Marquez" . + + a schema1:Person ; + schema1:affiliation ; + schema1:name "Simon Saldner" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-1789-1861" ; + schema1:name "Jean-Marie Burel" . + + a schema1:Person ; + schema1:name "Hans-Rudolf Hotz" . + + a schema1:Person ; + schema1:name "Mehmet Tekman" . + + a schema1:Person ; + schema1:name "Daniel Blankenberg" . + + a schema1:Person ; + schema1:affiliation ; + schema1:name "Lorenz Dolanski-Aghamanoukjan" . + + a schema1:Person ; + schema1:name "Wendi Bacon" . + + a schema1:Person ; + schema1:name "Marius van den Beek" . + + a schema1:Organization ; + schema1:name "National Institute of Geophysics and Volcanology" . + + a schema1:CreativeWork, + schema1:Thing ; + schema1:identifier "CC-BY-4.0" ; + schema1:name "Creative Commons Attribution 4.0 International" ; + schema1:url "https://creativecommons.org/licenses/by/4.0/" . + + a schema1:Person ; + schema1:name "Jean-Marie Burel" . + + a schema1:Organization, + schema1:Project ; + schema1:name "PerMedCoE" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Galaxy Climate" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Biodiversity Genomics Europe (general)" . + + a schema1:Organization, + schema1:Project ; + schema1:name "OME" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-2439-8650" ; + schema1:name "Gareth Price" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-2488-953X" ; + schema1:name "Rosemarie Sadsad" . + + a schema1:Collection ; + schema1:name "HiFi genome assembly on Galaxy" . + + a schema1:Person ; + schema1:name "Rosemarie Sadsad" . + + a schema1:Person ; + schema1:name "Gareth Price" . 
+ + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-8504-068X" ; + schema1:name "Yvan Le Bras" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-0419-1476" ; + schema1:name "Georgina Samaha" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-3472-3736" ; + schema1:name "Martin Beracochea" . + + a schema1:Person ; + schema1:name "Clea Siguret" . + + a schema1:Collection ; + schema1:name "Workflows in EuroScienceGateway" . + + a schema1:Person ; + schema1:name "Georgina Samaha" . + + a schema1:Person ; + schema1:name "Martin Beracochea" . + + a schema1:Person ; + schema1:name "Yvan Le Bras" . + + a schema1:Organization, + schema1:Project ; + schema1:name "HoloFood at MGnify" . + + a schema1:Organization, + schema1:Project ; + schema1:name "MGnify" . + + a schema1:WebSite ; + schema1:name "Java Archive Format" . + + a schema1:Person ; + schema1:affiliation ; + schema1:identifier "https://orcid.org/0000-0001-9842-9718" ; + schema1:name "Stian Soiland-Reyes" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-4806-5140" ; + schema1:name "José Mª Fernández" . + + a schema1:Person ; + schema1:name "Pierre Osteil" . + + a schema1:Collection ; + schema1:name "BioExcel Building Blocks (BioBB) Protein MD Setup Tutorials" . + + a schema1:Person ; + schema1:name "José Mª Fernández" . + + a schema1:Organization, + schema1:Project ; + schema1:name "PNDB" . + + a schema1:Organization, + schema1:Project ; + schema1:name "EJPRD WP13 case-studies workflows" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-5752-2155" ; + schema1:name "Cristóbal Gallardo" . + + a schema1:CreativeWork ; + schema1:name "Process Run Crate" ; + schema1:version "0.4" . + + a schema1:CreativeWork ; + schema1:name "Workflow Run Crate" ; + schema1:version "0.4" . + + a schema1:Collection ; + schema1:name "TSI annotation workflows" . + + a schema1:Person ; + schema1:name "Cristóbal Gallardo" . 
+ + a schema1:Person ; + schema1:name "Diego De Panis" . + + a schema1:Organization, + schema1:Project ; + schema1:name "EuroScienceGateway" . + + a schema1:Person ; + schema1:name "Clea Siguret", + "Pierre Marin" . + + a schema1:Person ; + schema1:name "Tracy Chew" . + + a schema1:Organization ; + schema1:name "abromics-consortium" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint , + ; + schema1:name "Rosa M Badia", + "Rosa M. Badia" . + + a schema1:Organization, + schema1:Project ; + schema1:name "TransBioNet" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint , + ; + schema1:name "Javier Conejero" . + + a schema1:Person ; + schema1:affiliation ; + schema1:name "Martínez-Lizaga, Natalia" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Sydney Informatics Hub" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-6421-3484" ; + schema1:name "Delphine Lariviere" . + + a schema1:Organization, + schema1:Project ; + schema1:name "usegalaxy-eu" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-8172-8981" ; + schema1:name "Jasper Koehorst" . + + a schema1:Person ; + schema1:name "Jasper Koehorst" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Galaxy Training Network" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0001-9524-5964" ; + schema1:name "Bart Nijsse" . + + a schema1:Person ; + schema1:affiliation ; + schema1:email "jgonzalezga.iacs@aragon.es" ; + schema1:name "Javier González-Galindo" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:name "Cristian Tatu" . + + a schema1:Person ; + schema1:name "Bart Nijsse" . + + a schema1:Organization, + schema1:Project ; + schema1:name "UNLOCK" . + + a schema1:Organization, + schema1:Project ; + schema1:name "ERGA Assembly" . 
+ + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-4929-1219" ; + schema1:name "Laura Rodriguez-Navas" . + + a schema1:Person ; + schema1:name "Laura Rodriguez-Navas" . + + a schema1:Organization, + schema1:Project ; + schema1:name "GalaxyProject SARS-CoV-2" . + + a schema1:Collection ; + schema1:name "scRNAseq processing in galaxy" . + + a schema1:Person ; + schema1:affiliation ; + schema1:email "ebernal.iacs@aragon.es" ; + schema1:name "Enrique Bernal-Delgado" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-1248-3594" ; + schema1:name "Douglas Lowe" . + + a schema1:Person ; + schema1:affiliation ; + schema1:email "festupinnan.iacs@aragon.es" ; + schema1:name "Francisco Estupiñán-Romero" . + + a schema1:Person ; + schema1:name "Douglas Lowe" . + + a schema1:Person ; + schema1:name "Anna Syme" . + + a schema1:Person ; + schema1:affiliation ; + schema1:email "Nina.VanGoethem@sciensano.be" ; + schema1:name "Nina Van Goethem" . + + a schema1:Person ; + schema1:affiliation ; + schema1:email "Marjan.Meurisse@sciensano.be" ; + schema1:name "Marjan Meurisse" . + + a schema1:WebSite ; + schema1:name "Java Compiled Object Code" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0002-9906-0673" ; + schema1:name "Anna Syme" . + + a schema1:Organization, + schema1:Project ; + schema1:name "QCIF Bioinformatics" . + + a schema1:Person ; + schema1:name "Wolfgang Maier" . + + a schema1:CreativeWork ; + schema1:name "Workflow Run Crate" ; + schema1:version "0.1" . + + a schema1:CreativeWork ; + schema1:name "Process Run Crate" ; + schema1:version "0.1" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Galaxy Australia" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint , + , + ; + schema1:name "Ashish Bhawel", + "Raül Sirvent" . + + a schema1:Collection ; + schema1:name "BioCommons ‘Bring Your Own Data’ Expansion Project" . + + a schema1:WebSite ; + schema1:name "YAML" . 
+ + a schema1:WebSite ; + schema1:name "Scalable Vector Graphics" . + + a schema1:WebSite ; + schema1:name "JSON Data Interchange Format" . + + a schema1:Organization, + schema1:Project ; + schema1:name "Australian BioCommons" . + + a schema1:Organization ; + schema1:name "Barcelona Supercomputing Center", + "Barcelona Supercomputing Centre" ; + schema1:url "https://www.bsc.es/" . + + a schema1:Collection ; + schema1:name "Tutorials for BioExcel Building Blocks (BioBB)" . + + a schema1:Organization ; + schema1:name "VGP" . + + a schema1:Collection ; + schema1:name "Interactive Jupyter Notebooks for FAIR and reproducible biomolecular simulation workflows" . + + a schema1:Person ; + schema1:name "Lucille Delisle" . + + a schema1:ComputerLanguage ; + schema1:alternateName "Jupyter" ; + schema1:identifier ; + schema1:name "Jupyter Notebook" ; + schema1:url . + + a schema1:Person ; + schema1:name "Adam Hospital" . + + a schema1:Person ; + schema1:identifier "https://orcid.org/0000-0003-0513-0288" ; + schema1:name "Genís Bayarri" . + + a schema1:Person ; + schema1:name "Genís Bayarri" . + + a schema1:Organization, + schema1:Project ; + schema1:name "BioBB Building Blocks" . + + a schema1:Person ; + schema1:affiliation ; + schema1:contactPoint ; + schema1:identifier "https://orcid.org/0000-0002-8291-8071" ; + schema1:name "Adam Hospital" . + +ns2:PlanemoEngine a schema1:SoftwareApplication ; + schema1:name "Planemo" ; + schema1:url . + +ns2:GithubService a ns2:TestService ; + schema1:name "Github Actions" ; + schema1:url . + + a schema1:Organization, + schema1:Project ; + schema1:name "nf-core" . + + a schema1:Organization ; + schema1:name "WorkflowHub" ; + schema1:url "https://about.workflowhub.eu/" . + + a schema1:CreativeWork ; + schema1:name "Workflow RO-Crate" ; + schema1:version "1.0" . 
+ diff --git a/ro-crate-metadata/ro-crate-metadata.json b/ro-crate-metadata/ro-crate-metadata.json new file mode 100644 index 0000000..46955e4 --- /dev/null +++ b/ro-crate-metadata/ro-crate-metadata.json @@ -0,0 +1,77 @@ +{ + "@context": "https://w3id.org/ro/crate/1.1/context", + "@graph": [ + { + "@id": "./", + "@type": "Dataset", + "datePublished": "2024-08-05T09:34:27+00:00", + "hasPart": [ + { + "@id": "merged.ttl" + }, + { + "@id": "Snakefile" + } + ] + }, + { + "@id": "ro-crate-metadata.json", + "@type": "CreativeWork", + "about": { + "@id": "./" + }, + "conformsTo": { + "@id": "https://w3id.org/ro/crate/1.1" + } + }, + { + "@id": "merged.ttl", + "@type": "File", + "author": [ + { + "@id": "https://orcid.org/0000-0000-0000-0000" + }, + { + "@id": "https://orcid.org/0000-0000-0000-0001" + } + ], + "description": "This file contains merged RDF triples from multiple RO-Crates sourced from WorkflowHub.", + "encodingFormat": "text/turtle", + "name": "Merged Data File" + }, + { + "@id": "Snakefile", + "@type": "File", + "author": [ + { + "@id": "https://orcid.org/0000-0000-0000-0000" + }, + { + "@id": "https://orcid.org/0000-0000-0000-0001" + } + ], + "description": "This is the Snakemake workflow used to generate the merged RDF triples.", + "name": "Snakemake Workflow", + "output": { + "@id": "merged.ttl" + }, + "programmingLanguage": { + "@id": "https://w3id.org/workflowhub/workflow-ro-crate#Snakemake", + "name": "Snakemake" + }, + "url": "Snakefile" + }, + { + "@id": "https://orcid.org/0000-0000-0000-0000", + "@type": "Person", + "affiliation": "University of Flatland", + "name": "Alice Doe" + }, + { + "@id": "https://orcid.org/0000-0000-0000-0001", + "@type": "Person", + "affiliation": "University of Flatland", + "name": "Bob Doe" + } + ] +} \ No newline at end of file diff --git a/workflowhub_graph/cached_url_open.py b/workflowhub_graph/cached_url_open.py index a6cedd0..1071e50 100644 --- a/workflowhub_graph/cached_url_open.py +++ 
b/workflowhub_graph/cached_url_open.py @@ -1,3 +1,4 @@ +import json import os import re from unittest.mock import patch, MagicMock @@ -42,11 +43,6 @@ def patch_rdflib_urlopen( def cached_urlopen(request): url = request.get_full_url() - if not allowed_urls_re.match(url): - raise ValueError( - f"URL {url} not allowed to cache, allowed: {allowed_urls_pattern}" - ) - class Response(io.StringIO): content_type = "text/html" headers = {"Content-Type": "text/html"} @@ -57,6 +53,12 @@ def info(self): def geturl(self): return url + if not allowed_urls_re.match(url): + return Response(json.dumps({"@context": {}})) + # raise ValueError( + # f"URL {url} not allowed to cache, allowed: {allowed_urls_pattern}" + # ) + cached_filename = os.path.join(cache_base_dir, url_to_filename(url)) if not os.path.exists(cached_filename): diff --git a/workflowhub_graph/check_outputs.py b/workflowhub_graph/check_outputs.py index 245e0ce..6cf4248 100644 --- a/workflowhub_graph/check_outputs.py +++ b/workflowhub_graph/check_outputs.py @@ -1,6 +1,7 @@ import argparse import json import os +import re def parse_args() -> argparse.Namespace: @@ -15,7 +16,6 @@ def parse_args() -> argparse.Namespace: parser.add_argument( "--workflow-ids", type=str, - required=True, help="Range of workflow IDs to process (e.g., '1-10').", ) parser.add_argument( @@ -33,6 +33,24 @@ def parse_args() -> argparse.Namespace: return parser.parse_args() +def get_max_id_from_files(output_dir: str) -> int: + """ + If no workflow ID parameter is provided, get the maximum workflow ID from the files in the output directory. + + :param output_dir: The directory where output files are stored. + :return: The maximum workflow ID. 
+ """ + max_id = 0 + pattern = re.compile(r"^(\d+)_\d+_ro-crate-metadata\.json$") + for filename in os.listdir(output_dir): + match = pattern.match(filename) + if match: + wf_id = int(match.group(1)) + if wf_id > max_id: + max_id = wf_id + return max_id + + def generate_expected_files( output_dir: str, workflow_ids: range, versions: list[str] ) -> list[str]: @@ -64,10 +82,15 @@ def verify_created_files(expected_files: list[str]) -> list[str]: def main(): - # Parse workflow IDs and versions: args = parse_args() - min_id, max_id = map(int, args.workflow_ids.split("-")) - workflow_ids = range(min_id, max_id + 1) + + if args.workflow_ids: + min_id, max_id = map(int, args.workflow_ids.split("-")) + workflow_ids = range(min_id, max_id + 1) + else: + max_id = get_max_id_from_files(args.output_dir) + workflow_ids = range(1, max_id + 1) + versions = args.versions.split(",") # Generate expected file paths @@ -80,8 +103,7 @@ def main(): with open("created_files.json", "w") as f: json.dump(created_files, f) - print("Created files list written to created_files.json") - print(f"Created files: {created_files}") + print("\nFile names written to created_files.json") if __name__ == "__main__": diff --git a/workflowhub_graph/cli.py b/workflowhub_graph/cli.py new file mode 100644 index 0000000..f29894a --- /dev/null +++ b/workflowhub_graph/cli.py @@ -0,0 +1,17 @@ +import sys + + +def update_progress_bar(progress: int, total: int, bar_length: int = 50): + """ + Updates the progress bar. + + :param progress: Current progress. + :param total: The total value when the progress is complete. + :param bar_length: The length of the progress bar in characters. 
+ """ + fraction = progress / total + arrow = int(fraction * bar_length) * "=" + padding = int(bar_length - len(arrow)) * " " + percent = int(fraction * 100) + sys.stdout.write(f"\r[{arrow}{padding}] {percent}%") + sys.stdout.flush() diff --git a/workflowhub_graph/create_ro_crate.py b/workflowhub_graph/create_ro_crate.py new file mode 100644 index 0000000..c94ba59 --- /dev/null +++ b/workflowhub_graph/create_ro_crate.py @@ -0,0 +1,70 @@ +import sys + +from rocrate.model import Person +from rocrate.rocrate import ROCrate + + +def create_ro_crate(input_file: str, workflow_file: str, output_dir: str) -> None: + """ + Create an RO-Crate metadata file for the given input file and output directory. + :param input_file: The input file provided by the Snakemake workflow (e.g., merged data file). + :param workflow_file: Reference to the Snakemake workflow. + :param output_dir: The output directory to store the RO-Crate metadata file. + :return: + """ + crate = ROCrate() + + data_entity = crate.add_file( + input_file, + properties={ + "name": "Merged Data File", + "description": "This file contains merged RDF triples from multiple RO-Crates sourced from WorkflowHub.", + "encodingFormat": "text/turtle", + }, + ) + + workflow_entity = crate.add_file( + workflow_file, + properties={ + "name": "Snakemake Workflow", + "description": "This is the Snakemake workflow used to generate the merged RDF triples.", + "programmingLanguage": { + "@id": "https://w3id.org/workflowhub/workflow-ro-crate#Snakemake", + "name": "Snakemake", + }, + "url": workflow_file, + }, + ) + + # Linking the data file to the workflow: + workflow_entity["output"] = data_entity + + # Authors: + alice_id = "https://orcid.org/0000-0000-0000-0000" + bob_id = "https://orcid.org/0000-0000-0000-0001" + alice = crate.add( + Person( + crate, + alice_id, + properties={"name": "Alice Doe", "affiliation": "University of Flatland"}, + ) + ) + bob = crate.add( + Person( + crate, + bob_id, + properties={"name": "Bob Doe", 
"affiliation": "University of Flatland"}, + ) + ) + + data_entity["author"] = [alice, bob] + workflow_entity["author"] = [alice, bob] + + # Writing the RO-Crate metadata: + crate.write(output_dir) + + +if __name__ == "__main__": + create_ro_crate( + input_file=sys.argv[1], workflow_file=sys.argv[2], output_dir=sys.argv[3] + ) diff --git a/workflowhub_graph/merge.py b/workflowhub_graph/merge.py index 438c2ce..d63435a 100644 --- a/workflowhub_graph/merge.py +++ b/workflowhub_graph/merge.py @@ -8,12 +8,15 @@ from workflowhub_graph.absolutize import make_paths_absolute from workflowhub_graph.cached_url_open import patch_rdflib_urlopen +from workflowhub_graph.cli import update_progress_bar from workflowhub_graph.constants import BASE_URL # TODO: check if names like "#Husen" are correctly represented in the graph def merge_all_files( - pattern="data/*.json", base_url: str = BASE_URL, cache_kwargs: dict | None = None + pattern: str = "data/*.json", + base_url: str = BASE_URL, + cache_kwargs: dict | None = None, ) -> rdflib.Graph: """ Merges all JSON-LD files in the given pattern into a single RDF graph. @@ -32,7 +35,7 @@ def merge_all_files( for i, fn in enumerate(filenames): with open(fn, "r") as f: - print(f"Processing {fn}, {i}/{len(filenames)}") + update_progress_bar(i + 1, len(filenames)) basename = os.path.basename(fn) @@ -49,7 +52,6 @@ def merge_all_files( json_data = make_paths_absolute(json.load(f), base_url, w_id, w_version) - # TODO: Is there an issue here? 
Linting shows "Expected type 'str | bytes | None', got 'dict' instead" with patch_rdflib_urlopen(**cache_kwargs): graph.parse(data=json_data, format="json-ld") diff --git a/workflowhub_graph/source_crates.py b/workflowhub_graph/source_crates.py index 816567d..fa49cc1 100644 --- a/workflowhub_graph/source_crates.py +++ b/workflowhub_graph/source_crates.py @@ -1,12 +1,14 @@ import argparse import json import os +import sys import traceback import requests from io import BytesIO from zipfile import ZipFile +from workflowhub_graph.cli import update_progress_bar from workflowhub_graph.constants import ( BASE_URL_DEV, BASE_URL_PROD, @@ -168,9 +170,7 @@ def process_workflow_ids( for i_workflow, workflow in enumerate(workflows): workflow_id = workflow["id"] - print( - f"Processing workflow ID {workflow_id} ({i_workflow + 1}/{len(workflows)})..." - ) + update_progress_bar(i_workflow + 1, len(workflows)) workflow_json = get_dot_json_endpoint( base_url + DOT_JSON_ENDPOINT.format(w_id=workflow_id) @@ -217,7 +217,7 @@ def process_workflow_ids( ) with open(output_file_path, "wb") as output_file: output_file.write(json_content) - print(f"Content saved to {output_file_path}") + n_successful += 1 else: @@ -233,9 +233,17 @@ def main(): parser.add_argument( "--workflow-ids", type=str, - default="-", - help="Range of workflow IDs to process. Use a hyphen to specify a range, e.g. 1-10.", + help="Range of workflow IDs to process. Use a hyphen to specify a range, e.g. 1-10. 
" + "If not provided, all IDs will be processed.", + ) + parser.add_argument( + "--zip", + default=False, + action="store_true", + help="Download and extract JSON files from zip archive.", ) + + # TODO: Change this to `dev` to use the development WorkflowHub: parser.add_argument( "--prod", default=False, @@ -258,30 +266,30 @@ def main(): base_url = BASE_URL_DEV workflows_url = WORKFLOWS_URL_DEV - min_workflow_id, max_workflow_id = args.workflow_ids.split("-") - # Example usage: workflows_ids = download_workflow_ids(workflows_url) - if min_workflow_id != "": - workflows_ids["data"] = [ - workflow - for workflow in workflows_ids["data"] - if int(workflow["id"]) >= int(min_workflow_id) - ] - - if max_workflow_id != "": - workflows_ids["data"] = [ - workflow - for workflow in workflows_ids["data"] - if int(workflow["id"]) <= int(max_workflow_id) - ] - - # Check if root key 'data' exists + if args.workflow_ids: + min_workflow_id, max_workflow_id = args.workflow_ids.split("-") + if min_workflow_id != "": + workflows_ids["data"] = [ + workflow + for workflow in workflows_ids["data"] + if int(workflow["id"]) >= int(min_workflow_id) + ] + + if max_workflow_id != "": + workflows_ids["data"] = [ + workflow + for workflow in workflows_ids["data"] + if int(workflow["id"]) <= int(max_workflow_id) + ] + # If no workflow_ids argument is provided, we use all IDs + if workflows_ids and "data" in workflows_ids: process_workflow_ids( workflows_ids, - is_metadata_endpoint=True, + is_metadata_endpoint=not args.zip, base_url=base_url, all_versions=args.all_versions, )